content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Clean raw GWAS summary statistics and prepare the input files needed by
# several PRS methods: LDSC (h2 estimate), PRS-CS, GCTB and LDpred.

# Read the raw summary statistics and keep the first occurrence of each SNP id.
a = read.table("SNP_gwas_mc_merge_nogc.tbl.uniq", header=T, stringsAsFactors=F)
a = a[!duplicated(a$SNP),]
write.table(a[,c("SNP","A1","A2","b","p","N")], file="clean1.txt", row.names=F, col.names=T, quote=F, append=F)
# BUG FIX: every system() call spawns its own shell, so the original separate
# system("source activate ldsc") never affected the later LDSC calls.
# Activate the environment and run both LDSC steps inside a single bash
# invocation ("source" is a bash builtin and is not available in /bin/sh).
system(paste0(
  "bash -c '",
  "source activate ldsc && ",
  "python ~/software/ldsc/munge_sumstats.py --sumstats clean1.txt --out clean && ",
  "~/software/ldsc/ldsc.py --h2 clean.sumstats.gz",
  " --ref-ld-chr /ysm-gpfs/pi/zhao/gz222/prs_comparison/LDSC/eur_w_ld_chr/",
  " --out ./clean_h2",
  " --w-ld-chr /ysm-gpfs/pi/zhao/gz222/prs_comparison/LDSC/eur_w_ld_chr/",
  "'"))
# prepare for PRS-CS: restrict to the PRS-CS reference SNPs and derive a
# two-sided p-value from the munged Z score.
b = read.table("clean.sumstats.gz", header=T, stringsAsFactors=F)
snp = read.table("../../snp_list/1kgma5_prscs_inter.txt", stringsAsFactors=F)
b = b[b$SNP %in% snp$V1,]
b$P = 2*pnorm(-abs(b$Z))
# Rename the munged columns (the Z column becomes BETA for PRS-CS input).
colnames(b) = c("SNP","A1","A2","BETA","N","P")
write.table(b[,c(1,2,3,4,6)], file="PRS_cs.txt", append=F, sep="\t", quote=F, row.names=F, col.names=T)
# prepare for gctb: original summary rows restricted to the SNPs kept above.
d = a[a[, "SNP"] %in% b[,1],]
write.table(d, file="gctb.ma", row.names=F, col.names=T, quote=F, sep="\t", append=F)
# prepare for LDpred: attach chromosome/position from the 1000G .bim file
# and re-attach the original effect size (column "b").
bim = read.table("../../ref/1000G/eur_SNPmaf5_nomhc.bim", header=F, stringsAsFactors=F)
b = dplyr::left_join(b, bim, by=c("SNP"="V2"))
# Columns 7 and 9 are the joined .bim fields (V1 = chromosome, V4 = position).
tmp = b[,c(7,9,1:4,6)]
colnames(tmp)[1:2] = c("CHR", "POS")
a = a[,c("SNP","b")]
tmp = dplyr::left_join(tmp, a, by=c("SNP"))
# Sanity check: how many SNPs failed to pick up an effect size?
table(is.na(tmp[,"b"]))
write.table(tmp[,c("CHR","POS","SNP","A1","A2","b","P")], file="ldpred.txt", append=F, sep="\t", quote=F, row.names=F, col.names=T)
# clean1.txt is a plain file, so -f is sufficient (was: rm -rf).
system("rm -f clean1.txt")
print(median(b$N))
| /UKB_real/BMI/summ_stats/clean1.R | no_license | eldronzhou/SDPR_paper | R | false | false | 1,549 | r | a = read.table("SNP_gwas_mc_merge_nogc.tbl.uniq", header=T, stringsAsFactors=F)
a = a[!duplicated(a$SNP),]
write.table(a[,c("SNP","A1","A2","b","p","N")], file="clean1.txt", row.names=F, col.names=T, quote=F, append=F)
system("source activate ldsc")
system("python ~/software/ldsc/munge_sumstats.py --sumstats clean1.txt --out clean")
system("~/software/ldsc/ldsc.py --h2 clean.sumstats.gz --ref-ld-chr /ysm-gpfs/pi/zhao/gz222/prs_comparison/LDSC/eur_w_ld_chr/ --out ./clean_h2 --w-ld-chr /ysm-gpfs/pi/zhao/gz222/prs_comparison/LDSC/eur_w_ld_chr/")
# prepare for PRS-CS
b = read.table("clean.sumstats.gz", header=T, stringsAsFactors=F)
snp = read.table("../../snp_list/1kgma5_prscs_inter.txt", stringsAsFactors=F)
b = b[b$SNP %in% snp$V1,]
b$P = 2*pnorm(-abs(b$Z))
colnames(b) = c("SNP","A1","A2","BETA","N","P")
write.table(b[,c(1,2,3,4,6)], file="PRS_cs.txt", append=F, sep="\t", quote=F, row.names=F, col.names=T)
# prepare for gctb
d = a[a[, "SNP"] %in% b[,1],]
write.table(d, file="gctb.ma", row.names=F, col.names=T, quote=F, sep="\t", append=F)
# prepare for LDpred
bim = read.table("../../ref/1000G/eur_SNPmaf5_nomhc.bim", header=F, stringsAsFactors=F)
b = dplyr::left_join(b, bim, by=c("SNP"="V2"))
tmp = b[,c(7,9,1:4,6)]
colnames(tmp)[1:2] = c("CHR", "POS")
a = a[,c("SNP","b")]
tmp = dplyr::left_join(tmp, a, by=c("SNP"))
table(is.na(tmp[,"b"]))
write.table(tmp[,c("CHR","POS","SNP","A1","A2","b","P")], file="ldpred.txt", append=F, sep="\t", quote=F, row.names=F, col.names=T)
system("rm -rf clean1.txt")
print(median(b$N))
|
### With familiarization_mumble
# Analysis of the "mumble" familiarization condition: load the raw Turk data,
# build indicator columns for choices and demographics, then run ANOVAs.
familiarization_mumble = read.csv("/Users/andesgomez/Documents/Stanford/Autumn2013-Masters/PayedWork/andres_data/scale_6stimuli_yes_fam_mumblemumble_26_february_FMMM.csv",header=TRUE, sep="\t", row.names=NULL, stringsAsFactors = FALSE)

# Indicator columns for each possible answer. The raw values carry embedded
# double quotes (e.g. the literal string "target" including quotes), hence
# the escaped comparison strings.
familiarization_mumble$target = familiarization_mumble$Answer.choice == "\"target\""
familiarization_mumble$logical = familiarization_mumble$Answer.choice == "\"logical\""
familiarization_mumble$foil = familiarization_mumble$Answer.choice == "\"foil\""
#familiarization_mumble = subset(familiarization_mumble, familiarization_mumble$Answer.name_check_correct == "\"TRUE\"")

# Mean choice rate per familiarization condition.
fc_mumble_table <- aggregate(cbind(target,
                                   logical,
                                   foil) ~
                               Answer.familiarization_cond, data=familiarization_mumble, mean)
mean_target <- mean(familiarization_mumble$target)

# Wrap a value in literal double quotes to match the raw data encoding.
quoted <- function(x) paste0("\"", x, "\"")

# Demographic indicators. The original code used long chains of `==` joined
# with `|`; `%in%` against the same value sets is equivalent (and maps a
# missing value to FALSE rather than NA).
familiarization_mumble$males = familiarization_mumble$Answer.gender %in% quoted(c("m", "male", "M", "Male", "MALE"))
familiarization_mumble$females = familiarization_mumble$Answer.gender %in% quoted(c("f", "female", "F", "Female", "FEMALE"))
familiarization_mumble$twenties = familiarization_mumble$Answer.age %in% quoted(20:29)
familiarization_mumble$thirties = familiarization_mumble$Answer.age %in% quoted(30:39)
familiarization_mumble$fourties = familiarization_mumble$Answer.age %in% quoted(40:49)
familiarization_mumble$fifties = familiarization_mumble$Answer.age %in% quoted(50:59)

# Analysis of variance and Regression
single_group_variance = aov(logical ~ as.factor(Answer.target_frequency) + as.factor(Answer.item), data = familiarization_mumble)
summary(single_group_variance)
familiarization_mumble_variance = aov(target ~ as.factor(Answer.familiarization_cond) + as.factor(Answer.item) + as.factor(Answer.target_position) + as.factor(Answer.logical_position), data = familiarization_mumble)
summary(familiarization_mumble_variance)
familiarization_mumble_control = aov(target ~ as.factor(Answer.familiarization_cond) + as.factor(Answer.item) + as.factor(females) + as.factor(males) + twenties + fifties + fourties, data = familiarization_mumble)
summary(familiarization_mumble_control)
# BUG FIX: these bare names are defined nowhere in this script, so evaluating
# them aborts the run with "object not found"; kept as a reminder of the
# intended manipulation checks but no longer evaluated.
#manip_check_dist
#manip_check_target
name_check_correct | /andres_analysis_incomplete/mumblemumble_familiarization.R | no_license | algekalipso1/pragmods | R | false | false | 4,344 | r | ### With familiarization_mumble
familiarization_mumble = read.csv("/Users/andesgomez/Documents/Stanford/Autumn2013-Masters/PayedWork/andres_data/scale_6stimuli_yes_fam_mumblemumble_26_february_FMMM.csv",header=TRUE, sep="\t", row.names=NULL, stringsAsFactors = FALSE)
familiarization_mumble$target = familiarization_mumble$Answer.choice == "\"target\""
familiarization_mumble$logical = familiarization_mumble$Answer.choice == "\"logical\""
familiarization_mumble$foil = familiarization_mumble$Answer.choice == "\"foil\""
#familiarization_mumble = subset(familiarization_mumble, familiarization_mumble$Answer.name_check_correct == "\"TRUE\"")
fc_mumble_table <- aggregate(cbind(target,
logical,
foil) ~
Answer.familiarization_cond, data=familiarization_mumble, mean)
mean_target <- mean(familiarization_mumble$target)
familiarization_mumble$males = familiarization_mumble$Answer.gender == "\"m\"" | familiarization_mumble$Answer.gender == "\"male\"" | familiarization_mumble$Answer.gender == "\"M\"" | familiarization_mumble$Answer.gender == "\"Male\"" | familiarization_mumble$Answer.gender == "\"MALE\""
familiarization_mumble$females = familiarization_mumble$Answer.gender == "\"f\"" | familiarization_mumble$Answer.gender == "\"female\"" | familiarization_mumble$Answer.gender == "\"F\"" | familiarization_mumble$Answer.gender == "\"Female\"" | familiarization_mumble$Answer.gender == "\"FEMALE\""
familiarization_mumble$twenties = familiarization_mumble$Answer.age == "\"20\"" | familiarization_mumble$Answer.age == "\"21\"" | familiarization_mumble$Answer.age == "\"22\"" | familiarization_mumble$Answer.age == "\"23\"" | familiarization_mumble$Answer.age == "\"24\"" | familiarization_mumble$Answer.age == "\"25\"" | familiarization_mumble$Answer.age == "\"26\"" | familiarization_mumble$Answer.age == "\"27\"" | familiarization_mumble$Answer.age == "\"28\"" | familiarization_mumble$Answer.age == "\"29\""
familiarization_mumble$thirties = familiarization_mumble$Answer.age == "\"30\"" | familiarization_mumble$Answer.age == "\"31\"" | familiarization_mumble$Answer.age == "\"32\"" | familiarization_mumble$Answer.age == "\"33\"" | familiarization_mumble$Answer.age == "\"34\"" | familiarization_mumble$Answer.age == "\"35\"" | familiarization_mumble$Answer.age == "\"36\"" | familiarization_mumble$Answer.age == "\"37\"" | familiarization_mumble$Answer.age == "\"38\"" | familiarization_mumble$Answer.age == "\"39\""
familiarization_mumble$fourties = familiarization_mumble$Answer.age == "\"40\"" | familiarization_mumble$Answer.age == "\"41\"" | familiarization_mumble$Answer.age == "\"42\"" | familiarization_mumble$Answer.age == "\"43\"" | familiarization_mumble$Answer.age == "\"44\"" | familiarization_mumble$Answer.age == "\"45\"" | familiarization_mumble$Answer.age == "\"46\"" | familiarization_mumble$Answer.age == "\"47\"" | familiarization_mumble$Answer.age == "\"48\"" | familiarization_mumble$Answer.age == "\"49\""
familiarization_mumble$fifties = familiarization_mumble$Answer.age == "\"50\"" | familiarization_mumble$Answer.age == "\"51\"" | familiarization_mumble$Answer.age == "\"52\"" | familiarization_mumble$Answer.age == "\"53\"" | familiarization_mumble$Answer.age == "\"54\"" | familiarization_mumble$Answer.age == "\"55\"" | familiarization_mumble$Answer.age == "\"56\"" | familiarization_mumble$Answer.age == "\"57\"" | familiarization_mumble$Answer.age == "\"58\"" | familiarization_mumble$Answer.age == "\"59\""
# Analysis of variance and Regression
single_group_variance = aov(logical ~ as.factor(Answer.target_frequency) + as.factor(Answer.item), data = familiarization_mumble)
summary(single_group_variance)
familiarization_mumble_variance = aov(target ~ as.factor(Answer.familiarization_cond) + as.factor(Answer.item) + as.factor(Answer.target_position) + as.factor(Answer.logical_position), data = familiarization_mumble)
summary(familiarization_mumble_variance)
familiarization_mumble_control = aov(target ~ as.factor(Answer.familiarization_cond) + as.factor(Answer.item) + as.factor(females) + as.factor(males) + twenties + fifties + fourties, data = familiarization_mumble)
summary(familiarization_mumble_control)
manip_check_dist
manip_check_target
name_check_correct |
# This .R code file consists of:
# Algorithm 3: Separable Coordinate Descent Algorithm
# for solving quadratic form objective function with L1 penalty(LASSO)
# Authors: STA 243 Final Project Group Members:
# Han Chen, Ninghui Li, Chenghan Sun
##### Separable Coordinate Descent Method#####
# Solve the L1-penalised quadratic (LASSO) problem
#   min_x f(x) = 0.5 * ||A x - b||^2 + lambda * ||x||_1
# with the separable regularised coordinate descent method of
# Richtarik & Takac, "Iteration complexity of a randomized block-coordinate
# descent method for minimizing a composite function".
#
# Arguments:
#   A       input matrix, b response vector.
#   xs      the known optimum; used only for the stopping rule and the
#           error/cr traces.
#   lambda  L1 tuning parameter.
#   iter_k  index of the first coordinate to update (coordinates are then
#           cycled 1..ncol(A)).
#   xk      optional starting point; defaults to the zero vector.
#   cr      unused; kept so the signature stays backward compatible.
#   alpha   step size, usually 1 / L_max where L_max is the maximum of the
#           component Lipschitz constants.
#   tol     stop once f(xk) - f(xs) < tol.
#   maxIter stop (with a message) after this many iterations.
#
# Returns a list with the iteration count `k`, the relative-distance trace
# `cr`, the absolute-distance trace `error`, the objective-gap trace `fx`,
# and (new, backward-compatible addition) the final iterate `xk`.
SpCD <- function(A, b, xs, lambda = 1, iter_k = 1, xk = NULL, cr = NULL,
                 alpha = 0.001, tol = 1e-2, maxIter = 1e7){
  # BUG FIX: the original body relied on an undefined global `n` and on the
  # pracma helpers zeros()/mod() without loading pracma; use the problem
  # dimension and base R equivalents instead.
  n <- ncol(A)
  k <- 1
  cr <- c(1)
  if (is.null(xk)) {
    xk <- numeric(n)                 # was: zeros(n, 1) from pracma
  }
  # Euclidean norm of a plain vector (base::norm() expects a matrix).
  l2 <- function(v) sqrt(sum(v^2))
  # Objective value f(xk) for response y.
  quadratic_obj <- function(xk, y) {
    0.5 * l2(y - A %*% xk)^2 + lambda * sum(abs(xk))
  }
  fstar <- quadratic_obj(xs, b)
  fx <- c(quadratic_obj(xk, b) - fstar)
  error <- c()
  A1 <- t(A)                         # hoisted out of the loop (invariant)
  while (fx[k] >= tol) {
    # Gradient of the smooth part with respect to coordinate iter_k.
    u <- A %*% xk - b
    gd_k <- A1[iter_k, ] %*% u
    # Exact coordinate minimisation via the soft-threshold operator.
    a_k <- 1 / alpha
    c_k <- 1 / alpha * xk[iter_k] - gd_k
    if (c_k < -1 * lambda) {
      z_k <- (c_k + lambda) / a_k
    } else if (c_k > lambda) {
      z_k <- (c_k - lambda) / a_k
    } else {
      z_k <- 0
    }
    xk[iter_k] <- z_k
    # Track progress relative to the known optimum xs.
    cr[k + 1] <- l2(xk - xs) / l2(xs)
    fx <- c(fx, quadratic_obj(xk, b) - fstar)
    error <- c(error, l2(xk - xs))
    k <- k + 1
    # Cycle through the coordinates (was: mod(iter_k, n) + 1 from pracma).
    iter_k <- iter_k %% n + 1
    if (k > maxIter) {
      print(paste("Algorithm unfinished by reaching the maximum iterations."))
      break
    }
  }
  # BUG FIX: also return the solution itself; the original only returned
  # the convergence traces, so callers could never recover the minimiser.
  return(list(k = k, cr = cr, error = error, fx = fx, xk = xk))
}
| /codebase/Separable_RCD.R | no_license | hango1996/CoorDes-Algs | R | false | false | 2,828 | r | # This .R code file consists of:
# Algorithm 3: Separable Coordinate Descent Algorithm
# for solving quadratic form objective function with L1 penalty(LASSO)
# Arthurs: STA 243 Final Project Group Members:
# Han Chen, Ninghui Li, Chenghan Sun
##### Separable Coordinate Descent Method#####
SpCD <- function(A, b, xs, lambda = 1, iter_k = 1, xk = NULL, cr = NULL,
alpha = 0.001, tol = 1e-2, maxIter = 1e7){
### Solve quadratic form functions with L1 penalty min_x f(x) = (Ax - b) ^ 2 + lambda|x| ###
### Algorithm: Separable Regularised version Coordinate Descent "Richtarik, P., Takac, M.: Iteration complexity of a randomized block-coordinate descent
### methods for minimizing a composite function"
### A the input matrix, b vector, xs the true parameters
### lambda the tuning parameter
### alpha : usually set as 1 / L_max, where L_max is the maximum of component Lipschitz constants.
### xk initial value of the optimization problem
### stopping criterion f(xk) - fstar < tol, where fstar = f(xs), we stop the function if the iteration exceed maxIter.
# set k as the counter
# CGD method terminates when norm(xk-xs)/norm(xs) smaller than given epsi = 10^-3
# denote norm(xk-xs)/norm(xs) = cr (criterion)
k = 1
cr = c(1)
# initialize x
if (is.null(xk)){
xk = zeros(n, 1)
}
#gradient
gd_k = 0
# Define the objective function
quadratic_obj = function(xk, y){
fun_val = 0.5*norm((y - A%*%xk), "2")^2 + lambda * sum(abs(xk))
return(fun_val)
}
fstar = quadratic_obj(xs, b)
fx = c(quadratic_obj(xk, b) - fstar)
error = c()
while (fx[k] >= tol) {
# update the gradient
u = A%*%xk - b
A1 = t(A)
gd_k = A1[iter_k, ]%*%u
# update xk
## Here we use the soft_threshold to solve the suboptimization problem
a_k = 1 / alpha
c_k = 1 / alpha * xk[iter_k] - gd_k
if(c_k < -1 * lambda){
z_k = (c_k + lambda) / a_k
}else if (c_k > lambda){
z_k = (c_k - lambda) / a_k
}else{
z_k = 0
}
xk[iter_k] = z_k
#print(z_k)
# update stopping criterion
cr[k+1] = norm(xk-xs, "2") / norm(xs, "2")
#if (mod(k, 1000) == 0) {
#print(c(paste("step", k),paste("error", cr[k+1]) ))
#print(c(paste("step", k), paste("error", fx[k] - fstar) ))
#print(gd[iter_k])
#print(xk)
#print(z_k)
#print(xk)
#}
# update error
fx = c(fx, quadratic_obj(xk, b) - fstar)
error = c(error, norm((xk - xs), "2"))
# update k
k = k+1
# update iter_k
iter_k = mod(iter_k, n) + 1
if (k > maxIter) {
print(paste("Algorithm unfinished by reaching the maximum iterations."))
break
}
}
return(list(k = k, cr = cr, error = error, fx = fx ))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ChIPseqSpikeFree.R
\name{ChIPseqSpikeFree}
\alias{ChIPseqSpikeFree}
\title{wrapper function - perform ChIP-seq spike-free normalization in one step.}
\usage{
ChIPseqSpikeFree(bamFiles, chromFile = "hg19",
metaFile = "sample_meta.txt", prefix = "test")
}
\arguments{
\item{bamFiles}{a vector of bam filenames.}
\item{chromFile}{chrom.size file. Given "hg19","mm10","mm9" or "hg38", will load chrom.size file from package folder.}
\item{metaFile}{a filename of metadata file. the file must have three columns: ID (bam filename without full path), ANTIBODY and GROUP}
\item{prefix}{prefix of output filename.}
}
\value{
A data.frame of the updated metaFile with scaling factor
}
\description{
This function wraps all steps.
If you run ChIPseqSpikeFree() separately for two batches, the scaling factors will not be comparable between the two batches.
The correct way is to combine bamFiles parameter and create a new metadata file to include all bam files. Then re-run ChIPseqSpikeFree().
}
\examples{
##1 first You need to generate a sample_meta.txt (tab-delimited txt file).
#metaFile <- "your/path/sample_meta.txt"
#meta <- ReadMeta(metaFile)
#head(meta)
#ID ANTIBODY GROUP
#ChIPseq1.bam H3K27me3 WT
#ChIPseq2.bam H3K27me3 K27M
##2. bam files
#bams <- c("ChIPseq1.bam","ChIPseq2.bam")
#prefix <- "test"
##3. run ChIPseqSpikeFree pipeline
#ChIPseqSpikeFree(bamFiles=bams, chromFile="mm9",metaFile=metaFile,prefix="test")
}
| /man/ChIPseqSpikeFree.Rd | permissive | moodswh/ChIPseqSpikeInFree | R | false | true | 1,509 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ChIPseqSpikeFree.R
\name{ChIPseqSpikeFree}
\alias{ChIPseqSpikeFree}
\title{wrapper function - perform ChIP-seq spike-free normalization in one step.}
\usage{
ChIPseqSpikeFree(bamFiles, chromFile = "hg19",
metaFile = "sample_meta.txt", prefix = "test")
}
\arguments{
\item{bamFiles}{a vector of bam filenames.}
\item{chromFile}{chrom.size file. Given "hg19","mm10","mm9" or "hg38", will load chrom.size file from package folder.}
\item{metaFile}{a filename of metadata file. the file must have three columns: ID (bam filename without full path), ANTIBODY and GROUP}
\item{prefix}{prefix of output filename.}
}
\value{
A data.frame of the updated metaFile with scaling factor
}
\description{
This function wraps all steps.
If you run ChIPseqSpikeFree() seperately for two batches, the scaling factors will be not comparable between two batches.
The correct way is to combine bamFiles parameter and create a new metadata file to include all bam files. Then re-run ChIPseqSpikeFree().
}
\examples{
##1 first You need to generate a sample_meta.txt (tab-delimited txt file).
#metaFile <- "your/path/sample_meta.txt"
#meta <- ReadMeta(metaFile)
#head(meta)
#ID ANTIBODY GROUP
#ChIPseq1.bam H3K27me3 WT
#ChIPseq2.bam H3K27me3 K27M
##2. bam files
#bams <- c("ChIPseq1.bam","ChIPseq2.bam")
#prefix <- "test"
##3. run ChIPseqSpikeFree pipeline
#ChIPseqSpikeFree(bamFiles=bams, chromFile="mm9",metaFile=metaFile,prefix="test")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvpa_iterate.R
\name{mvpa_iterate}
\alias{mvpa_iterate}
\title{mvpa_iterate}
\usage{
mvpa_iterate(mod_spec, vox_list, ids = 1:length(vox_list),
compute_performance = TRUE, return_fits = FALSE)
}
\arguments{
\item{mod_spec}{a class of type \code{mvpa_model}}
\item{vox_list}{a \code{list} of voxel indices/coordinates}
\item{ids}{a \code{vector} of ids for each voxel set}
\item{compute_performance}{compute and store performance measures for each voxel set}
\item{return_fits}{return the model fit for each voxel set?}
}
\description{
Fit a classification/regression model for each voxel set in a list
}
| /man/mvpa_iterate.Rd | no_license | sungoku/rMVPA | R | false | true | 688 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvpa_iterate.R
\name{mvpa_iterate}
\alias{mvpa_iterate}
\title{mvpa_iterate}
\usage{
mvpa_iterate(mod_spec, vox_list, ids = 1:length(vox_iter),
compute_performance = TRUE, return_fits = FALSE)
}
\arguments{
\item{mod_spec}{a class of type \code{mvpa_model}}
\item{vox_list}{a \code{list} of voxel indices/coordinates}
\item{ids}{a \code{vector} of ids for each voxel set}
\item{compute_performance}{compute and store performance measures for each voxel set}
\item{return_fits}{return the model fit for each voxel set?}
}
\description{
Fit a classification/regression model for each voxel set in a list
}
|
/code/new_photo.R | no_license | chiyuhao/Circadian-algorithms-and-genes | R | false | false | 170,959 | r | ||
\name{isd}
\alias{isd}
\title{Classify changes over time}
\description{Classify changes over time using the ISD-system introduced by Galtung (1969).}
\usage{isd(V, tolerance=0.1)}
\arguments{
\item{V}{A vector with length 3}
\item{tolerance}{Specify how large a difference must be to count as a real change (optional). Differences smaller than or equal to the tolerance are ignored.}
}
\details{This function implements the ISD-system introduced by Galtung (1969). The input is a vector of length 3. Each value stands for a different point in time. The ISD-system examines the two transition points, and classifies the changes over time.}
\value{The function returns a list. The \code{type} returns a number corresponding to the pattern described by Galtung. The \code{description} returns a string where the two transitions are spelled out (increase, flat, decrease).}
\references{Galtung, J. (1969) Theory and Methods of Social Research. Oslo: Universitetsforlaget.}
\author{Didier Ruedin}
\seealso{\code{\link{ajus}}}
| /man/isd.Rd | no_license | cran/agrmt | R | false | false | 1,025 | rd | \name{isd}
\alias{isd}
\title{Classify changes over time}
\description{Classify changes over time using the ISD-system introduced by Galtung (1969).}
\usage{isd(V, tolerance=0.1)}
\arguments{
\item{V}{A vector with length 3}
\item{tolerance}{Specify how similar values have to be to be treated as different (optional). Differences smaller than or equal to the tolerance are ignored.}
}
\details{This function implements the ISD-system introduced by Galtung (1969). The input is a vector of length 3. Each value stands for a different point in time. The ISD-system examines the two transition points, and classifies the changes over time.}
\value{The function returns a list. The \code{type} returns a number corresponding to the pattern described by Galtung. The \code{description} returns a string where the two transitions are spelled out (increase, flat, decrease).}
\references{Galtung, J. (1969) Theory and Methods of Social Research. Oslo: Universitetsforlaget.}
\author{Didier Ruedin}
\seealso{\code{\link{ajus}}}
|
library(DT)
library(shiny)
library(igraph)
library(plotly)
library(rstackdeque)
source("external/graph_utils.R", local = TRUE)
source("external/makenetjson.R", local = TRUE)
source("external/protein_label_dictionary.R",local = TRUE)
#initial_data <- "./www/data/ctd.csv"
#graph <- build_initial_graph(initial_data)
#communities <- get_communities(graph)
#htmlloaded = FALSE
#s1 <- rstack()
# Stacks (rstackdeque) holding the community drill-down history:
# s2 tracks community names, s3 tracks community ids.
s2 <-rstack()
s3 <- rstack()
# Lazily-built protein-label dictionary, cached across sessions;
# populated inside output$plotgraph1 (from mp.rds when available).
mp <<- NULL
sortedlabel<-NULL
# Accumulates Protein/Pathway rows fetched by the sourced graph-DB helpers.
protienDSpathway<<-data.frame()
# Shiny server function for the community-graph viewer.
# Renders a sigma.js graph (via ./www/graph.html), reacts to click events
# sent back from JavaScript (input$comm_id), and shows summary tables and
# distribution plots for the currently displayed community.
# NOTE(review): several values (lbllist, mp, protienDSpathway) are shared
# with the sourced external/*.R helpers through the global environment
# via `<<-` — confirm before refactoring.
function(input, output, session){
# Per-session reactive state; currentCommId == -1 means "top level".
global <- reactiveValues()
global$is_comm_graph = TRUE
global$currentCommId <- -1
#global$viz_stack <- insert_top(s1, list(graph, communities))
global$name <- insert_top(s2, "")
global$commID <- insert_top(s3, -1)
# reset button: reload the top-level graph and tell the JS side to clear.
observeEvent(input$reset_button, {
getcommunity_from_graphdb(-1)
# NOTE(review): wrapping sendCustomMessage in observe() creates a new
# observer on every event — presumably intentional here; verify.
observe({
session$sendCustomMessage(type = "updategraph",message="clear")
})
#global$viz_stack <- rstack()
#global$viz_stack <- insert_top(global$viz_stack, list(graph, communities))
#global$name <- insert_top(s2, "")
})
# Debug echo of the ui "variable" control.
observeEvent(input$variable, {
print(input$variable)
})
#Search button: look up one or more comma-separated entities, collect the
# parent-community labels the helpers append to the global lbllist, and
# send the combined id string to the JS side for highlighting.
# NOTE(review): "searchentitiy" is misspelled but must match the input id
# declared in ui.R — do not change it here alone.
observeEvent(input$search_button,{
searchelm <- input$searchentitiy
lbllist <<- c()
withProgress(message = "Searching ...",value = 0,{
if(grepl(",",searchelm) == FALSE){
getallparentforentity(searchelm)
}
else
{
res<-unlist(strsplit(searchelm,","))
lapply(res,getallparentforentity)
}
})
lbllist <- unique(lbllist)
memcommunity<-paste(lbllist,collapse=",")
memcommunity<-paste(searchelm,memcommunity,sep=",")
#memcommunity=input$searchentitiy
observe({
session$sendCustomMessage(type = "commmemmsg" ,
message = list(id=memcommunity))
})
})
# table click: highlight the node selected in the degree table.
observe({
row <- input$degree_table_rows_selected
if (length(row)){
print(row)
session$sendCustomMessage(type = "commmemmsg" ,
message = list(id=tail(row, n=1)))
}
})
# disease pathway table click: highlight every protein belonging to the
# selected pathway (looked up in the global protienDSpathway frame).
observe({
row <- input$plotgraph1_rows_selected
last_selected_row = tail(row, n=1)
print(last_selected_row)
if( (!is.null(row)) && (length(row)>=1)){
proteins<-protienDSpathway[protienDSpathway$Pathway==unlist(last_selected_row),]$Protein
session$sendCustomMessage(type = "commmemmsg" ,
message = list(id=paste(proteins,collapse=",")))
}
})
# back button: pop one level off the drill-down history.
# NOTE(review): global$viz_stack is only ever assigned in commented-out
# code above, so length() here sees NULL — confirm this path still works.
observeEvent(input$back_button, {
size <- length(global$viz_stack)
if (size > 1){
global$viz_stack <- without_top(global$viz_stack)
global$name <- without_top(global$name)
}
})
# on-click from sigma.js: drill into the clicked community, regenerate
# ./www/data/current_graph.json, and refresh stats and the JS canvas.
observeEvent(input$comm_id, {
print(input$comm_id)
global$currentCommId<-input$comm_id
getcommunity_from_graphdb(input$comm_id)
update_stats()
observe({
session$sendCustomMessage(type = "updategraph",message="xyz")
})
})
# render with sigma the current graph (in json): initial top-level load.
output$graph_with_sigma <- renderUI({
getcommunity_from_graphdb(-1)
#makenetjson(data[[1]], "./www/data/current_graph.json", data[[2]])
update_stats()
observe({
session$sendCustomMessage(type = "updategraph",message="")
})
return(includeHTML("./www/graph.html"))
})
# update the summary stats: re-read the one-line JSON graph dump written by
# the graph-DB helpers, rebuild the igraph object, and store a per-node
# data frame (degree, PageRank) in global$nodes (NULL when < 2 vertices).
update_stats <- function(){
con <- file("./www/data/current_graph.json")
open(con)
line <- readLines(con, n = 1, warn = FALSE)
close(con)
x<-fromJSON(line)
edges<-x$edges[c('source','target')]
vertex_data<-x$nodes[c('id','name','type')]
if(nrow(vertex_data) > 1){
graph <- graph_from_data_frame(edges, directed = FALSE, vertices =
vertex_data)
nodes <- get.data.frame(graph, what="vertices")
nodes$degree <- degree(graph)
nodes$pagerank <- page_rank(graph)$vector
colnames(nodes) <- c("Name", "Type", "Degree", "PageRank")
global$nodes <- nodes
}
else
{
global$nodes <- NULL
}
}
# Plot the degree distribution of the current graph.
output$degree_distribution <- renderPlotly({
if (!is.null(global$nodes)){
plot_ly(global$nodes, x = Degree, type="histogram", color="#FF8800")
}
})
# Plot the pagerank distribution of the current graph.
output$pagerank_distribution <- renderPlotly({
if (!is.null(global$nodes)){
plot_ly(global$nodes, x = PageRank, type="histogram", color="#FF8800")
}
})
# Generate a table of node degrees, sorted by degree descending.
output$degree_table <- DT::renderDataTable({
if (!is.null(global$nodes)){
table <- global$nodes[c("Name", "Degree", "PageRank")]
}
},
options = list(order = list(list(1, 'desc'))),
rownames = FALSE,
selection = "single"
)
# Generate the current graph name (as a list of community labels),
# oldest level first.
output$name <- renderText({
name <- as.list(rev(global$name))
name <- paste(name, collapse = "/", sep="/")
#return(paste(c("Current Community", name)))
})
# Pathway table for the current community: builds (or loads from mp.rds)
# the protein-label dictionary, fetches the community's raw entities into
# the global protienDSpathway frame, and cross-tabulates pathway frequency.
output$plotgraph1 <- DT::renderDataTable({
protienDSpathway<<-data.frame()
sortedlabel<-NULL
lf<-NULL
lbls<-NULL
# This takes forever. If we can load a previously built object do it; otherwise don't hold your breath
withProgress(message = "Loading ...",value = 0,{
if(is.null(mp)){
filename = 'mp.rds'
if (file.exists(filename)){
mp <<- NULL
mp <<- readRDS(filename)
} else {
mp <<- getproteinlabeldict()
saveRDS(mp, file=filename)
}
}
})
# No community selected yet: render nothing.
if(global$currentCommId==-1)
return (NULL)
finallist<-c()
lbllist <<- c()
withProgress(message = "Loading ...",value = 0,{
getrawentititesfromComm(global$currentCommId)
})
# Fallback single-row table when no pathway rows came back.
table <- data.frame(Protein="No pathway data available")
if (nrow(protienDSpathway)>1){
labelfreq <- table(protienDSpathway)
if (ncol(labelfreq)>1){
# Order pathways by total frequency across proteins, descending.
z<-apply(labelfreq,1,sum)
sortedlabel<-labelfreq[order(as.numeric(z), decreasing=TRUE),]
table<-as.data.frame.matrix(sortedlabel)
} else {
table <- as.data.frame.matrix(labelfreq)
}
# Truncate long pathway names so the table stays readable.
row.names(table) <- strtrim(row.names(table), 50)
}
table
},
rownames = TRUE,
selection = "single")
}
| /server.R | no_license | vinodma/Viewerdemo | R | false | false | 6,514 | r | library(DT)
library(shiny)
library(igraph)
library(plotly)
library(rstackdeque)
source("external/graph_utils.R", local = TRUE)
source("external/makenetjson.R", local = TRUE)
source("external/protein_label_dictionary.R",local = TRUE)
#initial_data <- "./www/data/ctd.csv"
#graph <- build_initial_graph(initial_data)
#communities <- get_communities(graph)
#htmlloaded = FALSE
#s1 <- rstack()
s2 <-rstack()
s3 <- rstack()
mp <<- NULL
sortedlabel<-NULL
protienDSpathway<<-data.frame()
function(input, output, session){
global <- reactiveValues()
global$is_comm_graph = TRUE
global$currentCommId <- -1
#global$viz_stack <- insert_top(s1, list(graph, communities))
global$name <- insert_top(s2, "")
global$commID <- insert_top(s3, -1)
# reset button
observeEvent(input$reset_button, {
getcommunity_from_graphdb(-1)
observe({
session$sendCustomMessage(type = "updategraph",message="clear")
})
#global$viz_stack <- rstack()
#global$viz_stack <- insert_top(global$viz_stack, list(graph, communities))
#global$name <- insert_top(s2, "")
})
observeEvent(input$variable, {
print(input$variable)
})
#Search button
observeEvent(input$search_button,{
searchelm <- input$searchentitiy
lbllist <<- c()
withProgress(message = "Searching ...",value = 0,{
if(grepl(",",searchelm) == FALSE){
getallparentforentity(searchelm)
}
else
{
res<-unlist(strsplit(searchelm,","))
lapply(res,getallparentforentity)
}
})
lbllist <- unique(lbllist)
memcommunity<-paste(lbllist,collapse=",")
memcommunity<-paste(searchelm,memcommunity,sep=",")
#memcommunity=input$searchentitiy
observe({
session$sendCustomMessage(type = "commmemmsg" ,
message = list(id=memcommunity))
})
})
# table click
observe({
row <- input$degree_table_rows_selected
if (length(row)){
print(row)
session$sendCustomMessage(type = "commmemmsg" ,
message = list(id=tail(row, n=1)))
}
})
# disease pathway table click
observe({
row <- input$plotgraph1_rows_selected
last_selected_row = tail(row, n=1)
print(last_selected_row)
if( (!is.null(row)) && (length(row)>=1)){
proteins<-protienDSpathway[protienDSpathway$Pathway==unlist(last_selected_row),]$Protein
session$sendCustomMessage(type = "commmemmsg" ,
message = list(id=paste(proteins,collapse=",")))
}
})
# back button
observeEvent(input$back_button, {
size <- length(global$viz_stack)
if (size > 1){
global$viz_stack <- without_top(global$viz_stack)
global$name <- without_top(global$name)
}
})
# on-click from sigma.js
observeEvent(input$comm_id, {
print(input$comm_id)
global$currentCommId<-input$comm_id
getcommunity_from_graphdb(input$comm_id)
update_stats()
observe({
session$sendCustomMessage(type = "updategraph",message="xyz")
})
})
# Render the current graph with sigma.js: load the top-level graph
# (-1 = no community filter), refresh the summary statistics, notify the
# client, and embed the static sigma.js HTML page.
output$graph_with_sigma <- renderUI({
  getcommunity_from_graphdb(-1)
  #makenetjson(data[[1]], "./www/data/current_graph.json", data[[2]])
  update_stats()
  observe({
    session$sendCustomMessage(type = "updategraph",message="")
  })
  return(includeHTML("./www/graph.html"))
})
# Refresh the summary statistics for the graph currently exported to
# ./www/data/current_graph.json by the visualization layer.
#
# Reads the first line of the JSON export, rebuilds an igraph object from
# its nodes/edges, and stores a data frame with per-node Degree and
# PageRank in global$nodes (or NULL when the graph has at most one node).
update_stats <- function(){
  # readLines() on a file path opens and closes the connection itself, so
  # no connection can leak if JSON parsing below fails. (The previous
  # file()/open()/close() sequence leaked the connection on error.)
  line <- readLines("./www/data/current_graph.json", n = 1, warn = FALSE)
  x<-fromJSON(line)
  edges<-x$edges[c('source','target')]
  vertex_data<-x$nodes[c('id','name','type')]
  if(nrow(vertex_data) > 1){
    graph <- graph_from_data_frame(edges, directed = FALSE, vertices =
        vertex_data)
    nodes <- get.data.frame(graph, what="vertices")
    nodes$degree <- degree(graph)
    nodes$pagerank <- page_rank(graph)$vector
    colnames(nodes) <- c("Name", "Type", "Degree", "PageRank")
    global$nodes <- nodes
  }
  else
  {
    # A trivial graph has no meaningful statistics.
    global$nodes <- NULL
  }
}
# Plot the degree distribution of the current graph as a histogram.
output$degree_distribution <- renderPlotly({
  if (!is.null(global$nodes)){
    # NOTE(review): bare 'Degree' relies on plotly's legacy non-standard
    # evaluation; newer plotly versions require x = ~Degree -- confirm the
    # installed plotly version supports this form.
    plot_ly(global$nodes, x = Degree, type="histogram", color="#FF8800")
  }
})
# Plot the PageRank distribution of the current graph as a histogram.
output$pagerank_distribution <- renderPlotly({
  if (!is.null(global$nodes)){
    # NOTE(review): bare 'PageRank' relies on plotly's legacy non-standard
    # evaluation; newer plotly versions require x = ~PageRank -- confirm.
    plot_ly(global$nodes, x = PageRank, type="histogram", color="#FF8800")
  }
})
# Generate a table of node degrees and PageRank, sorted by Degree
# (descending) via the DT 'order' option. Renders empty while
# global$nodes is NULL.
output$degree_table <- DT::renderDataTable({
  if (!is.null(global$nodes)){
    table <- global$nodes[c("Name", "Degree", "PageRank")]
  }
},
options = list(order = list(list(1, 'desc'))),
rownames = FALSE,
selection = "single"
)
# Generate the current graph name (as a list of community labels) joined
# with "/". The final assignment's value is what renderText returns.
output$name <- renderText({
  name <- as.list(rev(global$name))
  name <- paste(name, collapse = "/", sep="/")
  #return(paste(c("Current Community", name)))
})
# Disease-pathway table for the currently selected community: fetches the
# community's entities (which populate the global protienDSpathway data
# frame as a side effect) and renders a pathway-frequency table sorted by
# total count.
output$plotgraph1 <- DT::renderDataTable({
  # Reset the global pathway accumulator filled by getrawentititesfromComm().
  protienDSpathway<<-data.frame()
  sortedlabel<-NULL
  lf<-NULL
  lbls<-NULL
  # This takes forever. If we can load a previously built object do it; otherwise don't hold your breath
  withProgress(message = "Loading ...",value = 0,{
    if(is.null(mp)){
      filename = 'mp.rds'
      if (file.exists(filename)){
        # Cached protein/label dictionary available -- load it.
        mp <<- NULL
        mp <<- readRDS(filename)
      } else {
        # First run: build the dictionary and cache it for next time.
        mp <<- getproteinlabeldict()
        saveRDS(mp, file=filename)
      }
    }
  })
  # -1 means no community is selected yet; nothing to show.
  if(global$currentCommId==-1)
    return (NULL)
  finallist<-c()
  lbllist <<- c()
  withProgress(message = "Loading ...",value = 0,{
    getrawentititesfromComm(global$currentCommId)
  })
  # Fallback shown when no pathway data exists for this community.
  # NOTE(review): the local 'table' shadows base::table(); the call below
  # still resolves to the function because R skips non-functions when a
  # name is used in call position.
  table <- data.frame(Protein="No pathway data available")
  if (nrow(protienDSpathway)>1){
    labelfreq <- table(protienDSpathway)
    if (ncol(labelfreq)>1){
      # Sort pathways by their total frequency across proteins.
      z<-apply(labelfreq,1,sum)
      sortedlabel<-labelfreq[order(as.numeric(z), decreasing=TRUE),]
      table<-as.data.frame.matrix(sortedlabel)
    } else {
      table <- as.data.frame.matrix(labelfreq)
    }
    # Truncate long pathway names for display.
    row.names(table) <- strtrim(row.names(table), 50)
  }
  table
},
rownames = TRUE,
selection = "single")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecasting.R
\name{forecast_arima}
\alias{forecast_arima}
\title{ARIMA Forecast}
\usage{
forecast_arima(time, values, n_future = 30, ARMA = 8, ARMA_min = 5,
AR = NA, MA = NA, wd_excluded = NA, plot = TRUE,
plot_days = 90, project = NA)
}
\arguments{
\item{time}{POSIX. Vector with date values}
\item{values}{Numeric. Vector with numerical values}
\item{n_future}{Integer. How many steps do you wish to forecast?}
\item{ARMA}{Integer. How many days should the model look back for ARMA?
Between 5 and 10 days recommended. If set to 0 then it will forecast
until the end of max date's month; if set to -1, until the end of
max date's following month}
\item{ARMA_min}{Integer. How many days should the model look back for ARMA?
Between 5 and 10 days recommended. If set to 0 then it will forecast
until the end of max date's month; if set to -1, until the end of
max date's following month}
\item{AR}{Integer. Force AR value if known}
\item{MA}{Integer. Force MA value if known}
\item{wd_excluded}{Character vector. Which weekdays are excluded in
your training set. If there are, please define which ones. Example:
c('Sunday','Thursday'). If set to 'auto' then it will detect automatically
which weekdays have no data and forecast without these days.}
\item{plot}{Boolean. If you wish to plot your results}
\item{plot_days}{Integer. How many days back you wish to plot?}
\item{project}{Character. Name of your forecast project}
}
\description{
This function automates the ARIMA iterations and modeling for
time forecasting. For the moment, units can only be days.
}
\details{
The ARIMA method is appropriate only for a time series that is
stationary (i.e., its mean, variance, and autocorrelation should
be approximately constant through time) and it is recommended
that there are at least 50 observations in the input data.
The model consists of two parts, an autoregressive (AR) part
and a moving average (MA) part. The AR part involves regressing
the variable on its own lagged (i.e., past) values. The MA part
involves modeling the error term as a linear combination of error
terms occurring contemporaneously and at various times in the past.
One thing to keep in mind when we think about ARIMA models is
their great power to capture very complex patterns of
temporal correlation (Cochrane, 1997: 25)
}
| /man/forecast_arima.Rd | no_license | nfultz/lares | R | false | true | 2,428 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecasting.R
\name{forecast_arima}
\alias{forecast_arima}
\title{ARIMA Forecast}
\usage{
forecast_arima(time, values, n_future = 30, ARMA = 8, ARMA_min = 5,
AR = NA, MA = NA, wd_excluded = NA, plot = TRUE,
plot_days = 90, project = NA)
}
\arguments{
\item{time}{POSIX. Vector with date values}
\item{values}{Numeric. Vector with numerical values}
\item{n_future}{Integer. How many steps do you wish to forecast?}
\item{ARMA}{Integer. How many days should the model look back for ARMA?
Between 5 and 10 days recommmended. If set to 0 then it will forecast
until the end of max date's month; if set to -1, until the end of
max date's following month}
\item{ARMA_min}{Integer. How many days should the model look back for ARMA?
Between 5 and 10 days recommmended. If set to 0 then it will forecast
until the end of max date's month; if set to -1, until the end of
max date's following month}
\item{AR}{Integer. Force AR value if known}
\item{MA}{Integer. Force MA value if known}
\item{wd_excluded}{Character vector. Which weekdays are excluded in
your training set. If there are, please define know which ones. Example:
c('Sunday','Thursday'). If set to 'auto' then it will detect automatically
which weekdays have no data and forcast without these days.}
\item{plot}{Boolean. If you wish to plot your results}
\item{plot_days}{Integer. How many days back you wish to plot?}
\item{project}{Character. Name of your forecast project}
}
\description{
This function automates the ARIMA iterations and modeling for
time forecasting. For the moment, units can only be days.
}
\details{
The ARIMA method is appropriate only for a time series that is
stationary (i.e., its mean, variance, and autocorrelation should
be approximately constant through time) and it is recommended
that there are at least 50 observations in the input data.
The model consists of two parts, an autoregressive (AR) part
and a moving average (MA) part. The AR part involves regressing
the variable on its own lagged (i.e., past) values. The MA part
involves modeling the error term as a linear combination of error
terms occurring contemporaneously and at various times in the past.
One thing to keep in mind when we think about ARIMA models is
given by the great power to capture very complex patters of
temporal correlation (Cochrane, 1997: 25)
}
|
## Regression test: reproduces van der Heijden & Mooijaart (1995), Table 2c, p. 23
# See also ?hmskew
library(logmult)
data(ocg1973)
# 5:1 is here to take "Farmers" as reference category (angle 0)
model <- hmskew(ocg1973[5:1, 5:1], weighting="uniform", start=NA)
model
ass <- model$assoc.hmskew
# First column of the table: row scores scaled by the square root of the
# association parameter phi
round(ass$row[,,1] * sqrt(ass$phi[1,1]), d=2)[5:1,]
summary(model)
# First score for Farmers is slightly different from the original article
stopifnot(isTRUE(all.equal(round(ass$row[,,1] * sqrt(ass$phi[1,1]), d=2)[5:1,],
                           matrix(c(-0.08, -0.2, -0.23, -0.11, 0.61,
                                    0.34, 0.3, -0.13, -0.51, 0), 5, 2),
                           check.attributes=FALSE)))
# Right part of the table: skew-symmetric association terms
round(ass$phi[1] * (ass$row[,2,1] %o% ass$row[,1,1] - ass$row[,1,1] %o% ass$row[,2,1]), d=3)[5:1, 5:1]
# Plot
plot(model, coords="cartesian")
# Test anova: independence vs. quasi-symmetry vs. hmskew model fit
indep <- gnm(Freq ~ O + D, data=ocg1973, family=poisson)
symm <- gnm(Freq ~ O + D + Symm(O, D), data=ocg1973, family=poisson)
anova(indep, symm, model, test="LR")
| /tests/vanderHeijden-Mooijaart1995.R | no_license | nalimilan/logmult | R | false | false | 1,075 | r | ## van der Heijden & Mooijaart (1995), Table 2c, p. 23
# See also ?hmskew
library(logmult)
data(ocg1973)
# 5:1 is here to take "Farmers" as reference category (angle 0)
model <- hmskew(ocg1973[5:1, 5:1], weighting="uniform", start=NA)
model
ass <- model$assoc.hmskew
# First column of the table
round(ass$row[,,1] * sqrt(ass$phi[1,1]), d=2)[5:1,]
summary(model)
# First score for Farmers is slightly different from the original article
stopifnot(isTRUE(all.equal(round(ass$row[,,1] * sqrt(ass$phi[1,1]), d=2)[5:1,],
matrix(c(-0.08, -0.2, -0.23, -0.11, 0.61,
0.34, 0.3, -0.13, -0.51, 0), 5, 2),
check.attributes=FALSE)))
# Right part of the table
round(ass$phi[1] * (ass$row[,2,1] %o% ass$row[,1,1] - ass$row[,1,1] %o% ass$row[,2,1]), d=3)[5:1, 5:1]
# Plot
plot(model, coords="cartesian")
# Test anova
indep <- gnm(Freq ~ O + D, data=ocg1973, family=poisson)
symm <- gnm(Freq ~ O + D + Symm(O, D), data=ocg1973, family=poisson)
anova(indep, symm, model, test="LR")
|
#'
#' Extracts and processes spectra from a specified file list, according to
#' loaded options and given parameters.
#'
#' The filenames of the raw LC-MS runs are read from the array \code{files}
#' in the global enviroment.
#' See the vignette \code{vignette("RMassBank")} for further details about the
#' workflow.
#'
#' @param w A \code{msmsWorkspace} to work with.
#' @param filetable The path to a .csv-file that contains the columns "Files" and "ID" supplying
#' the relationships between files and compound IDs. Either this or the parameter "files" need
#' to be specified.
#' @param files A vector or list containing the filenames of the files that are to be read as spectra.
#' For the IDs to be inferred from the filenames alone, there need to be exactly 2 underscores.
#' @param cpdids A vector or list containing the compound IDs of the files that are to be read as spectra.
#' The ordering of this and \code{files} implicitly assigns each ID to the corresponding file.
#' If this is supplied, then the IDs implicitly named in the filenames are ignored.
#' @param readMethod Several methods are available to get peak lists from the files.
#' Currently supported are "mzR", "xcms", "MassBank" and "peaklist".
#' The first two read MS/MS raw data, and differ in the strategy
#' used to extract peaks. MassBank will read existing records,
#' so that e.g. a recalibration can be performed, and "peaklist"
#' just requires a CSV with two columns and the column header "mz", "int".
#' @param mode \code{"pH", "pNa", "pM", "pNH4", "mH", "mM", "mFA"} for different ions
#' ([M+H]+, [M+Na]+, [M]+, [M+NH4]+, [M-H]-, [M]-, [M+FA]-).
#' @param confirmMode Defaults to false (use most intense precursor). Value 1 uses
#' the 2nd-most intense precursor for a chosen ion (and its data-dependent scans)
#' , etc.
#' @param useRtLimit Whether to enforce the given retention time window.
#' @param Args A list of arguments that will be handed to the xcms-method findPeaks via do.call
#' @param settings Options to be used for processing. Defaults to the options loaded via
#' \code{\link{loadRmbSettings}} et al. Refer to there for specific settings.
#' @param progressbar The progress bar callback to use. Only needed for specialized applications.
#' Cf. the documentation of \code{\link{progressBarHook}} for usage.
#' @param MSe A boolean value that determines whether the spectra were recorded using MSe or not
#' @param plots A boolean value that determines whether the pseudospectra in XCMS should be plotted
#' @return The \code{msmsWorkspace} with msms-spectra read.
#' @seealso \code{\link{msmsWorkspace-class}}, \code{\link{msmsWorkflow}}
#' @author Michael Stravs, Eawag <michael.stravs@@eawag.ch>
#' @author Erik Mueller, UFZ
#' @export
msmsRead <- function(w, filetable = NULL, files = NULL, cpdids = NULL,
                     readMethod, mode, confirmMode = FALSE, useRtLimit = TRUE,
                     Args = NULL, settings = getOption("RMassBank"),
                     progressbar = "progressBarHook", MSe = FALSE, plots = FALSE){
  .checkMbSettings()
  ## Read the files and cpdids according to the definition.
  ## All cases are silently accepted, as long as they can be handled
  ## according to one definition.
  if(!any(mode %in% knownAdducts())) stop(paste("The ionization mode", mode, "is unknown."))

  if(is.null(filetable)){
    ## If no filetable is supplied, filenames must be named explicitly
    if(is.null(files))
      stop("Please supply the files")

    ## Assign the filenames to the workspace
    w@files <- unlist(files)

    ## If no filetable is supplied, cpdids must be delivered explicitly or
    ## implicitly within the filenames (the second-to-last "_"-separated token)
    if(is.null(cpdids)){
      splitfn <- strsplit(files,"_")
      splitsfn <- sapply(splitfn, function(x) x[length(x)-1])
      if(suppressWarnings(any(is.na(as.numeric(splitsfn)[1]))))
        stop("Please supply the cpdids corresponding to the files in the filetable or the filenames")
      cpdids <- splitsfn
    }
  } else{
    ## If a filetable is supplied read it
    tab <- read.csv(filetable, stringsAsFactors = FALSE)
    w@files <- tab[,"Files"]
    cpdids <- tab[,"ID"]
  }

  ## If there's more cpdids than filenames or the other way around, then abort
  if(length(w@files) != length(cpdids)){
    stop("There are a different number of cpdids than files")
  }

  if(!(readMethod %in% c("mzR","peaklist","xcms","minimal","msp"))){
    stop("The supplied method does not exist")
  }

  if(!all(file.exists(w@files))){
    stop("The supplied files ", paste(w@files[!file.exists(w@files)]), " don't exist")
  }

  # na.ids <- which(is.na(sapply(cpdids, findSmiles)))

  # if(length(na.ids)){
  #	stop("The supplied compound ids ", paste(cpdids[na.ids], collapse=" "), " don't have a corresponding smiles entry. Maybe they are missing from the compound list")
  # }

  ## The "minimal" method: identity recalibration, no annotation.
  if(readMethod == "minimal"){
    ## Edit options
    opt <- getOption("RMassBank")
    opt$recalibrator$MS1 <- "recalibrate.identity"
    opt$recalibrator$MS2 <- "recalibrate.identity"
    ## Fix: the original used '==' (a discarded comparison) where an
    ## assignment was intended, so annotation was never actually disabled.
    opt$add_annotation <- FALSE
    options(RMassBank=opt)
    ## Edit analyzemethod
    analyzeMethod <- "intensity"
  }

  if(readMethod == "mzR"){
    ## Progressbar
    nLen <- length(w@files)
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    count <- 1
    envir <- environment()
    w@spectra <- as(lapply(w@files, function(fileName) {

      # Find compound ID
      cpdID <- cpdids[count]
      # Set counter up
      envir$count <- envir$count + 1

      # Retrieve spectrum data
      spec <- findMsMsHR(fileName = fileName,
                         cpdID = cpdID, mode = mode, confirmMode = confirmMode, useRtLimit = useRtLimit,
                         ppmFine = settings$findMsMsRawSettings$ppmFine,
                         mzCoarse = settings$findMsMsRawSettings$mzCoarse,
                         fillPrecursorScan = settings$findMsMsRawSettings$fillPrecursorScan,
                         rtMargin = settings$rtMargin,
                         deprofile = settings$deprofile)
      gc()

      # Progress:
      nProg <<- nProg + 1
      pb <- do.call(progressbar, list(object=pb, value= nProg))

      return(spec)
    } ), "SimpleList")
    names(w@spectra) <- basename(as.character(w@files))
  }

  ## xcms-readmethod
  if(readMethod == "xcms"){

    ## Load libraries
    requireNamespace("xcms",quietly=TRUE)
    requireNamespace("CAMERA",quietly=TRUE)

    ## Find unique files and cpdIDs
    ufiles <- unique(w@files)
    uIDs <- unique(cpdids)
    nLen <- length(ufiles)

    ## Progressbar
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    i <- 1

    ## Routine for the case of multiple cpdIDs per file
    if(length(uIDs) > length(ufiles)){
      w@spectra <- as(unlist(lapply(ufiles, function(currentFile){
        fileIDs <- cpdids[which(w@files == currentFile)]
        spec <- findMsMsHRperxcms(currentFile, fileIDs, mode=mode, findPeaksArgs=Args, plots, MSe = MSe)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),FALSE),"SimpleList")
    } else {
      ## Routine for the other cases
      w@spectra <- as(lapply(uIDs, function(ID){
        # Find files corresponding to the compoundID
        currentFile <- w@files[which(cpdids == ID)]

        # Retrieve spectrum data
        spec <- findMsMsHRperxcms(currentFile, ID, mode=mode, findPeaksArgs=Args, plots, MSe = MSe)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),"SimpleList")
      ## If there are more files than unique cpdIDs, only remember the first file for every cpdID
      w@files <- w@files[sapply(uIDs, function(ID){
        return(which(cpdids == ID)[1])
      })]
    }
  }

  ## Peaklist-readmethod
  if((readMethod == "peaklist") || (readMethod=="minimal")){
    w <- createSpecsFromPeaklists(w, cpdids, filenames=w@files, mode=mode)
    uIDs <- unique(cpdids)
    files <- list()

    ## seq_along() is safe for empty input, unlike 1:length()
    for(i in seq_along(uIDs)){
      indices <- sapply(cpdids,function(a){return(uIDs[i] %in% a)})
      files[[i]] <- w@files[indices]
    }

    ## Keep only the first file per unique compound ID
    w@files <- sapply(files,function(file){return(file[1])})
    message("Peaks read")
  }

  ## MSP-readmethod
  if(readMethod == "msp"){

    ## Find unique files and cpdIDs
    ufiles <- unique(w@files)
    uIDs <- unique(cpdids)
    nLen <- length(ufiles)

    ## Progressbar
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    i <- 1

    ## Routine for the case of multiple cpdIDs per file
    if(length(uIDs) > length(ufiles)){
      w@spectra <- as(unlist(lapply(ufiles, function(currentFile){
        fileIDs <- cpdids[which(w@files == currentFile)]
        spec <- findMsMsHRperMsp(fileName = currentFile, cpdIDs = fileIDs, mode=mode)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),FALSE),"SimpleList")
    } else {
      ## Routine for the other cases
      w@spectra <- as(lapply(uIDs, function(ID){
        # Find files corresponding to the compoundID
        currentFile <- w@files[which(cpdids == ID)]

        # Retrieve spectrum data
        spec <- findMsMsHRperMsp(fileName = currentFile, cpdIDs = ID, mode=mode)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),"SimpleList")
      ## If there are more files than unique cpdIDs, only remember the first file for every cpdID
      w@files <- w@files[sapply(uIDs, function(ID){
        return(which(cpdids == ID)[1])
      })]
    }
  }

  ## verbose output: warn about compounds whose precursor was not found
  if(RMassBank.env$verbose.output)
    for(parentIdx in seq_along(w@spectra))
      if(!w@spectra[[parentIdx]]@found)
        cat(paste("### Warning ### No precursor ion was detected for ID '", w@spectra[[parentIdx]]@id, "'\n", sep = ""))

  return(w)
}
#'
#' Extracts and processes spectra from a list of xcms-Objects
#'
#' The filenames of the raw LC-MS runs are read from the array \code{files}
#' in the global environment.
#' See the vignette \code{vignette("RMassBank")} for further details about the
#' workflow.
#'
#' @param w A \code{msmsWorkspace} to work with.
#' @param xRAW A list of xcmsRaw objects whose peaks should be detected and added to the workspace.
#' The relevant data must be in the MS1 data of the xcmsRaw object. You can coerce the
#' msn-data in a usable object with the \code{msn2xcmsRaw} function of xcms.
#' @param cpdids A vector or list containing the compound IDs of the files that are to be read as spectra.
#' The ordering of this and \code{files} implicitly assigns each ID to the corresponding file.
#' If this is supplied, then the IDs implicitly named in the filenames are ignored.
#' @param mode \code{"pH", "pNa", "pM", "pNH4", "mH", "mM", "mFA"} for different ions
#' ([M+H]+, [M+Na]+, [M]+, [M+NH4]+, [M-H]-, [M]-, [M+FA]-).
#' @param findPeaksArgs A list of arguments that will be handed to the xcms-method findPeaks via do.call
#' @param settings Options to be used for processing. Defaults to the options loaded via
#' \code{\link{loadRmbSettings}} et al. Refer to there for specific settings.
#' @param progressbar The progress bar callback to use. Only needed for specialized applications.
#' Cf. the documentation of \code{\link{progressBarHook}} for usage.
#' @param plots A boolean value that determines whether the pseudospectra in XCMS should be plotted
#' @return The \code{msmsWorkspace} with msms-spectra read.
#' @seealso \code{\link{msmsWorkspace-class}}, \code{\link{msmsWorkflow}}
#' @author Michael Stravs, Eawag <michael.stravs@@eawag.ch>
#' @author Erik Mueller, UFZ
#' @export
msmsRead.RAW <- function(w, xRAW = NULL, cpdids = NULL, mode, findPeaksArgs = NULL,
                         settings = getOption("RMassBank"), progressbar = "progressBarHook", plots = FALSE){
  requireNamespace("xcms", quietly=TRUE)

  ## xRAW will be coerced into a list of length 1 if it is an xcmsRaw-object.
  ## inherits() is used instead of comparing class(): class() can return a
  ## vector for classed objects, which breaks '=='/'!=' comparisons.
  if(inherits(xRAW, "xcmsRaw")){
    xRAW <- list(xRAW)
  }

  ## Error messages
  if(!is.list(xRAW) || any(!sapply(xRAW, function(x) inherits(x, "xcmsRaw")))){
    stop("No list of xcmsRaw-objects supplied")
  }

  if(is.null(cpdids)){
    stop("No cpdids supplied")
  }

  #msnExist <- which(sapply(xRAW,function(x) length(x@msnPrecursorScan) != 0))
  #print(length(msnExist))
  #print(length(xRAW))
  #if(length(msnExist) != length(xRAW)){
  #	stop(paste("No msn data in list elements", setdiff(1:length(xRAW),msnExist)))
  #}

  requireNamespace("CAMERA",quietly=TRUE)

  ## Precursor m/z and retention time of the (single) compound of interest
  parentMass <- findMz(cpdids[1], mode=mode)$mzCenter
  if(is.na(parentMass)){
    stop(paste("There was no matching entry to the supplied cpdID", cpdids[1] ,"\n Please check the cpdIDs and the compoundlist."))
  }
  RT <- findRt(cpdids[1])$RT * 60
  mzabs <- 0.1

  # Median retention time of each pseudospectrum in an xsAnnotate object
  getRT <- function(xa) {
    rt <- sapply(xa@pspectra, function(x) {median(peaks(xa@xcmsSet)[x, "rt"])})
  }

  suppressWarnings(setReplicate <- xcms::xcmsSet(files=xRAW[[1]]@filepath, method="MS1"))
  xsmsms <- as.list(replicate(length(xRAW),setReplicate))
  candidates <- list()
  anmsms <- list()
  psp <- list()
  spectra <- list()
  whichmissing <- vector()
  metaspec <- list()
  for(i in seq_along(xRAW)){
    devnull <- suppressWarnings(capture.output(xcms::peaks(xsmsms[[i]]) <- do.call(xcms::findPeaks,c(findPeaksArgs, object = xRAW[[i]]))))

    if (nrow(xcms::peaks(xsmsms[[i]])) == 0) { ## If there are no peaks
      # Placeholder "empty" spectrum so downstream indexing stays aligned
      spectra[[i]] <- matrix(0,2,7)
      next
    } else{
      ## Get pspec
      pl <- xcms::peaks(xsmsms[[i]])[,c("mz", "rt"), drop=FALSE]

      ## Best: find precursor peak within +-mzabs and +-10% of the expected RT
      candidates[[i]] <- which( pl[,"mz", drop=FALSE] < parentMass + mzabs & pl[,"mz", drop=FALSE] > parentMass - mzabs
                                & pl[,"rt", drop=FALSE] < RT * 1.1 & pl[,"rt", drop=FALSE] > RT * 0.9 )
      devnull <- capture.output(anmsms[[i]] <- CAMERA::xsAnnotate(xsmsms[[i]]))
      devnull <- capture.output(anmsms[[i]] <- CAMERA::groupFWHM(anmsms[[i]]))

      if(length(candidates[[i]]) > 0){
        closestCandidate <- which.min (abs( RT - pl[candidates[[i]], "rt", drop=FALSE]))
        psp[[i]] <- which(sapply(anmsms[[i]]@pspectra, function(x) {candidates[[i]][closestCandidate] %in% x}))
      } else{
        # No precursor candidate: fall back to the pseudospectrum whose
        # median RT is closest to the expected RT
        psp[[i]] <- which.min( abs(getRT(anmsms[[i]]) - RT) )
      }

      ## Now find the pspec for compound
      ## 2nd best: Spectrum closest to MS1
      ##psp <- which.min( abs(getRT(anmsms) - actualRT))

      ## 3rd Best: find pspec closest to RT from spreadsheet
      ##psp <- which.min( abs(getRT(anmsms) - RT) )
      if((plots == TRUE) && (length(psp[[i]]) > 0)){
        CAMERA::plotPsSpectrum(anmsms[[i]], psp[[i]], log=TRUE, mzrange=c(0, findMz(cpdids[1])[[3]]), maxlabel=10)
      }
      if(length(psp[[i]]) != 0){
        spectra[[i]] <- CAMERA::getpspectra(anmsms[[i]], psp[[i]])
      } else {
        whichmissing <- c(whichmissing,i)
      }
    }
  }
  ## Fill placeholder spectra for files where no pseudospectrum was found
  if(length(spectra) != 0){
    for(i in whichmissing){
      spectra[[i]] <- matrix(0,2,7)
    }
  }

  sp <- toRMB(spectra,cpdids,"mH")
  sp@id <- as.character(as.integer(cpdids))
  sp@name <- findName(cpdids)
  sp@formula <- findFormula(cpdids)
  sp@mode <- mode

  ## Merge the new spectrum into the workspace: append as a child if the
  ## compound already exists, otherwise add it as a new entry
  if(length(w@spectra) != 0){
    IDindex <- sapply(w@spectra,function(s) s@id == cpdids)
    if(length(IDindex)){
      spectraNum <- length(w@spectra[[which(IDindex)]]@children)
      w@spectra[[which(IDindex)]]@children[[spectraNum+1]] <- sp@children[[1]]
    } else {
      w@spectra[[length(w@spectra)+1]] <- sp
    }
  } else{
    w@spectra[[1]] <- sp
  }

  ## Record the file path, disambiguating repeated reads of the same file
  ## with a numeric suffix
  if(all(w@files != xRAW[[1]]@filepath)){
    w@files <- c(w@files,xRAW[[1]]@filepath)
  } else{
    for(i in 2:(length(w@files)+1)){
      currentFPath <- paste0(xRAW[[1]]@filepath,"_",i)
      if(all(w@files != currentFPath)){
        w@files <- c(w@files,currentFPath)
        break
      }
    }
  }
  return(w)
}
#'
#' Extracts and processes spectra from a specified file list, according to
#' loaded options and given parameters.
#'
#' The filenames of the raw LC-MS runs are read from the array \code{files}
#' in the global environment.
#' See the vignette \code{vignette("RMassBank")} for further details about the
#' workflow.
#'
#' @param cl Cluster.
#' @param w A \code{msmsWorkspace} to work with.
#' @param filetable The path to a .csv-file that contains the columns
#' "Files" and "ID" supplying the relationships between files and
#' compound IDs. Either this or the parameter "files" need to be
#' specified.
#' @param files A vector or list containing the filenames of the files
#' that are to be read as spectra. For the IDs to be inferred
#' from the filenames alone, there need to be exactly 2
#' underscores.
#' @param cpdids A vector or list containing the compound IDs of the
#' files that are to be read as spectra. The ordering of this and
#' \code{files} implicitly assigns each ID to the corresponding
#' file. If this is supplied, then the IDs implicitly named in
#' the filenames are ignored.
#' @param readMethod Several methods are available to get peak lists
#' from the files. Currently supported are "mzR", "xcms",
#' "MassBank" and "peaklist". The first two read MS/MS raw data,
#' and differ in the strategy used to extract peaks. MassBank will
#' read existing records, so that e.g. a recalibration can be
#' performed, and "peaklist" just requires a CSV with two columns
#' and the column header "mz", "int".
#' @param mode \code{"pH", "pNa", "pM", "pNH4", "mH", "mM", "mFA"} for
#' different ions ([M+H]+, [M+Na]+, [M]+, [M+NH4]+, [M-H]-, [M]-,
#' [M+FA]-).
#' @param confirmMode Defaults to false (use most intense
#' precursor). Value 1 uses the 2nd-most intense precursor for a
#' chosen ion (and its data-dependent scans) , etc.
#' @param useRtLimit Whether to enforce the given retention time
#' window.
#' @param Args A list of arguments that will be handed to the
#' xcms-method findPeaks via do.call
#' @param settings Options to be used for processing. Defaults to the
#' options loaded via \code{\link{loadRmbSettings}} et al. Refer
#' to there for specific settings.
#' @param progressbar The progress bar callback to use. Only needed
#' for specialized applications. Cf. the documentation of
#' \code{\link{progressBarHook}} for usage.
#' @param MSe A boolean value that determines whether the spectra were
#' recorded using MSe or not
#' @param plots A boolean value that determines whether the
#' pseudospectra in XCMS should be plotted
#' @return The \code{msmsWorkspace} with msms-spectra read.
#' @seealso \code{\link{msmsWorkspace-class}},
#' \code{\link{msmsWorkflow}}
#' @author Michael Stravs, Eawag <michael.stravs@@eawag.ch>
#' @author Todor Kondić, LCSB-ECI <todor.kondic@@uni.lu>
#' @export
msmsRead.parallel <- function(cl,w, filetable = NULL, files = NULL, cpdids = NULL,
                              readMethod, mode, confirmMode = FALSE, useRtLimit = TRUE,
                              Args = NULL, settings = getOption("RMassBank"),
                              progressbar = "progressBarHook", MSe = FALSE, plots = FALSE){
  .checkMbSettings()
  ## Read the files and cpdids according to the definition.
  ## All cases are silently accepted, as long as they can be handled
  ## according to one definition.
  if(!any(mode %in% knownAdducts())) stop(paste("The ionization mode", mode, "is unknown."))

  if(is.null(filetable)){
    ## If no filetable is supplied, filenames must be named explicitly
    if(is.null(files))
      stop("Please supply the files")

    ## Assign the filenames to the workspace
    w@files <- unlist(files)

    ## If no filetable is supplied, cpdids must be delivered explicitly or
    ## implicitly within the filenames (the second-to-last "_"-separated token)
    if(is.null(cpdids)){
      splitfn <- strsplit(files,"_")
      splitsfn <- sapply(splitfn, function(x) x[length(x)-1])
      if(suppressWarnings(any(is.na(as.numeric(splitsfn)[1]))))
        stop("Please supply the cpdids corresponding to the files in the filetable or the filenames")
      cpdids <- splitsfn
    }
  } else{
    ## If a filetable is supplied read it
    tab <- read.csv(filetable, stringsAsFactors = FALSE)
    w@files <- tab[,"Files"]
    cpdids <- tab[,"ID"]
  }

  ## If there's more cpdids than filenames or the other way around, then abort
  if(length(w@files) != length(cpdids)){
    stop("There are a different number of cpdids than files")
  }

  if(!(readMethod %in% c("mzR","peaklist","xcms","minimal","msp"))){
    stop("The supplied method does not exist")
  }

  if(!all(file.exists(w@files))){
    stop("The supplied files ", paste(w@files[!file.exists(w@files)]), " don't exist")
  }

  # na.ids <- which(is.na(sapply(cpdids, findSmiles)))

  # if(length(na.ids)){
  #	stop("The supplied compound ids ", paste(cpdids[na.ids], collapse=" "), " don't have a corresponding smiles entry. Maybe they are missing from the compound list")
  # }

  ## The "minimal" method: identity recalibration, no annotation.
  if(readMethod == "minimal"){
    ## Edit options
    opt <- getOption("RMassBank")
    opt$recalibrator$MS1 <- "recalibrate.identity"
    opt$recalibrator$MS2 <- "recalibrate.identity"
    ## Fix: the original used '==' (a discarded comparison) where an
    ## assignment was intended, so annotation was never actually disabled.
    opt$add_annotation <- FALSE
    options(RMassBank=opt)
    ## Edit analyzemethod
    analyzeMethod <- "intensity"
  }

  if(readMethod == "mzR") {
    ## Progressbar
    ## nLen <- length(w@files)
    ## nProg <- 0
    ## pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))

    # Worker: extract the MS/MS spectrum for one file/compound pair
    doone <- function(fn,cpdID) {
      spec <- findMsMsHR(fileName = fn,
                         cpdID = cpdID, mode = mode, confirmMode = confirmMode, useRtLimit = useRtLimit,
                         ppmFine = settings$findMsMsRawSettings$ppmFine,
                         mzCoarse = settings$findMsMsRawSettings$mzCoarse,
                         fillPrecursorScan = settings$findMsMsRawSettings$fillPrecursorScan,
                         rtMargin = settings$rtMargin,
                         deprofile = settings$deprofile)
      message("File: ",fn," ;Compound ID: ",cpdID,"; Status: DONE")
      gc()
      spec
    }

    # Ship the needed bindings to the cluster workers, then map over pairs
    parallel::clusterExport(cl,c("readMethod","mode","confirmMode","useRtLimit","settings"),envir=environment())
    cllct <- parallel::clusterMap(cl,doone,w@files,cpdids)
    w@spectra <- as(cllct,"SimpleList")
    names(w@spectra) <- basename(as.character(w@files))
  }

  ## xcms-readmethod
  if(readMethod == "xcms"){

    ## Load libraries
    requireNamespace("xcms",quietly=TRUE)
    requireNamespace("CAMERA",quietly=TRUE)

    ## Find unique files and cpdIDs
    ufiles <- unique(w@files)
    uIDs <- unique(cpdids)
    nLen <- length(ufiles)

    ## Progressbar
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    i <- 1

    ## Routine for the case of multiple cpdIDs per file
    if(length(uIDs) > length(ufiles)){
      w@spectra <- as(unlist(lapply(ufiles, function(currentFile){
        fileIDs <- cpdids[which(w@files == currentFile)]
        spec <- findMsMsHRperxcms(currentFile, fileIDs, mode=mode, findPeaksArgs=Args, plots, MSe = MSe)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),FALSE),"SimpleList")
    } else {
      ## Routine for the other cases
      w@spectra <- as(lapply(uIDs, function(ID){
        # Find files corresponding to the compoundID
        currentFile <- w@files[which(cpdids == ID)]

        # Retrieve spectrum data
        spec <- findMsMsHRperxcms(currentFile, ID, mode=mode, findPeaksArgs=Args, plots, MSe = MSe)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),"SimpleList")
      ## If there are more files than unique cpdIDs, only remember the first file for every cpdID
      w@files <- w@files[sapply(uIDs, function(ID){
        return(which(cpdids == ID)[1])
      })]
    }
  }

  ## Peaklist-readmethod
  if((readMethod == "peaklist") || (readMethod=="minimal")){
    w <- createSpecsFromPeaklists(w, cpdids, filenames=w@files, mode=mode)
    uIDs <- unique(cpdids)
    files <- list()

    ## seq_along() is safe for empty input, unlike 1:length()
    for(i in seq_along(uIDs)){
      indices <- sapply(cpdids,function(a){return(uIDs[i] %in% a)})
      files[[i]] <- w@files[indices]
    }

    ## Keep only the first file per unique compound ID
    w@files <- sapply(files,function(file){return(file[1])})
    message("Peaks read")
  }

  ## MSP-readmethod
  if(readMethod == "msp"){

    ## Find unique files and cpdIDs
    ufiles <- unique(w@files)
    uIDs <- unique(cpdids)
    nLen <- length(ufiles)

    ## Progressbar
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    i <- 1

    ## Routine for the case of multiple cpdIDs per file
    if(length(uIDs) > length(ufiles)){
      w@spectra <- as(unlist(lapply(ufiles, function(currentFile){
        fileIDs <- cpdids[which(w@files == currentFile)]
        spec <- findMsMsHRperMsp(fileName = currentFile, cpdIDs = fileIDs, mode=mode)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),FALSE),"SimpleList")
    } else {
      ## Routine for the other cases
      w@spectra <- as(lapply(uIDs, function(ID){
        # Find files corresponding to the compoundID
        currentFile <- w@files[which(cpdids == ID)]

        # Retrieve spectrum data
        spec <- findMsMsHRperMsp(fileName = currentFile, cpdIDs = ID, mode=mode)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),"SimpleList")
      ## If there are more files than unique cpdIDs, only remember the first file for every cpdID
      w@files <- w@files[sapply(uIDs, function(ID){
        return(which(cpdids == ID)[1])
      })]
    }
  }

  ## verbose output: warn about compounds whose precursor was not found
  if(RMassBank.env$verbose.output)
    for(parentIdx in seq_along(w@spectra))
      if(!w@spectra[[parentIdx]]@found)
        cat(paste("### Warning ### No precursor ion was detected for ID '", w@spectra[[parentIdx]]@id, "'\n", sep = ""))

  return(w)
}
| /R/msmsRead.R | no_license | MaliRemorker/RMassBank | R | false | false | 26,917 | r | #'
#' Extracts and processes spectra from a specified file list, according to
#' loaded options and given parameters.
#'
#' The filenames of the raw LC-MS runs are read from the array \code{files}
#' in the global enviroment.
#' See the vignette \code{vignette("RMassBank")} for further details about the
#' workflow.
#'
#' @param w A \code{msmsWorkspace} to work with.
#' @param filetable The path to a .csv-file that contains the columns "Files" and "ID" supplying
#' the relationships between files and compound IDs. Either this or the parameter "files" need
#' to be specified.
#' @param files A vector or list containing the filenames of the files that are to be read as spectra.
#' For the IDs to be inferred from the filenames alone, there need to be exactly 2 underscores.
#' @param cpdids A vector or list containing the compound IDs of the files that are to be read as spectra.
#' The ordering of this and \code{files} implicitly assigns each ID to the corresponding file.
#' If this is supplied, then the IDs implicitly named in the filenames are ignored.
#' @param readMethod Several methods are available to get peak lists from the files.
#' Currently supported are "mzR", "xcms", "MassBank" and "peaklist".
#' The first two read MS/MS raw data, and differ in the strategy
#' used to extract peaks. MassBank will read existing records,
#' so that e.g. a recalibration can be performed, and "peaklist"
#' just requires a CSV with two columns and the column header "mz", "int".
#' @param mode \code{"pH", "pNa", "pM", "pNH4", "mH", "mM", "mFA"} for different ions
#' ([M+H]+, [M+Na]+, [M]+, [M+NH4]+, [M-H]-, [M]-, [M+FA]-).
#' @param confirmMode Defaults to false (use most intense precursor). Value 1 uses
#' the 2nd-most intense precursor for a chosen ion (and its data-dependent scans)
#' , etc.
#' @param useRtLimit Whether to enforce the given retention time window.
#' @param Args A list of arguments that will be handed to the xcms-method findPeaks via do.call
#' @param settings Options to be used for processing. Defaults to the options loaded via
#' \code{\link{loadRmbSettings}} et al. Refer to there for specific settings.
#' @param progressbar The progress bar callback to use. Only needed for specialized applications.
#' Cf. the documentation of \code{\link{progressBarHook}} for usage.
#' @param MSe A boolean value that determines whether the spectra were recorded using MSe or not
#' @param plots A boolean value that determines whether the pseudospectra in XCMS should be plotted
#' @return The \code{msmsWorkspace} with msms-spectra read.
#' @seealso \code{\link{msmsWorkspace-class}}, \code{\link{msmsWorkflow}}
#' @author Michael Stravs, Eawag <michael.stravs@@eawag.ch>
#' @author Erik Mueller, UFZ
#' @export
msmsRead <- function(w, filetable = NULL, files = NULL, cpdids = NULL,
                     readMethod, mode, confirmMode = FALSE, useRtLimit = TRUE,
                     Args = NULL, settings = getOption("RMassBank"),
                     progressbar = "progressBarHook", MSe = FALSE, plots = FALSE){
  .checkMbSettings()

  ## Read the files and cpdids according to the definition.
  ## All cases are silently accepted, as long as they can be handled
  ## according to one definition.
  if(!any(mode %in% knownAdducts())) stop(paste("The ionization mode", mode, "is unknown."))

  if(is.null(filetable)){
    ## If no filetable is supplied, filenames must be named explicitly
    if(is.null(files))
      stop("Please supply the files")

    ## Assign the filenames to the workspace
    w@files <- unlist(files)

    ## If no filetable is supplied, cpdids must be delivered explicitly or
    ## implicitly within the filenames (second-to-last "_"-separated token)
    if(is.null(cpdids)){
      splitfn <- strsplit(files,"_")
      splitsfn <- sapply(splitfn, function(x) x[length(x)-1])
      if(suppressWarnings(any(is.na(as.numeric(splitsfn)[1]))))
        stop("Please supply the cpdids corresponding to the files in the filetable or the filenames")
      cpdids <- splitsfn
    }
  } else{
    ## If a filetable is supplied read it
    tab <- read.csv(filetable, stringsAsFactors = FALSE)
    w@files <- tab[,"Files"]
    cpdids <- tab[,"ID"]
  }

  ## If there's more cpdids than filenames or the other way around, then abort
  if(length(w@files) != length(cpdids)){
    stop("There are a different number of cpdids than files")
  }

  if(!(readMethod %in% c("mzR","peaklist","xcms","minimal","msp"))){
    stop("The supplied method does not exist")
  }

  if(!all(file.exists(w@files))){
    stop("The supplied files ", paste(w@files[!file.exists(w@files)], collapse = ", "), " don't exist")
  }

  # na.ids <- which(is.na(sapply(cpdids, findSmiles)))

  # if(length(na.ids)){
  #	stop("The supplied compound ids ", paste(cpdids[na.ids], collapse=" "), " don't have a corresponding smiles entry. Maybe they are missing from the compound list")
  # }

  ## "minimal" mode: disable recalibration and annotation.
  if(readMethod == "minimal"){
    ## Edit options
    opt <- getOption("RMassBank")
    opt$recalibrator$MS1 <- "recalibrate.identity"
    opt$recalibrator$MS2 <- "recalibrate.identity"
    ## BUG FIX: was 'opt$add_annotation==FALSE', a comparison whose result
    ## was discarded; an assignment is required to actually disable annotation.
    opt$add_annotation <- FALSE
    options(RMassBank=opt)
    ## Edit analyzemethod
    ## NOTE(review): 'analyzeMethod' is assigned but not used in this
    ## function -- confirm whether it should be forwarded somewhere.
    analyzeMethod <- "intensity"
  }

  ## mzR-readmethod: one spectrum set per file, extracted from raw MS/MS data.
  if(readMethod == "mzR"){
    ## Progressbar
    nLen <- length(w@files)
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))

    count <- 1
    envir <- environment()
    w@spectra <- as(lapply(w@files, function(fileName) {

      # Find compound ID
      cpdID <- cpdids[count]
      # Set counter up
      envir$count <- envir$count + 1

      # Retrieve spectrum data
      spec <- findMsMsHR(fileName = fileName,
                         cpdID = cpdID, mode = mode, confirmMode = confirmMode, useRtLimit = useRtLimit,
                         ppmFine = settings$findMsMsRawSettings$ppmFine,
                         mzCoarse = settings$findMsMsRawSettings$mzCoarse,
                         fillPrecursorScan = settings$findMsMsRawSettings$fillPrecursorScan,
                         rtMargin = settings$rtMargin,
                         deprofile = settings$deprofile)
      gc()

      # Progress:
      nProg <<- nProg + 1
      pb <- do.call(progressbar, list(object=pb, value= nProg))

      return(spec)
    } ), "SimpleList")
    names(w@spectra) <- basename(as.character(w@files))
  }

  ## xcms-readmethod: peak-pick with xcms/CAMERA per file or per compound ID.
  if(readMethod == "xcms"){
    ## Load libraries
    requireNamespace("xcms",quietly=TRUE)
    requireNamespace("CAMERA",quietly=TRUE)

    ## Find unique files and cpdIDs
    ufiles <- unique(w@files)
    uIDs <- unique(cpdids)
    nLen <- length(ufiles)

    ## Progressbar
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    i <- 1

    ## Routine for the case of multiple cpdIDs per file
    if(length(uIDs) > length(ufiles)){
      w@spectra <- as(unlist(lapply(ufiles, function(currentFile){
        fileIDs <- cpdids[which(w@files == currentFile)]
        spec <- findMsMsHRperxcms(currentFile, fileIDs, mode=mode, findPeaksArgs=Args, plots, MSe = MSe)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),FALSE),"SimpleList")
    } else {
      ## Routine for the other cases
      w@spectra <- as(lapply(uIDs, function(ID){
        # Find files corresponding to the compoundID
        currentFile <- w@files[which(cpdids == ID)]

        # Retrieve spectrum data
        spec <- findMsMsHRperxcms(currentFile, ID, mode=mode, findPeaksArgs=Args, plots, MSe = MSe)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),"SimpleList")
      ## If there are more files than unique cpdIDs, only remember the first file for every cpdID
      w@files <- w@files[sapply(uIDs, function(ID){
        return(which(cpdids == ID)[1])
      })]
    }
  }

  ## Peaklist-readmethod: build spectra from CSV peak lists.
  if((readMethod == "peaklist") || (readMethod=="minimal")){
    w <- createSpecsFromPeaklists(w, cpdids, filenames=w@files, mode=mode)
    uIDs <- unique(cpdids)
    files <- list()

    # Keep only the first file for every unique compound ID
    # (seq_along avoids the 1:length() zero-length pitfall)
    for(i in seq_along(uIDs)){
      indices <- sapply(cpdids,function(a){return(uIDs[i] %in% a)})
      files[[i]] <- w@files[indices]
    }

    w@files <- sapply(files,function(file){return(file[1])})
    message("Peaks read")
  }

  ## MSP-readmethod: read spectra from existing MSP records.
  if(readMethod == "msp"){

    ## Find unique files and cpdIDs
    ufiles <- unique(w@files)
    uIDs <- unique(cpdids)
    nLen <- length(ufiles)

    ## Progressbar
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    i <- 1

    ## Routine for the case of multiple cpdIDs per file
    if(length(uIDs) > length(ufiles)){
      w@spectra <- as(unlist(lapply(ufiles, function(currentFile){
        fileIDs <- cpdids[which(w@files == currentFile)]
        spec <- findMsMsHRperMsp(fileName = currentFile, cpdIDs = fileIDs, mode=mode)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),FALSE),"SimpleList")
    } else {
      ## Routine for the other cases
      w@spectra <- as(lapply(uIDs, function(ID){
        # Find files corresponding to the compoundID
        currentFile <- w@files[which(cpdids == ID)]

        # Retrieve spectrum data
        spec <- findMsMsHRperMsp(fileName = currentFile, cpdIDs = ID, mode=mode)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),"SimpleList")
      ## If there are more files than unique cpdIDs, only remember the first file for every cpdID
      w@files <- w@files[sapply(uIDs, function(ID){
        return(which(cpdids == ID)[1])
      })]
    }
  }

  ## verbose output: warn about compounds whose precursor was not found
  if(RMassBank.env$verbose.output)
    for(parentIdx in seq_along(w@spectra))
      if(!w@spectra[[parentIdx]]@found)
        cat(paste("### Warning ### No precursor ion was detected for ID '", w@spectra[[parentIdx]]@id, "'\n", sep = ""))

  return(w)
}
#'
#' Extracts and processes spectra from a list of xcms-Objects
#'
#' The filenames of the raw LC-MS runs are read from the array \code{files}
#' in the global enviroment.
#' See the vignette \code{vignette("RMassBank")} for further details about the
#' workflow.
#'
#' @param w A \code{msmsWorkspace} to work with.
#' @param xRAW A list of xcmsRaw objects whose peaks should be detected and added to the workspace.
#' The relevant data must be in the MS1 data of the xcmsRaw object. You can coerce the
#' msn-data in a usable object with the \code{msn2xcmsRaw} function of xcms.
#' @param cpdids A vector or list containing the compound IDs of the files that are to be read as spectra.
#' The ordering of this and \code{files} implicitly assigns each ID to the corresponding file.
#' If this is supplied, then the IDs implicitly named in the filenames are ignored.
#' @param mode \code{"pH", "pNa", "pM", "pNH4", "mH", "mM", "mFA"} for different ions
#' ([M+H]+, [M+Na]+, [M]+, [M+NH4]+, [M-H]-, [M]-, [M+FA]-).
#' @param findPeaksArgs A list of arguments that will be handed to the xcms-method findPeaks via do.call
#' @param settings Options to be used for processing. Defaults to the options loaded via
#' \code{\link{loadRmbSettings}} et al. Refer to there for specific settings.
#' @param progressbar The progress bar callback to use. Only needed for specialized applications.
#' Cf. the documentation of \code{\link{progressBarHook}} for usage.
#' @param plots A boolean value that determines whether the pseudospectra in XCMS should be plotted
#' @return The \code{msmsWorkspace} with msms-spectra read.
#' @seealso \code{\link{msmsWorkspace-class}}, \code{\link{msmsWorkflow}}
#' @author Michael Stravs, Eawag <michael.stravs@@eawag.ch>
#' @author Erik Mueller, UFZ
#' @export
msmsRead.RAW <- function(w, xRAW = NULL, cpdids = NULL, mode, findPeaksArgs = NULL,
                         settings = getOption("RMassBank"), progressbar = "progressBarHook", plots = FALSE){
  requireNamespace("xcms", quietly=TRUE)

  ## A single xcmsRaw object is coerced into a list of length 1.
  ## inherits() replaces 'class(x) == "xcmsRaw"', which is unreliable for
  ## objects with multi-element class vectors.
  if(inherits(xRAW, "xcmsRaw")){
    xRAW <- list(xRAW)
  }

  ## Error messages
  ## (!is.list || vapply) replaces 'class(xRAW) != "list"' -- class() can
  ## return a vector, which makes '||' error on R >= 4.3.
  if(!is.list(xRAW) || !all(vapply(xRAW, function(x) inherits(x, "xcmsRaw"), logical(1)))){
    stop("No list of xcmsRaw-objects supplied")
  }

  if(is.null(cpdids)){
    stop("No cpdids supplied")
  }

  #msnExist <- which(sapply(xRAW,function(x) length(x@msnPrecursorScan) != 0))
  #print(length(msnExist))
  #print(length(xRAW))
  #if(length(msnExist) != length(xRAW)){
  #	stop(paste("No msn data in list elements", setdiff(1:length(xRAW),msnExist)))
  #}

  requireNamespace("CAMERA",quietly=TRUE)

  ## Precursor m/z and retention time expected for the (first) compound ID.
  parentMass <- findMz(cpdids[1], mode=mode)$mzCenter
  if(is.na(parentMass)){
    stop(paste("There was no matching entry to the supplied cpdID", cpdids[1] ,"\n Please check the cpdIDs and the compoundlist."))
  }
  RT <- findRt(cpdids[1])$RT * 60
  mzabs <- 0.1

  # Median retention time of each pseudospectrum in a CAMERA annotation.
  getRT <- function(xa) {
    rt <- sapply(xa@pspectra, function(x) {median(peaks(xa@xcmsSet)[x, "rt"])})
  }

  suppressWarnings(setReplicate <- xcms::xcmsSet(files=xRAW[[1]]@filepath, method="MS1"))
  xsmsms <- as.list(replicate(length(xRAW),setReplicate))
  candidates <- list()
  anmsms <- list()
  psp <- list()
  spectra <- list()
  whichmissing <- vector()
  metaspec <- list()
  for(i in seq_along(xRAW)){
    devnull <- suppressWarnings(capture.output(xcms::peaks(xsmsms[[i]]) <- do.call(xcms::findPeaks,c(findPeaksArgs, object = xRAW[[i]]))))

    if (nrow(xcms::peaks(xsmsms[[i]])) == 0) { ## If there are no peaks
      spectra[[i]] <- matrix(0,2,7)
      next
    } else{
      ## Get pspec
      pl <- xcms::peaks(xsmsms[[i]])[,c("mz", "rt"), drop=FALSE]

      ## Best: find precursor peak within an m/z and RT tolerance window
      candidates[[i]] <- which( pl[,"mz", drop=FALSE] < parentMass + mzabs & pl[,"mz", drop=FALSE] > parentMass - mzabs
                                & pl[,"rt", drop=FALSE] < RT * 1.1 & pl[,"rt", drop=FALSE] > RT * 0.9 )
      devnull <- capture.output(anmsms[[i]] <- CAMERA::xsAnnotate(xsmsms[[i]]))
      devnull <- capture.output(anmsms[[i]] <- CAMERA::groupFWHM(anmsms[[i]]))

      if(length(candidates[[i]]) > 0){
        closestCandidate <- which.min (abs( RT - pl[candidates[[i]], "rt", drop=FALSE]))
        psp[[i]] <- which(sapply(anmsms[[i]]@pspectra, function(x) {candidates[[i]][closestCandidate] %in% x}))
      } else{
        psp[[i]] <- which.min( abs(getRT(anmsms[[i]]) - RT) )
      }
      ## Now find the pspec for compound
      ## 2nd best: Spectrum closest to MS1
      ##psp <- which.min( abs(getRT(anmsms) - actualRT))

      ## 3rd Best: find pspec closest to RT from spreadsheet
      ##psp <- which.min( abs(getRT(anmsms) - RT) )
      if((plots == TRUE) && (length(psp[[i]]) > 0)){
        CAMERA::plotPsSpectrum(anmsms[[i]], psp[[i]], log=TRUE, mzrange=c(0, findMz(cpdids[1])[[3]]), maxlabel=10)
      }
      if(length(psp[[i]]) != 0){
        spectra[[i]] <- CAMERA::getpspectra(anmsms[[i]], psp[[i]])
      } else {
        whichmissing <- c(whichmissing,i)
      }
    }
  }
  ## Fill empty placeholder matrices for entries where no pseudospectrum was found
  if(length(spectra) != 0){
    for(i in whichmissing){
      spectra[[i]] <- matrix(0,2,7)
    }
  }

  ## NOTE(review): the ionization mode is hard-coded to "mH" here although the
  ## 'mode' argument is stored on sp@mode below -- confirm whether toRMB()
  ## should receive 'mode' instead.
  sp <- toRMB(spectra,cpdids,"mH")
  sp@id <- as.character(as.integer(cpdids))
  sp@name <- findName(cpdids)
  sp@formula <- findFormula(cpdids)
  sp@mode <- mode

  ## Merge the new spectrum set into the workspace: append the children to an
  ## existing entry with the same compound ID, or add a new entry.
  if(length(w@spectra) != 0){
    IDindex <- sapply(w@spectra,function(s) s@id == cpdids)
    if(length(IDindex)){
      spectraNum <- length(w@spectra[[which(IDindex)]]@children)
      w@spectra[[which(IDindex)]]@children[[spectraNum+1]] <- sp@children[[1]]
    } else {
      w@spectra[[length(w@spectra)+1]] <- sp
    }
  } else{
    w@spectra[[1]] <- sp
  }

  ## Record the file path; disambiguate repeated reads of the same file with
  ## a numeric suffix so every spectrum set keeps a distinct file entry.
  if(all(w@files != xRAW[[1]]@filepath)){
    w@files <- c(w@files,xRAW[[1]]@filepath)
  } else{
    for(i in 2:(length(w@files)+1)){
      currentFPath <- paste0(xRAW[[1]]@filepath,"_",i)
      if(all(w@files != currentFPath)){
        w@files <- c(w@files,currentFPath)
        break
      }
    }
  }
  return(w)
}
#'
#' Extracts and processes spectra from a specified file list, according to
#' loaded options and given parameters.
#'
#' The filenames of the raw LC-MS runs are read from the array \code{files}
#' in the global enviroment.
#' See the vignette \code{vignette("RMassBank")} for further details about the
#' workflow.
#'
#' @param cl Cluster.
#' @param w A \code{msmsWorkspace} to work with.
#' @param filetable The path to a .csv-file that contains the columns
#' "Files" and "ID" supplying the relationships between files and
#' compound IDs. Either this or the parameter "files" need to be
#' specified.
#' @param files A vector or list containing the filenames of the files
#' that are to be read as spectra. For the IDs to be inferred
#' from the filenames alone, there need to be exactly 2
#' underscores.
#' @param cpdids A vector or list containing the compound IDs of the
#' files that are to be read as spectra. The ordering of this and
#' \code{files} implicitly assigns each ID to the corresponding
#' file. If this is supplied, then the IDs implicitly named in
#' the filenames are ignored.
#' @param readMethod Several methods are available to get peak lists
#' from the files. Currently supported are "mzR", "xcms",
#' "MassBank" and "peaklist". The first two read MS/MS raw data,
#' and differ in the strategy used to extract peaks. MassBank will
#' read existing records, so that e.g. a recalibration can be
#' performed, and "peaklist" just requires a CSV with two columns
#' and the column header "mz", "int".
#' @param mode \code{"pH", "pNa", "pM", "pNH4", "mH", "mM", "mFA"} for
#' different ions ([M+H]+, [M+Na]+, [M]+, [M+NH4]+, [M-H]-, [M]-,
#' [M+FA]-).
#' @param confirmMode Defaults to false (use most intense
#' precursor). Value 1 uses the 2nd-most intense precursor for a
#' chosen ion (and its data-dependent scans) , etc.
#' @param useRtLimit Whether to enforce the given retention time
#' window.
#' @param Args A list of arguments that will be handed to the
#' xcms-method findPeaks via do.call
#' @param settings Options to be used for processing. Defaults to the
#' options loaded via \code{\link{loadRmbSettings}} et al. Refer
#' to there for specific settings.
#' @param progressbar The progress bar callback to use. Only needed
#' for specialized applications. Cf. the documentation of
#' \code{\link{progressBarHook}} for usage.
#' @param MSe A boolean value that determines whether the spectra were
#' recorded using MSe or not
#' @param plots A boolean value that determines whether the
#' pseudospectra in XCMS should be plotted
#' @return The \code{msmsWorkspace} with msms-spectra read.
#' @seealso \code{\link{msmsWorkspace-class}},
#' \code{\link{msmsWorkflow}}
#' @author Michael Stravs, Eawag <michael.stravs@@eawag.ch>
#' @author Todor Kondić, LCSB-ECI <todor.kondic@@uni.lu>
#' @export
msmsRead.parallel <- function(cl,w, filetable = NULL, files = NULL, cpdids = NULL,
                              readMethod, mode, confirmMode = FALSE, useRtLimit = TRUE,
                              Args = NULL, settings = getOption("RMassBank"),
                              progressbar = "progressBarHook", MSe = FALSE, plots = FALSE){
  .checkMbSettings()

  ## Read the files and cpdids according to the definition.
  ## All cases are silently accepted, as long as they can be handled
  ## according to one definition.
  if(!any(mode %in% knownAdducts())) stop(paste("The ionization mode", mode, "is unknown."))

  if(is.null(filetable)){
    ## If no filetable is supplied, filenames must be named explicitly
    if(is.null(files))
      stop("Please supply the files")

    ## Assign the filenames to the workspace
    w@files <- unlist(files)

    ## If no filetable is supplied, cpdids must be delivered explicitly or
    ## implicitly within the filenames (second-to-last "_"-separated token)
    if(is.null(cpdids)){
      splitfn <- strsplit(files,"_")
      splitsfn <- sapply(splitfn, function(x) x[length(x)-1])
      if(suppressWarnings(any(is.na(as.numeric(splitsfn)[1]))))
        stop("Please supply the cpdids corresponding to the files in the filetable or the filenames")
      cpdids <- splitsfn
    }
  } else{
    ## If a filetable is supplied read it
    tab <- read.csv(filetable, stringsAsFactors = FALSE)
    w@files <- tab[,"Files"]
    cpdids <- tab[,"ID"]
  }

  ## If there's more cpdids than filenames or the other way around, then abort
  if(length(w@files) != length(cpdids)){
    stop("There are a different number of cpdids than files")
  }

  if(!(readMethod %in% c("mzR","peaklist","xcms","minimal","msp"))){
    stop("The supplied method does not exist")
  }

  if(!all(file.exists(w@files))){
    stop("The supplied files ", paste(w@files[!file.exists(w@files)], collapse = ", "), " don't exist")
  }

  # na.ids <- which(is.na(sapply(cpdids, findSmiles)))

  # if(length(na.ids)){
  #	stop("The supplied compound ids ", paste(cpdids[na.ids], collapse=" "), " don't have a corresponding smiles entry. Maybe they are missing from the compound list")
  # }

  ## "minimal" mode: disable recalibration and annotation.
  if(readMethod == "minimal"){
    ## Edit options
    opt <- getOption("RMassBank")
    opt$recalibrator$MS1 <- "recalibrate.identity"
    opt$recalibrator$MS2 <- "recalibrate.identity"
    ## BUG FIX: was 'opt$add_annotation==FALSE', a comparison whose result
    ## was discarded; an assignment is required to actually disable annotation.
    opt$add_annotation <- FALSE
    options(RMassBank=opt)
    ## Edit analyzemethod
    ## NOTE(review): 'analyzeMethod' is assigned but not used in this
    ## function -- confirm whether it should be forwarded somewhere.
    analyzeMethod <- "intensity"
  }

  ## mzR-readmethod: distribute the per-file extraction over the cluster.
  if(readMethod == "mzR") {
    ## Progressbar
    ## nLen <- length(w@files)
    ## nProg <- 0
    ## pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))

    # Worker function executed on the cluster: extract one spectrum set
    # per (file, compound ID) pair.
    doone <- function(fn,cpdID) {
      spec <- findMsMsHR(fileName = fn,
                         cpdID = cpdID, mode = mode, confirmMode = confirmMode, useRtLimit = useRtLimit,
                         ppmFine = settings$findMsMsRawSettings$ppmFine,
                         mzCoarse = settings$findMsMsRawSettings$mzCoarse,
                         fillPrecursorScan = settings$findMsMsRawSettings$fillPrecursorScan,
                         rtMargin = settings$rtMargin,
                         deprofile = settings$deprofile)
      message("File: ",fn," ;Compound ID: ",cpdID,"; Status: DONE")
      gc()
      spec
    }

    parallel::clusterExport(cl,c("readMethod","mode","confirmMode","useRtLimit","settings"),envir=environment())
    cllct <- parallel::clusterMap(cl,doone,w@files,cpdids)

    w@spectra <- as(cllct,"SimpleList")
    names(w@spectra) <- basename(as.character(w@files))
  }

  ## xcms-readmethod: peak-pick with xcms/CAMERA per file or per compound ID.
  if(readMethod == "xcms"){
    ## Load libraries
    requireNamespace("xcms",quietly=TRUE)
    requireNamespace("CAMERA",quietly=TRUE)

    ## Find unique files and cpdIDs
    ufiles <- unique(w@files)
    uIDs <- unique(cpdids)
    nLen <- length(ufiles)

    ## Progressbar
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    i <- 1

    ## Routine for the case of multiple cpdIDs per file
    if(length(uIDs) > length(ufiles)){
      w@spectra <- as(unlist(lapply(ufiles, function(currentFile){
        fileIDs <- cpdids[which(w@files == currentFile)]
        spec <- findMsMsHRperxcms(currentFile, fileIDs, mode=mode, findPeaksArgs=Args, plots, MSe = MSe)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),FALSE),"SimpleList")
    } else {
      ## Routine for the other cases
      w@spectra <- as(lapply(uIDs, function(ID){
        # Find files corresponding to the compoundID
        currentFile <- w@files[which(cpdids == ID)]

        # Retrieve spectrum data
        spec <- findMsMsHRperxcms(currentFile, ID, mode=mode, findPeaksArgs=Args, plots, MSe = MSe)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),"SimpleList")
      ## If there are more files than unique cpdIDs, only remember the first file for every cpdID
      w@files <- w@files[sapply(uIDs, function(ID){
        return(which(cpdids == ID)[1])
      })]
    }
  }

  ## Peaklist-readmethod: build spectra from CSV peak lists.
  if((readMethod == "peaklist") || (readMethod=="minimal")){
    w <- createSpecsFromPeaklists(w, cpdids, filenames=w@files, mode=mode)
    uIDs <- unique(cpdids)
    files <- list()

    # Keep only the first file for every unique compound ID
    # (seq_along avoids the 1:length() zero-length pitfall)
    for(i in seq_along(uIDs)){
      indices <- sapply(cpdids,function(a){return(uIDs[i] %in% a)})
      files[[i]] <- w@files[indices]
    }

    w@files <- sapply(files,function(file){return(file[1])})
    message("Peaks read")
  }

  ## MSP-readmethod: read spectra from existing MSP records.
  if(readMethod == "msp"){

    ## Find unique files and cpdIDs
    ufiles <- unique(w@files)
    uIDs <- unique(cpdids)
    nLen <- length(ufiles)

    ## Progressbar
    nProg <- 0
    pb <- do.call(progressbar, list(object=NULL, value=0, min=0, max=nLen))
    i <- 1

    ## Routine for the case of multiple cpdIDs per file
    if(length(uIDs) > length(ufiles)){
      w@spectra <- as(unlist(lapply(ufiles, function(currentFile){
        fileIDs <- cpdids[which(w@files == currentFile)]
        spec <- findMsMsHRperMsp(fileName = currentFile, cpdIDs = fileIDs, mode=mode)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),FALSE),"SimpleList")
    } else {
      ## Routine for the other cases
      w@spectra <- as(lapply(uIDs, function(ID){
        # Find files corresponding to the compoundID
        currentFile <- w@files[which(cpdids == ID)]

        # Retrieve spectrum data
        spec <- findMsMsHRperMsp(fileName = currentFile, cpdIDs = ID, mode=mode)
        gc()

        # Progress:
        nProg <<- nProg + 1
        pb <- do.call(progressbar, list(object=pb, value= nProg))

        return(spec)
      }),"SimpleList")
      ## If there are more files than unique cpdIDs, only remember the first file for every cpdID
      w@files <- w@files[sapply(uIDs, function(ID){
        return(which(cpdids == ID)[1])
      })]
    }
  }

  ## verbose output: warn about compounds whose precursor was not found
  if(RMassBank.env$verbose.output)
    for(parentIdx in seq_along(w@spectra))
      if(!w@spectra[[parentIdx]]@found)
        cat(paste("### Warning ### No precursor ion was detected for ID '", w@spectra[[parentIdx]]@id, "'\n", sep = ""))

  return(w)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_c.R
\name{find_cmin}
\alias{find_cmin}
\title{Find the minimum c value over a grid of values}
\usage{
find_cmin(cBias, ngrid = 4 * length(cBias))
}
\arguments{
\item{cBias}{vector of conditional bias for a sample}
\item{ngrid}{integer scalar indicating the length of the grid of values that
is generated to estimate the optimum value for c}
}
\value{
A scalar, representing the value c that minimizes the maximum Bias
of the robust HT estimator over a grid of values.
}
\description{
Compute the value c that minimizes the maximum absolute conditional Bias of
the robust HT estimator over a grid of values.
}
\examples{
# Generate population data
N <- 50; n <- 5
set.seed(0)
x <- rgamma(N, scale=10, shape=5)
y <- abs( 2*x + 3.7*sqrt(x) * rnorm(N) )
# Select sample
pik <- n * x/sum(x)
s <- sample(N, n)
ys <- y[s]
piks <- pik[s]
# Compute conditional bias
cb <- conditional_bias(y=ys, pk=piks, sampling = "poisson")
# Find the minimum c
find_cmin(cb, ngrid = 200)
}
| /man/find_cmin.Rd | no_license | rhobis/robustHT | R | false | true | 1,065 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_c.R
\name{find_cmin}
\alias{find_cmin}
\title{Find the minimum c value over a grid of values}
\usage{
find_cmin(cBias, ngrid = 4 * length(cBias))
}
\arguments{
\item{cBias}{vector of conditional bias for a sample}
\item{ngrid}{integer scalar indicating the length of the grid of values that
is generated to estimate the optimum value for c}
}
\value{
A scalar, representing the value c that minimizes the maximum Bias
of the robust HT estimator over a grid of values.
}
\description{
Compute the value c that minimizes the maximum absolute conditional Bias of
the robust HT estimator over a grid of values.
}
\examples{
# Generate population data
N <- 50; n <- 5
set.seed(0)
x <- rgamma(N, scale=10, shape=5)
y <- abs( 2*x + 3.7*sqrt(x) * rnorm(N) )
# Select sample
pik <- n * x/sum(x)
s <- sample(N, n)
ys <- y[s]
piks <- pik[s]
# Compute conditional bias
cb <- conditional_bias(y=ys, pk=piks, sampling = "poisson")
# Find the minimum c
find_cmin(cb, ngrid = 200)
}
|
#' Check perseus compatibility of an object
#'
#' @title MatrixDataCheck: a function to check the validity of an object as a perseus data frame
#'
#' @param object object to check consistency with perseus data frames
#' @param ... additional arguments passed to the respective method
#' @param main Main Data frame
#' @param annotationRows Rows containing annotation information
#' @param annotationCols Columns containing annotation information
#' @param descriptions Descriptions of all the columns
#' @param imputeData Is imputed or not
#' @param qualityData quality number
#' @param all_colnames The colnames to be used
#'
#'
#' @return a logical indicating the validity of the object
#' (or series of objects) as a perseus DF or the string of errors
#'
#' @rdname MatrixDataCheck
#'
#' @export
#'
#' @examples
#'
#' require(PerseusR)
#'
#' mat <- matrixData(
#' main=data.frame(a=1:3, b=6:8),
#' annotCols=data.frame(c=c('a','b','c')),
#' annotRows=data.frame(x=factor(c('1','1'))))
#'
#' MatrixDataCheck(mat)
#'
#'
# S3 generic: dispatches on the class of 'object' to one of the methods
# below (matrixData, list, ExpressionSet) or to the default data-frame check.
MatrixDataCheck <- function(object, ...) {
	UseMethod("MatrixDataCheck", object)
}
#' @rdname MatrixDataCheck
#' @method MatrixDataCheck default
#'
#' @export
MatrixDataCheck.default <- function(object = NULL, main,
                                    annotationRows, annotationCols,
                                    descriptions, imputeData, qualityData,
                                    all_colnames, ...) {
  # Validate the components of a Perseus-style data frame set.
  # Collects every violation found into 'errors' and only raises at
  # the end, so a single call reports all problems at once.
  # Returns TRUE when the components are consistent; stops otherwise.
  errors <- character()

  # All main (expression) columns must be numeric.
  # We could consider using a numeric matrix instead of
  # a df as the main matrix (since by default is a single
  # class )
  numCols <- vapply(main, is.numeric, logical(1))
  if (!all(numCols)) {
    # BUG FIX: paste(..., sep = ',') did not join the offending names;
    # collapse is required to obtain a single message string.
    msg <- paste('Main columns should be numeric: Columns',
                 paste(names(which(!numCols)), collapse = ','),
                 'are not numeric')
    errors <- c(errors, msg)
  }

  if (ncol(annotationRows) > 0) {
    # Annotation rows may be categorical (factor) or numeric.
    catAnnotRows <- vapply(annotationRows, is.factor, logical(1))
    numAnnotRows <- vapply(annotationRows, is.numeric, logical(1))
    if (!all(catAnnotRows | numAnnotRows)) {
      msg <- paste('Annotation rows should be factors or numeric: Rows',
                   paste(names(which(!(catAnnotRows | numAnnotRows))), collapse = ','),
                   'are not factors')
      errors <- c(errors, msg)
    }
    # One annotation-row entry is required per main data column.
    nColMain <- ncol(main)
    nColAnnotRows <- nrow(annotationRows)
    if (nColMain != nColAnnotRows) {
      msg <- paste('Size of annotation rows not matching:',
                   nColMain, 'main columns, but',
                   nColAnnotRows, 'annotations')
      errors <- c(errors, msg)
    }
  }

  # Main data and annotation columns must agree in row count
  # (only checked when both are non-empty).
  nMain <- nrow(main)
  nAnnot <- nrow(annotationCols)
  if (nAnnot > 0 && nMain > 0 && nMain != nAnnot) {
    msg <- paste('Number of rows not matching:',
                 nMain, 'rows in main data, but',
                 nAnnot, 'rows in annotation columns.')
    errors <- c(errors, msg)
  }

  # If descriptions are given, there must be exactly one per column.
  nDescr <- length(descriptions)
  if (nDescr > 0 && nDescr != length(all_colnames)) {
    msg <- paste('Descriptions do not fit columns, found',
                 nDescr, 'expected', length(all_colnames))
    errors <- c(errors, msg)
  }

  if (length(errors) == 0) TRUE else stop(errors)
}
#' @return \code{NULL}
#'
#' @inheritParams MatrixDataCheck.default
#'
#' @rdname MatrixDataCheck
#' @method MatrixDataCheck matrixData
#'
#' @export
MatrixDataCheck.matrixData <- function(object, ...) {
  # Unpack each slot of the S4 matrixData object and delegate the actual
  # consistency checks to the data-frame based default method.
  main_df <- object@main
  annot_cols <- object@annotCols
  MatrixDataCheck.default(main = main_df,
                          annotationRows = object@annotRows,
                          annotationCols = annot_cols,
                          descriptions = object@description,
                          imputeData = object@imputeData,
                          qualityData = object@qualityData,
                          all_colnames = c(colnames(main_df), colnames(annot_cols)))
}
#' @return \code{NULL}
#'
#' @inheritParams MatrixDataCheck.default
#'
#' @rdname MatrixDataCheck
#' @method MatrixDataCheck list
#'
#' @export
MatrixDataCheck.list <- function(object, ...) {
  # Validate a plain list that mimics a matrixData object.
  # At least one of 'main' / 'annotCols' must be present; any other
  # missing component is filled with a sensible default before checking.
  stopifnot(is.list(object))
  stopifnot(any(c('main', 'annotCols') %in% names(object)))

  # BUG FIX: the previous tryCatch-based defaulting never fired, because
  # indexing a missing list element with [[ returns NULL rather than
  # raising an error; it also referenced the non-existent element
  # 'annotationCols' (the slot is called 'annotCols') and supplied only
  # 4 defaults for 6 slots. Explicit NULL checks replace it.
  if (is.null(object$main)) object$main <- data.frame()
  if (is.null(object$annotRows)) object$annotRows <- data.frame()
  if (is.null(object$annotCols)) object$annotCols <- data.frame()
  if (is.null(object$imputeData)) object$imputeData <- data.frame()
  if (is.null(object$qualityData)) object$qualityData <- data.frame()
  if (is.null(object$descriptions)) {
    # One (empty) description per main + annotation column.
    object$descriptions <- character(length = ncol(object$main) + ncol(object$annotCols))
  }

  all_colnames <- c(colnames(object$main), colnames(object$annotCols))
  ret <- MatrixDataCheck.default(main = object$main,
                                 annotationRows = object$annotRows,
                                 annotationCols = object$annotCols,
                                 descriptions = object$descriptions,
                                 imputeData = object$imputeData,
                                 qualityData = object$qualityData,
                                 all_colnames = all_colnames)
  if (is.logical(ret) && ret) {
    return(ret)
  } else {
    stop(ret)
  }
}
#' @return \code{NULL}
#'
#' @inheritParams MatrixDataCheck.default
#'
#' @rdname MatrixDataCheck
#' @method MatrixDataCheck ExpressionSet
#'
#' @export
MatrixDataCheck.ExpressionSet <- function(object, ...) {
  # Map a Biobase ExpressionSet onto the Perseus component layout and
  # delegate to the default data-frame check.
  # Biobase is an optional dependency, so fail with a clear message if absent.
  if (!requireNamespace("Biobase", quietly = TRUE)) {
    stop('This function requires the Biobase package, please install it in the bioconductor repository')
  }
  exprs_df <- data.frame(Biobase::exprs(object))
  pheno_df <- methods::as(object@phenoData, 'data.frame')
  feature_df <- methods::as(object@featureData, 'data.frame')
  status <- MatrixDataCheck.default(main = exprs_df,
                                    annotationRows = pheno_df,
                                    annotationCols = feature_df,
                                    descriptions = Biobase::annotation(object),
                                    all_colnames = c(colnames(exprs_df), colnames(feature_df)))
  if (isTRUE(status)) {
    return(status)
  } else {
    stop(status)
  }
}
#' MatrixData
#' @slot main Main expression \code{data.frame}.
#' @slot annotCols Annotation Columns \code{data.frame}.
#' @slot annotRows Annotation Rows \code{data.frame}.
#' @slot description Column descriptions.
#' @slot imputeData Imputation \code{data.frame}.
#' @slot qualityData Quality values \code{data.frame}.
#'
#' @name matrixData-class
#' @rdname matrixData-class
#' @family matrixData basic functions
#'
#' @export
# S4 container for a Perseus matrix: numeric main data plus optional
# annotation, description, imputation and quality components.
# Validity is enforced through the shared MatrixDataCheck machinery.
setClass("matrixData",
         slots = c(main = "data.frame",
                   annotCols = "data.frame",
                   annotRows = "data.frame",
                   description = "character",
                   imputeData = "data.frame",
                   qualityData = "data.frame"),
         validity = function(object) {MatrixDataCheck.matrixData(object)})
#' matrixData constructor
#' @param ... \code{main}, \code{annotCols}, \code{annotRows}, \code{description}, \code{imputeData}, \code{qualityData}
#' @inherit matrixData-class
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#' annotCols=data.frame(c=c('a','b','c')),
#' annotRows=data.frame(x=factor(c('1','1'))),
#' description=c('aaa', 'bbb', 'ccc'),
#' imputeData=data.frame(impute=c('False', 'True', 'False')),
#' qualityData=data.frame(quality=c('0', '1', '0')))
#' @export
# Thin constructor wrapper: forwards all slot values unchanged to
# methods::new(), which triggers the class's initialize method and
# validity check.
matrixData <- function(...) {
  methods::new("matrixData", ...)
}
#' matrixData initializer
#' @param .Object Initialized object
#' @param ... Additional arguments
#' @description Initializes the annotCols data frame to have the
#' same number of rows as the main data. This might not be the
#' cleanest solution.
#' @importFrom methods callNextMethod
setMethod(initialize, "matrixData", function(.Object, ...) {
  args <- list(...)
  # If 'main' is supplied without 'annotCols', create a zero-column
  # annotation data frame with matching row count so the validity
  # check on row numbers passes.
  if ("main" %in% names(args) && !("annotCols" %in% names(args))) {
    main <- args[['main']]
    args[["annotCols"]] <- data.frame(matrix(nrow=nrow(main), ncol=0))
  }
  # Re-attach the object being initialized and continue with the
  # default initializer.
  args[['.Object']] <- .Object
  do.call(callNextMethod, args)
})
getNames <- function(x) {c(colnames(x@main), colnames(x@annotCols))}
#TODO: check if it would be better to have a list returned with one element
#having the col names and the other the row names
#' Get names
#'
#' Get the column names of main and annotation columns.
#'
#' @param x matrixData
#' @family matrixData basic functions
#' @export
#' @docType methods
#' @rdname matrixData-methods
setMethod("names", "matrixData", function(x) getNames(x))
#' Column names of main and annotation columns
#' @param x matrixData
#' @export
names.matrixData <- function(x) getNames(x)
#' Get main columns
#'
#' Extracts the main columns (main matrix) of a
#' \code{\link[PerseusR]{matrixData}} object as a \code{data.frame}.
#'
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' main(mdata)
#' @export
main <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@main).
  methods::slot(mdata, "main")
}
#' Set main columns
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' value<-data.frame(c=c(0,0,0), d=c(1,1,1))
#' main(mdata) <- value
#' @export
`main<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "main") <- value
  methods::validObject(mdata)
  mdata
}
#' Get annotation columns
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' annotCols(mdata)
#' @export
annotCols <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@annotCols).
  methods::slot(mdata, "annotCols")
}
#' Set annotation columns
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' value <- data.frame(d=c('d', 'e', 'f'))
#' annotCols(mdata) <- value
#' @export
`annotCols<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "annotCols") <- value
  methods::validObject(mdata)
  mdata
}
#' Get annotation rows
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' annotRows(mdata)
#' @export
annotRows <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@annotRows).
  methods::slot(mdata, "annotRows")
}
#' Set annotation rows
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' value <- data.frame(y=factor(c('2','2')))
#' annotRows(mdata) <- value
#' @export
`annotRows<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "annotRows") <- value
  methods::validObject(mdata)
  mdata
}
#' Get column description
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     description=c('aaa', 'bbb', 'ccc'))
#' description(mdata)
#' @export
description <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@description).
  methods::slot(mdata, "description")
}
#' Set column description
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' value <- c('aaa', 'bbb', 'ccc')
#' description(mdata) <- value
#' @export
`description<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "description") <- value
  methods::validObject(mdata)
  mdata
}
#' Get imputation of main data frame
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     imputeData=data.frame(impute=c('False', 'True', 'False')))
#' imputeData(mdata)
#' @export
imputeData <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@imputeData).
  methods::slot(mdata, "imputeData")
}
#' Set imputation of main data frame
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     imputeData=data.frame(impute=c('False', 'True', 'False')))
#' value <- data.frame(impute=c('True', 'True', 'True'))
#' imputeData(mdata) <- value
#' @export
`imputeData<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "imputeData") <- value
  methods::validObject(mdata)
  mdata
}
#' Get quality values of main data frame
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     qualityData=data.frame(quality=c('1', '1', '1')))
#' qualityData(mdata)
#' @export
qualityData <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@qualityData).
  methods::slot(mdata, "qualityData")
}
#' Set quality values of main data frame
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     qualityData=data.frame(quality=c('1', '1', '1')))
#' value <- data.frame(quality=c('0', '0', '0'))
#' qualityData(mdata) <- value
#' @export
`qualityData<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "qualityData") <- value
  methods::validObject(mdata)
  mdata
}
setMethod("Ops", signature(e1 = "matrixData", e2 = "matrixData"),
          function(e1, e2) {
            # Apply the group generic elementwise to both main tables;
            # annotations and other slots are carried over from the left operand.
            e1@main <- methods::callGeneric(e1@main, e2@main)
            methods::validObject(e1)
            e1
          }
)
setMethod("Ops", signature(e1 = "matrixData", e2 = "numeric"),
          function(e1, e2) {
            # Broadcast the numeric right operand over the main table.
            e1@main <- methods::callGeneric(e1@main, e2)
            methods::validObject(e1)
            e1
          }
)
setMethod("Ops", signature(e1 = "numeric", e2 = "matrixData"),
          function(e1, e2) {
            # Bug fix: the result belongs in e2, the matrixData operand.
            # The original assigned to e1@main, but e1 is a plain numeric
            # with no slots, so every `numeric op matrixData` call failed
            # at runtime.
            e2@main <- methods::callGeneric(e1, main(e2))
            methods::validObject(e2)
            return(e2)
          }
)
| /R/matrixData.R | permissive | ouglasarks/PerseusR | R | false | false | 15,380 | r | #' Check perseus compatibility of an object
#'
#' @title MatrixDataCheck: a function to check the validity of an object as a perseus data frame
#'
#' @param object object to check consistency with perseus data frames
#' @param ... additional arguments passed to the respective method
#' @param main Main Data frame
#' @param annotationRows Rows containing annotation information
#' @param annotationCols Collumns containing annotation information
#' @param descriptions Descriptions of all the columns
#' @param imputeData Is imputed or not
#' @param qualityData quality number
#' @param all_colnames The colnames to be used
#'
#'
#' @return a logical indicating the validity of the object
#' (or series of objects) as a perseus DF or the string of errors
#'
#' @rdname MatrixDataCheck
#'
#' @export
#'
#' @examples
#'
#' require(PerseusR)
#'
#' mat <- matrixData(
#' main=data.frame(a=1:3, b=6:8),
#' annotCols=data.frame(c=c('a','b','c')),
#' annotRows=data.frame(x=factor(c('1','1'))))
#'
#' MatrixDataCheck(mat)
#'
#'
MatrixDataCheck <- function(object, ...) {
UseMethod("MatrixDataCheck", object)
}
#' @rdname MatrixDataCheck
#' @method MatrixDataCheck default
#'
#' @export
MatrixDataCheck.default <- function(object = NULL, main,
                                    annotationRows, annotationCols,
                                    descriptions, imputeData, qualityData,
                                    all_colnames, ...) {
  # Validates the pieces of a Perseus-style matrix: numeric main columns,
  # typed annotation rows of matching width, annotation columns of matching
  # height, and descriptions matching the column count. All problems are
  # accumulated and reported at once; returns TRUE when everything passes.
  # NOTE(review): imputeData and qualityData are accepted but not validated
  # here - presumably intentional, but worth confirming.
  errors <- character()
  # All main columns must be numeric.
  numCols <- sapply(main, is.numeric)
  if (!all(numCols)) {
    # Bug fix: paste(..., sep=',') is a no-op on a single vector argument;
    # with several offending columns the message became a character vector.
    # collapse=',' joins the names into one string.
    msg <- paste('Main columns should be numeric: Columns',
                 paste(names(which(!numCols)), collapse = ','),
                 'are not numeric')
    errors <- c(errors, msg)
  }
  if (ncol(annotationRows) > 0) {
    # Annotation rows must be factor or numeric, and there must be one
    # entry per main column.
    catAnnotRows <- sapply(annotationRows, is.factor)
    numAnnotRows <- sapply(annotationRows, is.numeric)
    if (!all(catAnnotRows | numAnnotRows)) {
      msg <- paste('Annotation rows should be factors or numeric: Rows',
                   paste(names(which(!(catAnnotRows | numAnnotRows))), collapse = ','),
                   'are not factors')
      errors <- c(errors, msg)
    }
    nColMain <- ncol(main)
    nColAnnotRows <- nrow(annotationRows)
    if (nColMain != nColAnnotRows) {
      msg <- paste('Size of annotation rows not matching:',
                   nColMain, 'main columns, but',
                   nColAnnotRows, 'annotations')
      errors <- c(errors, msg)
    }
  }
  # Annotation columns (when present) must have one row per main row.
  nMain <- nrow(main)
  nAnnot <- nrow(annotationCols)
  if (nAnnot > 0 && nMain > 0 && nMain != nAnnot) {
    msg <- paste('Number of rows not matching:',
                 nMain, 'rows in main data, but',
                 nAnnot, 'rows in annotation columns.')
    errors <- c(errors, msg)
  }
  # When descriptions are supplied, there must be exactly one per column.
  nDescr <- length(descriptions)
  if (nDescr > 0 && nDescr != length(all_colnames)) {
    msg <- paste('Descriptions do not fit columns, found',
                 nDescr, 'expected', length(all_colnames))
    errors <- c(errors, msg)
  }
  if (length(errors) == 0) TRUE else stop(errors)
}
#' @return \code{NULL}
#'
#' @inheritParams MatrixDataCheck.default
#'
#' @rdname MatrixDataCheck
#' @method MatrixDataCheck matrixData
#'
#' @export
MatrixDataCheck.matrixData <- function(object, ...) {
  # Unpack the S4 slots and delegate the actual checking to the
  # default method (which returns TRUE or stops with the errors).
  mainDF <- object@main
  annotationCols <- object@annotCols
  MatrixDataCheck.default(
    main = mainDF,
    annotationRows = object@annotRows,
    annotationCols = annotationCols,
    descriptions = object@description,
    imputeData = object@imputeData,
    qualityData = object@qualityData,
    all_colnames = c(colnames(mainDF), colnames(annotationCols)))
}
#' @return \code{NULL}
#'
#' @inheritParams MatrixDataCheck.default
#'
#' @rdname MatrixDataCheck
#' @method MatrixDataCheck list
#'
#' @export
MatrixDataCheck.list <- function(object, ...) {
  stopifnot(is.list(object))
  stopifnot(sum(c('main', 'annotCols') %in% names(object)) > 0)
  # Fill in defaults for any missing component. The previous implementation
  # wrapped `object[[name]]` in tryCatch(), but indexing a list with a
  # missing name returns NULL instead of signalling an error, so the
  # defaults were never applied (and the `defaults` list only covered 4 of
  # the 6 slots, which would have indexed out of bounds had they been).
  # The NULL components then broke downstream checks such as ncol(NULL) > 0.
  if (is.null(object[['main']]))
    object[['main']] <- data.frame()
  if (is.null(object[['annotRows']]))
    object[['annotRows']] <- data.frame()
  if (is.null(object[['annotCols']]))
    object[['annotCols']] <- data.frame()
  if (is.null(object[['descriptions']]))
    object[['descriptions']] <- character(
      length = ncol(object[['main']]) + ncol(object[['annotCols']]))
  if (is.null(object[['imputeData']]))
    object[['imputeData']] <- data.frame()
  if (is.null(object[['qualityData']]))
    object[['qualityData']] <- data.frame()
  # Bug fix: the original read object$annotationCols, a name that never
  # exists in these lists (the component is 'annotCols'), so annotation
  # column names were silently dropped from the description-length check.
  all_colnames <- c(colnames(object[['main']]), colnames(object[['annotCols']]))
  ret <- MatrixDataCheck.default(main = object$main,
                                 annotationRows = object$annotRows,
                                 annotationCols = object$annotCols,
                                 descriptions = object$descriptions,
                                 imputeData = object$imputeData,
                                 qualityData = object$qualityData,
                                 all_colnames = all_colnames)
  if (is.logical(ret) && ret) {
    return(ret)
  } else {
    stop(ret)
  }
}
#' @return \code{NULL}
#'
#' @inheritParams MatrixDataCheck.default
#'
#' @rdname MatrixDataCheck
#' @method MatrixDataCheck ExpressionSet
#'
#' @export
MatrixDataCheck.ExpressionSet <- function(object, ...) {
  # Biobase is an optional (Bioconductor) dependency; fail with guidance.
  if (!requireNamespace("Biobase", quietly = TRUE)) {
    stop('This function requires the Biobase package, please install it in the bioconductor repository')
  }
  # Map the ExpressionSet pieces onto the Perseus layout:
  # exprs -> main, phenoData -> annotation rows, featureData -> annotation
  # columns, annotation -> descriptions.
  exprsDF <- data.frame(Biobase::exprs(object))
  featureDF <- methods::as(object@featureData, 'data.frame')
  ret <- MatrixDataCheck.default(
    main = exprsDF,
    annotationRows = methods::as(object@phenoData, 'data.frame'),
    annotationCols = featureDF,
    descriptions = Biobase::annotation(object),
    all_colnames = c(colnames(exprsDF), colnames(featureDF)))
  if (is.logical(ret) & ret) {
    return(ret)
  } else {
    stop(ret)
  }
}
#' MatrixData
#'
#' S4 container mirroring a Perseus matrix file: a numeric main table plus
#' row/column annotations, per-column descriptions, and per-cell imputation
#' and quality information.
#'
#' @slot main Main expression \code{data.frame}.
#' @slot annotCols Annotation Columns \code{data.frame}.
#' @slot annotRows Annotation Rows \code{data.frame}.
#' @slot description Column descriptions.
#' @slot imputeData Imputation \code{data.frame}.
#' @slot qualityData Quality values \code{data.frame}.
#'
#' @name matrixData-class
#' @rdname matrixData-class
#' @family matrixData basic functions
#'
#' @export
setClass(
  "matrixData",
  slots = c(
    main        = "data.frame",
    annotCols   = "data.frame",
    annotRows   = "data.frame",
    description = "character",
    imputeData  = "data.frame",
    qualityData = "data.frame"
  ),
  # Consistency between the slots is delegated to the shared checker.
  validity = function(object) MatrixDataCheck.matrixData(object)
)
#' matrixData constructor
#'
#' Builds a new \code{matrixData} object from the supplied slot values;
#' the class validity check runs as part of construction.
#'
#' @param ... \code{main}, \code{annotCols}, \code{annotRows}, \code{description}, \code{imputeData}, \code{qualityData}
#' @inherit matrixData-class
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     description=c('aaa', 'bbb', 'ccc'),
#'                     imputeData=data.frame(impute=c('False', 'True', 'False')),
#'                     qualityData=data.frame(quality=c('0', '1', '0')))
#' @export
matrixData <- function(...) {
  # Forward everything to the S4 generator for the class.
  do.call(methods::new, c(list("matrixData"), list(...)))
}
#' matrixData initializer
#' @param .Object Initialized object
#' @param ... Additional arguments
#' @description When only \code{main} is supplied, defaults \code{annotCols}
#' to a zero-column data frame with one row per main row so the validity
#' check passes. This might not be the cleanest solution.
#' @importFrom methods callNextMethod
setMethod(initialize, "matrixData", function(.Object, ...) {
  dots <- list(...)
  supplied <- names(dots)
  if ("main" %in% supplied && !("annotCols" %in% supplied)) {
    # Zero columns, but row count matched to the main table.
    dots[["annotCols"]] <- data.frame(matrix(nrow = nrow(dots[["main"]]), ncol = 0))
  }
  dots[[".Object"]] <- .Object
  do.call(callNextMethod, dots)
})
# All column names of a matrixData-like object: main table first, then
# annotation columns.
getNames <- function(x) {
  c(colnames(methods::slot(x, "main")), colnames(methods::slot(x, "annotCols")))
}
#TODO: check if it would be better to have a list returned with one element
#having the col names and the other the row names
#' Get names
#'
#' Get the column names of main and annotation columns.
#'
#' @param x matrixData
#' @family matrixData basic functions
#' @export
#' @docType methods
#' @rdname matrixData-methods
setMethod("names", "matrixData", function(x) getNames(x))
#' Column names of main and annotation columns
#' @param x matrixData
#' @export
names.matrixData <- function(x) getNames(x)
#' Get main columns
#'
#' Extracts the main columns (main matrix) of a
#' \code{\link[PerseusR]{matrixData}} object as a \code{data.frame}.
#'
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' main(mdata)
#' @export
main <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@main).
  methods::slot(mdata, "main")
}
#' Set main columns
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' value<-data.frame(c=c(0,0,0), d=c(1,1,1))
#' main(mdata) <- value
#' @export
`main<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "main") <- value
  methods::validObject(mdata)
  mdata
}
#' Get annotation columns
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' annotCols(mdata)
#' @export
annotCols <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@annotCols).
  methods::slot(mdata, "annotCols")
}
#' Set annotation columns
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' value <- data.frame(d=c('d', 'e', 'f'))
#' annotCols(mdata) <- value
#' @export
`annotCols<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "annotCols") <- value
  methods::validObject(mdata)
  mdata
}
#' Get annotation rows
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' annotRows(mdata)
#' @export
annotRows <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@annotRows).
  methods::slot(mdata, "annotRows")
}
#' Set annotation rows
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' value <- data.frame(y=factor(c('2','2')))
#' annotRows(mdata) <- value
#' @export
`annotRows<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "annotRows") <- value
  methods::validObject(mdata)
  mdata
}
#' Get column description
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     description=c('aaa', 'bbb', 'ccc'))
#' description(mdata)
#' @export
description <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@description).
  methods::slot(mdata, "description")
}
#' Set column description
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))))
#' value <- c('aaa', 'bbb', 'ccc')
#' description(mdata) <- value
#' @export
`description<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "description") <- value
  methods::validObject(mdata)
  mdata
}
#' Get imputation of main data frame
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     imputeData=data.frame(impute=c('False', 'True', 'False')))
#' imputeData(mdata)
#' @export
imputeData <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@imputeData).
  methods::slot(mdata, "imputeData")
}
#' Set imputation of main data frame
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     imputeData=data.frame(impute=c('False', 'True', 'False')))
#' value <- data.frame(impute=c('True', 'True', 'True'))
#' imputeData(mdata) <- value
#' @export
`imputeData<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "imputeData") <- value
  methods::validObject(mdata)
  mdata
}
#' Get quality values of main data frame
#' @param mdata matrixData
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     qualityData=data.frame(quality=c('1', '1', '1')))
#' qualityData(mdata)
#' @export
qualityData <- function(mdata) {
  # Slot access through the methods API (equivalent to mdata@qualityData).
  methods::slot(mdata, "qualityData")
}
#' Set quality values of main data frame
#' @param mdata matrixData
#' @param value value
#' @family matrixData basic functions
#' @examples
#' mdata <- matrixData(main=data.frame(a=1:3, b=6:8),
#'                     annotCols=data.frame(c=c('a','b','c')),
#'                     annotRows=data.frame(x=factor(c('1','1'))),
#'                     qualityData=data.frame(quality=c('1', '1', '1')))
#' value <- data.frame(quality=c('0', '0', '0'))
#' qualityData(mdata) <- value
#' @export
`qualityData<-` <- function(mdata, value) {
  # Replace the slot, then re-validate so an inconsistent object cannot escape.
  methods::slot(mdata, "qualityData") <- value
  methods::validObject(mdata)
  mdata
}
setMethod("Ops", signature(e1 = "matrixData", e2 = "matrixData"),
          function(e1, e2) {
            # Apply the group generic elementwise to both main tables;
            # annotations and other slots are carried over from the left operand.
            e1@main <- methods::callGeneric(e1@main, e2@main)
            methods::validObject(e1)
            e1
          }
)
setMethod("Ops", signature(e1 = "matrixData", e2 = "numeric"),
          function(e1, e2) {
            # Broadcast the numeric right operand over the main table.
            e1@main <- methods::callGeneric(e1@main, e2)
            methods::validObject(e1)
            e1
          }
)
setMethod("Ops", signature(e1 = "numeric", e2 = "matrixData"),
          function(e1, e2) {
            # Bug fix: the result belongs in e2, the matrixData operand.
            # The original assigned to e1@main, but e1 is a plain numeric
            # with no slots, so every `numeric op matrixData` call failed
            # at runtime.
            e2@main <- methods::callGeneric(e1, main(e2))
            methods::validObject(e2)
            return(e2)
          }
)
|
# Decompose a quoted expression into its top-level multiplicative factors.
# Parentheses are transparent; anything that is not a `*` call (names,
# constants, function calls, closures) is returned as a single factor.
factors <- function(e) {
    if (is.name(e) || typeof(e) == "closure") {
        return(list(e))
    }
    head <- deparse(e[[1]])
    if (head == "*") {
        c(factors(e[[2]]), factors(e[[3]]))
    } else if (head == "(") {
        factors(e[[2]])
    } else {
        list(e)
    }
}
# Compute the value of a single table cell from the product of a row term
# and a column term.
#
# rowterm, colterm: quoted label expressions; their top-level '*' factors
#   are classified as logical subsets, the data vector, the summary
#   function, or pseudo-functions (.Format, Justify, Percent, Arguments,
#   DropEmpty, Heading).
# env: environment in which the data expressions are evaluated.
# n:   expected common length of the data vectors (inferred when missing).
#
# Returns list(value) carrying attributes n, format, justification and the
# drop-if-empty bookkeeping (dropcell/droprow/dropcol) used by the caller.
term2table <- function(rowterm, colterm, env, n) {
    rowargs <- factors(rowterm)
    colargs <- factors(colterm)
    allargs <- c(rowargs, colargs)
    # Logical masks selecting the observations belonging to this row/column.
    rowsubset <- TRUE
    colsubset <- TRUE
    # DropEmpty() bookkeeping.
    dropcell <- FALSE
    droprow <- FALSE
    dropcol <- FALSE
    # Percent(Equal(...)/Unequal(...)) bookkeeping, resolved in a second pass.
    pctdenom <- NULL
    pctsubset <- TRUE
    values <- NULL
    summary <- NULL
    arguments <- NULL
    format <- NA
    justification <- NA
    # First pass: classify every factor of the combined row*column term.
    for (i in seq_along(allargs)) {
        e <- allargs[[i]]
        fn <- ""
        if (is.call(e) && (fn <- deparse(e[[1]])) == ".Format")
            format <- e[[2]]
        else if (fn == "Justify")
            justification <- as.character(e[[if (length(e) > 2) 3 else 2]])
        else if (fn == "Percent") {
            # Evaluate the Percent(...) call in a sandbox whose Percent()
            # captures the denominator choice and installs `summary`.
            env1 <- new.env(parent=env)
            percent <- function(x, y) 100*length(x)/length(y)
            # Equal()/Unequal() just return their own (unevaluated) call.
            env1$Equal <- env1$Unequal <- function(...) sys.call()
            env1$Percent <- function(denom="all", fn=percent) {
                if (is.null(summary)) {
                    if (identical(denom, "all")) summary <<- function(x) fn(x, values)
                    else if (identical(denom, "row")) summary <<- function(x) fn(x, values[rowsubset])
                    else if (identical(denom, "col")) summary <<- function(x) fn(x, values[colsubset])
                    else if (is.call(denom) && deparse(denom[[1]]) %in% c("Equal", "Unequal")) {
                        # Denominator subset depends on other factors; record
                        # the deparsed spec and resolve it in the second pass.
                        summary <<- local({
                            pctdenom <<- sapply(as.list(denom), deparse, width.cutoff = 500L)
                            pctsubset <<- pctdenom[1] == "Equal"
                            function(x) {
                                fn(x, values[pctsubset])
                            }})
                    } else if (is.logical(denom)) summary <<- function(x) fn(x, values[denom])
                    else summary <<- function(x) fn(x, denom)
                    # NOTE(review): `summaryname` has no prior binding in this
                    # function, so this `<<-` assigns outside term2table
                    # (typically the global env) - looks unintentional; verify.
                    summaryname <<- "Percent"
                } else
                    stop("Summary fn not allowed with 'Percent'")
            }
            eval(e, env1)
        } else if (fn == "Arguments") {
            # At most one Arguments(...) list per cell.
            if (is.null(arguments))
                arguments <- e
            else
                stop(gettextf("Duplicate Arguments list: %s and %s", deparse(arguments), deparse(e)))
        } else if (fn == "DropEmpty") {
            # Sandbox whose DropEmpty() records which dimensions to drop and
            # the replacement text for an empty cell.
            env1 <- new.env(parent = env)
            env1$DropEmpty <- function(empty = "", which = c("row", "col", "cell")) {
                good <- which %in% c("row", "col", "cell")
                if (!all(good))
                    stop(gettextf("bad 'which' value(s): %s in %s",
                                  paste0("'", which[!good], "'", collapse = ","), deparse(e)),
                         call. = FALSE)
                dropcell <<- "cell" %in% which
                droprow <<- "row" %in% which
                dropcol <<- "col" %in% which
                empty <<- empty
            }
            empty <- NULL
            eval(e, env1)
        } else if (fn != "Heading" && !identical(e, 1)) {
            # A data factor: evaluate it and classify by its value.
            arg <- eval(e, env)
            asis <- inherits(arg, "AsIs")
            if (asis || is.vector(arg) || inherits(arg, "labelledSubset")) {
                # All data vectors in the cell must agree on length n.
                if (missing(n)) n <- length(arg)
                else if (n != length(arg))
                    stop(gettextf("Argument '%s' is not length %d", deparse(e), n))
            }
            if (!asis && is.logical(arg)) {
                # Logical factor: a subset (NA treated as FALSE); it narrows
                # the row mask or the column mask depending on where it came from.
                arg <- arg & !is.na(arg)
                if (i <= length(rowargs))
                    rowsubset <- rowsubset & arg
                else
                    colsubset <- colsubset & arg
            } else if (asis || is.atomic(arg)) {
                # Atomic (or AsIs) factor: the data values to summarise.
                if (is.null(values)) {
                    values <- arg
                    valuename <- e
                } else
                    stop(gettextf("Duplicate values: %s and %s", deparse(valuename),
                                  deparse(e)))
            } else if (is.function(arg)) {
                # Function factor: the summary statistic.
                if (is.null(summary)) {
                    summary <- arg
                    summaryname <- e
                } else
                    stop(gettextf("Duplicate summary fn: %s and %s", deparse(summaryname),
                                  deparse(e)))
            } else
                stop(gettextf("Unrecognized entry '%s'", deparse(e)))
        }
    }
    if (!is.null(pctdenom)) { # We need a second pass to find the subsets
        # Rebuild the Percent denominator mask from the logical factors named
        # (Equal) or excluded (Unequal) in the recorded spec.
        for (i in seq_along(allargs)) {
            e <- allargs[[i]]
            fn <- ""
            if (is.call(e))
                fn <- deparse(e[[1]])
            if (!(fn %in% c(".Format", "Justify", "Percent", "Heading"))
                && !identical(e, 1)) {
                arg <- eval(e, env)
                asis <- inherits(arg, "AsIs")
                if (!asis && is.logical(arg)) {
                    if (inherits(arg, "labelledSubset")) {
                        argexpr <- attr(arg, "label")
                        arg <- arg & !is.na(arg)
                        if (pctdenom[1] == "Equal" && argexpr %in% pctdenom[-1])
                            pctsubset <- pctsubset & arg
                        else if (pctdenom[1] == "Unequal" && argexpr %in% pctdenom[-1])
                            pctsubset <- pctsubset | !arg
                    } else
                        pctsubset <- pctsubset & !is.na(arg) & arg
                }
            }
        }
    }
    # n could only be inferred from a data vector; without one we cannot size
    # the cell.
    if (missing(n))
        stop(gettextf("Length of %s ~ %s is indeterminate", deparse(rowterm),
                      deparse(colterm)))
    if (is.null(summary)) {
        if (!is.null(arguments))
            stop(gettextf("'%s specified without summary function", deparse(arguments)))
        summary <- length
    }
    if (is.null(values) && is.null(arguments)) values <- rep(NA, n)
    subset <- rowsubset & colsubset
    if (is.null(arguments))
        value <- summary(values[subset])
    else {
        # Arguments(...) supplied: turn it into a call to `summary`, with
        # each argument evaluated and subset to this cell, and the data
        # values (if any) spliced in as the first argument.
        arguments[[1]] <- summary
        for (i in seq_along(arguments)[-1]) {
            arg <- eval(arguments[[i]], env)
            if (length(arg) == n)
                arg <- arg[subset]
            arguments[[i]] <- arg
        }
        if (!is.null(values)) {
            # Shift the existing arguments right by one slot to make room.
            named <- !is.null(names(arguments))
            for (i in rev(seq_along(arguments)[-1])) {
                arguments[[i+1]] <- arguments[[i]]
                if (named) names(arguments)[i+1] <- names(arguments)[i]
            }
            arguments[[2]] <- values[subset]
            if (named) names(arguments)[2] <- ""
        }
        value <- eval(arguments, env)
    }
    # A cell holds exactly one value; truncate (with a warning) otherwise.
    if (length(value) != 1) {
        warning(gettextf("Summary statistic is length %d", length(value)), call. = FALSE)
        value <- value[1]
    }
    # NOTE: when DropEmpty was never seen, `empty` is unbound; this is safe
    # only because ifelse() never evaluates its 'yes' argument when the
    # (scalar) test is FALSE.
    structure(list(value), n=n, format=format,
              justification=justification,
              dropcell = ifelse(dropcell && !any(subset), empty, NA_character_),
              droprow = droprow && !any(rowsubset),
              dropcol = dropcol && !any(colsubset))
}
# This moves column names into their own column.
#
# For every selected column (default: every column with a non-empty name),
# a blank column is inserted before it and the column's name is moved into
# the first cell of that position; the attribute matrices (justification,
# dropcell) and the per-column name-justification vector are widened in
# step so they stay aligned with the label matrix.
#
# Note the lazy default for `do_move`: it refers to the local `names`,
# which is only assigned on the second line of the body, so the default is
# evaluated after that assignment when first used in the loop below.
moveColnames <- function(labels, do_move = (names != "")) {
    attrs <- attributes(labels)
    names <- colnames(labels)
    colnamejust <- attrs$colnamejust
    justification <- attrs$justification
    dropcell <- attrs$dropcell
    # Walk right-to-left so earlier insertions do not shift pending indices.
    for (i in rev(seq_along(do_move))) {
        if (do_move[i]) {
            before <- seq_len(i-1)
            after <- seq_len(ncol(labels) - i + 1) + i - 1
            # Insert a blank column at position i and drop the name into row 1.
            labels <- cbind(labels[,before,drop=FALSE], "", labels[,after,drop=FALSE])
            labels[1,i] <- names[i]
            names <- c(names[before], "", "", names[after][-1])
            if (length(colnamejust)) {
                colnamejust <- c(colnamejust[before], NA_character_, colnamejust[after])
            }
            # Keep the per-cell attribute matrices the same shape as labels.
            if (length(justification))
                justification <- cbind(justification[,before,drop=FALSE],
                                       NA_character_, justification[,after,drop=FALSE])
            if (length(dropcell))
                dropcell <- cbind(dropcell[,before,drop=FALSE],
                                  NA_character_, dropcell[,after,drop=FALSE])
        }
    }
    # Reinstall the (updated) original attributes wholesale, with the new
    # dimensions and column names.
    attrs$colnamejust <- colnamejust
    attrs$justification <- justification
    attrs$dropcell <- dropcell
    attrs$dim <- dim(labels)
    attrs$dimnames[[2]] <- names
    attributes(labels) <- attrs
    labels
}
# Build the label matrix for one side of a table formula.
#
# e:        quoted label expression ('*' nests labels, '+' stacks them,
#           plus the pseudo-functions Heading/Justify/.Format/Percent/
#           DropEmpty and plain leaf expressions).
# rows:     TRUE for row labels, FALSE for column labels.
# justify / head / suppress: justification, pending Heading() text and the
#           count of labels Heading() should suppress, threaded through the
#           recursion.
# env:      environment for evaluating Heading()/Justify() pseudo-calls.
#
# Returns a character matrix of labels carrying the attributes
# justification, colnamejust, justify, Heading, suppress and nearData.
#
# Bug fix in this revision: in the '+' branch the combined flag was
# assigned to a fresh lowercase variable `neardata` and never read, so the
# returned `nearData` attribute (initialized TRUE) could never become
# FALSE as the inline comment says it must for Hline; it is now assigned
# to `nearData`.
getLabels <- function(e, rows=TRUE, justify=NA, head=NULL, suppress=0,
                      env) {
    op <- ""
    justification <- NULL
    colnamejust <- character(0)
    Heading <- head
    # Degenerate label matrix for pseudo-calls that contribute no label.
    result <- if (rows) matrix(NA, ncol=0, nrow=1)
              else matrix(NA, ncol=1, nrow=0)
    nrl <- ncl <- leftjustify <- leftheading <- leftsuppress <-
        leftjustification <- leftcolnamejust <-
        nrr <- ncr <- rightjustify <- rightheading <- rightsuppress <-
        rightjustification <- rightcolnamejust <- NULL
    nearData <- leftnearData <- rightnearData <- TRUE
    # Cache the metadata attributes of the left/right sub-results.
    getLeft <- function() {
        nrl <<- nrow(leftLabels)
        ncl <<- ncol(leftLabels)
        leftjustify <<- attr(leftLabels, "justify")
        leftheading <<- attr(leftLabels, "Heading")
        leftsuppress <<- attr(leftLabels, "suppress")
        leftjustification <<- attr(leftLabels, "justification")
        leftcolnamejust <<- attr(leftLabels, "colnamejust")
        leftnearData <<- attr(leftLabels, "nearData")
    }
    getRight <- function() {
        nrr <<- nrow(rightLabels)
        ncr <<- ncol(rightLabels)
        rightjustify <<- attr(rightLabels, "justify")
        rightheading <<- attr(rightLabels, "Heading")
        rightsuppress <<- attr(rightLabels, "suppress")
        rightjustification <<- attr(rightLabels, "justification")
        rightcolnamejust <<- attr(rightLabels, "colnamejust")
        rightnearData <<- attr(rightLabels, "nearData")
    }
    if (is.call(e) && (op <- deparse(e[[1]])) == "*") {
        # '*' nests: every right label is repeated under each left label.
        leftLabels <- getLabels(e[[2]], rows, justify, head, suppress, env)
        getLeft()
        # Heading and justify are the settings to carry on to later terms
        # justification is the matrix of justifications for
        # each label
        righthead <- Heading <- leftheading
        suppress <- leftsuppress
        nearData <- leftnearData
        if (!is.null(leftjustify))
            justify <- leftjustify
        rightLabels <- getLabels(e[[3]], rows, justify, righthead, suppress, env)
        getRight()
        Heading <- rightheading
        suppress <- rightsuppress
        if (!is.null(rightjustify))
            justify <- rightjustify
        if (rows) {
            # nrl*nrr rows: left label printed once at the start of each group.
            result <- justification <- matrix(NA_character_, nrl*nrr, ncl + ncr)
            colnames(result) <- c(colnames(leftLabels), colnames(rightLabels))
            colnamejust <- c(leftcolnamejust, rightcolnamejust)
            for (i in seq_len(nrl)) {
                j <- 1 + (i-1)*nrr
                k <- seq_len(ncl)
                result[j, k] <- leftLabels[i,]
                if (!is.null(leftjustification))
                    justification[j, k] <- leftjustification[i,]
                j <- (i-1)*nrr + seq_len(nrr)
                k <- ncl+seq_len(ncr)
                result[j, k] <- rightLabels
                if (!is.null(rightjustification))
                    justification[j, k] <- rightjustification
            }
        } else {
            # Column version: ncl*ncr columns, left label above each group.
            result <- justification <- matrix(NA_character_, nrl + nrr, ncl*ncr)
            for (i in seq_len(ncl)) {
                j <- seq_len(nrl)
                k <- 1 + (i-1)*ncr
                result[j, k] <- leftLabels[,i]
                if (!is.null(leftjustification))
                    justification[j,k] <- leftjustification[,i]
                j <- nrl+seq_len(nrr)
                k <- (i-1)*ncr + seq_len(ncr)
                result[j, k] <- rightLabels
                if (!is.null(rightjustification))
                    justification[j,k] <- rightjustification
            }
        }
    } else if (op == "+") {
        # '+' stacks the two label blocks (rows) or places them side by
        # side (columns).
        leftLabels <- getLabels(e[[2]], rows, justify, NULL, suppress, env)
        getLeft()
        Heading <- leftheading
        rightLabels <- getLabels(e[[3]], rows, justify, NULL, suppress, env)
        getRight()
        Heading <- rightheading
        suppress <- rightsuppress
        # Fix: assign to nearData (the attribute actually returned below);
        # the original wrote to an unused lowercase `neardata`.
        nearData <- leftnearData & rightnearData # nearData=FALSE is needed for Hline
        if (rows) {
            # Here we have a problem: we need to stack two things, each of which
            # may have column names. We use the following rule:
            #  - if the column names for both things match, just use them.
            #  - if the left one has a name, and the right doesn't, use the left name
            #  - if both have names that don't match, add them as extra column(s)
            #  - if the right has a name, and the left doesn't, treat as unmatched names
            leftnames <- colnames(leftLabels)
            if (is.null(leftnames)) leftnames <- rep("", ncl)
            rightnames <- colnames(rightLabels)
            if (is.null(rightnames)) rightnames <- rep("", ncr)
            if (!identical(rightnames, rep("", ncr)) &&
                !identical(leftnames, rightnames)) {
                rightLabels <- moveColnames(rightLabels)
                # some properties may have changed; just get them again
                getRight()
                leftLabels <- moveColnames(leftLabels)
                getLeft()
                Heading <- rightheading
                rightnames <- rep("", ncr)
                leftnames <- rep("", ncl)
            }
            cols <- max(ncl, ncr)
            # Pad all to same width
            padblank <- rep("", abs(ncr - ncl))
            padNA <- rep(NA_character_, abs(ncr - ncl))
            if (ncl < ncr) {
                padblankmat <- matrix("", nrl, abs(ncr - ncl))
                padNAmat <- matrix(NA_character_, NROW(leftjustification), abs(ncr - ncl))
                if (leftnearData) {
                    # Pad on the far side so the labels stay next to the data.
                    leftnames <- c(padblank, leftnames)
                    if (length(leftcolnamejust))
                        leftcolnamejust <- c(padNA, leftcolnamejust)
                    leftLabels <- cbind(padblankmat, leftLabels)
                    if (!is.null(leftjustification))
                        leftjustification <- cbind(padNA, leftjustification)
                } else {
                    leftnames <- c(leftnames, padblank)
                    if (length(leftcolnamejust))
                        leftcolnamejust <- c(leftcolnamejust, padNA)
                    leftLabels <- cbind(leftLabels, padblankmat)
                    if (!is.null(leftjustification))
                        leftjustification <- cbind(leftjustification, padNAmat)
                }
                ncl <- ncr
            } else if (ncl > ncr) {
                padblankmat <- matrix("", nrr, abs(ncr - ncl))
                padNAmat <- matrix(NA_character_, NROW(rightjustification), abs(ncr - ncl))
                if (rightnearData) {
                    rightnames <- c(padblank, rightnames)
                    if (length(rightcolnamejust))
                        rightcolnamejust <- c(padNA, rightcolnamejust)
                    rightLabels <- cbind(padblankmat, rightLabels)
                    if (!is.null(rightjustification))
                        rightjustification <- cbind(padNAmat, rightjustification)
                } else {
                    rightnames <- c(rightnames, padblank)
                    if (length(rightcolnamejust))
                        rightcolnamejust <- c(rightcolnamejust, padNA)
                    rightLabels <- cbind(rightLabels, padblankmat)
                    if (!is.null(rightjustification))
                        rightjustification <- cbind(rightjustification, padNAmat)
                }
                ncr <- ncl
            }
            # Stack left on top of right.
            result <- matrix("", nrl + nrr, cols)
            justification <- matrix(NA_character_, nrl + nrr, cols)
            colnames <- rep("", cols)
            colnamejust <- rep(NA_character_, cols)
            j <- seq_len(nrl)
            result[j, ] <- leftLabels
            colnames <- leftnames
            if (length(leftcolnamejust))
                colnamejust <- leftcolnamejust
            if (length(leftjustification))
                justification[j, ] <- leftjustification
            j <- nrl+seq_len(nrr)
            result[j, ] <- rightLabels
            if (!is.null(rightjustification))
                justification[j, ] <- rightjustification
            if (!is.null(head)) {
                colnames[1] <- head
                head <- NULL
            }
            colnames(result) <- colnames
        } else {
            # Column version: place left beside right, bottom-aligned toward
            # the data when nearData is set.
            nrows <- max(nrl, nrr)
            result <- matrix("", nrows, ncl + ncr)
            justification <- matrix(NA_character_, nrows, ncl + ncr)
            j <- seq_len(nrl)
            if (leftnearData)
                j <- j + (nrows - nrl)
            k <- seq_len(ncl)
            result[j, k] <- leftLabels
            if (!is.null(leftjustification))
                justification[j, k] <- leftjustification
            j <- seq_len(nrr)
            if (rightnearData)
                j <- j + (nrows - nrr)
            k <- ncl+seq_len(ncr)
            result[j,k] <- rightLabels
            if (!is.null(rightjustification))
                justification[j, k] <- rightjustification
            if (!is.null(head)) {
                # A pending Heading() spans the whole stacked block: add a
                # header row on top.
                result <- rbind(rep(NA_character_, ncol(result)),
                                result)
                result[1,1] <- head
                justification <- rbind(justification[1,], justification)
            }
        }
    } else if (op == "(") {
        return(getLabels(e[[2]], rows, justify, head, suppress, env))
    } else if (op == ".Format") {
        # Formats produce no label of their own.
        result <- if (rows) matrix(NA, ncol=0, nrow=1)
                  else matrix(NA, ncol=1, nrow=0)
    } else if (op == "Heading") {
        # Evaluate Heading(...) in a sandbox that records the pending
        # heading text and/or the number of labels to suppress.
        env1 <- new.env(parent = env)
        env1$Heading <- function(name = NULL, override = TRUE, character.only = FALSE,
                                 nearData = TRUE) {
            if (missing(name))
                suppress <<- suppress + 1
            else {
                if (!character.only)
                    name <- as.character(substitute(name))
                if (!is.logical(override) || is.na(override))
                    stop(gettextf("'%s' argument in '%s' must be TRUE or FALSE",
                                  "override", deparse(e)), call. = FALSE)
                if (suppress <= 0 && (is.null(Heading) || override)) {
                    Heading <<- as.character(name)
                    suppress <<- 0
                } else
                    suppress <<- suppress - 1
                nearData <<- nearData
            }
        }
        eval(e, env1)
    } else if (op == "Justify") {
        justify <- as.character(e[[2]])
    } else if (op == "Arguments") {
        #suppress <- suppress + 1
    } else if (suppress > 0) { # The rest just add a single label; suppress it
        suppress <- suppress - 1
    } else if (!is.null(head)) {
        # A pending Heading() replaces this leaf's own label text.
        result <- matrix(head, 1,1, dimnames=list(NULL, ""))
        Heading <- NULL
    } else if (op == "Percent") {
        result <- matrix(gettext("Percent"), 1,1, dimnames=list(NULL, ""))
    } else if (op == "DropEmpty") { # do nothing
    } else if (identical(e, 1))
        result <- matrix(gettext("All"), 1,1, dimnames=list(NULL, ""))
    else
        result <- matrix(deparse(e), 1,1, dimnames=list(NULL, ""))
    if (is.null(justification))
        justification <- matrix(justify, nrow(result), ncol(result))
    stopifnot(identical(dim(result), dim(justification)))
    structure(result, justification = justification,
              colnamejust = colnamejust, justify = justify,
              Heading = Heading, suppress = suppress,
              nearData = nearData)
}
# Recursively walk a table expression, evaluating sub-calls in 'env' and
# splicing the result back in whenever evaluation yields another language
# object.  Structural operators are descended into; the table
# pseudo-functions (Format, Heading, ...) are passed through untouched.
expandExpressions <- function(e, env) {
    if (!is.call(e))
        return(e)
    structural <- c("*", "+", "~", "(", "=")
    pseudo <- c("Format", ".Format", "Heading", "Justify",
                "Percent", "Arguments", "DropEmpty")
    op <- deparse(e[[1]])
    if (op %in% structural) {
        e[[2]] <- expandExpressions(e[[2]], env)
        if (length(e) > 2)
            e[[3]] <- expandExpressions(e[[3]], env)
    } else if (!(op %in% pseudo)) {
        v <- eval(e, envir = env)
        if (is.language(v))
            e <- expandExpressions(v, env)
    }
    e
}
# Walk a table formula and replace every Format(...) call with a
# .Format(i) placeholder, accumulating the format specifications in a
# side list returned as the "fmtlist" attribute of the result.
#
# Two spellings are supported:
#   Format(spec)            -- a single unnamed argument: 'spec' is stored
#                              as-is (its head is evaluated first when it is
#                              itself a language object);
#   Format(digits = 2, ...) -- named or multiple arguments: the call is
#                              rewritten as a call to base format() and stored.
collectFormats <- function(table) {
    formats <- list()
    recurse <- function(e) {
        if (is.call(e)) {
            if ((op <- deparse(e[[1]])) %in% c("*", "+", "~", "(") ) {
                e[[2]] <- recurse(e[[2]])
                if (length(e) > 2)
                    e[[3]] <- recurse(e[[3]])
            } else if (op == "Format") {  # was 'op == c("Format")': scalar compare
                if (length(e) == 2 && is.null(names(e))) {
                    if (is.language(e[[c(2,1)]]))
                        e[[c(2,1)]] <- eval(e[[c(2,1)]], environment(table))
                    formats <<- c(formats, list(e[[2]]))
                } else {
                    e[[1]] <- format
                    formats <<- c(formats, list(e))
                }
                # Replace the Format(...) call with an index into fmtlist.
                e <- call(".Format", length(formats))
            }
        }
        e
    }
    result <- recurse(table)
    structure(result, fmtlist=formats)
}
# Validate the denominators of any Percent(Equal(...)) / Percent(Unequal(...))
# calls within a table expression: every label they name must be one of
# 'subsetLabels'.  Stops with an informative error on the first unknown
# label; the return value is not meaningful.
checkDenomExprs <- function(e, subsetLabels) {
    if (!is.call(e))
        return(invisible(NULL))
    op <- deparse(e[[1]])
    if (op %in% c("*", "+", "~", "(", "=")) {
        checkDenomExprs(e[[2]], subsetLabels)
        if (length(e) > 2)
            checkDenomExprs(e[[3]], subsetLabels)
    } else if (op == "Percent") {
        denom <- match.call(Percent, e)[["denom"]]
        if (is.call(denom) && deparse(denom[[1]]) %in% c("Equal", "Unequal")) {
            for (i in seq_along(denom)[-1]) {
                lbl <- deparse(denom[[i]])
                if (!(lbl %in% subsetLabels))
                    stop(gettextf("In %s\n'%s' is not a subset label. Legal labels are\n%s",
                                  deparse(denom), lbl,
                                  paste(subsetLabels, collapse=", ")), call. = FALSE)
            }
        }
    }
}
# Gather the labels of every labelSubset(...) call appearing in a table
# expression.  Returns NULL when the expression contains none.
collectSubsets <- function(e) {
    labels <- c()
    if (is.call(e)) {
        op <- deparse(e[[1]])
        if (op %in% c("*", "+", "~", "(", "=")) {
            labels <- c(labels, collectSubsets(e[[2]]))
            if (length(e) > 2)
                labels <- c(labels, collectSubsets(e[[3]]))
        } else if (op == "labelSubset")
            labels <- c(labels, match.call(labelSubset, e)[["label"]])
    }
    labels
}
# This both expands factors and rewrites "bindings":
#  * "(x)"   is unwrapped;
#  * "a = x" is rewritten as Heading(a)*x, reusing an existing Heading(...)
#    already at the head of the right-hand side when present;
#  * any leaf that evaluates to a factor (and is not protected by I())
#    is replaced by a Factor(...) call so its levels generate labels;
#  * the table pseudo-functions pass through untouched.
expandFactors <- function(e, env) {
    op <- ""
    if (is.call(e) && (op <- deparse(e[[1]])) %in% c("*", "+") )
        call(op, expandFactors(e[[2]],env),expandFactors(e[[3]],env))
    else if (op == "(")
        expandFactors(e[[2]],env)
    else if (op == "=") {
        rhs <- expandFactors(e[[3]], env)
        if (is.call(rhs) && deparse(rhs[[1]]) == "*"
            && is.call(rhs[[2]]) && deparse(rhs[[c(2,1)]]) == "Heading") {
            # Right-hand side already starts with Heading(...): just rename it.
            rhs[[c(2,2)]] <- as.name(deparse(e[[2]]))
            rhs
        } else
            call("*", call("Heading", as.name(deparse(e[[2]]))), rhs)
    } else if (op == ".Format" || op == "Heading" ||
               op == "Justify" || op == "Percent" ||
               op == "Arguments" || op == "DropEmpty")
        e
    else {
        v <- eval(e, envir=env)
        # Scalar test, so use && (was the elementwise &); I() marks a factor
        # that should be kept as-is.
        if (is.factor(v) && !inherits(v, "AsIs"))
            e <- Factor(v, expr=e, override=FALSE)
        e
    }
}
# A sum of products is a list whose elements are atoms or products of atoms.
# Flatten a +/* expression into that form, distributing "*" over "+":
# (a + b) * c  ->  list(a * c, b * c).  Nested formulas ("~") are rejected;
# any other call is kept as a single atom.
sumofprods <- function(e) {
    if (!is.language(e)) return(list(e))
    if (is.expression(e)) e <- e[[1]]
    if (is.name(e)) result <- list(e)
    else {
        chr <- deparse(e[[1]])
        if (chr == "+") result <- c(sumofprods(e[[2]]),sumofprods(e[[3]]))
        else if (chr == "*") {
            left <- sumofprods(e[[2]])
            right <- sumofprods(e[[3]])
            result <- list()
            # Distribute: every left term times every right term, in order.
            # seq_along() (was 1:length(...)) is safe for any length.
            for (i in seq_along(left))
                for (j in seq_along(right))
                    result <- c(result, list(call("*", left[[i]], right[[j]])))
        } else if (chr == "(") result <- sumofprods(e[[2]])
        else if (chr == "~") stop("Nested formulas not supported")
        else result <- list(e)
    }
    result
}
# Split a table formula into its (at most two) dimension expressions:
# 'rows ~ cols' gives list(rows, cols); a one-sided '~ cols' or a bare
# expression gives a one-element list; the constant 1 is a legal dimension.
# More than two dimensions is an error.
tabledims <- function(e) {
    if (identical(e,1)) return(list(1))
    if (!is.language(e)) stop('Need an expression')
    if (is.expression(e)) e <- e[[1]]
    if (is.name(e))
        dims <- list(e)
    else if (deparse(e[[1]]) == "~") {
        dims <- tabledims(e[[2]])
        if (length(e) > 2)
            dims <- c(dims, tabledims(e[[3]]))
    } else
        dims <- list(e)
    if (length(dims) > 2) stop("Only 2 dim tables supported")
    dims
}
# Generic entry point for building a table of summary statistics.
# Dispatches on the class of 'table'; tabular.formula() is the workhorse.
tabular <- function(table, ...)
    UseMethod("tabular")
# Default method: coerce 'table' to a formula, evaluated in the caller's
# frame so names in the specification resolve where tabular() was called,
# then delegate to the formula method.
tabular.default <- function(table, ...) {
    tabular.formula(as.formula(table, env=parent.frame()), ...)
}
# Formula method for tabular(): builds the table.
#
# table          a formula 'rows ~ cols' (or one-sided) describing the table
# data           data frame, list or environment in which terms are
#                evaluated; defaults to the formula's own environment
# n              number of observations; defaults to nrow(data) for a data
#                frame, otherwise inferred inside term2table()
# suppressLabels number of initial labels to suppress
# ...            ignored, with a warning
#
# Pipeline: expand sub-expressions and factors, pull Format() specs out of
# the formula, build row/column label matrices, then evaluate each
# (row term x column term) pair via term2table().  Rows/columns in which
# every cell both requested DropEmpty() and was empty are removed.
# Returns a matrix of one-element lists of class "tabular", carrying the
# labels, formats and justification as attributes.
tabular.formula <- function(table, data=NULL, n, suppressLabels=0, ...) {
    formula <- table
    if (length(list(...)))
        warning(gettextf("extra argument(s) %s will be disregarded",
                         paste(sQuote(names(list(...))), collapse = ", ")),
                domain = NA)
    if (missing(n) && inherits(data, "data.frame"))
        n <- nrow(data)
    if (is.null(data))
        data <- environment(table)
    else if (is.list(data))
        data <- list2env(data, parent=environment(table))
    else if (!is.environment(data))
        stop("'data' must be a dataframe, list or environment")
    table <- expandExpressions(table, data)
    table <- collectFormats(table)
    dims <- tabledims(table)
    # A one-sided specification gets an invisible all-ones row dimension.
    if (length(dims) == 1) dims <- c(list(quote((` `=1))), dims)
    dims[[1]] <- expandFactors(dims[[1]], data)
    rlabels <- getLabels(dims[[1]], rows=TRUE, suppress=suppressLabels, env = data)
    suppressLabels <- attr(rlabels, "suppress")
    justify <- attr(rlabels, "justify")
    dims[[2]] <- expandFactors(dims[[2]], data)
    clabels <- getLabels(dims[[2]], rows=FALSE, justify=justify,
                         suppress=suppressLabels, env = data)
    # Check if the Percent calls name nonexistent terms
    subsetLabels <- unique(c(collectSubsets(dims[[1]]), collectSubsets(dims[[2]])))
    checkDenomExprs(dims[[1]], subsetLabels)
    checkDenomExprs(dims[[2]], subsetLabels)
    rows <- sumofprods(dims[[1]])
    cols <- sumofprods(dims[[2]])
    result <- NULL
    formats <- NULL
    justifications <- NULL
    dropcells <- NULL
    # droprow/dropcol start TRUE and stay TRUE only if every cell in the
    # row/column asks to be dropped.
    droprow <- rep(TRUE, length(rows))
    dropcol <- rep(TRUE, length(cols))
    for (i in seq_along(rows)) {
        row <- NULL
        rowformats <- NULL
        rowjustification <- NULL
        rowdropcell <- NULL
        for (j in seq_along(cols)) {
            # term2table checks that n matches across calls
            term <- term2table(rows[[i]], cols[[j]], data, n)
            n <- attr(term, "n")
            format <- attr(term, "format")
            justification <- attr(term, "justification")
            dropcell <- attr(term, "dropcell")
            droprow[i] <- droprow[i] && attr(term, "droprow")
            dropcol[j] <- dropcol[j] && attr(term, "dropcol")
            row <- cbind(row, term)
            rowformats <- cbind(rowformats, format)
            rowjustification <- cbind(rowjustification, justification)
            rowdropcell <- cbind(rowdropcell, dropcell)
        }
        result <- rbind(result, row)
        formats <- rbind(formats, rowformats)
        justifications <- rbind(justifications, rowjustification)
        dropcells <- rbind(dropcells, rowdropcell)
    }
    if (any(c(droprow, dropcol))) {
        result <- result[!droprow, !dropcol, drop = FALSE]
        # drop = FALSE added for consistency with the other subsets here:
        # without it a single remaining row/column collapses 'formats' to a
        # vector, losing alignment with 'result' in format.tabular().
        formats <- formats[!droprow, !dropcol, drop = FALSE]
        justifications <- justifications[!droprow, !dropcol, drop = FALSE]
        dropcells <- dropcells[!droprow, !dropcol, drop = FALSE]
        # Temporarily switch class so the label-specific "[" methods run.
        save <- oldClass(rlabels)
        oldClass(rlabels) <- c("tabularRowLabels", save)
        rlabels <- rlabels[!droprow,, drop = FALSE]
        oldClass(rlabels) <- save
        save <- oldClass(clabels)
        oldClass(clabels) <- c("tabularColLabels", save)
        clabels <- clabels[, !dropcol, drop = FALSE]
        oldClass(clabels) <- save
    }
    structure(result, formula=formula, rowLabels=rlabels, colLabels=clabels, table=table,
              formats = formats, justification = justifications, dropcells = dropcells,
              class = "tabular")
}
# Justify strings within a field of a given width.
#
# x             character vector to justify
# justification "l", "c" or "r" per element; any other code (e.g. "n")
#               leaves that element untouched.  Recycled to length(x).
# width         field width(s); recycled, default the longest element of x
#
# Existing leading/trailing blanks are stripped before re-padding, so the
# function is idempotent for "l", "c" and "r".
justify <- function(x, justification="c", width=max(nchar(x))) {
    # length.out spelled in full (was the partial match 'len='), which
    # current R flags with a partial-argument-match warning.
    justification <- rep(justification, length.out=length(x))
    change <- justification %in% c("c", "l", "r")
    if (!any(change)) return(x)
    y <- x[change]
    justification <- justification[change]
    y <- sub("^ *", "", y)
    y <- sub(" *$", "", y)
    width <- rep(width, length.out=length(x))
    width <- width[change]
    lens <- nchar(y)
    ind <- justification == "c"
    if (any(ind)) {
        # Center: split the slack, with any odd space going to the right.
        left <- (width[ind] - lens[ind]) %/% 2
        right <- width[ind] - lens[ind] - left
        y[ind] <- sprintf("%*s%s%*s", left, "", y[ind], right, "")
    }
    ind <- justification == "l"
    if (any(ind)) {
        right <- width[ind] - lens[ind]
        y[ind] <- sprintf("%s%*s", y[ind], right, "")
    }
    ind <- justification == "r"
    if (any(ind)) {
        left <- width[ind] - lens[ind]
        y[ind] <- sprintf("%*s%s", left, "", y[ind])
    }
    x[change] <- y
    x
}
# Convert formatted numeric strings to LaTeX, using \phantom{} padding so
# the digits line up in the typeset output.
#   minus             reserve room for a minus sign (\phantom{-}) on
#                     non-negative entries when any entry is negative
#   leftpad/rightpad  turn leading/trailing blanks into \phantom{0...0}
#   mathmode          wrap each result in $...$
latexNumeric <- function(chars, minus=TRUE, leftpad=TRUE, rightpad=TRUE,
                         mathmode=TRUE) {
    pattern <- "^( *)([-]?)([^ -][^ ]*)( *)$"
    lead  <- sub(pattern, "\\1", chars)
    sgn   <- sub(pattern, "\\2", chars)
    body  <- sub(pattern, "\\3", chars)
    trail <- sub(pattern, "\\4", chars)
    negative <- sgn == "-"
    if (minus && any(negative)) {
        # Keep columns aligned: non-negative entries get a phantom minus.
        # If any of them had no lead blanks, first widen every entry by one
        # blank so the negative ones end up one blank wider on the left.
        if (any(lead[!negative] == ""))
            lead <- sub("^", " ", lead)
        lead[!negative] <- sub(" ", "", lead[!negative])
        sgn[!negative] <- "\\phantom{-}"
    }
    hide <- function(blanks)
        paste("\\phantom{", gsub(" ", "0", blanks), "}", sep="")
    padme <- leftpad & lead != ""
    if (any(padme))
        lead[padme] <- hide(lead[padme])
    padme <- rightpad & trail != ""
    if (any(padme))
        trail[padme] <- hide(trail[padme])
    if (mathmode)
        paste("$", lead, sgn, body, trail, "$", sep="")
    else
        paste(lead, sgn, body, trail, sep="")
}
# Format a "tabular" object as a character matrix.
#
# digits        significant digits for cells with no explicit Format()
# justification fill-in code for cells whose Justify() was not given
# latex/html    emit LaTeX ($...$ with \phantom{} padding) or HTML markup
#               for numeric cells; at most one may be TRUE
# leftpad/rightpad/minus/mathmode  passed to latexNumeric()/htmlNumeric()
# ...           passed on to format()
#
# Cells are formatted column by column: first the unformatted single-value
# numeric cells, then each recorded Format() spec is applied to the cells
# referencing it; DropEmpty() replacement text overrides everything; for
# plain-text output each column is finally justified.
format.tabular <- function(x, digits=4, justification="n",
                           latex=FALSE, html=FALSE,
                           leftpad = TRUE, rightpad = TRUE, minus = TRUE, mathmode = TRUE, ...) {
    if (latex && html) stop("Only one of 'latex' and 'html' may be requested")
    result <- unclass(x)
    formats <- attr(x, "formats")
    table <- attr(x, "table")
    fmtlist <- attr(table, "fmtlist")
    justify <- attr(x, "justification")
    justify[is.na(justify)] <- justification
    dropcells <- attr(x, "dropcells")
    ischar <- sapply(result, is.character)
    chars <- matrix(NA_character_, nrow(result), ncol(result))
    chars[ischar] <- unlist(result[ischar])
    lengths <- lapply(result, length)
    for (i in seq_len(ncol(result))) {
        # Unformatted, non-character, single-value, non-dropped cells of
        # column i are formatted together so they share a common layout.
        ind <- col(result) == i & is.na(formats) & !ischar &
            lengths == 1 & is.na(dropcells)
        if (any(ind)) {
            x <- do.call(c, result[ind])  # deliberately shadows the argument
            chars[ind] <- format(x, digits=digits, ...)
            if (is.numeric(x)) {
                if (latex)
                    chars[ind] <- latexNumeric(chars[ind], leftpad = leftpad, rightpad = rightpad, minus = minus, mathmode = mathmode)
                else if (html)
                    chars[ind] <- htmlNumeric(chars[ind], leftpad = leftpad, rightpad = rightpad, minus = minus)
            }
        }
    }
    for (i in seq_along(fmtlist)) {
        ind <- !is.na(formats) & formats == i & is.na(dropcells)
        if (any(ind)) {
            call <- fmtlist[[i]]
            # A spec whose head is base format() only applies to scalar,
            # non-character cells; custom format functions see everything.
            isformat <- identical(call[[1]], format)
            if (isformat) skip <- ischar | (lengths != 1)
            else skip <- ischar & FALSE   # all-FALSE mask of the right shape
            last <- length(call)
            x <- do.call(c, result[ind & !skip])
            call[[last+1]] <- x
            names(call)[last+1] <- "x"
            chars[ind & !skip] <- eval(call, environment(table))
            if (isformat) {
                if (latex) {
                    if (is.numeric(x))
                        chars[ind] <- latexNumeric(chars[ind], leftpad = leftpad, rightpad = rightpad, minus = minus, mathmode = mathmode)
                    else
                        chars[ind] <- texify(chars[ind])
                } else if (html) {
                    if (is.numeric(x))
                        chars[ind] <- htmlNumeric(chars[ind], leftpad = leftpad, rightpad = rightpad, minus = minus)
                    else
                        chars[ind] <- htmlify(chars[ind])
                }
            }
        }
    }
    chars[!is.na(dropcells)] <- dropcells[!is.na(dropcells)]
    if (!latex && !html) {
        for (i in seq_len(ncol(result)))
            chars[,i] <- justify(chars[,i], justify[,i])
    }
    # (A stray no-op 'chars[]' statement that preceded the return has been
    # removed; its value was computed and discarded.)
    chars
}
| /R/tabular.R | no_license | cran/tables | R | false | false | 30,150 | r | factors <- function(e) {
if (is.name(e) || typeof(e) == "closure") list(e)
else switch(deparse(e[[1]]),
"*" = c(factors(e[[2]]),factors(e[[3]])),
"(" = factors(e[[2]]),
list(e))
}
# Evaluate one (row term, column term) pair of a table specification and
# compute the corresponding cell.
#
# Each term is a product of factors (see factors()); every factor's role is
# determined by what it is or evaluates to:
#   * logical vector           -> subset, ANDed into the row or column subset
#   * atomic vector / I(...)   -> the values to summarize (at most one)
#   * function                 -> the summary function (at most one)
#   * .Format/Justify/Percent/Arguments/DropEmpty/Heading -> cell modifiers
# The summary defaults to length().  Returns a one-element list holding the
# cell value, with attributes n, format, justification, dropcell, droprow
# and dropcol.
term2table <- function(rowterm, colterm, env, n) {
    rowargs <- factors(rowterm)
    colargs <- factors(colterm)
    allargs <- c(rowargs, colargs)
    rowsubset <- TRUE
    colsubset <- TRUE
    dropcell <- FALSE
    droprow <- FALSE
    dropcol <- FALSE
    pctdenom <- NULL
    pctsubset <- TRUE
    values <- NULL
    summary <- NULL
    arguments <- NULL
    format <- NA
    justification <- NA
    # First pass: classify every factor and record its contribution.
    for (i in seq_along(allargs)) {
        e <- allargs[[i]]
        fn <- ""
        if (is.call(e) && (fn <- deparse(e[[1]])) == ".Format")
            format <- e[[2]]
        else if (fn == "Justify")
            # Justify(x) or Justify(hdr, x): the last argument is the code.
            justification <- as.character(e[[if (length(e) > 2) 3 else 2]])
        else if (fn == "Percent") {
            # Evaluate the Percent(...) call in a sandbox that installs a
            # percentage-computing closure as the summary function.
            env1 <- new.env(parent=env)
            percent <- function(x, y) 100*length(x)/length(y)
            # Equal()/Unequal() just capture their own call for later parsing.
            env1$Equal <- env1$Unequal <- function(...) sys.call()
            env1$Percent <- function(denom="all", fn=percent) {
                if (is.null(summary)) {
                    if (identical(denom, "all")) summary <<- function(x) fn(x, values)
                    else if (identical(denom, "row")) summary <<- function(x) fn(x, values[rowsubset])
                    else if (identical(denom, "col")) summary <<- function(x) fn(x, values[colsubset])
                    else if (is.call(denom) && deparse(denom[[1]]) %in% c("Equal", "Unequal")) {
                        # Record the named subsets; pctsubset is resolved in
                        # the second pass below.
                        summary <<- local({
                            pctdenom <<- sapply(as.list(denom), deparse, width.cutoff = 500L)
                            pctsubset <<- pctdenom[1] == "Equal"
                            function(x) {
                                fn(x, values[pctsubset])
                            }})
                    } else if (is.logical(denom)) summary <<- function(x) fn(x, values[denom])
                    else summary <<- function(x) fn(x, denom)
                    summaryname <<- "Percent"
                } else
                    stop("Summary fn not allowed with 'Percent'")
            }
            eval(e, env1)
        } else if (fn == "Arguments") {
            if (is.null(arguments))
                arguments <- e
            else
                stop(gettextf("Duplicate Arguments list: %s and %s", deparse(arguments), deparse(e)))
        } else if (fn == "DropEmpty") {
            # Evaluate DropEmpty(...) in a sandbox that records which of
            # row/col/cell should be dropped and the replacement text.
            env1 <- new.env(parent = env)
            env1$DropEmpty <- function(empty = "", which = c("row", "col", "cell")) {
                good <- which %in% c("row", "col", "cell")
                if (!all(good))
                    stop(gettextf("bad 'which' value(s): %s in %s",
                                  paste0("'", which[!good], "'", collapse = ","), deparse(e)),
                         call. = FALSE)
                dropcell <<- "cell" %in% which
                droprow <<- "row" %in% which
                dropcol <<- "col" %in% which
                empty <<- empty
            }
            empty <- NULL
            eval(e, env1)
        } else if (fn != "Heading" && !identical(e, 1)) {
            arg <- eval(e, env)
            asis <- inherits(arg, "AsIs")
            # Vector-like factors must all agree on the observation count n.
            if (asis || is.vector(arg) || inherits(arg, "labelledSubset")) {
                if (missing(n)) n <- length(arg)
                else if (n != length(arg))
                    stop(gettextf("Argument '%s' is not length %d", deparse(e), n))
            }
            if (!asis && is.logical(arg)) {
                arg <- arg & !is.na(arg)  # NA counts as excluded
                if (i <= length(rowargs))
                    rowsubset <- rowsubset & arg
                else
                    colsubset <- colsubset & arg
            } else if (asis || is.atomic(arg)) {
                if (is.null(values)) {
                    values <- arg
                    valuename <- e
                } else
                    stop(gettextf("Duplicate values: %s and %s", deparse(valuename),
                                  deparse(e)))
            } else if (is.function(arg)) {
                if (is.null(summary)) {
                    summary <- arg
                    summaryname <- e
                } else
                    stop(gettextf("Duplicate summary fn: %s and %s", deparse(summaryname),
                                  deparse(e)))
            } else
                stop(gettextf("Unrecognized entry '%s'", deparse(e)))
        }
    }
    if (!is.null(pctdenom)) { # We need a second pass to find the subsets
        # Resolve Percent(Equal(...)/Unequal(...)): combine the labelled
        # subsets named in pctdenom into the denominator subset pctsubset.
        for (i in seq_along(allargs)) {
            e <- allargs[[i]]
            fn <- ""
            if (is.call(e))
                fn <- deparse(e[[1]])
            if (!(fn %in% c(".Format", "Justify", "Percent", "Heading"))
                && !identical(e, 1)) {
                arg <- eval(e, env)
                asis <- inherits(arg, "AsIs")
                if (!asis && is.logical(arg)) {
                    if (inherits(arg, "labelledSubset")) {
                        argexpr <- attr(arg, "label")
                        arg <- arg & !is.na(arg)
                        if (pctdenom[1] == "Equal" && argexpr %in% pctdenom[-1])
                            pctsubset <- pctsubset & arg
                        else if (pctdenom[1] == "Unequal" && argexpr %in% pctdenom[-1])
                            pctsubset <- pctsubset | !arg
                    } else
                        pctsubset <- pctsubset & !is.na(arg) & arg
                }
            }
        }
    }
    if (missing(n))
        stop(gettextf("Length of %s ~ %s is indeterminate", deparse(rowterm),
                      deparse(colterm)))
    if (is.null(summary)) {
        if (!is.null(arguments))
            stop(gettextf("'%s specified without summary function", deparse(arguments)))
        summary <- length
    }
    if (is.null(values) && is.null(arguments)) values <- rep(NA, n)
    subset <- rowsubset & colsubset
    if (is.null(arguments))
        value <- summary(values[subset])
    else {
        # Arguments(...): build a call to the summary function with the
        # given extra arguments, each evaluated and subset when it has
        # length n.
        arguments[[1]] <- summary
        for (i in seq_along(arguments)[-1]) {
            arg <- eval(arguments[[i]], env)
            if (length(arg) == n)
                arg <- arg[subset]
            arguments[[i]] <- arg
        }
        if (!is.null(values)) {
            # Shift the extra arguments right and insert the (subsetted)
            # values as the first, unnamed, argument.
            named <- !is.null(names(arguments))
            for (i in rev(seq_along(arguments)[-1])) {
                arguments[[i+1]] <- arguments[[i]]
                if (named) names(arguments)[i+1] <- names(arguments)[i]
            }
            arguments[[2]] <- values[subset]
            if (named) names(arguments)[2] <- ""
        }
        value <- eval(arguments, env)
    }
    if (length(value) != 1) {
        warning(gettextf("Summary statistic is length %d", length(value)), call. = FALSE)
        value <- value[1]
    }
    structure(list(value), n=n, format=format,
              justification=justification,
              dropcell = ifelse(dropcell && !any(subset), empty, NA_character_),
              droprow = droprow && !any(rowsubset),
              dropcol = dropcol && !any(colsubset))
}
# This moves column names into their own column
#
# labels  a row-label matrix (with the tables label attributes:
#         colnamejust, justification, dropcell)
# do_move which columns get their name spliced in; the default is every
#         column with a non-empty name.  NOTE: the default expression uses
#         'names', which is assigned inside the body -- it is only
#         evaluated (lazily) after that assignment.
#
# For each selected column (processed right-to-left so earlier indices stay
# valid) a new blank column is inserted before it carrying the old column
# name as its first entry; the parallel attribute matrices are widened to
# match, and all attributes are restored at the end.
moveColnames <- function(labels, do_move = (names != "")) {
    attrs <- attributes(labels)
    names <- colnames(labels)
    colnamejust <- attrs$colnamejust
    justification <- attrs$justification
    dropcell <- attrs$dropcell
    for (i in rev(seq_along(do_move))) {
        if (do_move[i]) {
            before <- seq_len(i-1)
            after <- seq_len(ncol(labels) - i + 1) + i - 1
            labels <- cbind(labels[,before,drop=FALSE], "", labels[,after,drop=FALSE])
            labels[1,i] <- names[i]
            names <- c(names[before], "", "", names[after][-1])
            if (length(colnamejust)) {
                colnamejust <- c(colnamejust[before], NA_character_, colnamejust[after])
            }
            if (length(justification))
                justification <- cbind(justification[,before,drop=FALSE],
                                       NA_character_, justification[,after,drop=FALSE])
            if (length(dropcell))
                dropcell <- cbind(dropcell[,before,drop=FALSE],
                                  NA_character_, dropcell[,after,drop=FALSE])
        }
    }
    # Reinstate the (updated) attributes, including the new dimensions and
    # column names, in one shot.
    attrs$colnamejust <- colnamejust
    attrs$justification <- justification
    attrs$dropcell <- dropcell
    attrs$dim <- dim(labels)
    attrs$dimnames[[2]] <- names
    attributes(labels) <- attrs
    labels
}
# Build the label matrix for one dimension of a table.
#
# e        the (expanded) dimension expression
# rows     TRUE for row labels (nested terms widen the matrix, "+" stacks
#          vertically), FALSE for column labels (the transposed layout)
# justify  current Justify() setting, carried left to right across terms
# head     a pending Heading() to attach to the next label
# suppress count of labels still to be suppressed (from Heading() calls
#          with no arguments)
# env      environment in which Heading() calls are evaluated
#
# Returns a character matrix with attributes: "justification" (same shape),
# "colnamejust", "justify", "Heading" (any still-pending heading),
# "suppress" and "nearData"; the "*" and "+" branches combine these
# recursively from their sub-terms.
getLabels <- function(e, rows=TRUE, justify=NA, head=NULL, suppress=0,
                      env) {
    op <- ""
    justification <- NULL
    colnamejust <- character(0)
    Heading <- head
    result <- if (rows) matrix(NA, ncol=0, nrow=1)
              else matrix(NA, ncol=1, nrow=0)
    nrl <- ncl <- leftjustify <- leftheading <- leftsuppress <-
        leftjustification <- leftcolnamejust <-
        nrr <- ncr <- rightjustify <- rightheading <- rightsuppress <-
        rightjustification <- rightcolnamejust <- NULL
    nearData <- leftnearData <- rightnearData <- TRUE
    # Helpers copying the dimensions and attributes of leftLabels /
    # rightLabels (set by the "*" and "+" branches) into the locals above.
    getLeft <- function() {
        nrl <<- nrow(leftLabels)
        ncl <<- ncol(leftLabels)
        leftjustify <<- attr(leftLabels, "justify")
        leftheading <<- attr(leftLabels, "Heading")
        leftsuppress <<- attr(leftLabels, "suppress")
        leftjustification <<- attr(leftLabels, "justification")
        leftcolnamejust <<- attr(leftLabels, "colnamejust")
        leftnearData <<- attr(leftLabels, "nearData")
    }
    getRight <- function() {
        nrr <<- nrow(rightLabels)
        ncr <<- ncol(rightLabels)
        rightjustify <<- attr(rightLabels, "justify")
        rightheading <<- attr(rightLabels, "Heading")
        rightsuppress <<- attr(rightLabels, "suppress")
        rightjustification <<- attr(rightLabels, "justification")
        rightcolnamejust <<- attr(rightLabels, "colnamejust")
        rightnearData <<- attr(rightLabels, "nearData")
    }
    if (is.call(e) && (op <- deparse(e[[1]])) == "*") {
        # "*": nesting.  Each left label spans the full block of right
        # labels beneath (rows) or beside (cols) it.
        leftLabels <- getLabels(e[[2]], rows, justify, head, suppress, env)
        getLeft()
        # Heading and justify are the settings to carry on to later terms
        # justification is the matrix of justifications for
        # each label
        righthead <- Heading <- leftheading
        suppress <- leftsuppress
        nearData <- leftnearData
        if (!is.null(leftjustify))
            justify <- leftjustify
        rightLabels <- getLabels(e[[3]], rows, justify, righthead, suppress, env)
        getRight()
        Heading <- rightheading
        suppress <- rightsuppress
        if (!is.null(rightjustify))
            justify <- rightjustify
        if (rows) {
            result <- justification <- matrix(NA_character_, nrl*nrr, ncl + ncr)
            colnames(result) <- c(colnames(leftLabels), colnames(rightLabels))
            colnamejust <- c(leftcolnamejust, rightcolnamejust)
            for (i in seq_len(nrl)) {
                # Left label only on the first row of its block of nrr rows.
                j <- 1 + (i-1)*nrr
                k <- seq_len(ncl)
                result[j, k] <- leftLabels[i,]
                if (!is.null(leftjustification))
                    justification[j, k] <- leftjustification[i,]
                j <- (i-1)*nrr + seq_len(nrr)
                k <- ncl+seq_len(ncr)
                result[j, k] <- rightLabels
                if (!is.null(rightjustification))
                    justification[j, k] <- rightjustification
            }
        } else {
            result <- justification <- matrix(NA_character_, nrl + nrr, ncl*ncr)
            for (i in seq_len(ncl)) {
                # Left label only on the first column of its block of ncr.
                j <- seq_len(nrl)
                k <- 1 + (i-1)*ncr
                result[j, k] <- leftLabels[,i]
                if (!is.null(leftjustification))
                    justification[j,k] <- leftjustification[,i]
                j <- nrl+seq_len(nrr)
                k <- (i-1)*ncr + seq_len(ncr)
                result[j, k] <- rightLabels
                if (!is.null(rightjustification))
                    justification[j,k] <- rightjustification
            }
        }
    } else if (op == "+") {
        # "+": concatenation.  Row labels are stacked vertically; column
        # labels are placed side by side.
        leftLabels <- getLabels(e[[2]], rows, justify, NULL, suppress, env)
        getLeft()
        Heading <- leftheading
        rightLabels <- getLabels(e[[3]], rows, justify, NULL, suppress, env)
        getRight()
        Heading <- rightheading
        suppress <- rightsuppress
        # NOTE(review): this assigns 'neardata' (lower case) while every
        # other use is 'nearData' -- so nearData is never updated here;
        # confirm whether that is intended.
        neardata <- leftnearData & rightnearData # neardata=FALSE is needed for Hline
        if (rows) {
            # Here we have a problem: we need to stack two things, each of which
            # may have column names. We use the following rule:
            # - if the column names for both things match, just use them.
            # - if the left one has a name, and the right doesn't, use the left name
            # - if both have names that don't match, add them as extra column(s)
            # - if the right has a name, and the left doesn't, treat as unmatched names
            leftnames <- colnames(leftLabels)
            if (is.null(leftnames)) leftnames <- rep("", ncl)
            rightnames <- colnames(rightLabels)
            if (is.null(rightnames)) rightnames <- rep("", ncr)
            if (!identical(rightnames, rep("", ncr)) &&
                !identical(leftnames, rightnames)) {
                rightLabels <- moveColnames(rightLabels)
                # some properties may have changed; just get them again
                getRight()
                leftLabels <- moveColnames(leftLabels)
                getLeft()
                Heading <- rightheading
                rightnames <- rep("", ncr)
                leftnames <- rep("", ncl)
            }
            cols <- max(ncl, ncr)
            # Pad all to same width
            padblank <- rep("", abs(ncr - ncl))
            padNA <- rep(NA_character_, abs(ncr - ncl))
            if (ncl < ncr) {
                # Left side is narrower: pad it.  nearData controls whether
                # the padding goes before (TRUE) or after the labels.
                padblankmat <- matrix("", nrl, abs(ncr - ncl))
                padNAmat <- matrix(NA_character_, NROW(leftjustification), abs(ncr - ncl))
                if (leftnearData) {
                    leftnames <- c(padblank, leftnames)
                    if (length(leftcolnamejust))
                        leftcolnamejust <- c(padNA, leftcolnamejust)
                    leftLabels <- cbind(padblankmat, leftLabels)
                    if (!is.null(leftjustification))
                        leftjustification <- cbind(padNA, leftjustification)
                } else {
                    leftnames <- c(leftnames, padblank)
                    if (length(leftcolnamejust))
                        leftcolnamejust <- c(leftcolnamejust, padNA)
                    leftLabels <- cbind(leftLabels, padblankmat)
                    if (!is.null(leftjustification))
                        leftjustification <- cbind(leftjustification, padNAmat)
                }
                ncl <- ncr
            } else if (ncl > ncr) {
                # Right side is narrower: same padding logic, mirrored.
                padblankmat <- matrix("", nrr, abs(ncr - ncl))
                padNAmat <- matrix(NA_character_, NROW(rightjustification), abs(ncr - ncl))
                if (rightnearData) {
                    rightnames <- c(padblank, rightnames)
                    if (length(rightcolnamejust))
                        rightcolnamejust <- c(padNA, rightcolnamejust)
                    rightLabels <- cbind(padblankmat, rightLabels)
                    if (!is.null(rightjustification))
                        rightjustification <- cbind(padNAmat, rightjustification)
                } else {
                    rightnames <- c(rightnames, padblank)
                    if (length(rightcolnamejust))
                        rightcolnamejust <- c(rightcolnamejust, padNA)
                    rightLabels <- cbind(rightLabels, padblankmat)
                    if (!is.null(rightjustification))
                        rightjustification <- cbind(rightjustification, padNAmat)
                }
                ncr <- ncl
            }
            result <- matrix("", nrl + nrr, cols)
            justification <- matrix(NA_character_, nrl + nrr, cols)
            colnames <- rep("", cols)
            colnamejust <- rep(NA_character_, cols)
            j <- seq_len(nrl)
            result[j, ] <- leftLabels
            colnames <- leftnames
            if (length(leftcolnamejust))
                colnamejust <- leftcolnamejust
            if (length(leftjustification))
                justification[j, ] <- leftjustification
            j <- nrl+seq_len(nrr)
            result[j, ] <- rightLabels
            if (!is.null(rightjustification))
                justification[j, ] <- rightjustification
            if (!is.null(head)) {
                # Pending heading becomes the name of the first column.
                colnames[1] <- head
                head <- NULL
            }
            colnames(result) <- colnames
        } else {
            # Column labels: place the two pieces side by side, aligning to
            # the bottom (nearData) or the top.
            nrows <- max(nrl, nrr)
            result <- matrix("", nrows, ncl + ncr)
            justification <- matrix(NA_character_, nrows, ncl + ncr)
            j <- seq_len(nrl)
            if (leftnearData)
                j <- j + (nrows - nrl)
            k <- seq_len(ncl)
            result[j, k] <- leftLabels
            if (!is.null(leftjustification))
                justification[j, k] <- leftjustification
            j <- seq_len(nrr)
            if (rightnearData)
                j <- j + (nrows - nrr)
            k <- ncl+seq_len(ncr)
            result[j,k] <- rightLabels
            if (!is.null(rightjustification))
                justification[j, k] <- rightjustification
            if (!is.null(head)) {
                # Pending heading gets its own row above both pieces.
                result <- rbind(rep(NA_character_, ncol(result)),
                                result)
                result[1,1] <- head
                justification <- rbind(justification[1,], justification)
            }
        }
    } else if (op == "(") {
        return(getLabels(e[[2]], rows, justify, head, suppress, env))
    } else if (op == ".Format") {
        # Format placeholders contribute no label.
        result <- if (rows) matrix(NA, ncol=0, nrow=1)
                  else matrix(NA, ncol=1, nrow=0)
    } else if (op == "Heading") {
        # Evaluate the Heading(...) call in a sandbox that records the
        # pending heading (or bumps the suppression count when called with
        # no arguments).
        env1 <- new.env(parent = env)
        env1$Heading <- function(name = NULL, override = TRUE, character.only = FALSE,
                                 nearData = TRUE) {
            if (missing(name))
                suppress <<- suppress + 1
            else {
                if (!character.only)
                    name <- as.character(substitute(name))
                if (!is.logical(override) || is.na(override))
                    stop(gettextf("'%s' argument in '%s' must be TRUE or FALSE",
                                  "override", deparse(e)), call. = FALSE)
                if (suppress <= 0 && (is.null(Heading) || override)) {
                    Heading <<- as.character(name)
                    suppress <<- 0
                } else
                    suppress <<- suppress - 1
                nearData <<- nearData
            }
        }
        eval(e, env1)
    } else if (op == "Justify") {
        justify <- as.character(e[[2]])
    } else if (op == "Arguments") {
        #suppress <- suppress + 1
    } else if (suppress > 0) { # The rest just add a single label; suppress it
        suppress <- suppress - 1
    } else if (!is.null(head)) {
        # A pending Heading replaces the default label for this leaf.
        result <- matrix(head, 1,1, dimnames=list(NULL, ""))
        Heading <- NULL
    } else if (op == "Percent") {
        result <- matrix(gettext("Percent"), 1,1, dimnames=list(NULL, ""))
    } else if (op == "DropEmpty") { # do nothing
    } else if (identical(e, 1))
        result <- matrix(gettext("All"), 1,1, dimnames=list(NULL, ""))
    else
        result <- matrix(deparse(e), 1,1, dimnames=list(NULL, ""))
    if (is.null(justification))
        justification <- matrix(justify, nrow(result), ncol(result))
    stopifnot(identical(dim(result), dim(justification)))
    structure(result, justification = justification,
              colnamejust = colnamejust, justify = justify,
              Heading = Heading, suppress = suppress,
              nearData = nearData)
}
# Recursively walk a table expression, evaluating sub-calls in 'env' and
# splicing the result back in whenever evaluation yields another language
# object.  Structural operators are descended into; the table
# pseudo-functions (Format, Heading, ...) are passed through untouched.
expandExpressions <- function(e, env) {
    if (!is.call(e))
        return(e)
    structural <- c("*", "+", "~", "(", "=")
    pseudo <- c("Format", ".Format", "Heading", "Justify",
                "Percent", "Arguments", "DropEmpty")
    op <- deparse(e[[1]])
    if (op %in% structural) {
        e[[2]] <- expandExpressions(e[[2]], env)
        if (length(e) > 2)
            e[[3]] <- expandExpressions(e[[3]], env)
    } else if (!(op %in% pseudo)) {
        v <- eval(e, envir = env)
        if (is.language(v))
            e <- expandExpressions(v, env)
    }
    e
}
# Walk a table formula and replace every Format(...) call with a
# .Format(i) placeholder, accumulating the format specifications in a
# side list returned as the "fmtlist" attribute of the result.
#
# Two spellings are supported:
#   Format(spec)            -- a single unnamed argument: 'spec' is stored
#                              as-is (its head is evaluated first when it is
#                              itself a language object);
#   Format(digits = 2, ...) -- named or multiple arguments: the call is
#                              rewritten as a call to base format() and stored.
collectFormats <- function(table) {
    formats <- list()
    recurse <- function(e) {
        if (is.call(e)) {
            if ((op <- deparse(e[[1]])) %in% c("*", "+", "~", "(") ) {
                e[[2]] <- recurse(e[[2]])
                if (length(e) > 2)
                    e[[3]] <- recurse(e[[3]])
            } else if (op == "Format") {  # was 'op == c("Format")': scalar compare
                if (length(e) == 2 && is.null(names(e))) {
                    if (is.language(e[[c(2,1)]]))
                        e[[c(2,1)]] <- eval(e[[c(2,1)]], environment(table))
                    formats <<- c(formats, list(e[[2]]))
                } else {
                    e[[1]] <- format
                    formats <<- c(formats, list(e))
                }
                # Replace the Format(...) call with an index into fmtlist.
                e <- call(".Format", length(formats))
            }
        }
        e
    }
    result <- recurse(table)
    structure(result, fmtlist=formats)
}
# Validate the denominators of any Percent(Equal(...)) / Percent(Unequal(...))
# calls within a table expression: every label they name must be one of
# 'subsetLabels'.  Stops with an informative error on the first unknown
# label; the return value is not meaningful.
checkDenomExprs <- function(e, subsetLabels) {
    if (!is.call(e))
        return(invisible(NULL))
    op <- deparse(e[[1]])
    if (op %in% c("*", "+", "~", "(", "=")) {
        checkDenomExprs(e[[2]], subsetLabels)
        if (length(e) > 2)
            checkDenomExprs(e[[3]], subsetLabels)
    } else if (op == "Percent") {
        denom <- match.call(Percent, e)[["denom"]]
        if (is.call(denom) && deparse(denom[[1]]) %in% c("Equal", "Unequal")) {
            for (i in seq_along(denom)[-1]) {
                lbl <- deparse(denom[[i]])
                if (!(lbl %in% subsetLabels))
                    stop(gettextf("In %s\n'%s' is not a subset label. Legal labels are\n%s",
                                  deparse(denom), lbl,
                                  paste(subsetLabels, collapse=", ")), call. = FALSE)
            }
        }
    }
}
# Gather the labels of every labelSubset(...) call appearing in a table
# expression.  Returns NULL when the expression contains none.
collectSubsets <- function(e) {
    labels <- c()
    if (is.call(e)) {
        op <- deparse(e[[1]])
        if (op %in% c("*", "+", "~", "(", "=")) {
            labels <- c(labels, collectSubsets(e[[2]]))
            if (length(e) > 2)
                labels <- c(labels, collectSubsets(e[[3]]))
        } else if (op == "labelSubset")
            labels <- c(labels, match.call(labelSubset, e)[["label"]])
    }
    labels
}
# This both expands factors and rewrites "bindings":
#  * "(x)"   is unwrapped;
#  * "a = x" is rewritten as Heading(a)*x, reusing an existing Heading(...)
#    already at the head of the right-hand side when present;
#  * any leaf that evaluates to a factor (and is not protected by I())
#    is replaced by a Factor(...) call so its levels generate labels;
#  * the table pseudo-functions pass through untouched.
expandFactors <- function(e, env) {
    op <- ""
    if (is.call(e) && (op <- deparse(e[[1]])) %in% c("*", "+") )
        call(op, expandFactors(e[[2]],env),expandFactors(e[[3]],env))
    else if (op == "(")
        expandFactors(e[[2]],env)
    else if (op == "=") {
        rhs <- expandFactors(e[[3]], env)
        if (is.call(rhs) && deparse(rhs[[1]]) == "*"
            && is.call(rhs[[2]]) && deparse(rhs[[c(2,1)]]) == "Heading") {
            # Right-hand side already starts with Heading(...): just rename it.
            rhs[[c(2,2)]] <- as.name(deparse(e[[2]]))
            rhs
        } else
            call("*", call("Heading", as.name(deparse(e[[2]]))), rhs)
    } else if (op == ".Format" || op == "Heading" ||
               op == "Justify" || op == "Percent" ||
               op == "Arguments" || op == "DropEmpty")
        e
    else {
        v <- eval(e, envir=env)
        # Scalar test, so use && (was the elementwise &); I() marks a factor
        # that should be kept as-is.
        if (is.factor(v) && !inherits(v, "AsIs"))
            e <- Factor(v, expr=e, override=FALSE)
        e
    }
}
# A sum of products is a list whose elements are atoms or products of atoms.
# Flatten a +/* expression into that form, distributing "*" over "+":
# (a + b) * c  ->  list(a * c, b * c).  Nested formulas ("~") are rejected;
# any other call is kept as a single atom.
sumofprods <- function(e) {
    if (!is.language(e)) return(list(e))
    if (is.expression(e)) e <- e[[1]]
    if (is.name(e)) result <- list(e)
    else {
        chr <- deparse(e[[1]])
        if (chr == "+") result <- c(sumofprods(e[[2]]),sumofprods(e[[3]]))
        else if (chr == "*") {
            left <- sumofprods(e[[2]])
            right <- sumofprods(e[[3]])
            result <- list()
            # Distribute: every left term times every right term, in order.
            # seq_along() (was 1:length(...)) is safe for any length.
            for (i in seq_along(left))
                for (j in seq_along(right))
                    result <- c(result, list(call("*", left[[i]], right[[j]])))
        } else if (chr == "(") result <- sumofprods(e[[2]])
        else if (chr == "~") stop("Nested formulas not supported")
        else result <- list(e)
    }
    result
}
# Split a table formula into its (at most two) dimension expressions:
# 'rows ~ cols' gives list(rows, cols); a one-sided '~ cols' or a bare
# expression gives a one-element list; the constant 1 is a legal dimension.
# More than two dimensions is an error.
tabledims <- function(e) {
    if (identical(e,1)) return(list(1))
    if (!is.language(e)) stop('Need an expression')
    if (is.expression(e)) e <- e[[1]]
    if (is.name(e))
        dims <- list(e)
    else if (deparse(e[[1]]) == "~") {
        dims <- tabledims(e[[2]])
        if (length(e) > 2)
            dims <- c(dims, tabledims(e[[3]]))
    } else
        dims <- list(e)
    if (length(dims) > 2) stop("Only 2 dim tables supported")
    dims
}
# Generic entry point for building a table of summary statistics.
# Dispatches on the class of 'table'; tabular.formula() is the workhorse.
tabular <- function(table, ...)
    UseMethod("tabular")
# Default method: coerce 'table' to a formula, evaluated in the caller's
# frame so names in the specification resolve where tabular() was called,
# then delegate to the formula method.
tabular.default <- function(table, ...) {
    tabular.formula(as.formula(table, env=parent.frame()), ...)
}
# Formula method for tabular(): builds the table.
#
# table          a formula 'rows ~ cols' (or one-sided) describing the table
# data           data frame, list or environment in which terms are
#                evaluated; defaults to the formula's own environment
# n              number of observations; defaults to nrow(data) for a data
#                frame, otherwise inferred inside term2table()
# suppressLabels number of initial labels to suppress
# ...            ignored, with a warning
#
# Pipeline: expand sub-expressions and factors, pull Format() specs out of
# the formula, build row/column label matrices, then evaluate each
# (row term x column term) pair via term2table().  Rows/columns in which
# every cell both requested DropEmpty() and was empty are removed.
# Returns a matrix of one-element lists of class "tabular", carrying the
# labels, formats and justification as attributes.
tabular.formula <- function(table, data=NULL, n, suppressLabels=0, ...) {
    formula <- table
    if (length(list(...)))
        warning(gettextf("extra argument(s) %s will be disregarded",
                         paste(sQuote(names(list(...))), collapse = ", ")),
                domain = NA)
    if (missing(n) && inherits(data, "data.frame"))
        n <- nrow(data)
    if (is.null(data))
        data <- environment(table)
    else if (is.list(data))
        data <- list2env(data, parent=environment(table))
    else if (!is.environment(data))
        stop("'data' must be a dataframe, list or environment")
    table <- expandExpressions(table, data)
    table <- collectFormats(table)
    dims <- tabledims(table)
    # A one-sided specification gets an invisible all-ones row dimension.
    if (length(dims) == 1) dims <- c(list(quote((` `=1))), dims)
    dims[[1]] <- expandFactors(dims[[1]], data)
    rlabels <- getLabels(dims[[1]], rows=TRUE, suppress=suppressLabels, env = data)
    suppressLabels <- attr(rlabels, "suppress")
    justify <- attr(rlabels, "justify")
    dims[[2]] <- expandFactors(dims[[2]], data)
    clabels <- getLabels(dims[[2]], rows=FALSE, justify=justify,
                         suppress=suppressLabels, env = data)
    # Check if the Percent calls name nonexistent terms
    subsetLabels <- unique(c(collectSubsets(dims[[1]]), collectSubsets(dims[[2]])))
    checkDenomExprs(dims[[1]], subsetLabels)
    checkDenomExprs(dims[[2]], subsetLabels)
    rows <- sumofprods(dims[[1]])
    cols <- sumofprods(dims[[2]])
    result <- NULL
    formats <- NULL
    justifications <- NULL
    dropcells <- NULL
    # droprow/dropcol start TRUE and stay TRUE only if every cell in the
    # row/column asks to be dropped.
    droprow <- rep(TRUE, length(rows))
    dropcol <- rep(TRUE, length(cols))
    for (i in seq_along(rows)) {
        row <- NULL
        rowformats <- NULL
        rowjustification <- NULL
        rowdropcell <- NULL
        for (j in seq_along(cols)) {
            # term2table checks that n matches across calls
            term <- term2table(rows[[i]], cols[[j]], data, n)
            n <- attr(term, "n")
            format <- attr(term, "format")
            justification <- attr(term, "justification")
            dropcell <- attr(term, "dropcell")
            droprow[i] <- droprow[i] && attr(term, "droprow")
            dropcol[j] <- dropcol[j] && attr(term, "dropcol")
            row <- cbind(row, term)
            rowformats <- cbind(rowformats, format)
            rowjustification <- cbind(rowjustification, justification)
            rowdropcell <- cbind(rowdropcell, dropcell)
        }
        result <- rbind(result, row)
        formats <- rbind(formats, rowformats)
        justifications <- rbind(justifications, rowjustification)
        dropcells <- rbind(dropcells, rowdropcell)
    }
    if (any(c(droprow, dropcol))) {
        result <- result[!droprow, !dropcol, drop = FALSE]
        # drop = FALSE added for consistency with the other subsets here:
        # without it a single remaining row/column collapses 'formats' to a
        # vector, losing alignment with 'result' in format.tabular().
        formats <- formats[!droprow, !dropcol, drop = FALSE]
        justifications <- justifications[!droprow, !dropcol, drop = FALSE]
        dropcells <- dropcells[!droprow, !dropcol, drop = FALSE]
        # Temporarily switch class so the label-specific "[" methods run.
        save <- oldClass(rlabels)
        oldClass(rlabels) <- c("tabularRowLabels", save)
        rlabels <- rlabels[!droprow,, drop = FALSE]
        oldClass(rlabels) <- save
        save <- oldClass(clabels)
        oldClass(clabels) <- c("tabularColLabels", save)
        clabels <- clabels[, !dropcol, drop = FALSE]
        oldClass(clabels) <- save
    }
    structure(result, formula=formula, rowLabels=rlabels, colLabels=clabels, table=table,
              formats = formats, justification = justifications, dropcells = dropcells,
              class = "tabular")
}
justify <- function(x, justification = "c", width = max(nchar(x))) {
  # Pad strings to `width`, justified left ("l"), centred ("c") or
  # right ("r"); entries with any other justification code are returned
  # unchanged.  Leading/trailing blanks are stripped before padding.
  just <- rep_len(justification, length(x))
  sel <- just %in% c("c", "l", "r")
  if (!any(sel)) {
    return(x)
  }
  txt <- x[sel]
  just <- just[sel]
  # Strip existing padding so the requested width is applied cleanly.
  txt <- sub("^ *", "", txt)
  txt <- sub(" *$", "", txt)
  w <- rep_len(width, length(x))[sel]
  n <- nchar(txt)
  centre <- just == "c"
  if (any(centre)) {
    pad_left <- (w[centre] - n[centre]) %/% 2
    pad_right <- w[centre] - n[centre] - pad_left
    txt[centre] <- sprintf("%*s%s%*s", pad_left, "", txt[centre], pad_right, "")
  }
  left <- just == "l"
  if (any(left)) {
    txt[left] <- sprintf("%s%*s", txt[left], w[left] - n[left], "")
  }
  right <- just == "r"
  if (any(right)) {
    txt[right] <- sprintf("%*s%s", w[right] - n[right], "", txt[right])
  }
  x[sel] <- txt
  x
}
latexNumeric <- function(chars, minus = TRUE, leftpad = TRUE, rightpad = TRUE,
                         mathmode = TRUE) {
  # Convert space-padded numeric strings to LaTeX, replacing padding with
  # \phantom{} boxes so columns of numbers stay aligned when typeset.
  #
  # chars:    character vector of pre-formatted numbers (e.g. from format()).
  # minus:    reserve the width of a minus sign on non-negative entries.
  # leftpad:  turn leading blanks into \phantom{0...} boxes.
  # rightpad: turn trailing blanks into \phantom{0...} boxes.
  # mathmode: wrap each result in $...$.
  #
  # Decompose into leading blanks, optional minus, the number, trailing blanks.
  pat <- "^( *)([-]?)([^ -][^ ]*)( *)$"
  lead <- sub(pat, "\\1", chars)
  sgn <- sub(pat, "\\2", chars)
  body <- sub(pat, "\\3", chars)
  trail <- sub(pat, "\\4", chars)
  if (minus) {
    neg <- sgn == "-"
    if (any(neg)) {
      # Ensure every non-negative entry has at least one leading blank to
      # trade for the \phantom{-} box.
      if (any(lead[!neg] == "")) {
        lead <- sub("^", " ", lead)
      }
      lead[!neg] <- sub(" ", "", lead[!neg])
      sgn[!neg] <- "\\phantom{-}"
    }
  }
  if (leftpad) {
    pad <- lead != ""
    if (any(pad)) {
      # Phantom digits have the width of "0", matching the blanks they replace.
      lead[pad] <- paste0("\\phantom{", gsub(" ", "0", lead[pad]), "}")
    }
  }
  if (rightpad) {
    pad <- trail != ""
    if (any(pad)) {
      trail[pad] <- paste0("\\phantom{", gsub(" ", "0", trail[pad]), "}")
    }
  }
  if (mathmode) {
    paste0("$", lead, sgn, body, trail, "$")
  } else {
    paste0(lead, sgn, body, trail)
  }
}
format.tabular <- function(x, digits = 4, justification = "n",
                           latex = FALSE, html = FALSE,
                           leftpad = TRUE, rightpad = TRUE,
                           minus = TRUE, mathmode = TRUE, ...) {
    # Format the cells of a "tabular" object into a character matrix.
    #
    # x:             a "tabular" object (list-matrix carrying "formats",
    #                "justification", "dropcells" and "table" attributes).
    # digits:        significant digits passed to format() for cells that
    #                have no explicit format.
    # justification: default justification code used where the object's
    #                justification attribute is NA.
    # latex/html:    emit LaTeX or HTML markup for numeric padding; at most
    #                one may be TRUE.
    # leftpad/rightpad/minus/mathmode: forwarded to latexNumeric()/htmlNumeric().
    # ...:           further arguments passed on to format().
    #
    # Returns a character matrix with the same shape as x.
    if (latex && html) stop("Only one of 'latex' and 'html' may be requested")
    result <- unclass(x)
    formats <- attr(x, "formats")
    table <- attr(x, "table")
    fmtlist <- attr(table, "fmtlist")
    # Renamed from "justify" to avoid shadowing the justify() helper that is
    # called at the bottom of this function.
    justif <- attr(x, "justification")
    justif[is.na(justif)] <- justification
    dropcells <- attr(x, "dropcells")
    ischar <- sapply(result, is.character)
    chars <- matrix(NA_character_, nrow(result), ncol(result))
    chars[ischar] <- unlist(result[ischar])
    lengths <- lapply(result, length)
    # Pass 1: default-format the unformatted numeric scalar cells column by
    # column, so cells within a column share one format() layout.
    for (i in seq_len(ncol(result))) {
        ind <- col(result) == i & is.na(formats) & !ischar &
               lengths == 1 & is.na(dropcells)
        if (any(ind)) {
            x <- do.call(c, result[ind])
            chars[ind] <- format(x, digits = digits, ...)
            if (is.numeric(x)) {
                if (latex)
                    chars[ind] <- latexNumeric(chars[ind], leftpad = leftpad,
                                               rightpad = rightpad, minus = minus,
                                               mathmode = mathmode)
                else if (html)
                    chars[ind] <- htmlNumeric(chars[ind], leftpad = leftpad,
                                              rightpad = rightpad, minus = minus)
            }
        }
    }
    # Pass 2: apply the user-specified formatting calls recorded in fmtlist.
    for (i in seq_along(fmtlist)) {
        ind <- !is.na(formats) & formats == i & is.na(dropcells)
        if (any(ind)) {
            call <- fmtlist[[i]]
            isformat <- identical(call[[1]], format)
            # A plain format() call applies only to numeric scalar cells;
            # custom formatters receive every selected cell.
            if (isformat) skip <- ischar | (lengths != 1)
            else skip <- ischar & FALSE
            last <- length(call)
            x <- do.call(c, result[ind & !skip])
            call[[last + 1]] <- x
            names(call)[last + 1] <- "x"
            chars[ind & !skip] <- eval(call, environment(table))
            if (isformat) {
                if (latex) {
                    if (is.numeric(x))
                        chars[ind] <- latexNumeric(chars[ind], leftpad = leftpad,
                                                   rightpad = rightpad, minus = minus,
                                                   mathmode = mathmode)
                    else
                        chars[ind] <- texify(chars[ind])
                } else if (html) {
                    if (is.numeric(x))
                        chars[ind] <- htmlNumeric(chars[ind], leftpad = leftpad,
                                                  rightpad = rightpad, minus = minus)
                    else
                        chars[ind] <- htmlify(chars[ind])
                }
            }
        }
    }
    # Dropped cells display their replacement text verbatim.
    chars[!is.na(dropcells)] <- dropcells[!is.na(dropcells)]
    # Plain-text output: apply column-wise justification.
    if (!latex && !html)
        for (i in seq_len(ncol(result)))
            chars[, i] <- justify(chars[, i], justif[, i])
    # Fix: removed a dead "chars[]" expression that built and discarded a
    # copy of the result immediately before this return.
    chars
}
|
# Load the parallel package (cluster- and fork-based parallel apply).
library(parallel)
# Fix the RNG seed for reproducible random numbers.
set.seed(1)
# Side length of the square random matrix.
size <- 1000
# Generate uniform random numbers and shape them into a square matrix.
x <- runif(size*size)
x <- matrix(x, nrow=size, ncol=size)
# NOTE(review): lapply() over a matrix iterates over individual ELEMENTS,
# not rows, so each call sees a single number and sd() of one value is NA.
# Per-row coefficients of variation would need apply(x, 1, ...) instead.
# Time the element-wise sd/mean computation in serial first.
print("Serial time:")
print(system.time(y <- lapply(x, function(x) sd(x)/mean(x))))
# Create a socket cluster with two workers on the local host.
# NOTE(review): parallel::makeCluster() hands type="SOCK" off to the 'snow'
# package (the native type is "PSOCK") -- confirm snow is available.  Also,
# the cluster is never shut down: a stopCluster(cl) is missing at the end
# of this script.
cl <- makeCluster(type="SOCK", c("localhost", "localhost"))
# Generate and shape new random numbers; this prevents our timing being
# affected by caching of the previous results.
x <- runif(size*size)
x <- matrix(x, nrow=size, ncol=size)
# Repeat the same element-wise computation, distributed over the cluster.
print("Parallel time:")
print(system.time(y <- parLapply(cl, x, function(x) sd(x)/mean(x)))) | /solutions/lapply-parallel.R | no_license | calculquebec/cq-formation-r-intermediaire | R | false | false | 893 | r | #load the parallel library for
library(parallel)
#Choose a seed for reproducible random numbers
set.seed(1)
#Size of random matrix
size <- 1000
#Generate random numbers and shape them
#into a square matrix
x <- runif(size*size)
x <- matrix(x, nrow=size, ncol=size)
#Compute the coefficients of variation of each row
# first in serial
print("Serial time:")
print(system.time(y <- lapply(x, function(x) sd(x)/mean(x))))
#Create a sockets cluster with two cores on the localhost
cl <- makeCluster(type="SOCK", c("localhost", "localhost"))
#Generate and shape new random numbers
# this prevents our timing being affected
# by caching of the previous results
x <- runif(size*size)
x <- matrix(x, nrow=size, ncol=size)
#Compute the coefficients of variation of each row
# in parallel this time
print("Parallel time:")
print(system.time(y <- parLapply(cl, x, function(x) sd(x)/mean(x)))) |
## Cache Solve returns the inverse of a matrix (X). The function is optimized to cache the results
## and reuse it for subsequent calls.
## MakeCacheMatrix creates and initializes the local variables and include all setters/getters
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of accessors: set/get for the matrix itself,
  # setinv/getinv for the cached inverse.
  cached_inverse <- NULL
  set <- function(value) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- value
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## Returns the inverse of the matrix (X)
## The function fetches the cache first to reuse results from previous runs, otherwise, the inverse is evaluated.
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped in `x` (a list produced by
  # makeCacheMatrix).  A previously cached inverse is reused; otherwise the
  # inverse is computed with solve(), stored via x$setinv(), and returned.
  # Extra arguments in ... are forwarded to solve().
  cached <- x$getinv()
  if (!is.null(cached)) {
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
| /cachematrix.R | no_license | ialromaih/ProgrammingAssignment2 | R | false | false | 1,032 | r | ## Cache Solve returns the inverse of a matrix (X). The function is optimized to cache the results
## and reuse it for subsequent calls.
## MakeCacheMatrix creates and initializes the local variables and include all setters/getters
makeCacheMatrix <- function(x = matrix()) {
x_inv <- NULL
set <- function(y){
x <<- y
x_inv <<- NULL
}
get <- function() x
setinv <- function(inv) x_inv <<- inv
getinv <- function() x_inv
list(set=set, get=get,
setinv = setinv,
getinv = getinv)
}
## Returns the inverse of the matrix (X)
## The function fetches the cache first to reuse results from previous runs, otherwise, the inverse is evaluated.
cacheSolve <- function(x, ...) {
#x_df <- as.data.frame(x)
x_inv <- x$getinv()
if(!is.null(x_inv)){
return(x_inv)
}
data <- x$get()
x_inv <- solve(data,...)
x$setinv(x_inv)
x_inv
## Return a matrix that is the inverse of 'x'
}
|
# Libraries: file readers (readr/readxl), reshaping (reshape2/data.table),
# and plotting (ggplot2 plus significance annotations from ggpubr/ggsignif).
library(readr)
library(readxl)
library(reshape2)
library(data.table)
library(ggplot2)
library(ggpubr)
library(ggsignif)
# Sample annotation sheet (iTRAQ channel -> sample mapping).
# NOTE(review): the *Phosphoproteome* sample sheet is read into a variable
# named BRCA_Proteome_sample -- confirm this is the intended file.
BRCA_Proteome_sample <- read_delim(
  "TCGA_Breast_BI_Phosphoproteome.sample.csv",
  "\t",
  escape_double = FALSE,
  trim_ws = TRUE
)
# Gene-level proteome summary (tab-separated despite the .csv extension).
BRCA_Proteome_summary <- read_delim(
  "TCGA_Breast_BI_Proteome.summary.csv",
  "\t",
  escape_double = FALSE,
  trim_ws = TRUE
)
# Per-channel iTRAQ ratio table for the proteome.
BRCA_Proteome_itraq <- fread(
  "TCGA_Breast_BI_Proteome.itraq.tsv",
  header = T
)
# Gene-level phosphoproteome summary.
BRCA_Phosphoproteome_summary <- read_delim(
  "TCGA_Breast_BI_Phosphoproteome.summary.csv",
  "\t",
  escape_double = FALSE,
  trim_ws = TRUE
)
# Phosphopeptide- and phosphosite-level iTRAQ ratio tables.
BRCA_Phosphopeptide_itraq <- fread(
  "TCGA_Breast_BI_Phosphoproteome.phosphopeptide.itraq-1.tsv",
  header = T
)
BRCA_Phosphosite_itraq <- fread(
  "TCGA_Breast_BI_Phosphoproteome.phosphosite.itraq-1.tsv",
  header = T
)
# Somatic mutation supplement from the CPTAC breast cancer paper.
CPTAC_BC_somatic_mutations <- read_excel(
  "Documents/translation/Proteogenomics connects somatic mutations to signalling in breast cancer/nature18003-s2/CPTAC_BC_SupplementaryTable01.xlsx")
##########################################################
## use data from CPTAC Breast Cancer Confirmatory Study ##
##########################################################
## The CPTAC breast-cancer prospective (confirmatory) proteome used a
## different iTRAQ labelling scheme and gives different data from the
## TCGA dataset.
CPTAC2_Breast_Prospective_Collection_BI_Proteome <- read_delim(
  "Documents/translation/CPTAC/CPTAC2_Breast_Prospective_Collection_BI_Proteome.summary.csv",
  "\t",
  escape_double = FALSE,
  trim_ws = TRUE)
# Keep only eIF4-family rows (gene symbols containing "EIF4").
Proteome_EIF4 <- CPTAC2_Breast_Prospective_Collection_BI_Proteome [
  grep("EIF4", CPTAC2_Breast_Prospective_Collection_BI_Proteome$Gene), ]
# Keep only the per-sample "... Spectral Counts" columns.
Proteome_EIF4_2 <- Proteome_EIF4 [ ,
  grep("Spectral Counts", names(Proteome_EIF4), value = TRUE)]
# NOTE(review): Proteome_EIF4_2 descends from read_delim(), i.e. a tibble;
# assigning rownames to a tibble is deprecated and may be dropped -- confirm.
rownames(Proteome_EIF4_2) <- Proteome_EIF4$Gene
Proteome_EIF4_3 <- as.data.frame(t(Proteome_EIF4_2))
# Melt to long form for ggplot; row 17 is excluded first -- presumably a
# non-sample row; TODO(review): confirm which row this drops.
Proteome_EIF4_4 <- melt(as.matrix(Proteome_EIF4_3[-17, ]))
# Shared text styling for plot elements, plus a 45-degree-rotated variant
# for the x-axis tick labels.
black_bold_tahoma_12 <- element_text(
  color = "black",
  face = "bold",
  family = "Tahoma",
  size = 9
)
black_bold_tahoma_12_45 <- element_text(
  color = "black",
  face = "bold",
  family = "Tahoma",
  size = 9,
  angle = 45,
  hjust = 1
)
# Violin + box plot of log2 spectral counts, one facet per protein.
p1 <- ggplot(data = Proteome_EIF4_4,
             aes(x = Var2,
                 y = log2(value),
                 color = Var2)) +
  facet_grid(~ Var2,
             scales = "free",
             space = "free") +
  geom_violin(trim = FALSE) +
  geom_boxplot(
    alpha = .01,
    size = .75,
    width = .5,
    position = position_dodge(width = .9)
  ) +
  labs(x = "protein name",
       y = paste("Spectral Counts")) +
  theme_bw() +
  theme(
    plot.title = black_bold_tahoma_12,
    axis.title = black_bold_tahoma_12,
    axis.text.x = black_bold_tahoma_12_45,
    axis.text.y = black_bold_tahoma_12,
    axis.line.x = element_line(color = "black"),
    axis.line.y = element_line(color = "black"),
    panel.grid = element_blank(),
    legend.position = "none",
    strip.text = black_bold_tahoma_12
  )
# p1 <- p1 + stat_compare_means(method = "anova")
print(p1)
#################################
## Use data from CPTAC website ##
#################################
plot.CPTAC.iTRAQ <- function(status, data) {
  # Violin/box plot of the per-sample unshared iTRAQ log-ratios for all
  # eIF4-family proteins found in `data`.
  #
  # status: text appended to the y-axis label (and plot context).
  # data:   iTRAQ table with a 'Gene' column and per-sample
  #         "... Unshared Log Ratio" columns.
  Proteome_itraq_EIF4 <- data[
    grep("EIF4", data$Gene), ]
  Proteome_itraq_EIF4 <- as.data.frame(Proteome_itraq_EIF4)
  # First narrow to "Log Ratio" columns, then to the unshared-peptide ones.
  Proteome_itraq_EIF4_1 <- Proteome_itraq_EIF4[ ,
    grepl("Log Ratio", colnames(Proteome_itraq_EIF4))]
  Proteome_itraq_EIF4_2 <- Proteome_itraq_EIF4_1[ ,
    grepl("Unshared Log Ratio", colnames(Proteome_itraq_EIF4_1))]
  rownames(Proteome_itraq_EIF4_2) <- Proteome_itraq_EIF4$Gene
  # Transpose to samples-by-genes, then melt to long form for ggplot.
  Proteome_itraq_EIF4_3 <- as.data.frame(t(Proteome_itraq_EIF4_2))
  Proteome_itraq_EIF4_4 <- melt(as.matrix(Proteome_itraq_EIF4_3))
  # Local text styling (duplicates the top-level theme objects on purpose,
  # so the function is self-contained).
  black_bold_tahoma_12 <- element_text(
    color = "black",
    face = "bold",
    family = "Tahoma",
    size = 9
  )
  black_bold_tahoma_12_45 <- element_text(
    color = "black",
    face = "bold",
    family = "Tahoma",
    size = 9,
    angle = 45,
    hjust = 1
  )
  # Violin + box plot of the log ratios per protein.
  p1 <- ggplot(data = Proteome_itraq_EIF4_4,
               aes(x = Var2,
                   y = value,
                   color = Var2)) +
    geom_violin(trim = FALSE) +
    geom_boxplot(
      alpha = .01,
      size = .75,
      width = .5,
      position = position_dodge(width = .9)
    ) +
    labs(x = "protein name",
         y = paste("log2 ratio", status)) +
    theme_bw() +
    theme(
      plot.title = black_bold_tahoma_12,
      axis.title = black_bold_tahoma_12,
      axis.text.x = black_bold_tahoma_12_45,
      axis.text.y = black_bold_tahoma_12,
      axis.line.x = element_line(color = "black"),
      axis.line.y = element_line(color = "black"),
      panel.grid = element_blank(),
      legend.position = "none",
      strip.text = black_bold_tahoma_12
    )
  # p1 <- p1 + stat_compare_means(method = "anova")
  print(p1)
}
# NOTE(review): TCGA_Breast_BI_Proteome_itraq is never defined in this
# script -- the iTRAQ table was loaded as BRCA_Proteome_itraq above, so
# this call will fail with "object not found" as written.
plot.CPTAC.iTRAQ ("Proteome itraq", TCGA_Breast_BI_Proteome_itraq)
plot.CPTAC <- function(status, data) {
  # Violin/box plot of spectral counts for the core eIF4F genes, split into
  # tumour vs. normal samples, with a t-test significance annotation.  Also
  # dumps the melted plotting data to "<status>data.csv".
  #
  # status: y-axis label prefix, also used to name the CSV dump.
  # data:   gene-level summary table with a 'Gene' column and per-sample
  #         "... Spectral Counts" columns.
  Proteome_EIF4 <- data [grep("EIF4", data$Gene), ]
  # Restrict to the four eIF4F complex members of interest.
  EIF4.gene <- c("EIF4A1","EIF4E","EIF4G1","EIF4EBP1")
  Proteome_EIF4 <- Proteome_EIF4[
    Proteome_EIF4$Gene %in% EIF4.gene, ]
  Proteome_EIF4_2 <- Proteome_EIF4 [ ,
    grep("Spectral Counts", names(Proteome_EIF4), value = TRUE)]
  rownames(Proteome_EIF4_2) <- Proteome_EIF4$Gene
  Proteome_EIF4_3 <- as.data.frame(t(Proteome_EIF4_2))
  # Row 38 is dropped before melting -- presumably a non-sample row;
  # TODO(review): confirm against the input file.
  Proteome_EIF4_4 <- melt(as.matrix(Proteome_EIF4_3[-38, ]))
  # Label samples: sample names containing "263" are the normals, everything
  # else is tumour.  NOTE(review): the group sizes in these labels are
  # hard-coded and will be wrong for other inputs.
  Proteome_EIF4_4$type <- "Tumor (n=105)"
  Proteome_EIF4_4$type[grep("263", Proteome_EIF4_4$Var1)] <- "Normal (n=3)"
  Proteome_EIF4_4$type <- as.factor(Proteome_EIF4_4$type)
  # Local text styling for the plot theme.
  black_bold_tahoma_12 <- element_text(
    color = "black",
    face = "bold",
    family = "Tahoma",
    size = 9
  )
  black_bold_tahoma_12_45 <- element_text(
    color = "black",
    face = "bold",
    family = "Tahoma",
    size = 9,
    angle = 45,
    hjust = 1
  )
  # Violin + box plot, faceted by protein, grouped tumour vs. normal.
  p1 <- ggplot(data = Proteome_EIF4_4,
               aes(x = type,
                   y = value,
                   color = Var2)) +
    facet_grid(~ Var2,
               scales = "free",
               space = "free") +
    geom_violin(trim = FALSE) +
    geom_boxplot(
      alpha = .01,
      width = .5,
      position = position_dodge(width = .9)
    ) +
    labs(x = "sample type",
         y = paste(status, "(Spectral Counts)")) +
    theme_bw() +
    theme(
      plot.title = black_bold_tahoma_12,
      axis.title = black_bold_tahoma_12,
      axis.text.x = black_bold_tahoma_12_45,
      axis.text.y = black_bold_tahoma_12,
      axis.line.x = element_line(color = "black"),
      axis.line.y = element_line(color = "black"),
      panel.grid = element_blank(),
      legend.position = "none",
      strip.text = black_bold_tahoma_12
    )
  # Fix: in the original, method = "t.test" sat *inside* the list() passed
  # to `comparisons`, becoming a bogus comparison element instead of the
  # test-method argument of stat_compare_means().
  p2 <- p1 + stat_compare_means(
    comparisons = list(c("Tumor (n=105)", "Normal (n=3)")),
    method = "t.test"
  )
  print(p2)
  write.csv(Proteome_EIF4_4, file = paste0(status, "data.csv"))
}
plot.CPTAC ("Protein abundance", BRCA_Proteome_summary)
plot.CPTAC ("PhosphoProtein abundance", BRCA_Phosphoproteome_summary)
################################################################
## extract protein quantity from iTRAQ and MS/MS spectra data ##
################################################################
data <- BRCA_Proteome_summary
Proteome_EIF4 <- data[grep("EIF4", data$Gene), ]
Proteome_EIF4_2 <- Proteome_EIF4[ ,
  grep("Spectral Counts", names(Proteome_EIF4), value = TRUE)]
rownames(Proteome_EIF4_2) <- Proteome_EIF4$Gene
# NOTE(review): str_remove() is from stringr, which is never loaded by this
# script (only readr/readxl/reshape2/data.table/ggplot2/ggpubr/ggsignif);
# these calls will fail unless stringr is attached elsewhere.
colnames(Proteome_EIF4_2) <- str_remove(colnames(Proteome_EIF4_2),
                                        "Spectral Counts")
# Drop the 38th sample column.
Proteome_EIF4_2 [[38]] <- NULL
data <- BRCA_Proteome_itraq
Proteome_itraq_EIF4 <- data[
  grep("EIF4", data$Gene), ]
Proteome_itraq_EIF4 <- as.data.frame(Proteome_itraq_EIF4)
# Narrow to the unshared-peptide log-ratio columns.
Proteome_itraq_EIF4_1 <- Proteome_itraq_EIF4[ ,
  grepl("Log Ratio", colnames(Proteome_itraq_EIF4))]
Proteome_itraq_EIF4_2 <- Proteome_itraq_EIF4_1[ ,
  grepl("Unshared Log Ratio", colnames(Proteome_itraq_EIF4_1))]
rownames(Proteome_itraq_EIF4_2) <- Proteome_itraq_EIF4$Gene
# Back-transform the log ratios to plain ratios.
# NOTE(review): exp() assumes natural-log ratios; if these CPTAC iTRAQ
# columns are log2 (as is typical), 2^x would be the correct inverse --
# confirm against the data dictionary.
Proteome_itraq_EIF4_3 <- exp(Proteome_itraq_EIF4_2)
colnames(Proteome_itraq_EIF4_3) <- str_remove(colnames(Proteome_itraq_EIF4_2),
                                              " Unshared Log Ratio")
colnames(Proteome_itraq_EIF4_3) <- str_remove(colnames(Proteome_itraq_EIF4_3), "\\.1")
colnames(Proteome_itraq_EIF4_3) <- str_remove(colnames(Proteome_itraq_EIF4_3), "\\.2")
ncol(Proteome_itraq_EIF4_3)
## Sum, per gene and per iTRAQ experiment, the ratios of the three sample
## channels (114/115/116) listed in the sample sheet.
BRCA_Proteome_ratiosum <- BRCA_Proteome_sample
EIF4F_list <- rownames(Proteome_itraq_EIF4_3)
for(y in EIF4F_list){
  for(x in 1:37)
  {
    # NOTE(review): this helper is redefined on every iteration and the
    # experiment count (37) is hard-coded -- presumably nrow of the sample
    # sheet; confirm.
    group_item_name <- function(x){
      BRCA_Proteome_sample[x, c("114", "115", "116")]}
    ## have to use unlist to convert into vector
    v <- as.vector (unlist(group_item_name(x)))
    x1 <- rowSums(Proteome_itraq_EIF4_3[y, v])
    BRCA_Proteome_ratiosum [x ,y] <- x1
    message("x=", x)
  }
}
# Keep only the per-gene sum columns, shift by +1, and orient genes-by-sample.
BRCA_Proteome_ratiosum <- BRCA_Proteome_ratiosum[ ,EIF4F_list]
BRCA_Proteome_ratiosum_2 <- BRCA_Proteome_ratiosum + 1
rownames(BRCA_Proteome_ratiosum_2) <- colnames(Proteome_EIF4_2)
BRCA_Proteome_ratiosum_3 <- t(BRCA_Proteome_ratiosum_2)
| /R/CPTAC.R | permissive | dlroxe/EIF-analysis | R | false | false | 9,330 | r | library(readr)
library(readxl)
library(reshape2)
library(data.table)
library(ggplot2)
library(ggpubr)
library(ggsignif)
BRCA_Proteome_sample <- read_delim(
"TCGA_Breast_BI_Phosphoproteome.sample.csv",
"\t",
escape_double = FALSE,
trim_ws = TRUE
)
BRCA_Proteome_summary <- read_delim(
"TCGA_Breast_BI_Proteome.summary.csv",
"\t",
escape_double = FALSE,
trim_ws = TRUE
)
BRCA_Proteome_itraq <- fread(
"TCGA_Breast_BI_Proteome.itraq.tsv",
header = T
)
BRCA_Phosphoproteome_summary <- read_delim(
"TCGA_Breast_BI_Phosphoproteome.summary.csv",
"\t",
escape_double = FALSE,
trim_ws = TRUE
)
BRCA_Phosphopeptide_itraq <- fread(
"TCGA_Breast_BI_Phosphoproteome.phosphopeptide.itraq-1.tsv",
header = T
)
BRCA_Phosphosite_itraq <- fread(
"TCGA_Breast_BI_Phosphoproteome.phosphosite.itraq-1.tsv",
header = T
)
CPTAC_BC_somatic_mutations <- read_excel(
"Documents/translation/Proteogenomics connects somatic mutations to signalling in breast cancer/nature18003-s2/CPTAC_BC_SupplementaryTable01.xlsx")
##########################################################
## use data from CPTAC Breast Cancer Confirmatory Study ##
##########################################################
## CPTAC_BCprospective_Proteome used different iTRAQ labeling scheme
## and give different data from the TCGA dataset
CPTAC2_Breast_Prospective_Collection_BI_Proteome <- read_delim(
"Documents/translation/CPTAC/CPTAC2_Breast_Prospective_Collection_BI_Proteome.summary.csv",
"\t",
escape_double = FALSE,
trim_ws = TRUE)
Proteome_EIF4 <- CPTAC2_Breast_Prospective_Collection_BI_Proteome [
grep("EIF4", CPTAC2_Breast_Prospective_Collection_BI_Proteome$Gene), ]
Proteome_EIF4_2 <- Proteome_EIF4 [ ,
grep("Spectral Counts", names(Proteome_EIF4), value = TRUE)]
rownames(Proteome_EIF4_2) <- Proteome_EIF4$Gene
Proteome_EIF4_3 <- as.data.frame(t(Proteome_EIF4_2))
Proteome_EIF4_4 <- melt(as.matrix(Proteome_EIF4_3[-17, ]))
black_bold_tahoma_12 <- element_text(
color = "black",
face = "bold",
family = "Tahoma",
size = 9
)
black_bold_tahoma_12_45 <- element_text(
color = "black",
face = "bold",
family = "Tahoma",
size = 9,
angle = 45,
hjust = 1
)
p1 <- ggplot(data = Proteome_EIF4_4,
aes(x = Var2,
y = log2(value),
color = Var2)) +
facet_grid(~ Var2,
scales = "free",
space = "free") +
geom_violin(trim = FALSE) +
geom_boxplot(
alpha = .01,
size = .75,
width = .5,
position = position_dodge(width = .9)
) +
labs(x = "protein name",
y = paste("Spectral Counts")) +
theme_bw() +
theme(
plot.title = black_bold_tahoma_12,
axis.title = black_bold_tahoma_12,
axis.text.x = black_bold_tahoma_12_45,
axis.text.y = black_bold_tahoma_12,
axis.line.x = element_line(color = "black"),
axis.line.y = element_line(color = "black"),
panel.grid = element_blank(),
legend.position = "none",
strip.text = black_bold_tahoma_12
)
# p1 <- p1 + stat_compare_means(method = "anova")
print(p1)
#################################
## Use data from CPTAC website ##
#################################
plot.CPTAC.iTRAQ <- function(status, data) {
Proteome_itraq_EIF4 <- data[
grep("EIF4", data$Gene), ]
Proteome_itraq_EIF4 <- as.data.frame(Proteome_itraq_EIF4)
Proteome_itraq_EIF4_1 <- Proteome_itraq_EIF4[ ,
grepl("Log Ratio", colnames(Proteome_itraq_EIF4))]
Proteome_itraq_EIF4_2 <- Proteome_itraq_EIF4_1[ ,
grepl("Unshared Log Ratio", colnames(Proteome_itraq_EIF4_1))]
rownames(Proteome_itraq_EIF4_2) <- Proteome_itraq_EIF4$Gene
Proteome_itraq_EIF4_3 <- as.data.frame(t(Proteome_itraq_EIF4_2))
Proteome_itraq_EIF4_4 <- melt(as.matrix(Proteome_itraq_EIF4_3))
black_bold_tahoma_12 <- element_text(
color = "black",
face = "bold",
family = "Tahoma",
size = 9
)
black_bold_tahoma_12_45 <- element_text(
color = "black",
face = "bold",
family = "Tahoma",
size = 9,
angle = 45,
hjust = 1
)
p1 <- ggplot(data = Proteome_itraq_EIF4_4,
aes(x = Var2,
y = value,
color = Var2)) +
geom_violin(trim = FALSE) +
geom_boxplot(
alpha = .01,
size = .75,
width = .5,
position = position_dodge(width = .9)
) +
labs(x = "protein name",
y = paste("log2 ratio", status)) +
theme_bw() +
theme(
plot.title = black_bold_tahoma_12,
axis.title = black_bold_tahoma_12,
axis.text.x = black_bold_tahoma_12_45,
axis.text.y = black_bold_tahoma_12,
axis.line.x = element_line(color = "black"),
axis.line.y = element_line(color = "black"),
panel.grid = element_blank(),
legend.position = "none",
strip.text = black_bold_tahoma_12
)
# p1 <- p1 + stat_compare_means(method = "anova")
print(p1)
}
plot.CPTAC.iTRAQ ("Proteome itraq", TCGA_Breast_BI_Proteome_itraq)
plot.CPTAC <- function(status, data) {
Proteome_EIF4 <- data [grep("EIF4", data$Gene), ]
EIF4.gene <- c("EIF4A1","EIF4E","EIF4G1","EIF4EBP1")
Proteome_EIF4 <- Proteome_EIF4[
Proteome_EIF4$Gene %in% EIF4.gene, ]
Proteome_EIF4_2 <- Proteome_EIF4 [ ,
grep("Spectral Counts", names(Proteome_EIF4), value = TRUE)]
rownames(Proteome_EIF4_2) <- Proteome_EIF4$Gene
Proteome_EIF4_3 <- as.data.frame(t(Proteome_EIF4_2))
Proteome_EIF4_4 <- melt(as.matrix(Proteome_EIF4_3[-38, ]))
Proteome_EIF4_4$type <- "Tumor (n=105)"
Proteome_EIF4_4$type[grep("263", Proteome_EIF4_4$Var1)] <- "Normal (n=3)"
Proteome_EIF4_4$type <- as.factor(Proteome_EIF4_4$type)
black_bold_tahoma_12 <- element_text(
color = "black",
face = "bold",
family = "Tahoma",
size = 9
)
black_bold_tahoma_12_45 <- element_text(
color = "black",
face = "bold",
family = "Tahoma",
size = 9,
angle = 45,
hjust = 1
)
p1 <- ggplot(data = Proteome_EIF4_4,
aes(x = type,
y = value,
color = Var2)) +
facet_grid(~ Var2,
scales = "free",
space = "free") +
geom_violin(trim = FALSE) +
geom_boxplot(
alpha = .01,
width = .5,
position = position_dodge(width = .9)
) +
labs(x = "sample type",
y = paste(status, "(Spectral Counts)")) +
theme_bw() +
theme(
plot.title = black_bold_tahoma_12,
axis.title = black_bold_tahoma_12,
axis.text.x = black_bold_tahoma_12_45,
axis.text.y = black_bold_tahoma_12,
axis.line.x = element_line(color = "black"),
axis.line.y = element_line(color = "black"),
panel.grid = element_blank(),
legend.position = "none",
strip.text = black_bold_tahoma_12
)
p2 <- p1 + stat_compare_means(comparisons = list(
c("Tumor (n=105)", "Normal (n=3)"),
method = "t.test"))
# p2 <- p1 + stat_compare_means()
print(p2)
write.csv(Proteome_EIF4_4, file = paste0(status,"data.csv"))
}
plot.CPTAC ("Protein abundance", BRCA_Proteome_summary)
plot.CPTAC ("PhosphoProtein abundance", BRCA_Phosphoproteome_summary)
################################################################
## extract protein quantity from iTRAQ and MS/MS spectra data ##
################################################################
data <- BRCA_Proteome_summary
Proteome_EIF4 <- data[grep("EIF4", data$Gene), ]
Proteome_EIF4_2 <- Proteome_EIF4[ ,
grep("Spectral Counts", names(Proteome_EIF4), value = TRUE)]
rownames(Proteome_EIF4_2) <- Proteome_EIF4$Gene
colnames(Proteome_EIF4_2) <- str_remove(colnames(Proteome_EIF4_2),
"Spectral Counts")
Proteome_EIF4_2 [[38]] <- NULL
data <- BRCA_Proteome_itraq
Proteome_itraq_EIF4 <- data[
grep("EIF4", data$Gene), ]
Proteome_itraq_EIF4 <- as.data.frame(Proteome_itraq_EIF4)
Proteome_itraq_EIF4_1 <- Proteome_itraq_EIF4[ ,
grepl("Log Ratio", colnames(Proteome_itraq_EIF4))]
Proteome_itraq_EIF4_2 <- Proteome_itraq_EIF4_1[ ,
grepl("Unshared Log Ratio", colnames(Proteome_itraq_EIF4_1))]
rownames(Proteome_itraq_EIF4_2) <- Proteome_itraq_EIF4$Gene
# convert all value to non-log transforms
Proteome_itraq_EIF4_3 <- exp(Proteome_itraq_EIF4_2)
colnames(Proteome_itraq_EIF4_3) <- str_remove(colnames(Proteome_itraq_EIF4_2),
" Unshared Log Ratio")
colnames(Proteome_itraq_EIF4_3) <- str_remove(colnames(Proteome_itraq_EIF4_3), "\\.1")
colnames(Proteome_itraq_EIF4_3) <- str_remove(colnames(Proteome_itraq_EIF4_3), "\\.2")
ncol(Proteome_itraq_EIF4_3)
## the following function draws the sum of all ratios
BRCA_Proteome_ratiosum <- BRCA_Proteome_sample
EIF4F_list <- rownames(Proteome_itraq_EIF4_3)
for(y in EIF4F_list){
for(x in 1:37)
{
group_item_name <- function(x){
BRCA_Proteome_sample[x, c("114", "115", "116")]}
## have to use unlist to convert into vector
v <- as.vector (unlist(group_item_name(x)))
x1 <- rowSums(Proteome_itraq_EIF4_3[y, v])
BRCA_Proteome_ratiosum [x ,y] <- x1
message("x=", x)
}
}
BRCA_Proteome_ratiosum <- BRCA_Proteome_ratiosum[ ,EIF4F_list]
BRCA_Proteome_ratiosum_2 <- BRCA_Proteome_ratiosum + 1
rownames(BRCA_Proteome_ratiosum_2) <- colnames(Proteome_EIF4_2)
BRCA_Proteome_ratiosum_3 <- t(BRCA_Proteome_ratiosum_2)
|
#' Convert raw ranking data (case form) to ranking data with rank frequencies
#'
#' Each row of the input is one observed ranking; the output tabulates the
#' distinct rankings together with how often each occurs.
#'
#' @param x A data frame or a matrix (case form), each row is a rank.
#' @return A data frame; each row contains a rank and its frequency in a
#'   column named \code{Freq}. Rankings that never occur are omitted.
#' @export
#' @author Li Qinglong <liqinglong0830@@163.com>
#' @examples
#' data(APA)
#' cases = freq2case(APA, freq.col = 1)
#' freqs = case2freq(cases)
case2freq <- function (x)
{
    nCol <- dim(x)[2]
    # Cross-tabulate all columns, then flatten the contingency table.
    # Fix: the original spelled the argument "stringAsFactors", which was
    # silently swallowed by "..."; the correct name is stringsAsFactors.
    DF <- as.data.frame(table(x), stringsAsFactors = TRUE)
    # Keep only combinations that actually occur (Freq is column nCol + 1).
    DF <- DF[DF[, nCol + 1] != 0, ]
    DF <- as.data.frame(DF)
    # seq_len() is safe when the filtered table is empty (1:0 would not be).
    row.names(DF) <- seq_len(nrow(DF))
    return(DF)
}
| /R/case2freq.R | no_license | cran/StatMethRank | R | false | false | 688 | r | #' Convert raw ranking data(case form) to ranking data with rank frequencies
#'
#' Convert raw ranking data(case form) to ranking data with rank frequencies
#'
#' @param x A data frame or a matrix(case form), each row is a rank.
#' @return A data frame, each row contains a rank and the corresponding frequency.
#' @export
#' @author Li Qinglong <liqinglong0830@@163.com>
#' @examples
#' data(APA)
#' cases = freq2case(APA, freq.col = 1)
#' freqs = case2freq(cases)
case2freq <- function (x)
{
nCol = dim(x)[2]
DF = as.data.frame(table(x), stringAsFactors = TRUE)
DF = DF[DF[, nCol + 1] != 0, ]
DF = as.data.frame(DF)
row.names(DF) = 1:nrow(DF)
return(DF)
}
|
# NOTE(review): setwd() plus rm(list = ls()) at the top of a script is
# fragile (machine-specific path, wipes the caller's workspace); flagged,
# kept as-is.
setwd("/Volumes/Data Science/Google Drive/data_science_competition/kaggle/Santander_Customer_Satisfaction/")
rm(list = ls()); gc();
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() is the safer choice for hard dependencies.
require(data.table)
require(purrr)
require(caret)
require(Metrics)
require(ggplot2)
require(caTools)
# Project helpers (feature preprocessing and cross-validation utilities).
source("utilities/preprocess.R")
source("utilities/cv.R")
# Loads the cleansed data.table `dt.cleansed`; rows with TARGET == -1 are
# the unlabeled test set (see the split below).
load("../data/Santander_Customer_Satisfaction/RData/dt_cleansed.RData")
#######################################################################################
## 1.0 train, valid, test #############################################################
#######################################################################################
cat("prepare train, valid, and test data set...\n")
set.seed(888)
# Stratified 80/20 split of the labeled rows.
ind.train <- createDataPartition(dt.cleansed[TARGET >= 0]$TARGET, p = .8, list = F) # remember to change it to .66
dt.train <- dt.cleansed[TARGET >= 0][ind.train]
dt.valid <- dt.cleansed[TARGET >= 0][-ind.train]
dt.test <- dt.cleansed[TARGET == -1]
dim(dt.train); dim(dt.valid); dim(dt.test)
table(dt.train$TARGET)
table(dt.valid$TARGET)
# h2o expects a factor response for classification; := modifies by reference.
dt.train[, TARGET := as.factor(dt.train$TARGET)]
dt.valid[, TARGET := as.factor(dt.valid$TARGET)]
#######################################################################################
## 2.0 h2o cv #########################################################################
#######################################################################################
require(h2o)
# Connect to (or start) a local h2o instance with a 6 GB heap.
h2o.init(ip = 'localhost', port = 54321, max_mem_size = '6g')
h2o.train <- as.h2o(dt.train)
h2o.valid <- as.h2o(dt.valid)
# 3-fold CV deep net with input dropout, L1 regularisation, and early
# stopping on AUC; ID and TARGET are excluded from the predictors.
md.h2o <- h2o.deeplearning(x = setdiff(names(dt.train), c("ID", "TARGET")),
                           y = "TARGET",
                           training_frame = h2o.train,
                           nfolds = 3,
                           stopping_rounds = 3,
                           epochs = 20,
                           overwrite_with_best_model = TRUE,
                           activation = "RectifierWithDropout",
                           input_dropout_ratio = 0.2,
                           hidden = c(100,100),
                           l1 = 1e-4,
                           loss = "CrossEntropy",
                           distribution = "bernoulli",
                           stopping_metric = "AUC"
                           )
# Validation AUC: p1 is the predicted probability of the positive class.
pred.valid <- as.data.frame(h2o.predict(object = md.h2o, newdata = h2o.valid))
auc(dt.valid$TARGET, pred.valid$p1)
# benchmark
# 0.790934 | /script/4_singleModel_h2o.R | no_license | noahhhhhh/Santander_Customer_Satisfaction | R | false | false | 2,352 | r | setwd("/Volumes/Data Science/Google Drive/data_science_competition/kaggle/Santander_Customer_Satisfaction/")
rm(list = ls()); gc();
require(data.table)
require(purrr)
require(caret)
require(Metrics)
require(ggplot2)
require(caTools)
source("utilities/preprocess.R")
source("utilities/cv.R")
load("../data/Santander_Customer_Satisfaction/RData/dt_cleansed.RData")
#######################################################################################
## 1.0 train, valid, test #############################################################
#######################################################################################
cat("prepare train, valid, and test data set...\n")
set.seed(888)
ind.train <- createDataPartition(dt.cleansed[TARGET >= 0]$TARGET, p = .8, list = F) # remember to change it to .66
dt.train <- dt.cleansed[TARGET >= 0][ind.train]
dt.valid <- dt.cleansed[TARGET >= 0][-ind.train]
dt.test <- dt.cleansed[TARGET == -1]
dim(dt.train); dim(dt.valid); dim(dt.test)
table(dt.train$TARGET)
table(dt.valid$TARGET)
dt.train[, TARGET := as.factor(dt.train$TARGET)]
dt.valid[, TARGET := as.factor(dt.valid$TARGET)]
#######################################################################################
## 2.0 h2o cv #########################################################################
#######################################################################################
require(h2o)
h2o.init(ip = 'localhost', port = 54321, max_mem_size = '6g')
h2o.train <- as.h2o(dt.train)
h2o.valid <- as.h2o(dt.valid)
md.h2o <- h2o.deeplearning(x = setdiff(names(dt.train), c("ID", "TARGET")),
y = "TARGET",
training_frame = h2o.train,
nfolds = 3,
stopping_rounds = 3,
epochs = 20,
overwrite_with_best_model = TRUE,
activation = "RectifierWithDropout",
input_dropout_ratio = 0.2,
hidden = c(100,100),
l1 = 1e-4,
loss = "CrossEntropy",
distribution = "bernoulli",
stopping_metric = "AUC"
)
pred.valid <- as.data.frame(h2o.predict(object = md.h2o, newdata = h2o.valid))
auc(dt.valid$TARGET, pred.valid$p1)
# benchmark
# 0.790934 |
# Test the adaptive intensity estimator.
library(spatstat)
library(devtools)
# Load the package under development from the current directory.
load_all(".")
# Simulate 200 uniform points in the unit square.
x <- runifpoint(200)
# Radii at which to evaluate (l= is partial matching for length.out=).
r <- seq(0, 0.2, l=10)
int <- intensity_adapted(x, r)
z<-rho_box(x, r)
# Normalise to a pair-correlation-style quantity g(r).
g <- z/(pi * int^2)
plot(NA, xlim=range(r), ylim=c(0,2))
# Draw one curve per column of g.
apply(g, 2, lines, x=r)
| /tests/6-adapted-intensity.R | no_license | antiphon/Kcross | R | false | false | 261 | r | # Test the itnensity adpative
library(spatstat)
library(devtools)
load_all(".")
x <- runifpoint(200)
r <- seq(0, 0.2, l=10)
int <- intensity_adapted(x, r)
z<-rho_box(x, r)
g <- z/(pi * int^2)
plot(NA, xlim=range(r), ylim=c(0,2))
apply(g, 2, lines, x=r)
|
library("lattice")
plotfun <- function(file, doplot = TRUE) {
load(file)
parm <- lapply(out, function(x) x$parm)
df <- NULL
for (i in 1:length(parm)) {
tmp <- c()
chkRsk <- out[[i]][["checkRisk"]]
out[[i]][["L.1"]] <- chkRsk[,,1]
out[[i]][["L.5"]] <- chkRsk[,,2]
out[[i]][["L.9"]] <- chkRsk[,,3]
nm <- names(out[[i]])
out[[i]] <- out[[i]][nm[nm != "checkRisk"]]
for (j in 2:length(out[[i]])) {
dummy <- as.data.frame(out[[i]][[j]])
vars <- 1:ncol(dummy)
vnames <- colnames(dummy)[vars]
dummy$id <- factor(1:nrow(dummy))
dummy <- reshape(dummy, varying = list(vars), direction = "long",
idvar = "id", timevar = "model",
v.names = names(out[[i]])[j])
dummy$model <- factor(dummy$model, levels = vars, labels = vnames)
if (j == 2) {
tmp <- dummy
} else {
tmp <- merge(tmp, dummy, by = c("id", "model"))
}
}
tmp <- cbind(tmp, as.data.frame(parm[i])[rep(1, nrow(tmp)),,drop = FALSE])
df <- rbind(df, tmp)
}
save(df, file = paste(file, "_out.rda", sep = ""))
if (doplot) {
df <- subset(df, model != "ttBern")
df <- subset(df, model != "ttBernExSplit")
pdf(paste(file, ".pdf", sep = ""))
print(bwplot(ll ~ model | p + tau + prod_mu + prod_sigma, data = df, scales = list(y =
"free", x = list(rot = 45)), main = file))#, ylim = c(-800, -350)))
print(bwplot(time ~ model | p + tau + prod_mu + prod_sigma, data = df, scales = list(y =
"free", x = list(rot = 45)), main = file))#, ylim = c(-800, -350)))
print(bwplot(L.1 ~ model | p + tau + prod_mu + prod_sigma,, data = df,
scales = list(y = "free", x = list(rot = 45)), main = file))
print(bwplot(L.5 ~ model | p + tau + prod_mu + prod_sigma,, data = df,
scales = list(y = "free", x = list(rot = 45)), main = file))
print(bwplot(L.9 ~ model | p + tau + prod_mu + prod_sigma,, data = df,
scales = list(y = "free", x = list(rot = 45)), main = file))
dev.off()
}
}
plotfun("2d.rda", doplot = FALSE)
plotfun("lognormal_2d.rda", doplot = FALSE)
plotfun("friedman.rda", doplot = FALSE)
plotfun("lognormal_friedman.rda", doplot = FALSE)
plotfun("timings.rda", doplot = FALSE)
| /inst/sim/summary.R | no_license | cran/trtf | R | false | false | 2,225 | r |
library("lattice")
plotfun <- function(file, doplot = TRUE) {
load(file)
parm <- lapply(out, function(x) x$parm)
df <- NULL
for (i in 1:length(parm)) {
tmp <- c()
chkRsk <- out[[i]][["checkRisk"]]
out[[i]][["L.1"]] <- chkRsk[,,1]
out[[i]][["L.5"]] <- chkRsk[,,2]
out[[i]][["L.9"]] <- chkRsk[,,3]
nm <- names(out[[i]])
out[[i]] <- out[[i]][nm[nm != "checkRisk"]]
for (j in 2:length(out[[i]])) {
dummy <- as.data.frame(out[[i]][[j]])
vars <- 1:ncol(dummy)
vnames <- colnames(dummy)[vars]
dummy$id <- factor(1:nrow(dummy))
dummy <- reshape(dummy, varying = list(vars), direction = "long",
idvar = "id", timevar = "model",
v.names = names(out[[i]])[j])
dummy$model <- factor(dummy$model, levels = vars, labels = vnames)
if (j == 2) {
tmp <- dummy
} else {
tmp <- merge(tmp, dummy, by = c("id", "model"))
}
}
tmp <- cbind(tmp, as.data.frame(parm[i])[rep(1, nrow(tmp)),,drop = FALSE])
df <- rbind(df, tmp)
}
save(df, file = paste(file, "_out.rda", sep = ""))
if (doplot) {
df <- subset(df, model != "ttBern")
df <- subset(df, model != "ttBernExSplit")
pdf(paste(file, ".pdf", sep = ""))
print(bwplot(ll ~ model | p + tau + prod_mu + prod_sigma, data = df, scales = list(y =
"free", x = list(rot = 45)), main = file))#, ylim = c(-800, -350)))
print(bwplot(time ~ model | p + tau + prod_mu + prod_sigma, data = df, scales = list(y =
"free", x = list(rot = 45)), main = file))#, ylim = c(-800, -350)))
print(bwplot(L.1 ~ model | p + tau + prod_mu + prod_sigma,, data = df,
scales = list(y = "free", x = list(rot = 45)), main = file))
print(bwplot(L.5 ~ model | p + tau + prod_mu + prod_sigma,, data = df,
scales = list(y = "free", x = list(rot = 45)), main = file))
print(bwplot(L.9 ~ model | p + tau + prod_mu + prod_sigma,, data = df,
scales = list(y = "free", x = list(rot = 45)), main = file))
dev.off()
}
}
plotfun("2d.rda", doplot = FALSE)
plotfun("lognormal_2d.rda", doplot = FALSE)
plotfun("friedman.rda", doplot = FALSE)
plotfun("lognormal_friedman.rda", doplot = FALSE)
plotfun("timings.rda", doplot = FALSE)
|
\name{quasi_sym_pseudo}
\alias{quasi_sym_pseudo}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Recursive computation of pseudo conditional maximum likelihood
method proposed by Bartolucci & Nigro (2012).}
\description{Recursively compute the denominator of the individual
conditional likelihood function for the pseudo conditional maximum
likelihood method proposed by Bartolucci & Nigro (2012) recursively,
adapted from Krailo & Pike (1984).}
\usage{
quasi_sym_pseudo(eta,qi,s,y0=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{eta}{individual vector of products between covariate and parameters}
\item{s}{total score of the individual}
\item{qi}{Vector of quantities from first step estimation}
\item{y0}{Individual initial observation for dynamic models}
}
\value{
\item{f}{value of the denominator}
\item{d1}{first derivative of the recursive function}
\item{dl1}{a component of the score function}
\item{D2}{second derivative of the recursive function}
\item{Dl2}{a component for the Hessian matrix}
}
\references{
Bartolucci, F. and Nigro, V. (2010), A dynamic model for binary panel data with unobserved heterogeneity admitting a root-n consistent conditional estimator, \emph{Econometrica}, \bold{78}, 719-733.
Bartolucci, F. and Nigro, V. (2012), Pseudo conditional maximum
likelihood estimation of the dynamic logit model for binary panel data,
\emph{Journal of Econometrics}, \bold{170}, 102-116.
Bartolucci, F., Valentini. F., & Pigini, C. (2021), Recursive Computation of the Conditional Probability Function of the Quadratic Exponential Model for Binary Panel Data, \emph{Computational Economics}, https://doi.org/10.1007/s10614-021-10218-2.
Krailo, M. D., & Pike, M. C. (1984). Algorithm AS 196: conditional multivariate logistic analysis of stratified case-control studies, \emph{Journal of the Royal Statistical Society. Series C (Applied Statistics)}, \bold{33(1)}, 95-103.
}
\author{
Francesco Bartolucci (University of Perugia), Claudia Pigini (University of Ancona "Politecnica delle Marche"), Francesco Valentini (University
of Ancona "Politecnica delle Marche")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{panel data} | /man/quasi_sym_pseudo.Rd | no_license | cran/cquad | R | false | false | 2,305 | rd | \name{quasi_sym_pseudo}
\alias{quasi_sym_pseudo}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Recursive computation of pseudo conditional maximum likelihood
method proposed by Bartolucci & Nigro (2012).}
\description{Recursively compute the denominator of the individual
conditional likelihood function for the pseudo conditional maximum
likelihood method proposed by Bartolucci & Nigro (2012) recursively,
adapted from Krailo & Pike (1984).}
\usage{
quasi_sym_pseudo(eta,qi,s,y0=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{eta}{individual vector of products between covariate and parameters}
\item{s}{total score of the individual}
\item{qi}{Vector of quantities from first step estimation}
\item{y0}{Individual initial observation for dynamic models}
}
\value{
\item{f}{value of the denominator}
\item{d1}{first derivative of the recursive function}
\item{dl1}{a component of the score function}
\item{D2}{second derivative of the recursive function}
\item{Dl2}{a component for the Hessian matrix}
}
\references{
Bartolucci, F. and Nigro, V. (2010), A dynamic model for binary panel data with unobserved heterogeneity admitting a root-n consistent conditional estimator, \emph{Econometrica}, \bold{78}, 719-733.
Bartolucci, F. and Nigro, V. (2012), Pseudo conditional maximum
likelihood estimation of the dynamic logit model for binary panel data,
\emph{Journal of Econometrics}, \bold{170}, 102-116.
Bartolucci, F., Valentini. F., & Pigini, C. (2021), Recursive Computation of the Conditional Probability Function of the Quadratic Exponential Model for Binary Panel Data, \emph{Computational Economics}, https://doi.org/10.1007/s10614-021-10218-2.
Krailo, M. D., & Pike, M. C. (1984). Algorithm AS 196: conditional multivariate logistic analysis of stratified case-control studies, \emph{Journal of the Royal Statistical Society. Series C (Applied Statistics)}, \bold{33(1)}, 95-103.
}
\author{
Francesco Bartolucci (University of Perugia), Claudia Pigini (University of Ancona "Politecnica delle Marche"), Francesco Valentini (University
of Ancona "Politecnica delle Marche")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{panel data} |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{fsmoother_smooth.spline}
\alias{fsmoother_smooth.spline}
\title{smooth.spline wrapper}
\usage{
fsmoother_smooth.spline(x, y, ...)
}
\arguments{
\item{x}{numeric vector of x values}
\item{y}{vector of y values}
\item{...}{passed to \code{stats::smooth.spline}}
}
\description{
Makes a smoother function that returns the y-value
given and x-value. Also allows the derivative to be
returned using the \code{deriv} argument
}
| /man/fsmoother_smooth.spline.Rd | no_license | kdauria/wellz | R | false | false | 484 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{fsmoother_smooth.spline}
\alias{fsmoother_smooth.spline}
\title{smooth.spline wrapper}
\usage{
fsmoother_smooth.spline(x, y, ...)
}
\arguments{
\item{x}{numeric vector of x values}
\item{y}{vector of y values}
\item{...}{passed to \code{stats::smooth.spline}}
}
\description{
Makes a smoother function that returns the y-value
given and x-value. Also allows the derivative to be
returned using the \code{deriv} argument
}
|
# Exercise 4: Working with Data Frames
# Load R's "USPersonalExpenditure" dataest using the `data()` function
data("USPersonalExpenditure")
# The variable USPersonalExpenditure is now accessible to you. Unfortunately, it's not a data.frame
# Test this using the is.data.frame function
is.data.frame("USPersonalExpenditure")
# Luckily, you can simply pass the USPersonalExpenditure variable to the data.frame function
# to convert it a data.farme
# Create a new variable by passing the USPersonalExpenditure to the data.frame function
spending <- data.frame(USSpending = USPersonalExpenditure)
# What are the column names of your dataframe?
colnames(spending)
# Why are they so strange?
# What are the row names of your dataframe?
rownames(spending)
row
# Create a column `category` that is equal to your rownames
spending$category <- row.names(spending)
# How much money was spent on personal care in 1940?
personalCare.1940 <- spending["Personal Care", "USSpending.1940"]
# How much money was spent on Food and Tobacco in 1960
foodAndTobacco.1960 <- spending["Food and Tobacco", "USSpending.1960"]
# What was the highest expenditure category in 1960?
highest.1960 <- max(spending[, "USSpending.1960"])
### Bonus ###
# Write a function that takes in a year as a parameter, and
# returns the highest spending category of that year
highest <- function(year) {
return (row.name(max(spending[, "USSpending.1960"])))
}
# Using your function, determine the highest spending category of each year
highest(1960)
# Write a loop to cycle through the years, and store the highest spending category of
# each year in a list
| /exercise-4/exercise.R | permissive | 16AnishV/m8-dataframes | R | false | false | 1,631 | r | # Exercise 4: Working with Data Frames
# Load R's "USPersonalExpenditure" dataest using the `data()` function
data("USPersonalExpenditure")
# The variable USPersonalExpenditure is now accessible to you. Unfortunately, it's not a data.frame
# Test this using the is.data.frame function
is.data.frame("USPersonalExpenditure")
# Luckily, you can simply pass the USPersonalExpenditure variable to the data.frame function
# to convert it a data.farme
# Create a new variable by passing the USPersonalExpenditure to the data.frame function
spending <- data.frame(USSpending = USPersonalExpenditure)
# What are the column names of your dataframe?
colnames(spending)
# Why are they so strange?
# What are the row names of your dataframe?
rownames(spending)
row
# Create a column `category` that is equal to your rownames
spending$category <- row.names(spending)
# How much money was spent on personal care in 1940?
personalCare.1940 <- spending["Personal Care", "USSpending.1940"]
# How much money was spent on Food and Tobacco in 1960
foodAndTobacco.1960 <- spending["Food and Tobacco", "USSpending.1960"]
# What was the highest expenditure category in 1960?
highest.1960 <- max(spending[, "USSpending.1960"])
### Bonus ###
# Write a function that takes in a year as a parameter, and
# returns the highest spending category of that year
highest <- function(year) {
return (row.name(max(spending[, "USSpending.1960"])))
}
# Using your function, determine the highest spending category of each year
highest(1960)
# Write a loop to cycle through the years, and store the highest spending category of
# each year in a list
|
# for (iterator in set_of_values){
# do a thing for iterator items
# }
output_vector <- c()
for (i in 1:5){
print(i)
for (j in c("a", "b", "c", "d", "e")){
temp_output <- paste(i,j)
output_vector <- c(output_vector, temp_output)
}
}
output_matrix <- matrix(nrow = 5,
ncol = 5)
j_vector <- c("a", "b", "c", "d", "e")
for(i in 1:5){
print(paste("row", i, "going into matrix"))
for (j in 1:5){
temp_j_value <- j_vector[j]
temp_output <- paste (i, temp_j_value)
output_matrix[i, j] <- temp_output
}
}
output_vector2 <- as.vector(output_matrix)
#while (a condition is true) {
# do a thing
#}
z <- 1
while(z > 0.1){
z <- runif(1)
cat(z, "\n")
}
mtcars
#Write a script that loops through mtcars by cyl
#and prints out mean mpg for each category of cyl
#Step 1
unique()
#Step 2
#looping over unique values of cyl
#temporary values
#remember subset notation
mtcars[mtcars$cyl == 4, ]
#mean function
mean()
### vectorization
x <- 1:5
x
x_cm<- x*2.14
y <- 6:10
x + y
mtcars
0.43 conversion
mtcars$kpl <- mtcars$mpg * 0.43
log10(x)
class(mtcars)
mtcars_col_class<- lapply(X=mtcars,
FUN = mean, na.rm = TRUE)
ul <- unlist(mtcars_col_class)
mean(mtcars, na.rm=TRUE)
mtcars[]<- lapply(X = mtcars,
FUN = function(x) x/mean(x))
mtcars$mpg/mean(mtcars$mpg)
mtcars$mpg/0.5
for (i in 1:length(mtcars)){
print(mtcars[[i]]/mean(mtcars[[i]]))
}
source("20201210_temp_conversions_functions.R")
boiling <- fahr_to_kelvin(temp = 212)
freezing <- fahr_to_kelvin(temp = 32)
abs_zero <- kelvin_to_celsius(temp = 0)
freezing_c <- fahr_to_celsius(temp = 32)
fahr_to_celsius(temp = "zero") #testing our stopifnot condition
test_conversion <- fahr_to_celsius(label_to_print = "Degrees in Celsius")
| /20201210_repetitive_R_code.R | permissive | oulib-resdata/repetitive_r | R | false | false | 1,763 | r | # for (iterator in set_of_values){
# do a thing for iterator items
# }
output_vector <- c()
for (i in 1:5){
print(i)
for (j in c("a", "b", "c", "d", "e")){
temp_output <- paste(i,j)
output_vector <- c(output_vector, temp_output)
}
}
output_matrix <- matrix(nrow = 5,
ncol = 5)
j_vector <- c("a", "b", "c", "d", "e")
for(i in 1:5){
print(paste("row", i, "going into matrix"))
for (j in 1:5){
temp_j_value <- j_vector[j]
temp_output <- paste (i, temp_j_value)
output_matrix[i, j] <- temp_output
}
}
output_vector2 <- as.vector(output_matrix)
#while (a condition is true) {
# do a thing
#}
z <- 1
while(z > 0.1){
z <- runif(1)
cat(z, "\n")
}
mtcars
#Write a script that loops through mtcars by cyl
#and prints out mean mpg for each category of cyl
#Step 1
unique()
#Step 2
#looping over unique values of cyl
#temporary values
#remember subset notation
mtcars[mtcars$cyl == 4, ]
#mean function
mean()
### vectorization
x <- 1:5
x
x_cm<- x*2.14
y <- 6:10
x + y
mtcars
0.43 conversion
mtcars$kpl <- mtcars$mpg * 0.43
log10(x)
class(mtcars)
mtcars_col_class<- lapply(X=mtcars,
FUN = mean, na.rm = TRUE)
ul <- unlist(mtcars_col_class)
mean(mtcars, na.rm=TRUE)
mtcars[]<- lapply(X = mtcars,
FUN = function(x) x/mean(x))
mtcars$mpg/mean(mtcars$mpg)
mtcars$mpg/0.5
for (i in 1:length(mtcars)){
print(mtcars[[i]]/mean(mtcars[[i]]))
}
source("20201210_temp_conversions_functions.R")
boiling <- fahr_to_kelvin(temp = 212)
freezing <- fahr_to_kelvin(temp = 32)
abs_zero <- kelvin_to_celsius(temp = 0)
freezing_c <- fahr_to_celsius(temp = 32)
fahr_to_celsius(temp = "zero") #testing our stopifnot condition
test_conversion <- fahr_to_celsius(label_to_print = "Degrees in Celsius")
|
library(rio)
library(tidyr)
library(dplyr)
library(nlme)
library(MASS)
library(ggplot2)
library(scales)
library(car)
library(AER)
library(mice)
library(naniar)
library(flextable)
library(officer)
getwd()  # show the working directory; all data paths below are relative to it
# Import the aggregated dataset.
# Dataset includes item level RAMP data
# merged with citation info from Crossref
# and OA availability data from Unpaywall.
# (rio::import() picks the reader from the .csv extension.)
dat <- import("../data/ramp_crossref_unpaywall_merged.csv")
# Flag inconsistent rows: Unpaywall reports the IR as an OA location
# (ir_is_oa_loc == TRUE) yet counts zero IR-hosted OA copies (102 rows).
# NOTE: filter(count_error != TRUE) KEEPS rows where count_error is FALSE
# and DROPS rows where it is TRUE *or NA* (dplyr::filter discards rows
# whose condition evaluates to NA), so more rows than the 102 flagged
# ones may be removed.
dat$count_error <- (dat$ir_is_oa_loc == TRUE & dat$ct_ir_oa_copies == 0)
dat <- dat %>% filter(count_error != TRUE)
# Drop columns related to which IR hosts the item, and also the method
# used to extract the DOI from item level metadata; keep only the fields
# used downstream (OA flags/counts, years, SERP clicks, citations).
# NOTE: the original selection listed ct_ir_oa_copies three times;
# dplyr::select() ignores repeats, so listing it once is equivalent.
dat_adj <- dplyr::select(dat,
                  ir_is_oa_loc,
                  ir_pub_year,
                  cref_created_year,
                  doi,
                  ct_oa_copies,
                  ct_ir_oa_copies,
                  ct_dr_oa_copies,
                  ct_pub_oa_copies,
                  ct_other_oa_copies,
                  item_uri_sum_clicks,
                  ct_citations)
# Keep only items uploaded to an IR before 2017 (i.e. drop 2017 and
# later). This filters out items that had been available from an IR
# for less than 2 years before RAMP data were collected.
dat_adj <- dat_adj%>%
  filter(ir_pub_year < 2017)
# Limit items to those for which the IR year of publication is at most
# 1 year after the year the Crossref DOI was created (difference of 0
# or 1; items uploaded before DOI creation are excluded too).
dat_adj$ir_pub_year <- as.numeric(dat_adj$ir_pub_year)
dat_adj$cref_created_year <- as.numeric(dat_adj$cref_created_year)
dat_adj$pub_yr_diff <- dat_adj$ir_pub_year - dat_adj$cref_created_year
dat_adj_pub_yr <- dat_adj %>% filter(pub_yr_diff == 0 | pub_yr_diff == 1)
# Check for incomplete observations (mice::md.pattern prints the
# missing-data pattern; downstream models assume complete cases).
md.pattern(dat_adj_pub_yr, rotate.names = TRUE)
# Adjust citations by year.
# Average the Crossref DOI year of creation and IR year of upload/publication
# to create a single column to refer to for calculating years of availability.
dat_adj_pub_yr$avg_pub_year <- (dat_adj_pub_yr$cref_created_year +
                          dat_adj_pub_yr$ir_pub_year)/2
# Number of years an item has been available, referenced to 2020 (when
# citation data were harvested from Crossref): 2020 - floor(avg_pub_year),
# clamped to [5, 17]. This reproduces the original 12-level nested
# ifelse() table exactly: anything published before 2004 counts as 17
# years of availability, anything from 2015 onward counts as 5, and
# NA averages stay NA.
dat_adj_pub_yr$year <- pmin(pmax(2020 - floor(dat_adj_pub_yr$avg_pub_year), 5), 17)
# Citations per year of availability (the response variable used in the
# ANCOVA models below).
dat_adj_pub_yr$ct_citations_adj <- dat_adj_pub_yr$ct_citations/dat_adj_pub_yr$year
# Every item in the dataset has at least one OA copy hosted by an IR, but
# not every IR in the study had been harvested by Unpaywall at the time of
# data collection. Where Unpaywall missed the hosting RAMP IR
# (ir_is_oa_loc == FALSE), add 1 to both the IR-hosted OA copy count and
# the total OA copy count so the hosting IR's copy is always represented.
adj_dat <- dat_adj_pub_yr %>%
  mutate(
    missing_ir_copy = ifelse(ir_is_oa_loc, 0L, 1L),
    ct_ir_oa_copies_adj = ct_ir_oa_copies + missing_ir_copy,
    ct_oa_copies_adj = ct_oa_copies + missing_ir_copy
  ) %>%
  dplyr::select(-missing_ir_copy)
#--Combine DOIs
# There are five DOIs with 2 IR hosts occurring in the remaining data.
# These are not true duplicates, as they are two distinct copies of an
# item hosted by different IR. Their search engine performance data will
# be combined into a single observation for each DOI.
# View the DOIs with 2 hosts (count_dois = non-NA rows per DOI group):
adj_dat %>%
  group_by(doi) %>%
  summarise(count_dois = sum(!is.na(doi))) %>%
  filter(count_dois > 1)
# Collapse to one observation per DOI: SERP clicks are summed across the
# hosting IRs, while the Crossref/Unpaywall statistics (citations, OA
# copy counts) are DOI-level values, so averaging them avoids double
# counting.
adj_dat_n <- adj_dat%>%
  group_by(doi)%>%
  summarize(click = sum(item_uri_sum_clicks, na.rm = TRUE),
            ir_c = mean(ct_ir_oa_copies, na.rm = TRUE),
            ir_c_adj = mean(ct_ir_oa_copies_adj, na.rm = TRUE),
            citation_c = mean(ct_citations, na.rm = TRUE),
            citation_c_adj = mean(ct_citations_adj, na.rm = TRUE),
            oa_c = mean(ct_oa_copies, na.rm = TRUE),
            oa_c_adj = mean(ct_oa_copies_adj, na.rm = TRUE),
            dr_c = mean(ct_dr_oa_copies, na.rm = TRUE),
            other_c = mean(ct_other_oa_copies, na.rm = TRUE),
            pub_c = mean(ct_pub_oa_copies, na.rm = TRUE))
# Quick sanity check of the collapsed, one-row-per-DOI analysis set.
summary(adj_dat_n)
#---Transform data for the ANCOVA analysis.
# Dichotomize click counts at the median (3 clicks). Building the factor
# directly around ifelse() keeps the "Median and below" group as the
# reference (first) level.
adj_dat_n$click_b <- factor(
  ifelse(adj_dat_n$click <= 3, "Median and below or 1-3 clicks", "Above median"),
  levels = c("Median and below or 1-3 clicks", "Above median")
)
prop.table(table(adj_dat_n$click_b))
summary(adj_dat_n)
# Dichotomize the adjusted total OA copy count at the median (2 copies).
adj_dat_n$oa_c_adj_n <- factor(
  ifelse(adj_dat_n$oa_c_adj > 2, "Above median or 3 or more copies",
         "Median and below or 1-2 copies"),
  levels = c("Median and below or 1-2 copies", "Above median or 3 or more copies")
)
prop.table(table(adj_dat_n$oa_c_adj_n))
# Dichotomize the adjusted IR-hosted copy count (median is 1 copy).
adj_dat_n$ir_c_adj_c <- factor(
  ifelse(adj_dat_n$ir_c_adj == 1, "Median and below or 1 copy",
         "Above median or more than 1 copy"),
  levels = c("Median and below or 1 copy",
             "Above median or more than 1 copy")
)
prop.table(table(adj_dat_n$ir_c_adj_c))
summary(adj_dat_n)
# Binary availability indicators: "1" when at least one OA copy of the
# given host type exists, "0" otherwise.
flag_any <- function(x) ifelse(x > 0, "1", "0")
# Disciplinary repositories.
adj_dat_n$dr_c_b <- flag_any(adj_dat_n$dr_c)
prop.table(table(adj_dat_n$dr_c_b))
# "Other" OA host types.
adj_dat_n$other_c_b <- flag_any(adj_dat_n$other_c)
prop.table(table(adj_dat_n$other_c_b))
# Publisher-provided OA.
adj_dat_n$pub_c_b <- flag_any(adj_dat_n$pub_c)
prop.table(table(adj_dat_n$pub_c_b))
#-----Use descriptive statistics to explore whether clicks
# and number of types of OA copies are related to
# citation rates.
summary(adj_dat_n)
# Helper: descriptive statistics (n, mean, sd, median, min, max) of the
# per-year citation rate, grouped by one categorical variable. The
# grouping column is embraced with {{ }} (dplyr tidy evaluation) so it
# can be passed unquoted; this replaces six copy-pasted pipelines.
cite_stats_by <- function(df, group_var) {
  df %>%
    dplyr::select(citation_c_adj, {{ group_var }}) %>%
    group_by({{ group_var }}) %>%
    summarise(n = n(),
              mean = mean(citation_c_adj),
              sd = sd(citation_c_adj),
              median = median(citation_c_adj),
              min = min(citation_c_adj),
              max = max(citation_c_adj))
}
# Citation rate mean differences across click groups.
# Data are reported in Table 2 of the manuscript.
cite_stats_by(adj_dat_n, click_b)
# Citation rate mean differences by OA host type.
# Data for all host types are reported in Table 3 of the manuscript.
# Total OA availability.
cite_stats_by(adj_dat_n, oa_c_adj_n)
# IR-hosted copies.
cite_stats_by(adj_dat_n, ir_c_adj_c)
# Disciplinary repositories (binary).
cite_stats_by(adj_dat_n, dr_c_b)
# Publisher-provided OA (binary).
cite_stats_by(adj_dat_n, pub_c_b)
# "Other" OA host types (binary).
cite_stats_by(adj_dat_n, other_c_b)
###########---------------------ANCOVA as a generalized linear model
# Test for correlations between clicks received from search engine
# results pages, citations, and availability from different types of
# OA hosts.
library(lmtest)    # NOTE(review): no lmtest call is visible below -- confirm needed
library(sandwich)  # NOTE(review): no sandwich call is visible below -- confirm needed
library(car)       # already attached at the top of the script; harmless reload
library(broom)     # augment() is used below for outlier diagnostics
# Add a row identifier so influential observations flagged from the model
# frame can be matched back to (and removed from) the data.
# seq_len() is used instead of 1:nrow(), which would yield c(1, 0) on an
# empty data frame.
adj_dat_n <- cbind(index = seq_len(nrow(adj_dat_n)), adj_dat_n)
#------- Citation effects based on total OA availability
# ANCOVA via lm(): per-year citation rate ~ click group + total-OA group.
m1 <- lm(citation_c_adj ~ click_b + oa_c_adj_n, data = adj_dat_n)
summary(m1)
anova(m1)
#---- Assumptions
#- Normality of residuals (visual check).
res <- m1$residuals
hist(res)
# We can't assume normality of residuals
#- Outlier handling: inspect the top-3 Cook's distances, then flag all
#  observations with |standardized residual| > 3 for removal.
m1_1 <- augment(m1) %>%
  mutate(index = 1:n())
m1_1 %>% top_n(3, .cooksd)
list_1 <- m1_1 %>%
  filter(abs(.std.resid) > 3)
# Drop the flagged rows. Model-frame row positions line up with
# adj_dat_n$index (the data are complete cases), so a direct index filter
# replaces the original bind_rows()/duplicated() round trip, which kept
# only rows whose index appeared exactly once -- the same row set.
adj_dat_n_1 <- adj_dat_n %>% filter(!index %in% list_1$index)
#-Run the model again without outliers.
# Results are presented in Table 5 of the manuscript.
m1_2 <- lm(citation_c_adj ~ click_b + oa_c_adj_n, data = adj_dat_n_1)
summary(m1_2)
anova(m1_2)
#------- Citation effects based on availability from IR
# ANCOVA via lm(): per-year citation rate ~ click group + IR-copies group.
m2 <- lm(citation_c_adj ~ click_b + ir_c_adj_c, data = adj_dat_n)
summary(m2)
#---- Assumptions
#- Normality of residuals (visual check).
res <- m2$residuals
hist(res)
# We can't assume normality of residuals
#- Outlier handling: inspect the top-3 Cook's distances, then flag all
#  observations with |standardized residual| > 3 for removal.
m2_1 <- augment(m2) %>%
  mutate(index = 1:n())
m2_1 %>% top_n(3, .cooksd)
list_2 <- m2_1 %>%
  filter(abs(.std.resid) > 3)
# Drop the flagged rows by index (model-frame rows align with
# adj_dat_n$index because the data are complete cases; equivalent to the
# original bind_rows()/duplicated() construction).
adj_dat_n_2 <- adj_dat_n %>% filter(!index %in% list_2$index)
str(adj_dat_n_2)
#-Run the model again with outliers removed.
# Results are reported in Table 5 of the manuscript.
m2_2 <- lm(citation_c_adj ~ click_b + ir_c_adj_c, data = adj_dat_n_2)
summary(m2_2)
anova(m2_2)
#------- Citation effects based on availability from disciplinary repositories
# ANCOVA via lm(): per-year citation rate ~ click group + DR availability.
m3 <- lm(citation_c_adj ~ click_b + dr_c_b, data = adj_dat_n)
summary(m3)
#---- Assumptions
#- Normality of residuals (visual check).
res <- m3$residuals
hist(res)
# We can't assume normality of residuals
#- Outlier handling: inspect the top-3 Cook's distances, then flag all
#  observations with |standardized residual| > 3 for removal.
m3_1 <- augment(m3) %>%
  mutate(index = 1:n())
m3_1 %>% top_n(3, .cooksd)
list_3 <- m3_1 %>%
  filter(abs(.std.resid) > 3)
# Drop the flagged rows by index (model-frame rows align with
# adj_dat_n$index because the data are complete cases; equivalent to the
# original bind_rows()/duplicated() construction).
adj_dat_n_3 <- adj_dat_n %>% filter(!index %in% list_3$index)
str(adj_dat_n_3)
#-Run the model again with outliers removed.
# Results are reported in Table 5 of the manuscript.
m3_2 <- lm(citation_c_adj ~ click_b + dr_c_b, data = adj_dat_n_3)
summary(m3_2)
anova(m3_2)
#------- Citation effects based on availability of publisher-provided OA
# ANCOVA via lm(): per-year citation rate ~ click group + publisher OA.
m4 <- lm(citation_c_adj ~ click_b + pub_c_b, data = adj_dat_n)
summary(m4)
#---- Assumptions
#- Normality of residuals (visual check).
res <- m4$residuals
hist(res)
# We can't assume normality of residuals
#- Outlier handling: inspect the top-3 Cook's distances, then flag all
#  observations with |standardized residual| > 3 for removal.
m4_1 <- augment(m4) %>%
  mutate(index = 1:n())
m4_1 %>% top_n(3, .cooksd)
list_4 <- m4_1 %>%
  filter(abs(.std.resid) > 3)
# Drop the flagged rows by index (model-frame rows align with
# adj_dat_n$index because the data are complete cases; equivalent to the
# original bind_rows()/duplicated() construction).
adj_dat_n_4 <- adj_dat_n %>% filter(!index %in% list_4$index)
str(adj_dat_n_4)
#-Run the model again without outliers.
# Results are reported in Table 5 of the manuscript.
m4_2 <- lm(citation_c_adj ~ click_b + pub_c_b, data = adj_dat_n_4)
summary(m4_2)
anova(m4_2)
#-------Citation effects based on availability of "other" types of OA.
# ANCOVA via lm(): per-year citation rate ~ click group + other-OA flag.
m5 <- lm(citation_c_adj ~ click_b + other_c_b, data = adj_dat_n)
summary(m5)
#---- Assumptions
#- Normality of residuals (visual check).
res <- m5$residuals
hist(res)
# We can't assume normality of residuals
#- Outlier handling: inspect the top-3 Cook's distances, then flag all
#  observations with |standardized residual| > 3 for removal.
m5_1 <- augment(m5) %>%
  mutate(index = 1:n())
m5_1 %>% top_n(3, .cooksd)
list_5 <- m5_1 %>%
  filter(abs(.std.resid) > 3)
# Drop the flagged rows by index (model-frame rows align with
# adj_dat_n$index because the data are complete cases; equivalent to the
# original bind_rows()/duplicated() construction).
adj_dat_n_5 <- adj_dat_n %>% filter(!index %in% list_5$index)
#-Run the model again without outliers.
# Results are reported in Table 5 of the manuscript.
m5_2 <- lm(citation_c_adj ~ click_b + other_c_b, data = adj_dat_n_5)
summary(m5_2)
anova(m5_2)
#---Citation effects based on number of clicks received.
# One-way model: per-year citation rate ~ click group only.
m6 <- lm(citation_c_adj ~ click_b, data = adj_dat_n)
anova(m6)
#---- Assumptions
#- Normality of residuals (visual check).
res <- m6$residuals
hist(res)
# We can't assume normality of residuals
#- Outlier handling: inspect the top-3 Cook's distances, then flag all
#  observations with |standardized residual| > 3 for removal.
m6_1 <- augment(m6) %>%
  mutate(index = 1:n())
m6_1 %>% top_n(3, .cooksd)
list_6 <- m6_1 %>%
  filter(abs(.std.resid) > 3)
# Drop the flagged rows by index (model-frame rows align with
# adj_dat_n$index because the data are complete cases; equivalent to the
# original bind_rows()/duplicated() construction).
adj_dat_n_6 <- adj_dat_n %>% filter(!index %in% list_6$index)
#-Run the model again without outliers.
# Results are reported in Table 4 in the manuscript.
m6_2 <- lm(citation_c_adj ~ click_b, data = adj_dat_n_6)
anova(m6_2)
summary(m6_2)
# No new analysis from here forward.
# Remaining code draws tables for the manuscript.
# Note: Tables are not included in the github repository.
## Table 1: Group analyzed data by year
# Count of analyzed items per IR upload year plus each year's share (%).
# The denominator is computed from the data; the original hard-coded the
# literal 13457 (== nrow(adj_dat)), which would silently go stale if the
# upstream filters changed.
yd <- adj_dat %>%
  dplyr::select(ir_pub_year) %>%
  group_by(ir_pub_year) %>%
  summarise(count_year = n(),
            proportion = round((n()/nrow(adj_dat))*100, digits = 2))
yd
# Render as a Word-ready flextable.
t1_flex <- flextable(yd) %>%
  colformat_num(j = 1, big.mark = "") %>%  # show years as 2016, not 2,016
  set_header_labels(
    ir_pub_year = "Year uploaded to IR",
    count_year = "Count",
    proportion = "Proportion") %>%
  set_caption(caption = "Table 1: Count of items by year of upload to RAMP IR host") %>%
  set_table_properties(width = 1, layout = "autofit")
t1_flex
save_as_docx(t1_flex, values = NULL,
             path = "../figures/Table_1.docx")
# Table 2: Open Access Availability by Host Type
# Descriptive stats: count of items with OA copies per host type and the
# percentage of all analyzed items. The caption's N is computed from
# nrow(adj_dat_n) rather than hard-coded (the original literal 13452
# could drift out of sync with the data).
t2_data <- adj_dat_n %>%
  summarize("Items with OA availability" = sum(!is.na(oa_c_adj)),
            "Items hosted by one or more IR" = sum(ir_c_adj > 0),
            "Items also hosted by disciplinary repositories" = sum(dr_c > 0),
            "Items also hosted by publisher OA repositories" = sum(pub_c > 0),
            "Items also hosted by other types of OA repositories" = sum(other_c >0)) %>%
  pivot_longer(
    cols = c(starts_with("Items")),
    names_to = "OA Host Type",
    values_to = "Frequency"
  )
t2_data$"Percentage of Observations" <- round((t2_data$Frequency/nrow(adj_dat_n))*100, 2)
t2_flex <- flextable(t2_data) %>%
  set_caption(caption = paste0(
    "Table 2: Open Access Availability by Host Type (N = ",
    nrow(adj_dat_n), ")")) %>%
  set_table_properties(width = 1, layout = "autofit")
t2_flex
save_as_docx(t2_flex, values = NULL,
             path = "../figures/Table_2.docx")
# Table 3: Distribution of items across disciplinary repositories
# Note: This table uses a different dataset
t3_data <- import("../data/ramp_crossref_unpaywall_by_hosts.csv")
dh <- t3_data %>%
filter(repo_subtype == "disciplinary") %>%
dplyr::select(repo_name) %>%
group_by(repo_name) %>%
summarise(count_dr = n()) %>%
arrange(desc(count_dr), repo_name)
dh
t3_flex <- flextable(dh) %>%
set_caption(caption = "Table 3: Distribution of items across disciplinary repositories.") %>%
set_header_labels(
repo_name = "Repository",
count_dr = "Count"
) %>%
set_table_properties(width = 1, layout = "autofit")
t3_flex
save_as_docx(t3_flex, values = NULL,
path = "../figures/Table_3.docx")
# Table 4: Citation rate mean differences across click groups (desc stats)
t4_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, click_b) %>%
group_by(click_b) %>%
rename("Click group" = click_b) %>%
summarise(N = n(),
"Mean citations" = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
"Median citations" = round(median(citation_c_adj), 2),
"Min citations" = round(min(citation_c_adj), 0),
"Max citations" = round(max(citation_c_adj), 0))
t4_flex <- flextable(t4_data) %>%
set_caption(caption = "Table 4: Citation mean differences across click groups") %>%
set_table_properties(width = 1, layout = "autofit")
t4_flex
save_as_docx(t4_flex, values = NULL,
path = "../figures/Table_4.docx")
# Table 5
# Citation rate mean differences across sub-groups of
# different types of OA repositories
# Including % of total observations
# All OA hosts
t5_oa_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, oa_c_adj_n) %>%
group_by(oa_c_adj_n) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_oa_data$oa_c_adj_n <- as.character(t5_oa_data$oa_c_adj_n)
t5_oa_data <- t5_oa_data %>% rename(Category = oa_c_adj_n)
t5_oa_data$Host <- "All OA hosts"
# IR
t5_ir_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, ir_c_adj_c) %>%
group_by(ir_c_adj_c) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_ir_data$ir_c_adj_c <- as.character(t5_ir_data$ir_c_adj_c)
t5_ir_data <- t5_ir_data %>% rename(Category = ir_c_adj_c)
t5_ir_data$Host <- "Institutional repositories"
# DR
t5_dr_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, dr_c_b) %>%
group_by(dr_c_b) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_dr_data$dr_c_b <- as.character(t5_dr_data$dr_c_b)
t5_dr_data <- t5_dr_data %>% rename(Category = dr_c_b)
t5_dr_data$Host <- "Disciplinary repositories"
# Pub
t5_pub_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, pub_c_b) %>%
group_by(pub_c_b) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_pub_data$pub_c_b <- as.character(t5_pub_data$pub_c_b)
t5_pub_data <- t5_pub_data %>% rename(Category = pub_c_b)
t5_pub_data$Host <- "Publisher OA"
# Other
t5_oth_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, other_c_b) %>%
group_by(other_c_b) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_oth_data$other_c_b <- as.character(t5_oth_data$other_c_b)
t5_oth_data <- t5_oth_data %>% rename(Category = other_c_b)
t5_oth_data$Host <- "Other OA"
library(plyr)
dfs <- list(t5_oa_data, t5_ir_data, t5_dr_data, t5_pub_data, t5_oth_data)
t5_data <- ldply(dfs, rbind)
detach("package:plyr", unload = TRUE)
# Make "Host" first column
t5_data <- t5_data %>% relocate(Host, .before = Category)
# Combined table for descriptive stats of citations
# based on OA availability
t5_flex <- flextable(t5_data) %>%
merge_v(j = ~ Host) %>%
hline(part = "body") %>%
vline(part = "body") %>%
set_caption(caption = "Table 5: Citation means by OA host type.") %>%
set_table_properties(width = 1, layout = "autofit")
t5_flex
save_as_docx(t5_flex, values = NULL,
path = "../figures/Table_5.docx")
# Table 6: ANCOVA Citation mean differences between click groups
t6_flex <- as_flextable(m6_2) %>%
set_caption(caption = "Table 6: Average annual citation rates by click groups.") %>%
set_table_properties(width = 1, layout = "autofit")
t6_flex
save_as_docx(t6_flex, values = NULL,
path = "../figures/Table_6.docx")
# Table 7: Citation effects of different OA host sub-types
library(stargazer)
stargazer(m1_2,
m2_2,
m3_2,
m4_2,
m5_2,
type="html",
dep.var.labels = "Per-year citation rate means",
covariate.labels = c('Intercept',
'Clicks above median',
'Total OA copies above median',
'Count IR copies above median',
'Disciplinary repository OA available',
'Publisher OA available',
'Other OA services available'),
#ci = TRUE,
#single.row = TRUE,
intercept.bottom = FALSE,
intercept.top = TRUE,
align = TRUE,
report = "vcst*",
out = "../figures/Table_7.doc",
notes = "Table 7: Citation impact of additional OA copies of items held by repository type.")
| /scripts/ramp_crossref_unpaywall_correlations.R | no_license | imls-measuring-up/ramp_citation_analysis | R | false | false | 24,844 | r | library(rio)
library(tidyr)
library(dplyr)
library(nlme)
library(MASS)
library(ggplot2)
library(scales)
library(car)
library(AER)
library(mice)
library(naniar)
library(flextable)
library(officer)
getwd()
# Import the aggregated dataset.
# Dataset includes item level RAMP data
# merged with citation info from Crossref
# and OA availability data from Unpaywall.
dat <- import("../data/ramp_crossref_unpaywall_merged.csv")
# Drop rows where ir_is_oa_loc == TRUE but ct_ir_oa_copies == 0 (102 rows).
# Note: filter() keeps only rows where count_error is FALSE, so rows where
# count_error is TRUE *or* NA are dropped -- more than just the 102 rows.
dat$count_error <- (dat$ir_is_oa_loc == TRUE & dat$ct_ir_oa_copies == 0)
dat <- dat %>% filter(count_error != TRUE)
# Drop columns related to which IR hosts the item,
# and also the method used to extract the DOI from
# item level metadata; keep only the fields used in the analysis below.
# Fix: the original call listed ct_ir_oa_copies twice. dplyr::select()
# returns a column selected by the same name twice only once, so the
# duplicate entry was redundant and is removed (no change in the result).
dat_adj <- dplyr::select(dat,
                         ir_is_oa_loc,
                         ir_pub_year,
                         cref_created_year,
                         doi,
                         ct_oa_copies,
                         ct_ir_oa_copies,
                         ct_dr_oa_copies,
                         ct_pub_oa_copies,
                         ct_other_oa_copies,
                         item_uri_sum_clicks,
                         ct_citations)
# Drop rows where IR publication year
# is before 2017. This filters out items
# that had been available from an IR
# for less than 2 years before RAMP data were collected.
dat_adj <- dat_adj%>%
filter(ir_pub_year < 2017)
# Limit items to those for which the IR year of
# publication is not more than 1 year from when the Crossref
# DOI was created.
dat_adj$ir_pub_year <- as.numeric(dat_adj$ir_pub_year)
dat_adj$cref_created_year <- as.numeric(dat_adj$cref_created_year)
# Difference may be 0 (same year) or 1 (IR upload one year after the DOI).
dat_adj$pub_yr_diff <- dat_adj$ir_pub_year - dat_adj$cref_created_year
dat_adj_pub_yr <- dat_adj %>% filter(pub_yr_diff == 0 | pub_yr_diff == 1)
# Check for incomplete observations.
md.pattern(dat_adj_pub_yr, rotate.names = TRUE)
# Adjust citations by year.
# Average the Crossref DOI year of creation and IR year of upload/publication
# to create a single column to refer to for calculating years of availability.
dat_adj_pub_yr$avg_pub_year <- (dat_adj_pub_yr$cref_created_year +
  dat_adj_pub_yr$ir_pub_year) / 2
# Number of years an item has been available, relative to 2020 (when
# citation data were harvested from Crossref), clamped to the 5..17 range.
# This closed form reproduces the original 13-level nested ifelse() exactly:
# an item with avg_pub_year in [y, y + 1) gets 2020 - y years, anything
# before 2004 is capped at 17, and anything from 2015 onward gets 5.
dat_adj_pub_yr$year <- pmin(pmax(2020 - floor(dat_adj_pub_yr$avg_pub_year), 5), 17)
# Create a column for the adjusted number
# of citations per year (average annual citation rate over availability).
dat_adj_pub_yr$ct_citations_adj <- dat_adj_pub_yr$ct_citations/dat_adj_pub_yr$year
# Every item in the dataset has at least one OA copy hosted by an IR.
# Not every IR in the study was harvested by Unpwayall at time of data collection,
# so make an adjustment to add 1 to count of IR hosted OA copies
# and also add 1 to count of total OA copies for any row where the RAMP IR
# that hosts an item was not listed as an OA host by Unpaywall.
adj_dat <- dat_adj_pub_yr %>%
mutate(ct_ir_oa_copies_adj = case_when(ir_is_oa_loc == FALSE ~ ct_ir_oa_copies + 1L,
ir_is_oa_loc == TRUE ~ ct_ir_oa_copies + 0L),
ct_oa_copies_adj = case_when(ir_is_oa_loc == FALSE ~ ct_oa_copies + 1L,
ir_is_oa_loc == TRUE ~ ct_oa_copies + 0L))
#--Combine DOIs
# There are five DOIs with 2 IR hosts occurring in the remaining data.
# These are not true duplicates, as they are two distinct copies of an
# item hosted by different IR. Their search engine performance data will
# be combined into a single observation for each DOI.
# View the DOIs with 2 hosts:
adj_dat %>%
group_by(doi) %>%
summarise(count_dois = sum(!is.na(doi))) %>%
filter(count_dois > 1)
# Combine: get sum of all clicks from SERP,
# average other stats to avoid double counting citations, etc.
adj_dat_n <- adj_dat%>%
group_by(doi)%>%
summarize(click = sum(item_uri_sum_clicks, na.rm = TRUE),
ir_c = mean(ct_ir_oa_copies, na.rm = TRUE),
ir_c_adj = mean(ct_ir_oa_copies_adj, na.rm = TRUE),
citation_c = mean(ct_citations, na.rm = TRUE),
citation_c_adj = mean(ct_citations_adj, na.rm = TRUE),
oa_c = mean(ct_oa_copies, na.rm = TRUE),
oa_c_adj = mean(ct_oa_copies_adj, na.rm = TRUE),
dr_c = mean(ct_dr_oa_copies, na.rm = TRUE),
other_c = mean(ct_other_oa_copies, na.rm = TRUE),
pub_c = mean(ct_pub_oa_copies, na.rm = TRUE))
summary(adj_dat_n)
#---Transform data for the ANCOVA analysis.
# Change click counts to categorical data
# (median split: <= 3 clicks vs above median).
adj_dat_n$click_b <-ifelse(adj_dat_n$click<=3, "Median and below or 1-3 clicks",
"Above median")
adj_dat_n$click_b <- factor(adj_dat_n$click_b, levels = c("Median and below or 1-3 clicks", "Above median"))
prop.table(table(adj_dat_n$click_b))
summary(adj_dat_n)
# Create categorical variable using adjusted count of total OA copies.
adj_dat_n$oa_c_adj_n <- ifelse(adj_dat_n$oa_c_adj>2, "Above median or 3 or more copies", "Median and below or 1-2 copies")
adj_dat_n$oa_c_adj_n <- factor(adj_dat_n$oa_c_adj_n, levels = c("Median and below or 1-2 copies", "Above median or 3 or more copies"))
prop.table(table(adj_dat_n$oa_c_adj_n))
# Create categorical variable using adjusted count of IR hosted copies.
adj_dat_n$ir_c_adj_c <- ifelse(adj_dat_n$ir_c_adj==1, "Median and below or 1 copy",
"Above median or more than 1 copy")
adj_dat_n$ir_c_adj_c <- factor(adj_dat_n$ir_c_adj_c, levels = c("Median and below or 1 copy",
"Above median or more than 1 copy"))
prop.table(table(adj_dat_n$ir_c_adj_c))
summary(adj_dat_n)
# Create binary variables based on availability of
# OA copies from disciplinary repositories.
adj_dat_n$dr_c_b <- ifelse(adj_dat_n$dr_c>0, "1", "0")
prop.table(table(adj_dat_n$dr_c_b))
# Create binary variables based on availability of
# OA copies from "other" OA host types.
adj_dat_n$other_c_b <- ifelse(adj_dat_n$other_c>0, "1", "0")
prop.table(table(adj_dat_n$other_c_b))
# Create binary variables based on availability of
# OA copies from publisher-provided OA.
adj_dat_n$pub_c_b <- ifelse(adj_dat_n$pub_c>0, "1", "0")
prop.table(table(adj_dat_n$pub_c_b))
#-----Use descriptive statistics to explore whether clicks
# and number of types of OA copies are related to
# citation rates.
summary(adj_dat_n)
# Citation rate mean differences across click groups.
# Data are reported in Table 2 of the manuscript
adj_dat_n %>%
dplyr::select(citation_c_adj, click_b) %>%
group_by(click_b) %>%
summarise(n = n(),
mean = mean(citation_c_adj),
sd = sd(citation_c_adj),
median = median(citation_c_adj),
min = min(citation_c_adj),
max = max(citation_c_adj))
# Citation rate mean differences by OA host type.
# Data for all host types are reported in Table 3 of the manuscript.
# The same summary is repeated once per grouping variable below.
# Total OA availability
adj_dat_n %>%
dplyr::select(citation_c_adj, oa_c_adj_n) %>%
group_by(oa_c_adj_n) %>%
summarise(n = n(),
mean = mean(citation_c_adj),
sd = sd(citation_c_adj),
median = median(citation_c_adj),
min = min(citation_c_adj),
max = max(citation_c_adj))
# IR
adj_dat_n %>%
dplyr::select(citation_c_adj, ir_c_adj_c) %>%
group_by(ir_c_adj_c) %>%
summarise(n = n(),
mean = mean(citation_c_adj),
sd = sd(citation_c_adj),
median = median(citation_c_adj),
min = min(citation_c_adj),
max = max(citation_c_adj))
# DR (binary)
adj_dat_n %>%
dplyr::select(citation_c_adj, dr_c_b) %>%
group_by(dr_c_b) %>%
summarise(n = n(),
mean = mean(citation_c_adj),
sd = sd(citation_c_adj),
median = median(citation_c_adj),
min = min(citation_c_adj),
max = max(citation_c_adj))
# Pub (binary)
adj_dat_n %>%
dplyr::select(citation_c_adj, pub_c_b) %>%
group_by(pub_c_b) %>%
summarise(n = n(),
mean = mean(citation_c_adj),
sd = sd(citation_c_adj),
median = median(citation_c_adj),
min = min(citation_c_adj),
max = max(citation_c_adj))
# Other (binary)
adj_dat_n %>%
dplyr::select(citation_c_adj, other_c_b) %>%
group_by(other_c_b) %>%
summarise(n = n(),
mean = mean(citation_c_adj),
sd = sd(citation_c_adj),
median = median(citation_c_adj),
min = min(citation_c_adj),
max = max(citation_c_adj))
###########---------------------ANCOVA as a generalized linear model
# Test for correlations between clicks received from search engine
# results pages, citations, and availability of different types of
# OA hosts.
# NOTE(review): the fit -> flag (|std. resid| > 3) -> drop -> refit sequence
# below is repeated verbatim for m1..m6 and could be factored into a helper.
# NOTE(review): matching augment() rows back to adj_dat_n by position assumes
# lm() dropped no rows (no NAs in model variables) -- confirm before reuse.
library(lmtest)
library(sandwich)
library(car)
library(broom)
# Row index used to map flagged outlier rows back to the data.
adj_dat_n <- cbind(index = 1:nrow(adj_dat_n), adj_dat_n)
#------- Citation effects based on total OA availability
m1 <- lm (citation_c_adj ~ click_b + oa_c_adj_n, data = adj_dat_n)
summary(m1)
anova(m1)
#---- Assumptions
#-Normality assumptions
res <- m1$residuals
hist(res)
# We can't assume normality of residuals
#-Deal with outliers
m1_1 <- augment(m1) %>%
mutate(index = 1:n())
# top_n() is superseded by slice_max() in current dplyr; kept as-is here.
m1_1 %>% top_n(3, .cooksd)
list_1 <- m1_1 %>%
filter(abs(.std.resid) > 3)
index <- list_1$index
list_1_n <- data.frame(index)
adj_dat_n_1 <- bind_rows(adj_dat_n, list_1_n)
# Extract the rows which appear only once to remove influential values.
# After bind_rows(), any index appearing twice marks a flagged outlier row;
# keeping only values seen once removes those rows.
adj_dat_n_1 <- adj_dat_n_1 [!(duplicated(adj_dat_n_1$index ) | duplicated(adj_dat_n_1$index , fromLast = TRUE)), ]
#-Run the model again without outliers.
# Results are presented in Table 5 of the manuscript.
m1_2 <- lm (citation_c_adj ~ click_b + oa_c_adj_n, data = adj_dat_n_1)
summary(m1_2)
anova(m1_2)
#------- Citation effects based on availability from IR
m2 <- lm (citation_c_adj ~ click_b + ir_c_adj_c, data = adj_dat_n)
summary(m2)
#---- Assumptions
#-Normality assumptions
res <- m2$residuals
hist(res)
# We can't assume normality of residuals
#-Deal with outliers
m2_1 <- augment(m2) %>%
mutate(index = 1:n())
m2_1 %>% top_n(3, .cooksd)
list_2 <- m2_1 %>%
filter(abs(.std.resid) > 3)
index <- list_2$index
list_2_n <- data.frame(index)
adj_dat_n_2 <- bind_rows(adj_dat_n, list_2_n)
# Extract the rows which appear only once to remove influential values.
adj_dat_n_2 <- adj_dat_n_2 [!(duplicated(adj_dat_n_2$index ) | duplicated(adj_dat_n_2$index , fromLast = TRUE)), ]
str(adj_dat_n_2)
#-Run the model again with outliers removed.
# Results are reported in Table 5 of the manuscript.
m2_2 <- lm (citation_c_adj ~ click_b + ir_c_adj_c, data = adj_dat_n_2)
summary(m2_2)
anova(m2_2)
#------- Citation effects based on availability from disciplinary repositories
m3 <- lm (citation_c_adj ~ click_b + dr_c_b, data = adj_dat_n)
summary(m3)
#---- Assumptions
#-Normality assumptions
res <- m3$residuals
hist(res)
# We can't assume normality of residuals
#-Deal with outliers.
m3_1 <- augment(m3) %>%
mutate(index = 1:n())
m3_1 %>% top_n(3, .cooksd)
list_3 <- m3_1 %>%
filter(abs(.std.resid) > 3)
index <- list_3$index
list_3_n <- data.frame(index)
adj_dat_n_3 <- bind_rows(adj_dat_n, list_3_n)
# Extract the rows which appear only once to remove influential values.
adj_dat_n_3 <- adj_dat_n_3 [!(duplicated(adj_dat_n_3$index ) | duplicated(adj_dat_n_3$index , fromLast = TRUE)), ]
str(adj_dat_n_3)
#-Run the model again with outliers removed.
# Results are reported in Table 5 of the manuscript.
m3_2 <- lm (citation_c_adj ~ click_b + dr_c_b, data = adj_dat_n_3)
summary(m3_2)
anova(m3_2)
#------- Citation effects based on availability of publisher-provided OA
m4 <- lm (citation_c_adj ~ click_b + pub_c_b, data = adj_dat_n)
summary(m4)
#---- Assumptions
#-Normality assumptions
res <- m4$residuals
hist(res)
# We can't assume normality of residuals
#-Deal with outliers
m4_1 <- augment(m4) %>%
mutate(index = 1:n())
m4_1 %>% top_n(3, .cooksd)
list_4 <- m4_1 %>%
filter(abs(.std.resid) > 3)
index <- list_4$index
list_4_n <- data.frame(index)
adj_dat_n_4 <- bind_rows(adj_dat_n, list_4_n)
# Extract the rows which appear only once to remove influential values
adj_dat_n_4 <- adj_dat_n_4 [!(duplicated(adj_dat_n_4$index ) | duplicated(adj_dat_n_4$index , fromLast = TRUE)), ]
str(adj_dat_n_4)
#-Run the model again without outliers.
# Results are reported in Table 5 of the manuscript
m4_2 <- lm (citation_c_adj ~ click_b + pub_c_b, data = adj_dat_n_4)
summary(m4_2)
anova(m4_2)
#-------Citation effects based on availability of "other" types of OA.
m5 <- lm (citation_c_adj ~ click_b + other_c_b, data = adj_dat_n)
summary(m5)
#---- Assumptions
#-Normality assumptions
res <- m5$residuals
hist(res)
# We can't assume normality of residuals
#-Deal with outliers
m5_1 <- augment(m5) %>%
mutate(index = 1:n())
m5_1 %>% top_n(3, .cooksd)
list_5 <- m5_1 %>%
filter(abs(.std.resid) > 3)
index <- list_5$index
list_5_n <- data.frame(index)
adj_dat_n_5 <- bind_rows(adj_dat_n, list_5_n)
# Extract the rows which appear only once to remove influential values
adj_dat_n_5 <- adj_dat_n_5 [!(duplicated(adj_dat_n_5$index ) | duplicated(adj_dat_n_5$index , fromLast = TRUE)), ]
#-Run the model again without outliers.
# Results are reported in Table 5 of the manuscript.
m5_2 <- lm (citation_c_adj ~ click_b + other_c_b, data = adj_dat_n_5)
summary(m5_2)
anova(m5_2)
#---Citation effects based on number of clicks received.
m6 <- lm(citation_c_adj ~ click_b, data = adj_dat_n)
anova(m6)
#---- Assumptions
#-Normality assumptions
res <- m6$residuals
hist(res)
# We can't assume normality of residuals
#-Deal with outliers.
m6_1 <- augment(m6) %>%
mutate(index = 1:n())
m6_1 %>% top_n(3, .cooksd)
list_6 <- m6_1 %>%
filter(abs(.std.resid) > 3)
index <- list_6$index
list_6_n <- data.frame(index)
adj_dat_n_6 <- bind_rows(adj_dat_n, list_6_n)
# Extract the rows which appear only once to remove influential values
adj_dat_n_6 <- adj_dat_n_6 [!(duplicated(adj_dat_n_6$index ) | duplicated(adj_dat_n_6$index , fromLast = TRUE)), ]
#-Run the model again without outliers.
# Results are reported in Table 4 in the manuscript.
m6_2 <- lm (citation_c_adj ~ click_b, data = adj_dat_n_6)
anova(m6_2)
summary(m6_2)
# No new analysis from here forward.
# Remaining code draws tables for the manuscript.
# Note: Tables are not included in the github repository.
## Table 1: Group analyzed data by year
# NOTE(review): 13457 is a hard-coded row count for adj_dat; the Table 2
# caption below uses 13452 (adj_dat_n) -- confirm both counts are current.
yd <- adj_dat %>%
dplyr::select(ir_pub_year) %>%
group_by(ir_pub_year) %>%
summarise(count_year = n(),
proportion = round((n()/13457)*100, digits = 2))
yd
t1_flex <- flextable(yd) %>%
colformat_num(j = 1, big.mark = "") %>%
set_header_labels(
ir_pub_year = "Year uploaded to IR",
count_year = "Count",
proportion = "Proportion") %>%
set_caption(caption = "Table 1: Count of items by year of upload to RAMP IR host") %>%
set_table_properties(width = 1, layout = "autofit")
t1_flex
save_as_docx(t1_flex, values = NULL,
path = "../figures/Table_1.docx")
# Table 2: Open Access Availability by Host Type
# Desc stats - count of OA copies per host type, % of total
t2_data <- adj_dat_n %>%
summarize("Items with OA availability" = sum(!is.na(oa_c_adj)),
"Items hosted by one or more IR" = sum(ir_c_adj > 0),
"Items also hosted by disciplinary repositories" = sum(dr_c > 0),
"Items also hosted by publisher OA repositories" = sum(pub_c > 0),
"Items also hosted by other types of OA repositories" = sum(other_c >0)) %>%
pivot_longer(
cols = c(starts_with("Items")),
names_to = "OA Host Type",
values_to = "Frequency"
)
t2_data$"Percentage of Observations" <- round((t2_data$Frequency/nrow(adj_dat_n))*100, 2)
t2_flex <- flextable(t2_data) %>%
set_caption(caption = "Table 2: Open Access Availability by Host Type (N = 13452)") %>%
set_table_properties(width = 1, layout = "autofit")
t2_flex
save_as_docx(t2_flex, values = NULL,
path = "../figures/Table_2.docx")
# Table 3: Distribution of items across disciplinary repositories
# Note: This table uses a different dataset
t3_data <- import("../data/ramp_crossref_unpaywall_by_hosts.csv")
dh <- t3_data %>%
filter(repo_subtype == "disciplinary") %>%
dplyr::select(repo_name) %>%
group_by(repo_name) %>%
summarise(count_dr = n()) %>%
arrange(desc(count_dr), repo_name)
dh
t3_flex <- flextable(dh) %>%
set_caption(caption = "Table 3: Distribution of items across disciplinary repositories.") %>%
set_header_labels(
repo_name = "Repository",
count_dr = "Count"
) %>%
set_table_properties(width = 1, layout = "autofit")
t3_flex
save_as_docx(t3_flex, values = NULL,
path = "../figures/Table_3.docx")
# Table 4: Citation rate mean differences across click groups (desc stats)
t4_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, click_b) %>%
group_by(click_b) %>%
rename("Click group" = click_b) %>%
summarise(N = n(),
"Mean citations" = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
"Median citations" = round(median(citation_c_adj), 2),
"Min citations" = round(min(citation_c_adj), 0),
"Max citations" = round(max(citation_c_adj), 0))
t4_flex <- flextable(t4_data) %>%
set_caption(caption = "Table 4: Citation mean differences across click groups") %>%
set_table_properties(width = 1, layout = "autofit")
t4_flex
save_as_docx(t4_flex, values = NULL,
path = "../figures/Table_4.docx")
# Table 5
# Citation rate mean differences across sub-groups of
# different types of OA repositories
# Including % of total observations
# The same summarise pattern is repeated once per host type below,
# then the five data frames are stacked into one table.
# All OA hosts
t5_oa_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, oa_c_adj_n) %>%
group_by(oa_c_adj_n) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_oa_data$oa_c_adj_n <- as.character(t5_oa_data$oa_c_adj_n)
t5_oa_data <- t5_oa_data %>% rename(Category = oa_c_adj_n)
t5_oa_data$Host <- "All OA hosts"
# IR
t5_ir_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, ir_c_adj_c) %>%
group_by(ir_c_adj_c) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_ir_data$ir_c_adj_c <- as.character(t5_ir_data$ir_c_adj_c)
t5_ir_data <- t5_ir_data %>% rename(Category = ir_c_adj_c)
t5_ir_data$Host <- "Institutional repositories"
# DR
t5_dr_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, dr_c_b) %>%
group_by(dr_c_b) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_dr_data$dr_c_b <- as.character(t5_dr_data$dr_c_b)
t5_dr_data <- t5_dr_data %>% rename(Category = dr_c_b)
t5_dr_data$Host <- "Disciplinary repositories"
# Pub
t5_pub_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, pub_c_b) %>%
group_by(pub_c_b) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_pub_data$pub_c_b <- as.character(t5_pub_data$pub_c_b)
t5_pub_data <- t5_pub_data %>% rename(Category = pub_c_b)
t5_pub_data$Host <- "Publisher OA"
# Other
t5_oth_data <- adj_dat_n %>%
dplyr::select(citation_c_adj, other_c_b) %>%
group_by(other_c_b) %>%
summarise(N = n(),
#"Pct of Observations" = round((n()/nrow(adj_dat_n))*100, 0),
Mean = round(mean(citation_c_adj), 2),
SD = round(sd(citation_c_adj), 2),
Median = round(median(citation_c_adj), 2),
Min = round(min(citation_c_adj), 0),
Max = round(max(citation_c_adj), 0))
t5_oth_data$other_c_b <- as.character(t5_oth_data$other_c_b)
t5_oth_data <- t5_oth_data %>% rename(Category = other_c_b)
t5_oth_data$Host <- "Other OA"
# plyr is attached only for ldply() and detached immediately afterwards
# because it masks several dplyr verbs used elsewhere in this script.
library(plyr)
dfs <- list(t5_oa_data, t5_ir_data, t5_dr_data, t5_pub_data, t5_oth_data)
t5_data <- ldply(dfs, rbind)
detach("package:plyr", unload = TRUE)
# Make "Host" first column
t5_data <- t5_data %>% relocate(Host, .before = Category)
# Combined table for descriptive stats of citations
# based on OA availability
t5_flex <- flextable(t5_data) %>%
merge_v(j = ~ Host) %>%
hline(part = "body") %>%
vline(part = "body") %>%
set_caption(caption = "Table 5: Citation means by OA host type.") %>%
set_table_properties(width = 1, layout = "autofit")
t5_flex
save_as_docx(t5_flex, values = NULL,
path = "../figures/Table_5.docx")
# Table 6: ANCOVA Citation mean differences between click groups
t6_flex <- as_flextable(m6_2) %>%
set_caption(caption = "Table 6: Average annual citation rates by click groups.") %>%
set_table_properties(width = 1, layout = "autofit")
t6_flex
save_as_docx(t6_flex, values = NULL,
path = "../figures/Table_6.docx")
# Table 7: Citation effects of different OA host sub-types
# One column per outlier-trimmed model (m1_2..m5_2); covariate.labels must
# stay in the order the coefficients appear across the five models.
library(stargazer)
stargazer(m1_2,
m2_2,
m3_2,
m4_2,
m5_2,
type="html",
dep.var.labels = "Per-year citation rate means",
covariate.labels = c('Intercept',
'Clicks above median',
'Total OA copies above median',
'Count IR copies above median',
'Disciplinary repository OA available',
'Publisher OA available',
'Other OA services available'),
#ci = TRUE,
#single.row = TRUE,
intercept.bottom = FALSE,
intercept.top = TRUE,
align = TRUE,
report = "vcst*",
out = "../figures/Table_7.doc",
notes = "Table 7: Citation impact of additional OA copies of items held by repository type.")
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = Inf, rs = numeric(0), temp = numeric(0))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) | /meteor/inst/testfiles/ET0_PenmanMonteith/libFuzzer_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1612736698-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 398 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = Inf, rs = numeric(0), temp = numeric(0))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
## Two functions here
## makeCacheMatrix sets a matrix into the cache
## cacheSolve returns the inverse of a matrix
## makeCacheMatrix takes in matrix x and stores in cache
## Create a cache-aware wrapper around a matrix.
##
## Returns a list of accessor closures (`set`, `get`, `setInverse`,
## `getInverse`) that share the matrix `x` and a memoised copy of its
## inverse. Replacing the matrix via `set()` invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Store a new matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Retrieve the wrapped matrix.
    get = function() x,
    # Record a freshly computed inverse in the cache.
    setInverse = function(inverse) cached_inverse <<- inverse,
    # Return the cached inverse, or NULL when none has been stored yet.
    getInverse = function() cached_inverse
  )
}
## Either returns the inverse of the cached matrix for x or returns the matrix of x
## Compute (or fetch) the inverse of a cached matrix.
##
## `x` must be a list produced by makeCacheMatrix(). If an inverse has
## already been stored it is returned immediately; otherwise the inverse
## is computed with solve(), cached for later calls, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and memoise the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  }
  cached
}
| /cachematrix.R | no_license | bthornton/ProgrammingAssignment2 | R | false | false | 904 | r | ## Two functions here
## makeCacheMatrix sets a matrix into the cache
## cacheSolve returns the inverse of a matrix
## makeCacheMatrix takes in matrix x and stores in cache
## Construct a special "matrix": a list of closures that share a matrix
## `x` and a cached copy of its inverse. set() replaces the matrix and
## clears the cache; setInverse()/getInverse() store and read the cache.
makeCacheMatrix <- function(x = matrix()) {
# i holds the cached inverse; NULL means "not yet computed"
i <- NULL
# replace the stored matrix and invalidate any stale cached inverse
set <- function(y) {
x <<- y
i <<- NULL
}
# return the stored matrix
get <- function() x
# record a freshly computed inverse
setInverse <- function(inverse) {
i <<- inverse
}
# return the cached inverse (NULL when none has been stored)
getInverse <- function() {
i
}
# expose the four accessors by name
list(
set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse
)
}
## Either returns the inverse of the cached matrix for x or returns the matrix of x
## Return the inverse of the special "matrix" created by makeCacheMatrix(),
## computing it at most once. Extra arguments in `...` go to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getInverse()
if(!is.null(i)) { ## cache hit: reuse the previously computed inverse
return(i)
}
## cache miss: invert the stored matrix and memoise the result
data <- x$get()
i <- solve(data, ...)
x$setInverse(i)
i ## returns the inverse matrix (now cached)
}
|
\alias{GMountOperation}
\alias{gMountOperation}
\alias{GAskPasswordFlags}
\alias{GPasswordSave}
\alias{GMountOperationResult}
\name{GMountOperation}
\title{GMountOperation}
\description{Object used for authentication and user interaction}
\section{Methods and Functions}{
\code{\link{gMountOperationNew}()}\cr
\code{\link{gMountOperationGetUsername}(object)}\cr
\code{\link{gMountOperationSetUsername}(object, username)}\cr
\code{\link{gMountOperationGetPassword}(object)}\cr
\code{\link{gMountOperationSetPassword}(object, password)}\cr
\code{\link{gMountOperationGetAnonymous}(object)}\cr
\code{\link{gMountOperationSetAnonymous}(object, anonymous)}\cr
\code{\link{gMountOperationGetDomain}(object)}\cr
\code{\link{gMountOperationSetDomain}(object, domain)}\cr
\code{\link{gMountOperationGetPasswordSave}(object)}\cr
\code{\link{gMountOperationSetPasswordSave}(object, save)}\cr
\code{\link{gMountOperationGetChoice}(object)}\cr
\code{\link{gMountOperationSetChoice}(object, choice)}\cr
\code{\link{gMountOperationReply}(object, result)}\cr
\code{gMountOperation()}
}
\section{Hierarchy}{\preformatted{
GFlags
+----GAskPasswordFlags
GEnum
+----GPasswordSave
GObject
+----GMountOperation
GEnum
+----GMountOperationResult
}}
\section{Detailed Description}{\code{\link{GMountOperation}} provides a mechanism for interacting with the user.
It can be used for authenticating mountable operations, such as loop
mounting files, hard drive partitions or server locations. It can
also be used to ask the user questions or show a list of applications
preventing unmount or eject operations from completing.
Note that \code{\link{GMountOperation}} is used for more than just \code{\link{GMount}}
objects – for example it is also used in \code{\link{gDriveStart}} and
\code{\link{gDriveStop}}.
Users should instantiate a subclass of this that implements all the
various callbacks to show the required dialogs, such as
\code{\link{GtkMountOperation}}. If no user interaction is desired (for example
when automounting filesystems at login time), usually \code{NULL} can be
passed, see each method taking a \code{\link{GMountOperation}} for details.}
\section{Structures}{\describe{\item{\verb{GMountOperation}}{
Class for providing authentication methods for mounting operations,
such as mounting a file locally, or authenticating with a server.
}}}
\section{Convenient Construction}{\code{gMountOperation} is the equivalent of \code{\link{gMountOperationNew}}.}
\section{Enums and Flags}{\describe{
\item{\verb{GAskPasswordFlags}}{
\code{\link{GAskPasswordFlags}} are used to request specific information from the
user, or to notify the user of their choices in an authentication
situation.
\describe{
\item{\verb{need-password}}{operation requires a password.}
\item{\verb{need-username}}{operation requires a username.}
\item{\verb{need-domain}}{operation requires a domain.}
\item{\verb{saving-supported}}{operation supports saving settings.}
\item{\verb{anonymous-supported}}{operation supports anonymous users.}
}
}
\item{\verb{GPasswordSave}}{
\code{\link{GPasswordSave}} is used to indicate the lifespan of a saved password.
\verb{Gvfs} stores passwords in the Gnome keyring when this flag allows it
to, and later retrieves it again from there.
\describe{
\item{\verb{never}}{never save a password.}
\item{\verb{for-session}}{save a password for the session.}
\item{\verb{permanently}}{save a password permanently.}
}
}
\item{\verb{GMountOperationResult}}{
\code{\link{GMountOperationResult}} is returned as a result when a request for
information is sent by the mounting operation.
\describe{
\item{\verb{handled}}{The request was fulfilled and the
user specified data is now available}
\item{\verb{aborted}}{The user requested the mount operation
to be aborted}
\item{\verb{unhandled}}{The request was unhandled (i.e. not
implemented)}
}
}
}}
\section{Signals}{\describe{
\item{\code{aborted(user.data)}}{
Emitted by the backend when e.g. a device becomes unavailable
while a mount operation is in progress.
Implementations of GMountOperation should handle this signal
by dismissing open password dialogs.
Since 2.20
\describe{\item{\code{user.data}}{user data set when the signal handler was connected.}}
}
\item{\code{ask-password(op, message, default.user, default.domain, flags, user.data)}}{
Emitted when a mount operation asks the user for a password.
If the message contains a line break, the first line should be
presented as a heading. For example, it may be used as the
primary text in a \code{\link{GtkMessageDialog}}.
\describe{
\item{\code{op}}{a \code{\link{GMountOperation}} requesting a password.}
\item{\code{message}}{string containing a message to display to the user.}
\item{\code{default.user}}{string containing the default user name.}
\item{\code{default.domain}}{string containing the default domain.}
\item{\code{flags}}{a set of \code{\link{GAskPasswordFlags}}.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{ask-question(op, message, choices, user.data)}}{
Emitted when asking the user a question and gives a list of
choices for the user to choose from.
If the message contains a line break, the first line should be
presented as a heading. For example, it may be used as the
primary text in a \code{\link{GtkMessageDialog}}.
\describe{
\item{\code{op}}{a \code{\link{GMountOperation}} asking a question.}
\item{\code{message}}{string containing a message to display to the user.}
\item{\code{choices}}{a list of strings for each possible choice.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{reply(op, result, user.data)}}{
Emitted when the user has replied to the mount operation.
\describe{
\item{\code{op}}{a \code{\link{GMountOperation}}.}
\item{\code{result}}{a \code{\link{GMountOperationResult}} indicating how the request was handled}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{show-processes(op, message, processes, choices, user.data)}}{
Emitted when one or more processes are blocking an operation
e.g. unmounting/ejecting a \code{\link{GMount}} or stopping a \code{\link{GDrive}}.
Note that this signal may be emitted several times to update the
list of blocking processes as processes close files. The
application should only respond with \code{\link{gMountOperationReply}} to
the latest signal (setting \verb{"choice"} to the choice
the user made).
If the message contains a line break, the first line should be
presented as a heading. For example, it may be used as the
primary text in a \code{\link{GtkMessageDialog}}.
Since 2.22
\describe{
\item{\code{op}}{a \code{\link{GMountOperation}}.}
\item{\code{message}}{string containing a message to display to the user.}
\item{\code{processes}}{a list of \verb{GPid} for processes blocking the operation.}
\item{\code{choices}}{a list of strings for each possible choice.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
}}
\section{Properties}{\describe{
\item{\verb{anonymous} [logical : Read / Write]}{
Whether to use an anonymous user when authenticating.
Default value: FALSE
}
\item{\verb{choice} [integer : Read / Write]}{
The index of the user's choice when a question is asked during the
mount operation. See the \verb{"ask-question"} signal.
Allowed values: >= 0 Default value: 0
}
\item{\verb{domain} [character : * : Read / Write]}{
The domain to use for the mount operation.
Default value: NULL
}
\item{\verb{password} [character : * : Read / Write]}{
The password that is used for authentication when carrying out
the mount operation.
Default value: NULL
}
\item{\verb{password-save} [\code{\link{GPasswordSave}} : Read / Write]}{
Determines if and how the password information should be saved.
Default value: G_PASSWORD_SAVE_NEVER
}
\item{\verb{username} [character : * : Read / Write]}{
The user name that is used for authentication when carrying out
the mount operation.
Default value: NULL
}
}}
\references{\url{https://developer.gnome.org/gio/stable/GMountOperation.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/GMountOperation.Rd | no_license | cran/RGtk2 | R | false | false | 8,278 | rd | \alias{GMountOperation}
\alias{gMountOperation}
\alias{GAskPasswordFlags}
\alias{GPasswordSave}
\alias{GMountOperationResult}
\name{GMountOperation}
\title{GMountOperation}
\description{Object used for authentication and user interaction}
\section{Methods and Functions}{
\code{\link{gMountOperationNew}()}\cr
\code{\link{gMountOperationGetUsername}(object)}\cr
\code{\link{gMountOperationSetUsername}(object, username)}\cr
\code{\link{gMountOperationGetPassword}(object)}\cr
\code{\link{gMountOperationSetPassword}(object, password)}\cr
\code{\link{gMountOperationGetAnonymous}(object)}\cr
\code{\link{gMountOperationSetAnonymous}(object, anonymous)}\cr
\code{\link{gMountOperationGetDomain}(object)}\cr
\code{\link{gMountOperationSetDomain}(object, domain)}\cr
\code{\link{gMountOperationGetPasswordSave}(object)}\cr
\code{\link{gMountOperationSetPasswordSave}(object, save)}\cr
\code{\link{gMountOperationGetChoice}(object)}\cr
\code{\link{gMountOperationSetChoice}(object, choice)}\cr
\code{\link{gMountOperationReply}(object, result)}\cr
\code{gMountOperation()}
}
\section{Hierarchy}{\preformatted{
GFlags
+----GAskPasswordFlags
GEnum
+----GPasswordSave
GObject
+----GMountOperation
GEnum
+----GMountOperationResult
}}
\section{Detailed Description}{\code{\link{GMountOperation}} provides a mechanism for interacting with the user.
It can be used for authenticating mountable operations, such as loop
mounting files, hard drive partitions or server locations. It can
also be used to ask the user questions or show a list of applications
preventing unmount or eject operations from completing.
Note that \code{\link{GMountOperation}} is used for more than just \code{\link{GMount}}
objects – for example it is also used in \code{\link{gDriveStart}} and
\code{\link{gDriveStop}}.
Users should instantiate a subclass of this that implements all the
various callbacks to show the required dialogs, such as
\code{\link{GtkMountOperation}}. If no user interaction is desired (for example
when automounting filesystems at login time), usually \code{NULL} can be
passed, see each method taking a \code{\link{GMountOperation}} for details.}
\section{Structures}{\describe{\item{\verb{GMountOperation}}{
Class for providing authentication methods for mounting operations,
such as mounting a file locally, or authenticating with a server.
}}}
\section{Convenient Construction}{\code{gMountOperation} is the equivalent of \code{\link{gMountOperationNew}}.}
\section{Enums and Flags}{\describe{
\item{\verb{GAskPasswordFlags}}{
\code{\link{GAskPasswordFlags}} are used to request specific information from the
user, or to notify the user of their choices in an authentication
situation.
\describe{
\item{\verb{need-password}}{operation requires a password.}
\item{\verb{need-username}}{operation requires a username.}
\item{\verb{need-domain}}{operation requires a domain.}
\item{\verb{saving-supported}}{operation supports saving settings.}
\item{\verb{anonymous-supported}}{operation supports anonymous users.}
}
}
\item{\verb{GPasswordSave}}{
\code{\link{GPasswordSave}} is used to indicate the lifespan of a saved password.
\verb{Gvfs} stores passwords in the Gnome keyring when this flag allows it
to, and later retrieves it again from there.
\describe{
\item{\verb{never}}{never save a password.}
\item{\verb{for-session}}{save a password for the session.}
\item{\verb{permanently}}{save a password permanently.}
}
}
\item{\verb{GMountOperationResult}}{
\code{\link{GMountOperationResult}} is returned as a result when a request for
information is sent by the mounting operation.
\describe{
\item{\verb{handled}}{The request was fulfilled and the
user specified data is now available}
\item{\verb{aborted}}{The user requested the mount operation
to be aborted}
\item{\verb{unhandled}}{The request was unhandled (i.e. not
implemented)}
}
}
}}
\section{Signals}{\describe{
\item{\code{aborted(user.data)}}{
Emitted by the backend when e.g. a device becomes unavailable
while a mount operation is in progress.
Implementations of GMountOperation should handle this signal
by dismissing open password dialogs.
Since 2.20
\describe{\item{\code{user.data}}{user data set when the signal handler was connected.}}
}
\item{\code{ask-password(op, message, default.user, default.domain, flags, user.data)}}{
Emitted when a mount operation asks the user for a password.
If the message contains a line break, the first line should be
presented as a heading. For example, it may be used as the
primary text in a \code{\link{GtkMessageDialog}}.
\describe{
\item{\code{op}}{a \code{\link{GMountOperation}} requesting a password.}
\item{\code{message}}{string containing a message to display to the user.}
\item{\code{default.user}}{string containing the default user name.}
\item{\code{default.domain}}{string containing the default domain.}
\item{\code{flags}}{a set of \code{\link{GAskPasswordFlags}}.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{ask-question(op, message, choices, user.data)}}{
Emitted when asking the user a question and gives a list of
choices for the user to choose from.
If the message contains a line break, the first line should be
presented as a heading. For example, it may be used as the
primary text in a \code{\link{GtkMessageDialog}}.
\describe{
\item{\code{op}}{a \code{\link{GMountOperation}} asking a question.}
\item{\code{message}}{string containing a message to display to the user.}
\item{\code{choices}}{a list of strings for each possible choice.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{reply(op, result, user.data)}}{
Emitted when the user has replied to the mount operation.
\describe{
\item{\code{op}}{a \code{\link{GMountOperation}}.}
\item{\code{result}}{a \code{\link{GMountOperationResult}} indicating how the request was handled}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{show-processes(op, message, processes, choices, user.data)}}{
Emitted when one or more processes are blocking an operation
e.g. unmounting/ejecting a \code{\link{GMount}} or stopping a \code{\link{GDrive}}.
Note that this signal may be emitted several times to update the
list of blocking processes as processes close files. The
application should only respond with \code{\link{gMountOperationReply}} to
the latest signal (setting \verb{"choice"} to the choice
the user made).
If the message contains a line break, the first line should be
presented as a heading. For example, it may be used as the
primary text in a \code{\link{GtkMessageDialog}}.
Since 2.22
\describe{
\item{\code{op}}{a \code{\link{GMountOperation}}.}
\item{\code{message}}{string containing a message to display to the user.}
\item{\code{processes}}{a list of \verb{GPid} for processes blocking the operation.}
\item{\code{choices}}{a list of strings for each possible choice.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
}}
\section{Properties}{\describe{
\item{\verb{anonymous} [logical : Read / Write]}{
Whether to use an anonymous user when authenticating.
Default value: FALSE
}
\item{\verb{choice} [integer : Read / Write]}{
The index of the user's choice when a question is asked during the
mount operation. See the \verb{"ask-question"} signal.
Allowed values: >= 0 Default value: 0
}
\item{\verb{domain} [character : * : Read / Write]}{
The domain to use for the mount operation.
Default value: NULL
}
\item{\verb{password} [character : * : Read / Write]}{
The password that is used for authentication when carrying out
the mount operation.
Default value: NULL
}
\item{\verb{password-save} [\code{\link{GPasswordSave}} : Read / Write]}{
Determines if and how the password information should be saved.
Default value: G_PASSWORD_SAVE_NEVER
}
\item{\verb{username} [character : * : Read / Write]}{
The user name that is used for authentication when carrying out
the mount operation.
Default value: NULL
}
}}
\references{\url{https://developer.gnome.org/gio/stable/GMountOperation.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
#' Sum two numbers
#'
#' Adds \code{y} to \code{x} with R's vectorised \code{+} operator, so
#' numeric vectors of equal length are summed elementwise.
#'
#' @param x A number
#' @param y A number
#' @return The sum of \code{x} and \code{y}
#' @examples
#' add(1, 1)
#' add(10, 1)
add <- function(x, y) {
  total <- x + y
  total
}
| /R/MyNewFunction.R | no_license | ClaudiaMeier/stargazer | R | false | false | 193 | r | #' Add together two numbers
#'
#' @param x A number
#' @param y A number
#' @return The sum of \code{x} and \code{y}
#' @examples
#' add(1, 1)
#' add(10, 1)
add <- function(x, y) {
x + y
}
|
#function sampledata = readEKRaw_ReadSampledata(fid, sampledata)
#readEKRaw_ReadSampledata Read EK/ES RAW0 datagram
# sampledata = readEKRaw_ReadSampledata(fid, sampledate) returns a structure
# containing the data from a EK/ES RAW0 datagram.
#
# REQUIRED INPUT:
# fid: file handle id of raw file
# sampledata: pre-allocated sampledata data structure
#
# OPTIONAL PARAMETERS: None
#
# OUTPUT:
# Output is a data structure containing the RAW0 datagram data
#
# REQUIRES: None
#
# Rick Towler
# NOAA Alaska Fisheries Science Center
# Midwater Assessment and Conservation Engineering Group
# rick.towler@noaa.gov
#
# Based on code by Lars Nonboe Andersen, Simrad.
#-
#sampledata.channel = fread(fid,1,'int16', 'l');
#mode_low = fread(fid,1,'int8', 'l');
#mode_high = fread(fid,1,'int8', 'l');
#sampledata.mode = 256 * mode_high + mode_low;
#sampledata.transducerdepth = fread(fid,1,'float32', 'l');
#sampledata.frequency = fread(fid,1,'float32', 'l');
#sampledata.transmitpower = fread(fid,1,'float32', 'l');
#sampledata.pulselength = fread(fid,1,'float32', 'l');
#sampledata.bandwidth = fread(fid,1,'float32', 'l');
#sampledata.sampleinterval = fread(fid,1,'float32', 'l');
#sampledata.soundvelocity = fread(fid,1,'float32', 'l');
#sampledata.absorptioncoefficient = fread(fid,1,'float32', 'l');
#sampledata.heave = fread(fid,1,'float32', 'l');
#sampledata.roll = fread(fid,1,'float32', 'l');
#sampledata.pitch = fread(fid,1,'float32', 'l');
#sampledata.temperature = fread(fid,1,'float32', 'l');
#sampledata.trawlupperdepthvalid = fread(fid,1,'int16', 'l');
#sampledata.trawlopeningvalid = fread(fid,1,'int16', 'l');
#sampledata.trawlupperdepth = fread(fid,1,'float32', 'l');
#sampledata.trawlopening = fread(fid,1,'float32', 'l');
#sampledata.offset = fread(fid,1,'int32', 'l');
#sampledata.count = fread(fid,1,'int32', 'l');
#sampledata.power = [];
#sampledata.alongship = [];
#sampledata.athwartship = [];
#if (sampledata.count > 0)
# % check length of arrays - grow if necessary
# if (length(sampledata.power) < sampledata.count)
# nSampAdd = sampledata.count - length(sampledata.power);
# sampledata.power(end + 1:end + nSampAdd) = -999;
# sampledata.alongship(end + 1:end + nSampAdd) = 0;
# sampledata.athwartship(end + 1:end + nSampAdd) = 0;
# end
# if (sampledata.mode ~= 2)
# power = fread(fid,sampledata.count,'int16', 'l');
# % power * 10 * log10(2) / 256
# sampledata.power(1:sampledata.count) = (power * 0.011758984205624);
# end
# if (sampledata.mode > 1)
# angle = fread(fid,[2 sampledata.count],'int8', 'l'); # this is two row matrix with the number of columns equal to the sample count, this fills a whole column first
# sampledata.athwartship(1:sampledata.count) = angle(1,:)';
# sampledata.alongship(1:sampledata.count) = angle(2,:)';
# end
#end
# R Code
## Read one EK/ES RAW0 sample datagram from an open binary connection.
##
## Args:
##   fid: a connection opened in "rb" mode, positioned just past the RAW0
##        datagram header (i.e. at the channel field).
##
## Returns: a list mirroring the RAW0 layout (channel, mode, the twelve
## float32 transducer/environment fields, trawl fields, offset, count)
## plus decoded vectors `power` (dB), `alongship` and `athwartship`
## (raw int8 electrical-angle values). The vectors are empty when the
## datagram's mode carries no corresponding data.
##
## R port of readEKRaw_ReadSampledata.m (Rick Towler, NOAA AFSC); the
## MATLAB reference is quoted in the comments above this function.
readEKRaw_ReadSampledata = function(fid) {
  sampledata = list()
  sampledata$channel = readBin(con = fid, what = "integer", n = 1, size = 2, endian = "little")
  # Mode is stored as two signed bytes: mode = 256 * high + low.
  mode_low = readBin(con = fid, what = "integer", n = 1, size = 1, endian = "little")
  mode_high = readBin(con = fid, what = "integer", n = 1, size = 1, endian = "little")
  sampledata$mode = 256 * mode_high + mode_low
  # Twelve float32 fields, in on-disk order.
  sampledata$transducerdepth = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$frequency = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$transmitpower = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$pulselength = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$bandwidth = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$sampleinterval = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$soundvelocity = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$absorptioncoefficient = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$heave = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$roll = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$pitch = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$temperature = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$trawlupperdepthvalid = readBin(con = fid, what = "integer", n = 1, size = 2, endian = "little")
  sampledata$trawlopeningvalid = readBin(con = fid, what = "integer", n = 1, size = 2, endian = "little")
  sampledata$trawlupperdepth = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$trawlopening = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$offset = readBin(con = fid, what = 'integer', n = 1, size = 4, signed = TRUE, endian = "little")
  sampledata$count = readBin(con = fid, what = 'integer', n = 1, size = 4, signed = TRUE, endian = "little")
  # Default to empty vectors so the fields always exist, even when the
  # mode says no power / angle samples follow.
  sampledata$power = c()
  sampledata$alongship = c()
  sampledata$athwartship = c()
  if (sampledata$mode != 2) {
    # Power samples are int16 in fixed-point log units; the scale factor
    # 10 * log10(2) / 256 = 0.011758984205624 converts to dB.
    power = readBin(con = fid, what = "integer", n = sampledata$count, size = 2, endian = "little")
    sampledata$power = power * 0.011758984205624
  }
  if (sampledata$mode > 1) {
    # Angle samples are interleaved int8 pairs: (athwartship, alongship)
    # per sample, i.e. angle(1,:) / angle(2,:) in the MATLAB reference
    # above. BUGFIX: the earlier port assigned them the other way around
    # (and the old comments wrongly called these matrix rows "columns").
    angle = readBin(con = fid, what = "integer", n = 2 * sampledata$count, size = 1, endian = "little")
    anglematrix = matrix(angle, nrow = 2)
    sampledata$athwartship = anglematrix[1,]  # first byte of each pair
    sampledata$alongship = anglematrix[2,]    # second byte of each pair
  }
  sampledata
}
#readEKRaw_ReadSampledata Read EK/ES RAW0 datagram
# sampledata = readEKRaw_ReadSampledata(fid, sampledate) returns a structure
# containing the data from a EK/ES RAW0 datagram.
#
# REQUIRED INPUT:
# fid: file handle id of raw file
# sampledata: pre-allocated sampledata data structure
#
# OPTIONAL PARAMETERS: None
#
# OUTPUT:
# Output is a data structure containing the RAW0 datagram data
#
# REQUIRES: None
#
# Rick Towler
# NOAA Alaska Fisheries Science Center
# Midwater Assesment and Conservation Engineering Group
# rick.towler@noaa.gov
#
# Based on code by Lars Nonboe Andersen, Simrad.
#-
#sampledata.channel = fread(fid,1,'int16', 'l');
#mode_low = fread(fid,1,'int8', 'l');
#mode_high = fread(fid,1,'int8', 'l');
#sampledata.mode = 256 * mode_high + mode_low;
#sampledata.transducerdepth = fread(fid,1,'float32', 'l');
#sampledata.frequency = fread(fid,1,'float32', 'l');
#sampledata.transmitpower = fread(fid,1,'float32', 'l');
#sampledata.pulselength = fread(fid,1,'float32', 'l');
#sampledata.bandwidth = fread(fid,1,'float32', 'l');
#sampledata.sampleinterval = fread(fid,1,'float32', 'l');
#sampledata.soundvelocity = fread(fid,1,'float32', 'l');
#sampledata.absorptioncoefficient = fread(fid,1,'float32', 'l');
#sampledata.heave = fread(fid,1,'float32', 'l');
#sampledata.roll = fread(fid,1,'float32', 'l');
#sampledata.pitch = fread(fid,1,'float32', 'l');
#sampledata.temperature = fread(fid,1,'float32', 'l');
#sampledata.trawlupperdepthvalid = fread(fid,1,'int16', 'l');
#sampledata.trawlopeningvalid = fread(fid,1,'int16', 'l');
#sampledata.trawlupperdepth = fread(fid,1,'float32', 'l');
#sampledata.trawlopening = fread(fid,1,'float32', 'l');
#sampledata.offset = fread(fid,1,'int32', 'l');
#sampledata.count = fread(fid,1,'int32', 'l');
#sampledata.power = [];
#sampledata.alongship = [];
#sampledata.athwartship = [];
#if (sampledata.count > 0)
# % check length of arrays - grow if necessary
# if (length(sampledata.power) < sampledata.count)
# nSampAdd = sampledata.count - length(sampledata.power);
# sampledata.power(end + 1:end + nSampAdd) = -999;
# sampledata.alongship(end + 1:end + nSampAdd) = 0;
# sampledata.athwartship(end + 1:end + nSampAdd) = 0;
# end
# if (sampledata.mode ~= 2)
# power = fread(fid,sampledata.count,'int16', 'l');
# % power * 10 * log10(2) / 256
# sampledata.power(1:sampledata.count) = (power * 0.011758984205624);
# end
# if (sampledata.mode > 1)
# angle = fread(fid,[2 sampledata.count],'int8', 'l'); # this is two row matrix with the number of columns equal to the sample count, this fills a whole column first
# sampledata.athwartship(1:sampledata.count) = angle(1,:)';
# sampledata.alongship(1:sampledata.count) = angle(2,:)';
# end
#end
# R Code
# Read one "sample data" datagram from an EK raw file.
#
# R port of the MATLAB reference implementation shown in the comments above.
# All multi-byte fields are little-endian.
#
# Args:
#   fid: an open binary connection positioned at the start of the datagram.
# Returns:
#   A named list mirroring the MATLAB `sampledata` struct: channel, mode,
#   the float32 configuration fields, offset, count, and the sample vectors
#   `power`, `alongship` and `athwartship` (empty when not present).
readEKRaw_ReadSampledata = function(fid) {
  sampledata = list()
  sampledata$channel = readBin(con = fid, what = "integer", n = 1, size = 2, endian = "little")
  # The mode is stored as two int8 bytes (low, high): mode = 256*high + low.
  mode_low = readBin(con = fid, what = "integer", n = 1, size = 1, endian = "little")
  mode_high = readBin(con = fid, what = "integer", n = 1, size = 1, endian = "little")
  sampledata$mode = 256 * mode_high + mode_low
  sampledata$transducerdepth = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$frequency = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$transmitpower = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$pulselength = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$bandwidth = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$sampleinterval = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$soundvelocity = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$absorptioncoefficient = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$heave = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$roll = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$pitch = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$temperature = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$trawlupperdepthvalid = readBin(con = fid, what = "integer", n = 1, size = 2, endian = "little")
  sampledata$trawlopeningvalid = readBin(con = fid, what = "integer", n = 1, size = 2, endian = "little")
  sampledata$trawlupperdepth = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$trawlopening = readBin(con = fid, what = 'double', n = 1, size = 4, endian = "little")
  sampledata$offset = readBin(con = fid, what = 'integer', n = 1, size = 4, signed = TRUE, endian = "little")
  sampledata$count = readBin(con = fid, what = 'integer', n = 1, size = 4, signed = TRUE, endian = "little")
  sampledata$power = c() # sample vectors, filled below when present
  sampledata$alongship = c()
  sampledata$athwartship = c()
  if (sampledata$mode != 2) {
    # readBin with n = 0 returns an empty vector, so count == 0 is safe here.
    power = readBin(con = fid, what = "integer", n = sampledata$count, size = 2, endian = "little")
    # Convert raw counts to dB: power * 10 * log10(2) / 256
    sampledata$power = power * 0.011758984205624
  }
  if (sampledata$mode > 1) {
    angle = readBin(con = fid, what = "integer", n = 2 * sampledata$count, size = 1, endian = "little")
    # matrix() fills column-first, so each column holds one sample's
    # (athwartship, alongship) int8 pair -- the same layout as MATLAB's
    # fread(fid, [2 count], 'int8') in the reference code above.
    anglematrix = matrix(angle, nrow = 2)
    # BUGFIX: match the MATLAB reference -- angle(1,:) is athwartship and
    # angle(2,:) is alongship; the previous version had the two assignments
    # swapped (and mislabeled the rows as "columns").
    sampledata$athwartship = anglematrix[1, ] # first ROW of the matrix
    sampledata$alongship = anglematrix[2, ]   # second ROW of the matrix
  }
  sampledata
}
#' Convert eighth-of-a-cent grain prices to decimal
#'
#' @param x is a column of a data.table of grain contract BBO raw data from
#'   CME Group's Datamine. Formatted as.character() from the start it will
#'   have 7 characters. Positions: 4 hundreds, 5 tens, 6 ones, and 7 eighths
#'   of a cent, since futures quotes are in cents per bushel and the ticks
#'   are eighths of a cent. This function will therefore only work for grain
#'   prices < $100/bushel; at this date, grain prices above $100/bushel are
#'   unimaginable.
#'
#'   This function is intended to be vectorized over a data.table object as
#'   in the example below.
#'
#' @return The column of the original data.table with the TrPrice column
#'   converted to numeric in decimal format.
#' @examples
#' accum <- as.list(NULL)
#' accum[[1]] <- as.data.table(bboread('XCBT_C_FUT_110110.TXT'))
#' accum[[2]] <- as.data.table(bboread('XCBT_C_FUT_110110.TXT')) # accum is a list of two 'days' worth of BBO data
#' corn_110110 <- data.table::rbindlist(accum)
#' corn_110110[, Price := decimalprices(corn_110110$TrPrice)]
#'
decimalprices <- function(x) {
  # Whole cents are characters 4-6; character 7 holds eighths of a cent.
  whole_cents <- as.numeric(substr(x, 4, 6))
  eighths <- as.numeric(substr(x, 7, 7)) / 8
  whole_cents + eighths
}
| /R/decimalprices.R | no_license | BrunoProgramming/BBOToolkit | R | false | false | 1,210 | r | #' Convert eighth of a cent grain prices to decimal
#'
#' @param x is a column of a datatable of Grain contract BBO raw data from CME Group's Datamine.
#' Formatted as.character() from the start it will have 7 characters. Positions: 4 hundreds, 5 tens, 6 ones, and 7 8th of a cent
#' since futures quotes are in cents per bushel and the ticks are 8ths of a cent
#' this function will only work for corn prices < $100/bushel. At this date, grain prices
#' above $100/ bushel are unimaginable.
#'
#' This function is intended to be vectorized over a data.table object as in the example below.
#'
#' @return The column of the orginal datatable with the TrPrice column converted to numeric in decimal format.
#' @examples
#' accum <- as.list(Null)
#' accum[[1]] <- as.data.table(bboread('XCBT_C_FUT_110110.TXT'))
#' accum[[2]] <- as.data.table(bboread('XCBT_C_FUT_110110.TXT')) # accum is a list of two 'days' worth of BBO data
#' corn_110110 <- data.table::rbindlist(accum)
#' corn_110110[, Price := decimalprices(corn_110110$TrPrice)]
#'
decimalprices <- function(x) {
price <- as.numeric(substr(x, 4, 6))
e <- as.numeric(substr(x, 7, 7))/8
price <- price +e
return(price)
}
|
#' Obtains the text of the speech
#'
#' @description
#'
#' Searches the speech archive for a keyword over a date range, fetches
#' every result page, and scrapes the speech text from each result URL.
#'
#' @param keyword principal text or phrase present on speech.
#' @param start_date start date of search, in a format accepted by
#'   \code{lubridate::ymd()} (e.g. \code{"2021-12-10"}).
#' @param end_date end date of search; must not precede \code{start_date}.
#' @param uf state acronym (optional; \code{""} matches all states).
#' @param speaker speaker's name (optional).
#' @param party political party of speaker (optional).
#'
#' @return the speech data with all informational columns and the speech.
#'
#' @export
#'
#' @examples
#' \dontrun{
#'
#' tecnologia_speeches <- speech_data(
#'   keyword = "tecnologia",
#'   start_date = "2021-12-10",
#'   end_date = "2021-12-31")
#'
#'}
# Reject an inverted date range up front, before any network requests.
speech_data <- function(
  keyword,
  start_date,
  end_date,
  uf = "",
  speaker = "",
  party = "") {
  if (lubridate::ymd(start_date) > lubridate::ymd(end_date)) {
    rlang::abort("`start_date` can't be greater than `end_date`.")
  }
  # Pre-bind the fixed query parameters so each page request below only
  # varies by its page number.
  partial_build_url <- purrr::partial(
    build_url,
    keyword = keyword,
    start_date = start_date,
    end_date = end_date,
    uf = uf,
    speaker = speaker,
    party = party)
  # Fetch page 1 first to learn the total number of result pages ...
  first_page <- partial_build_url(current_page = 1) %>%
    speechbr_api()
  # ... then fetch every result page (page 1 is requested a second time).
  pages <-
    purrr::map(
      seq(1, parse_pagination(first_page)),
      ~ partial_build_url(current_page = .x)) %>%
    purrr::map(~ speechbr_api(.x))
  # Scrape the speech text from each result URL.  Individual extractions
  # may fail (e.g. dead links), so degrade those to the sentinel "error"
  # instead of aborting the whole search.
  maybe_extract_speech <- purrr::possibly(extract_speech, otherwise = "error")
  texts <-
    purrr::map(
      pages,
      ~ shift_url(.x)) %>%
    unlist() %>%
    purrr::discard(~ .x == "empty") %>%  # drop pages that yielded no URL
    purrr::map(~ maybe_extract_speech(.x)) %>%
    unlist()
  # NOTE(review): "error" sentinels from failed extractions are NOT
  # filtered out of `texts` -- confirm tidy_cleaner() handles them.
  # Build the metadata table for every page; a failed parse becomes a
  # one-row tibble with an `error` column rather than aborting.
  maybe_extract_table <- purrr::possibly(
    extract_table,
    otherwise = tibble::tibble(error = "error"))
  purrr::map_dfr(
    pages,
    ~ maybe_extract_table(.x)) %>%
    tidy_cleaner(texts)
}
| /R/speechbr_data.R | permissive | dcardosos/speechbr | R | false | false | 1,907 | r | #' Obtains the text of the speech
#'
#' @description
#'
#' Extract the text of the speech given an URL.
#'
#' @param keyword principal text or phrase present on speech.
#' @param start_date start date of search.
#' @param end_date end date of search.
#' @param uf state acronym.
#' @param speaker speaker's name.
#' @param party political party of speaker.
#'
#' @return the speech data with all informational columns and the speech.
#'
#' @export
#'
#' @examples
#' \dontrun{
#'
#' tecnologia_speeches <- speech_data(
#' keyword = "tecnologia",
#' reference_date = "2021-12-20",
#' start_date = "2021-12-10",
#' end_date = "2021-12-31")
#'
#'}
# TODO: error para start_date > end_date
speech_data <- function(
keyword,
start_date,
end_date,
uf = "",
speaker = "",
party = "") {
if (lubridate::ymd(start_date) > lubridate::ymd(end_date)) {
rlang::abort("`start_date` can't be greater than `end_date`.")
}
# extract html pages
partial_build_url <- purrr::partial(
build_url,
keyword = keyword,
start_date = start_date,
end_date = end_date,
uf = uf,
speaker = speaker,
party = party)
first_page <- partial_build_url(current_page = 1) %>%
speechbr_api()
pages <-
purrr::map(
seq(1, parse_pagination(first_page)),
~ partial_build_url(current_page = .x)) %>%
purrr::map(~ speechbr_api(.x))
# extract the text speeches
maybe_extract_speech <- purrr::possibly(extract_speech, otherwise = "error")
texts <-
purrr::map(
pages,
~ shift_url(.x)) %>%
unlist() %>%
purrr::discard(~ .x == "empty") %>%
purrr::map(~ maybe_extract_speech(.x)) %>%
unlist()
# extract table
maybe_extract_table <- purrr::possibly(
extract_table,
otherwise = tibble::tibble(error = "error"))
purrr::map_dfr(
pages,
~ maybe_extract_table(.x)) %>%
tidy_cleaner(texts)
}
|
## ---- echo=FALSE, message=FALSE-----------------------------------------------
library(rmonad)
library(magrittr)
set.seed(210)
## -----------------------------------------------------------------------------
# %>>% corresponds to Haskell's >>=
1:5 %>>%
sqrt %>>%
sqrt %>>%
sqrt
## -----------------------------------------------------------------------------
1:5 %>%
sqrt %>%
sqrt %>%
sqrt
## -----------------------------------------------------------------------------
1:5 %>>%
sqrt %v>% # store this result
sqrt %>>%
sqrt
## -----------------------------------------------------------------------------
1:5 %>>% { o <- . * 2 ; { o + . } %>% { . + o } }
## -----------------------------------------------------------------------------
-1:3 %>>%
sqrt %v>%
sqrt %>>%
sqrt
## -----------------------------------------------------------------------------
"wrench" %>>%
sqrt %v>%
sqrt %>>%
sqrt
## ---- error=TRUE--------------------------------------------------------------
"wrench" %>%
sqrt %>%
sqrt %>%
sqrt
## -----------------------------------------------------------------------------
1:5 %>>% sqrt %>% esc
## ---- error=TRUE--------------------------------------------------------------
"wrench" %>>% sqrt %>>% sqrt %>% esc
## -----------------------------------------------------------------------------
1:5 %>>%
sqrt %v>%
sqrt %>>%
sqrt %>% mtabulate
## -----------------------------------------------------------------------------
-2:2 %>>% sqrt %>>% colSums %>% missues
## -----------------------------------------------------------------------------
result <- 1:5 %v>% sqrt %v>% sqrt %v>% sqrt
get_value(result)[[2]]
## ---- eval=FALSE--------------------------------------------------------------
# cars %>_% write.csv(file="cars.tab") %>>% summary
## -----------------------------------------------------------------------------
cars %>_% plot(xlab="index", ylab="value") %>>% summary
## ---- eval=FALSE--------------------------------------------------------------
# cars %>_%
# plot(xlab="index", ylab="value") %>_%
# write.csv(file="cars.tab") %>>%
# summary
## -----------------------------------------------------------------------------
iris %>_%
{ stopifnot(is.data.frame(.)) } %>_%
{ stopifnot(sapply(.,is.numeric)) } %>>%
colSums %|>% head
## -----------------------------------------------------------------------------
1:10 %>>% colSums %|>% sum
## ---- eval=FALSE--------------------------------------------------------------
# # try to load a cached file, on failure rerun the analysis
# read.table("analyasis_cache.tab") %||% run_analysis(x)
## -----------------------------------------------------------------------------
x <- list()
# compare
if(length(x) > 0) { x[[1]] } else { NULL }
# to
x[[1]] %||% NULL %>% esc
## ---- eval=FALSE--------------------------------------------------------------
# read.table("a.tab") %||% read.table("a.tsv") %>>% dostuff
## -----------------------------------------------------------------------------
letters[1:10] %v>% colSums %|>% sum %||% message("Can't process this")
## ---- eval=FALSE--------------------------------------------------------------
# rnorm(30) %>^% qplot(xlab="index", ylab="value") %>>% mean
## ---- eval=FALSE--------------------------------------------------------------
# rnorm(30) %>^% qplot(xlab="index", ylab="value") %>^% summary %>>% mean
## -----------------------------------------------------------------------------
x <- 1:10 %>^% dgamma(10, 1) %>^% dgamma(10, 5) %^>% cor
get_value(x)
## ---- fig.cap="1: the original iris table, 2: stores the cached iris data, 3: nrow, 4: qplot, 5: summary."----
# build memory cacher
f <- make_recacher(memory_cache)
# make core dataset
m <- as_monad(iris) %>>%
dplyr::select(
sepal_length = Sepal.Length,
sepal_width = Sepal.Width,
species = Species
) %>%
# cache value with tag 'iris'
f('iris') %>>%
# some downstream stuff
nrow
# Now can pick from the tagged node
m <- view(m, 'iris') %>>% {
qplot(
x=sepal_length,
y=sepal_width,
color=species,
data=.
)} %>% f('plot')
# and repeat however many times we like
m <- view(m, 'iris') %>>% summary %>% f('sum')
plot(m)
## -----------------------------------------------------------------------------
runif(10) %>>% sum %__%
rnorm(10) %>>% sum %__%
rexp(10) %>>% sum
## ---- eval=FALSE--------------------------------------------------------------
# program <-
# {
# x = 2
# y = 5
# x * y
# } %__% {
# letters %>% sqrt
# } %__% {
# 10 * x
# }
## -----------------------------------------------------------------------------
funnel(
"yolo",
stop("stop, drop, and die"),
runif("simon"),
k = 2
)
## ---- error=TRUE--------------------------------------------------------------
list( "yolo", stop("stop, drop, and die"), runif("simon"), 2)
## ---- eval=FALSE--------------------------------------------------------------
# funnel(read.csv("a.csv"), read.csv("b.csv")) %*>% merge
## ---- eval=FALSE--------------------------------------------------------------
# funnel(
# a = read.csv("a.csv") %>>% do_analysis_a,
# b = read.csv("b.csv") %>>% do_analysis_b,
# k = 5
# ) %*>% joint_analysis
## -----------------------------------------------------------------------------
{
"This is docstring. The following list is metadata associated with this
node. Both the docstring and the metadata list will be processed out of
this function before it is executed. They also will not appear in the code
stored in the Rmonad object."
list(sys = sessionInfo(), foo = "This can be anything")
# This NULL is necessary, otherwise the metadata list above would be
# treated as the node output
NULL
} %__% # The %__% operator connects independent pieces of a pipeline.
"a" %>>% {
"The docstrings are stored in the Rmonad objects. They may be extracted in
the generation of reports. For example, they could go into a text block
below the code in a knitr document. The advantage of having documentation
here, is that it is coupled unambiguously to the generating function. These
annotations, together with the ability to chain chains of monads, allows
whole complex workflows to be built, with the results collated into a
single object. All errors propagate exactly as errors should, only
affecting downstream computations. The final object can be converted into a
markdown document and automatically generated function graphs."
paste(., "b")
}
## -----------------------------------------------------------------------------
foo <- function(x, y) {
"This is a function containing a pipeline. It always fails"
"a" %>>% paste(x) %>>% paste(y) %>>% log
}
bar <- function(x) {
"this is another function, it doesn't fail"
funnel("b", "c") %*>% foo %>>% paste(x)
}
"d" %>>% bar
## -----------------------------------------------------------------------------
"hello world" %>>% {
list(
format_error=function(x, err){
paste0("Failure on input '", x, "': ", err)
}
)
sqrt(.)
}
## -----------------------------------------------------------------------------
d <- mtcars %>>% {
list(summarize=summary)
subset(., mpg > 20)
} %>>% nrow
get_summary(d)[[2]]
| /inst/doc/introduction.R | no_license | cran/rmonad | R | false | false | 7,540 | r | ## ---- echo=FALSE, message=FALSE-----------------------------------------------
library(rmonad)
library(magrittr)
set.seed(210)
## -----------------------------------------------------------------------------
# %>>% corresponds to Haskell's >>=
1:5 %>>%
sqrt %>>%
sqrt %>>%
sqrt
## -----------------------------------------------------------------------------
1:5 %>%
sqrt %>%
sqrt %>%
sqrt
## -----------------------------------------------------------------------------
1:5 %>>%
sqrt %v>% # store this result
sqrt %>>%
sqrt
## -----------------------------------------------------------------------------
1:5 %>>% { o <- . * 2 ; { o + . } %>% { . + o } }
## -----------------------------------------------------------------------------
-1:3 %>>%
sqrt %v>%
sqrt %>>%
sqrt
## -----------------------------------------------------------------------------
"wrench" %>>%
sqrt %v>%
sqrt %>>%
sqrt
## ---- error=TRUE--------------------------------------------------------------
"wrench" %>%
sqrt %>%
sqrt %>%
sqrt
## -----------------------------------------------------------------------------
1:5 %>>% sqrt %>% esc
## ---- error=TRUE--------------------------------------------------------------
"wrench" %>>% sqrt %>>% sqrt %>% esc
## -----------------------------------------------------------------------------
1:5 %>>%
sqrt %v>%
sqrt %>>%
sqrt %>% mtabulate
## -----------------------------------------------------------------------------
-2:2 %>>% sqrt %>>% colSums %>% missues
## -----------------------------------------------------------------------------
result <- 1:5 %v>% sqrt %v>% sqrt %v>% sqrt
get_value(result)[[2]]
## ---- eval=FALSE--------------------------------------------------------------
# cars %>_% write.csv(file="cars.tab") %>>% summary
## -----------------------------------------------------------------------------
cars %>_% plot(xlab="index", ylab="value") %>>% summary
## ---- eval=FALSE--------------------------------------------------------------
# cars %>_%
# plot(xlab="index", ylab="value") %>_%
# write.csv(file="cars.tab") %>>%
# summary
## -----------------------------------------------------------------------------
iris %>_%
{ stopifnot(is.data.frame(.)) } %>_%
{ stopifnot(sapply(.,is.numeric)) } %>>%
colSums %|>% head
## -----------------------------------------------------------------------------
1:10 %>>% colSums %|>% sum
## ---- eval=FALSE--------------------------------------------------------------
# # try to load a cached file, on failure rerun the analysis
# read.table("analyasis_cache.tab") %||% run_analysis(x)
## -----------------------------------------------------------------------------
x <- list()
# compare
if(length(x) > 0) { x[[1]] } else { NULL }
# to
x[[1]] %||% NULL %>% esc
## ---- eval=FALSE--------------------------------------------------------------
# read.table("a.tab") %||% read.table("a.tsv") %>>% dostuff
## -----------------------------------------------------------------------------
letters[1:10] %v>% colSums %|>% sum %||% message("Can't process this")
## ---- eval=FALSE--------------------------------------------------------------
# rnorm(30) %>^% qplot(xlab="index", ylab="value") %>>% mean
## ---- eval=FALSE--------------------------------------------------------------
# rnorm(30) %>^% qplot(xlab="index", ylab="value") %>^% summary %>>% mean
## -----------------------------------------------------------------------------
x <- 1:10 %>^% dgamma(10, 1) %>^% dgamma(10, 5) %^>% cor
get_value(x)
## ---- fig.cap="1: the original iris table, 2: stores the cached iris data, 3: nrow, 4: qplot, 5: summary."----
# build memory cacher
f <- make_recacher(memory_cache)
# make core dataset
m <- as_monad(iris) %>>%
dplyr::select(
sepal_length = Sepal.Length,
sepal_width = Sepal.Width,
species = Species
) %>%
# cache value with tag 'iris'
f('iris') %>>%
# some downstream stuff
nrow
# Now can pick from the tagged node
m <- view(m, 'iris') %>>% {
qplot(
x=sepal_length,
y=sepal_width,
color=species,
data=.
)} %>% f('plot')
# and repeat however many times we like
m <- view(m, 'iris') %>>% summary %>% f('sum')
plot(m)
## -----------------------------------------------------------------------------
runif(10) %>>% sum %__%
rnorm(10) %>>% sum %__%
rexp(10) %>>% sum
## ---- eval=FALSE--------------------------------------------------------------
# program <-
# {
# x = 2
# y = 5
# x * y
# } %__% {
# letters %>% sqrt
# } %__% {
# 10 * x
# }
## -----------------------------------------------------------------------------
funnel(
"yolo",
stop("stop, drop, and die"),
runif("simon"),
k = 2
)
## ---- error=TRUE--------------------------------------------------------------
list( "yolo", stop("stop, drop, and die"), runif("simon"), 2)
## ---- eval=FALSE--------------------------------------------------------------
# funnel(read.csv("a.csv"), read.csv("b.csv")) %*>% merge
## ---- eval=FALSE--------------------------------------------------------------
# funnel(
# a = read.csv("a.csv") %>>% do_analysis_a,
# b = read.csv("b.csv") %>>% do_analysis_b,
# k = 5
# ) %*>% joint_analysis
## -----------------------------------------------------------------------------
{
"This is docstring. The following list is metadata associated with this
node. Both the docstring and the metadata list will be processed out of
this function before it is executed. They also will not appear in the code
stored in the Rmonad object."
list(sys = sessionInfo(), foo = "This can be anything")
# This NULL is necessary, otherwise the metadata list above would be
# treated as the node output
NULL
} %__% # The %__% operator connects independent pieces of a pipeline.
"a" %>>% {
"The docstrings are stored in the Rmonad objects. They may be extracted in
the generation of reports. For example, they could go into a text block
below the code in a knitr document. The advantage of having documentation
here, is that it is coupled unambiguously to the generating function. These
annotations, together with the ability to chain chains of monads, allows
whole complex workflows to be built, with the results collated into a
single object. All errors propagate exactly as errors should, only
affecting downstream computations. The final object can be converted into a
markdown document and automatically generated function graphs."
paste(., "b")
}
## -----------------------------------------------------------------------------
foo <- function(x, y) {
"This is a function containing a pipeline. It always fails"
"a" %>>% paste(x) %>>% paste(y) %>>% log
}
bar <- function(x) {
"this is another function, it doesn't fail"
funnel("b", "c") %*>% foo %>>% paste(x)
}
"d" %>>% bar
## -----------------------------------------------------------------------------
"hello world" %>>% {
list(
format_error=function(x, err){
paste0("Failure on input '", x, "': ", err)
}
)
sqrt(.)
}
## -----------------------------------------------------------------------------
d <- mtcars %>>% {
list(summarize=summary)
subset(., mpg > 20)
} %>>% nrow
get_summary(d)[[2]]
|
# Shiny server for the danStat browser app: the user picks a statistical
# subject, and the table list for that subject is fetched reactively and
# offered in a second dropdown.
#
# BUGFIX: the original imported only shinyServer/renderUI/selectInput and
# importSubjects, but the body also uses shiny::reactive, the magrittr pipe,
# dplyr::filter and danStat::importTables -- those names would only resolve
# if the packages happened to be attached.  Import them explicitly.
import::from(shiny, shinyServer, renderUI, selectInput, reactive)
import::from(dplyr, filter, `%>%`)
import::from(danStat, importSubjects, importTables)

shinyServer(function(input, output, session) {

  subjects <- importSubjects()

  # Subject dropdown, populated once from the subject list.
  output$outSubjects <- renderUI({
    selectInput(inputId = "inSubjects",
                label = "Choose Subject",
                choices = subjects$description,
                # NOTE(review): `subjects[1]` is the whole first column, not
                # a single value -- confirm `subjects$description[1]` wasn't
                # intended here.
                selected = subjects[1])
  })

  # Tables for the currently selected subject (NULL until a subject is chosen).
  tables <- reactive({
    if (!is.null(input$inSubjects)) {
      choice <- input$inSubjects
      # Look up the id of the chosen subject description, then fetch its tables.
      importTables(subjects %>%
                     filter(description == choice) %>%
                     `$`(id))
    }
  })

  # Table dropdown, re-rendered whenever the subject selection changes.
  output$outTables <- renderUI({
    selectInput(inputId = "inTables",
                label = "Choose Table",
                choices = tables() %>% `$`(text),
                selected = 1)
  })
})
| /inst/app/server.R | no_license | AKLLaursen/danStat | R | false | false | 810 | r | import::from(shiny, shinyServer, renderUI, selectInput)
import::from(danStat, importSubjects)
shinyServer(function(input, output, session) {
subjects <- importSubjects()
output$outSubjects <- renderUI({
selectInput(inputId = "inSubjects",
label = "Choose Subject",
choices = subjects$description,
selected = subjects[1])
})
tables <- reactive({
if (!is.null(input$inSubjects)) {
choice <- input$inSubjects
importTables(subjects %>%
filter(description == choice) %>%
`$`(id))
}
})
output$outTables <- renderUI({
selectInput(inputId = "inTables",
label = "Choose Table",
choices = tables() %>% `$`(text),
selected = 1)
})
})
|
#Movement from MultipleBees5
#Bee does not harvest initially until close enough to memory spot, but then goes anywhere
#most parameters and storage set in All_The_Simulations_Raster
# NOTE(review): this script reads `nsim`, `n_bees` (and, further down,
# `bouts`, `nx`, `ny`, etc.) from the driver script -- it is not standalone.
Nest_Resource = c(1,1) #amount of (wild/yellow, blueberry/white) resource stored in the nest (set to (1,1) initially to avoid divide by 0 errors) - needs to be reset for each simulation
NEST_RESOURCE = matrix(NA, nrow = nsim, ncol = 2) #store the nest resource from each simulation
#### Storage
# Per-bee index bookkeeping into the flat DATA* vectors below.
start = numeric(n_bees) #store the starting location indices for each bee's locations
end = numeric(n_bees) #store the ending location indices for each bee's locations
starth = numeric(n_bees) #store the starting location indices for each bee's harvested locations
endh = numeric(n_bees) #store the ending location indices for each bee's harvested locations
# Flat storage for every location visited by every bee, preallocated at
# 10000 slots per bee and indexed by the running counter `locations`.
DATAX=rep(NA,n_bees*10000) #All X locations of all bees
DATAY=rep(NA,n_bees*10000) #All Y locations of all bees
DATAXharvested=rep(NA,n_bees*10000) #All X locations of all harvested flowers
DATAYharvested=rep(NA,n_bees*10000) #All Y locations of all harvested flowers
DATAharvested = rep(0,n_bees*10000) #Whether harvested there
DATAbee_number=rep(NA,n_bees*10000) #Which bee this is
DATAbee_number_harvested=rep(NA,n_bees*10000) #bee number at only harvested flowers
DATAtype = rep(NA,n_bees*10000) #Which flower type at all locations
DATAtype_harvested=rep(NA,n_bees*10000) #flower type at only harvested flowers
DATAcounter = rep(NA,n_bees*10000) #How many times the bee has been a harvester so far
locations = 1 #number of locations that have been visited, to index through above vectors
Bees_on_bushes=list(0)
# Simulation-level tallies, accumulated across all bees.
incomplete_bouts = 0 #count the number of incomplete bouts made by all bees
total_bouts = 0 #count the total number of bouts made by all bees (just to be safe)
total_collected = 0 #total amount of resource collected this simulation
for(bee in 1:n_bees){
print(paste("landscape", landscape,"simulation ", sim_number, "foraging bee ", bee))
start[bee] = locations #this is where this bee starts moving
starth[bee] = sum(DATAharvested,na.rm=T)+1 #this is where this bee starts harvesting
#################################Do the thing####################################################
### Initial values and storage
bouts_counter=0
i = 2
X = rep(NA,1000*bouts) #Create vectors for X and Y location of bee at end of each segment (might not need these)
Y = rep(NA,1000*bouts)
diffusion_counter = 0 #how many times the bee has been in harvester mode
DATAX[locations] = nx #bee starts at the nest
DATAY[locations] = ny
# DATAharvested[locations] = 0 #the bee does not harvest at the nest
DATAbee_number[locations] = bee #which bee it is
DATAtype[locations] = 0 #we'll pretend there's no resource, whether or not that's true, because the bee doesn't harvest right at the nest
DATAcounter[locations] = diffusion_counter #how many times the bee has been a harvester
locations = locations + 1 #increase 'locations' after every movement of any type (being at the nest counts as a movement here)
Status = numeric(1000*bouts) #store which movement mode the bee is in (not sure if I need this -- maybe add to DATA)
Status[1]=0 #initially the bee is a scout
walks = 0 #track how many scouting steps the bee has made -- maybe add this to DATA
memory_this_bee = matrix(memory_locations[which(memory_locations[,4]==bee),],ncol=5) #the memory locations for this bee
where = where_to_go() #decide where to remember
mx = where[1] #x and y coordinates of remembered location
my = where[2]
All_Bees_Memories[bee,] = where
BeeID = c(0, nx, ny, Turning2(nx,ny,Memory(nx,ny,mx,my),nx,ny,max_dist),0,where[3],0)
#1 population status: 1=Harvester; 0=Scout; 2 = returning to nest
#2,3 location: x & y coordinates of bee (nest is at (nx,ny))
#4 angle: direction of travel (initial angle based on memory direction)
#5 amount of resource
#6 flower type to search for; 1 for wildflower and 2 for blueberry
#7 whether or not the bee has encountered flowers since leaving the nest (0=no,1=yes)
### Make the bee move around!
while(bouts_counter<bouts){ ## do things for bouts steps ##
if(BeeID[1]==0){ ## what to do if the bee is a scout ##
## The bee will advect first
theta = BeeID[4] #the angle the bee travels at
advect_time = rexp(1,mu) #the time to travel for
new_x = DATAX[locations-1]+a*cos(theta)*advect_time #the new location the bee travels to (velocity*time)
new_y = DATAY[locations-1]+a*sin(theta)*advect_time #the new location the bee travels to (velocity*time)
walks = walks+1 #increase number of scouting steps
memory = Memory(new_x,new_y,mx,my) #the direction of the remembered location
memory_distance = sqrt((new_x-mx)^2+(new_y-my)^2) #the distance from the memory spot
nest_distance = sqrt((new_x-nx)^2+(new_y-ny)^2) #the distance from the nest
Fl = flowers(new_x, new_y) #the flower value
#then the bee will decide what to do next
if(walks<=too_long_f){ #not scouting for too long, so check other stuff
if(nest_distance<=max_dist && BeeID[7]==1 && Fl[1]>0){ #condition 1
BeeID[1] = 1 #the bee becomes a harvester
}else if(nest_distance<=max_dist && BeeID[7]==1 && Fl[1]==0){ #condition 2
BeeID[1] = 0 #stay a scout
BeeID[4] = Turning2(new_x,new_y,theta,nx,ny,max_dist) #choose a new angle based on previous direction of travel
}else if(nest_distance<=max_dist && BeeID[7]==0 && memory_distance>=memory_radius){ #condition 3
BeeID[1] = 0 #stay a scout
BeeID[4] = Turning2(new_x,new_y,memory,nx,ny,max_dist) #choose a new angle based on memory
}else if(nest_distance<=max_dist && BeeID[7]==0 && memory_distance<memory_radius && Fl[1]==BeeID[6]){ #condition 4
BeeID[1] = 1 #the bee becomes a harvester
}else if(nest_distance<=max_dist && BeeID[7]==0 && memory_distance<memory_radius && Fl[1]!=BeeID[6]){ #condition 5
BeeID[1] = 0 #stay a scout
BeeID[4] = Turning2(new_x,new_y,theta,nx,ny,max_dist) #choose a new angle based on previous direction of travel
}else{ #condition 6
BeeID[1] = 0 #stay a scout
BeeID[4] = Turning2(new_x,new_y,memory,nx,ny,max_dist) #choose a new angle (will be back towards nest)
}
}else{ #condition 7 (scouting too long)
BeeID[1] = 2 #become a returner
incomplete_bouts=incomplete_bouts+1
Nest_Resource = Nest_Resource + resource #store the amount of resource collected in this segment
BeeID[5] = BeeID[5]+sum(resource) #total amount of resource collected on this bout.
}
#Store the results of the Scouting segment
DATAX[locations] = new_x #the new x and y locations
DATAY[locations] = new_y
DATAbee_number[locations] = bee #which bee this is
DATAtype[locations] = Fl[1] #what resource type the bee ended the segment in
DATAcounter[locations] = diffusion_counter #how many times the bee has been a harvester so far
locations = locations + 1 #increase 'locations' after every movement of any type
Status[i] = BeeID[1]
#end of Scouting
}else if(BeeID[1]==1){ ## what to do if the bee is a harvester ##
walks = 0 #reset the number of scouting steps the bee has made
diffusion_counter = diffusion_counter+1 #how many times the bee has been a harvester
BeeID[7]=1 #bee has been a harvester now
harvested = sum(DATAharvested) #the number of flowers the bee has harvested from
resource = c(0,0) #the bee hasn't collected anything yet this harvesting segment -- maybe add this to DATA
#Check all the stuff for the current location to determine if the bee harvests here
distancex_indices = which(abs(DATAX[locations-1]-DATAXharvested)<depletion_dist) #just the points too close to the location in the x direction
harvestedy = DATAYharvested[distancex_indices] #the corresponding y points
distancey_indices=which(abs(DATAY[locations-1]-harvestedy)<depletion_dist) #which of those points is also too close in the y direction
depleted = length(distancey_indices) #how many depleted spots the bee is too close to
Fl = flowers(DATAX[locations-1],DATAY[locations-1]) #flower value at location
if(depleted==0){ #the flowers are undepleted
resource[Fl] = resource[Fl] + resource_unit #add resource unit
DATAXharvested[harvested+1] = DATAX[locations-1] #harvested+1 is the number of flowers harvested from including this one
DATAYharvested[harvested+1] = DATAY[locations-1] #locations-1 is the index of the location it landed
DATAbee_number_harvested[harvested+1] = bee
DATAtype_harvested[harvested+1] = Fl[1]
DATAharvested[locations-1] = 1
harvested = harvested+1 #the bee has harvested at one more flower
}else{ #the flowers are depleted
DATAharvested[locations-1] = 0 #have to reset this here, because it was set to 1 at the end of scouting
}
#end of checking the location the bee arrived to the flower patch at
#do the rest of the harvesting stuff
harvest_time = sum(rexp(2,gamma1)) #how long the bee will stay a harvester for
steps = max(1,floor(harvest_time/(handling_time+deltat))) #how many steps the bee will make
for(j in 2:(steps+1)){ ## the bee diffuses/random walks for a while ##
walk = rnorm(2,0,1) #random distances to travel in x and y directions
grad = flowers_gradient(DATAX[locations-1], DATAY[locations-1]) # "gradient" of flower field at current location
DATAX[locations] = DATAX[locations-1]+sqrt(2*D*deltat)*walk[1] + grad[1]*deltat #where the bee travels to
DATAY[locations] = DATAY[locations-1]+sqrt(2*D*deltat)*walk[2] + grad[2]*deltat #where the bee travels to
Fl = flowers(DATAX[locations], DATAY[locations]) #flower value at the new location
if(Fl>0){ #the bee is in flowers, so check for depletion
distancex_indices = which(abs(DATAX[locations]-DATAXharvested)<depletion_dist) #just the points too close to the location in the x direction
harvestedy = DATAYharvested[distancex_indices] #the corresponding y points
distancey_indices=which(abs(DATAY[locations]-harvestedy)<depletion_dist) #which of those points is also too close in the y direction
depleted = length(distancey_indices) #how many depleted spots the bee is too close to
if(depleted==0){ #the flowers are undepleted
resource[Fl] = resource[Fl] + resource_unit #add resource unit
DATAXharvested[harvested+1] = DATAX[locations] #this is a location where the bee harvested
DATAYharvested[harvested+1] = DATAY[locations]
DATAharvested[locations] = 1 #harvested here
DATAbee_number_harvested[harvested+1] = bee
DATAtype_harvested[harvested+1] = Fl
harvested = harvested+1
}else{ #the flowers are depleted
DATAharvested[locations] = 0 #the bee did not collect resource here
}
}else{ #the bee is not in flowers
DATAharvested[locations] = 0 #the bee did not collect resource here
}
DATAbee_number[locations] = bee #which bee this is
DATAtype[locations] = Fl[1] #which type of flowers it just harvested
DATAcounter[locations] = diffusion_counter #how many times it's been a harvester
locations = locations + 1 #increase 'locations' after every movement of any type
} #end of harvesting movement loop
Nest_Resource = Nest_Resource + resource #store the amount of resource collected in this segment
BeeID[5] = BeeID[5]+sum(resource) #total amount of resource collected on this bout.
if(BeeID[5] >= full){ #the bee is full
BeeID[1] = 2 #become a returner
}else{ #the bee is not full
BeeID[1] = 0 #switch back to beeing a scout
BeeID[4] = Turning4(DATAX[locations-1], DATAY[locations-1],post_harvest_angle) #pick a new angle to travel at as scout
}
Status[i] = BeeID[1]
#done doing the harvesting stuff
}else if(BeeID[1] == 2){ ## what to do if the bee is a returner
#count the bouts complete/incomplete
total_bouts = total_bouts+1
total_collected = total_collected + BeeID[5]
walks = 0 #reset number of scouting steps bee has made
DATAX[locations] = nx #send bee back to nest
DATAY[locations] = ny #send bee back to nest
DATAharvested[locations] = 0 #the bee did not collect resource here
DATAbee_number[locations] = bee #which bee this is
DATAtype[locations] = 0 #no harvesting at the nest
DATAcounter[locations] = diffusion_counter #how many times it's been a harvester
locations = locations + 1 #increase 'locations' after every movement of any type
# Times[i] = sqrt(BeeID[1]^2+BeeID[2]^2)/a #calculate time to get back to nest
BeeID[5] = 0 #empty the resource
BeeID[1] = 0 #make the bee a scout
BeeID[4] = Turning2(nx,ny,Memory(nx,ny,mx,my),nx,ny,max_dist) #new angle based on memory
bouts_counter = bouts_counter+1 #the bee has completed another bout
BeeID[7] = 0 #reset tracker for whether or not bee has found flowers since leaving nest
Status[i] = BeeID[1]
}
#Store/update things
BeeID[2] = DATAX[locations-1] #bee's location
BeeID[3] = DATAY[locations-1] #bee's location
# Theta[i] = BeeID[4] #bee's direction of travel
i = i+1
} #end of movement loops
end[bee] = locations-1 #don't include the +1 from the very last return to nest
endh[bee] = sum(DATAharvested,na.rm=T)
## Make a matrix of the data for just this bee
start1 = start[bee]
end1 = end[bee]
starth1 = starth[bee]
endh1 = endh[bee]
DATA = matrix(c(DATAX[start1:end1],DATAY[start1:end1],DATAharvested[start1:end1],DATAbee_number[start1:end1],DATAtype[start1:end1]),ncol=5)
HARVESTED = matrix(c(DATAXharvested[starth1:endh1],DATAYharvested[starth1:endh1],DATAbee_number_harvested[starth1:endh1],DATAtype_harvested[starth1:endh1]),ncol=4)
HARVESTED_blueberry = matrix(HARVESTED[HARVESTED[,4]==2,],ncol=4) #just the rows that are blueberry
# ### Show a plot of where the bee went!
# Xp = DATA[,1]
# Yp = DATA[,2]
# xmin = min(0,min(Xp)) #so that the whole path will show on the plot
# ymin = min(0,min(Yp))
# xmax = max(153,max(Xp))
# ymax = max(225,max(Yp))
#
# s = 1:(end1-1)
# x_plot = seq(x_min,x_max,.1) #just in case
# y_plot = seq(y_min,y_max,.1)
#
# image(x_plot,y_plot,field, xlab = "x", ylab = "y", xlim = c(0,150), ylim = c(0,225),
# col=c("grey85","yellow1","lightcyan"),asp=TRUE)
# segments(Xp[s],Yp[s],Xp[s+1],Yp[s+1], lty=1,col="grey") #add lines for the foraging path
#
# points(HARVESTED[,1],HARVESTED[,2], col = "blue",pch=20,cex=.1) #add the flower visit points
#
# points(mx,my, col = "red",cex=1) #make the memory spot obvious
# points(nx,ny,pch=20,cex=1.5) #make the nest obvious
## Determine which bushes were visited by this bee
PP <- ppp(x=HARVESTED_blueberry[,1], y=HARVESTED_blueberry[,2],window = W_b) #the planar point pattern of the blueberry visited points
if(length(PP$x)>0){ #if the bee visited any blueberries, count it up
Count = quadratcount(PP, xbreaks = sort(c(left_edges_b,right_edges_b)), ybreaks=ybreaks_b) #uses x and y breaks specific to the field, set in landscape file
Count[which(Count>=1)]=1 #just set the field to 1 if the bee visited it at all
Bees_on_bushes[[bee]] = Count #save the data for this bee
}
} #end of bumble bees loops
#Save the nest resource for this simulation
NEST_RESOURCE[nsim,] = Nest_Resource
##### Save the points of all bees into a single matrix
DATA_All_Bees_harvested = cbind(DATAXharvested,DATAYharvested,DATAbee_number_harvested,DATAtype_harvested) #only the points where resource was collected
DATA_All_Bees_blueberry= DATA_All_Bees_harvested[DATAtype_harvested==2,] #only the points where blueberry was collected
DATA_All_Bees_harvested = na.omit(DATA_All_Bees_harvested) #remove all the extra NAs at the end
DATA_All_Bees_blueberry = na.omit(DATA_All_Bees_blueberry) #remove all the extra NAs at the end
##Count how many bees visited each "bush"
Total_Bees_on_Bushes = Reduce("+",Bees_on_bushes) #add together all of the bee field visit counts
##Count how many blueberry flowers were visited (same as above, but blueberry only)
PP_blueberry_visits <- ppp(x=DATA_All_Bees_blueberry[,1], y=DATA_All_Bees_blueberry[,2],window = W_b) #the planar point pattern
Count_blueberry_visits = quadratcount(PP_blueberry_visits, xbreaks = sort(c(left_edges_b,right_edges_b)), ybreaks = 0:225)
#the percent of bushes with at least x flower visits
proportion3 = numeric(max(Count_blueberry_visits))
for(i in 1:(max(Count_blueberry_visits)+1)){
proportion3[i] = length(which(Count_blueberry_visits>(i-1)))/(n_bushes)
}
| /Multiple_Bees.R | no_license | macquees/BumbleBeeWildflowerManuscript | R | false | false | 19,214 | r | #Movement from MultipleBees5
#Bee does not harvest initially until close enough to memory spot, but then goes anywhere
#most parameters and storage set in All_The_Simulations_Raster
## --- Per-simulation state and flat storage vectors ---------------------------
## All vectors below are preallocated at n_bees*10000 slots and indexed by the
## running counter 'locations'; unused tail slots stay NA and are trimmed later.
## Globals read here (nsim, n_bees) are expected to come from the wrapper script.
Nest_Resource = c(1,1) #amount of (wild/yellow, blueberry/white) resource stored in the nest (set to (1,1) initially to avoid divide by 0 errors) - needs to be reset for each simulation
# NOTE(review): nrow = nsim here, but later this script assigns NEST_RESOURCE[nsim,],
# which suggests nsim is the *current* simulation index set by the wrapper; if so,
# this matrix is re-created (and wiped) on every simulation -- confirm against the wrapper.
NEST_RESOURCE = matrix(NA, nrow = nsim, ncol = 2) #store the nest resource from each simulation
#### Storage
start = numeric(n_bees) #store the starting location indices for each bee's locations
end = numeric(n_bees) #store the ending location indices for each bee's locations
starth = numeric(n_bees) #store the starting location indices for each bee's harvested locations
endh = numeric(n_bees) #store the ending location indices for each bee's harvested locations
DATAX=rep(NA,n_bees*10000) #All X locations of all bees
DATAY=rep(NA,n_bees*10000) #All Y locations of all bees
DATAXharvested=rep(NA,n_bees*10000) #All X locations of all harvested flowers
DATAYharvested=rep(NA,n_bees*10000) #All Y locations of all harvested flowers
DATAharvested = rep(0,n_bees*10000) #Whether harvested there
DATAbee_number=rep(NA,n_bees*10000) #Which bee this is
DATAbee_number_harvested=rep(NA,n_bees*10000) #bee number at only harvested flowers
DATAtype = rep(NA,n_bees*10000) #Which flower type at all locations
DATAtype_harvested=rep(NA,n_bees*10000) #flower type at only harvested flowers
DATAcounter = rep(NA,n_bees*10000) #How many times the bee has been a harvester so far
locations = 1 #number of locations that have been visited, to index through above vectors
# NOTE(review): seeded with a single scalar 0; bees that never visit blueberries
# leave their list slot unset (NULL), which matters for Reduce("+", ...) later -- confirm.
Bees_on_bushes=list(0)
incomplete_bouts = 0 #count the number of incomplete bouts made by all bees
total_bouts = 0 #count the total number of bouts made by all bees (just to be safe)
total_collected = 0 #total amount of resource collected this simulation
## --- Per-bee loop: initialize this bee's state, then run its movement loop ---
for(bee in 1:n_bees){
print(paste("landscape", landscape,"simulation ", sim_number, "foraging bee ", bee))
start[bee] = locations #this is where this bee starts moving
starth[bee] = sum(DATAharvested,na.rm=T)+1 #this is where this bee starts harvesting
#################################Do the thing####################################################
### Initial values and storage
bouts_counter=0
i = 2
# NOTE(review): X, Y and Status are written below but never read again in this
# file; they look like leftovers from an earlier version -- candidates for removal.
X = rep(NA,1000*bouts) #Create vectors for X and Y location of bee at end of each segment (might not need these)
Y = rep(NA,1000*bouts)
diffusion_counter = 0 #how many times the bee has been in harvester mode
DATAX[locations] = nx #bee starts at the nest
DATAY[locations] = ny
# DATAharvested[locations] = 0 #the bee does not harvest at the nest
DATAbee_number[locations] = bee #which bee it is
DATAtype[locations] = 0 #we'll pretend there's no resource, whether or not that's true, because the bee doesn't harvest right at the nest
DATAcounter[locations] = diffusion_counter #how many times the bee has been a harvester
locations = locations + 1 #increase 'locations' after every movement of any type (being at the nest counts as a movement here)
Status = numeric(1000*bouts) #store which movement mode the bee is in (not sure if I need this -- maybe add this to DATA)
Status[1]=0 #initially the bee is a scout
walks = 0 #track how many scouting steps the bee has made -- maybe add this to DATA
# NOTE(review): memory_this_bee is built but not used in the visible code;
# where_to_go() / Memory() / Turning2() are defined elsewhere (wrapper/helpers).
memory_this_bee = matrix(memory_locations[which(memory_locations[,4]==bee),],ncol=5) #the memory locations for this bee
where = where_to_go() #decide where to remember
mx = where[1] #x and y coordinates of remembered location
my = where[2]
All_Bees_Memories[bee,] = where
BeeID = c(0, nx, ny, Turning2(nx,ny,Memory(nx,ny,mx,my),nx,ny,max_dist),0,where[3],0)
#1 population status: 1=Harvester; 0=Scout; 2 = returning to nest
#2,3 location: x & y coordinates of bee (nest is at (nx,ny))
#4 angle: direction of travel (initial angle based on memory direction)
#5 amount of resource
#6 flower type to search for; 1 for wildflower and 2 for blueberry
#7 whether or not the bee has encountered flowers since leaving the nest (0=no,1=yes)
### Make the bee move around!
## --- Movement state machine: one iteration per movement segment --------------
## BeeID[1] selects the mode: 0 = scout (ballistic advection + decision rules
## conditions 1-7), 1 = harvester (biased random walk with local depletion),
## 2 = returner (jump back to the nest, bank resource, reset for the next bout).
while(bouts_counter<bouts){ ## do things for bouts steps ##
if(BeeID[1]==0){ ## what to do if the bee is a scout ##
## The bee will advect first
theta = BeeID[4] #the angle the bee travels at
advect_time = rexp(1,mu) #the time to travel for
new_x = DATAX[locations-1]+a*cos(theta)*advect_time #the new location the bee travels to (velocity*time)
new_y = DATAY[locations-1]+a*sin(theta)*advect_time #the new location the bee travels to (velocity*time)
walks = walks+1 #increase number of scouting steps
memory = Memory(new_x,new_y,mx,my) #the direction of the remembered location
memory_distance = sqrt((new_x-mx)^2+(new_y-my)^2) #the distance from the memory spot
nest_distance = sqrt((new_x-nx)^2+(new_y-ny)^2) #the distance from the nest
Fl = flowers(new_x, new_y) #the flower value
#then the bee will decide what to do next
if(walks<=too_long_f){ #not scouting for too long, so check other stuff
if(nest_distance<=max_dist && BeeID[7]==1 && Fl[1]>0){ #condition 1
BeeID[1] = 1 #the bee becomes a harvester
}else if(nest_distance<=max_dist && BeeID[7]==1 && Fl[1]==0){ #condition 2
BeeID[1] = 0 #stay a scout
BeeID[4] = Turning2(new_x,new_y,theta,nx,ny,max_dist) #choose a new angle based on previous direction of travel
}else if(nest_distance<=max_dist && BeeID[7]==0 && memory_distance>=memory_radius){ #condition 3
BeeID[1] = 0 #stay a scout
BeeID[4] = Turning2(new_x,new_y,memory,nx,ny,max_dist) #choose a new angle based on memory
}else if(nest_distance<=max_dist && BeeID[7]==0 && memory_distance<memory_radius && Fl[1]==BeeID[6]){ #condition 4
BeeID[1] = 1 #the bee becomes a harvester
}else if(nest_distance<=max_dist && BeeID[7]==0 && memory_distance<memory_radius && Fl[1]!=BeeID[6]){ #condition 5
BeeID[1] = 0 #stay a scout
BeeID[4] = Turning2(new_x,new_y,theta,nx,ny,max_dist) #choose a new angle based on previous direction of travel
}else{ #condition 6
BeeID[1] = 0 #stay a scout
BeeID[4] = Turning2(new_x,new_y,memory,nx,ny,max_dist) #choose a new angle (will be back towards nest)
}
}else{ #condition 7 (scouting too long)
BeeID[1] = 2 #become a returner
incomplete_bouts=incomplete_bouts+1
# NOTE(review): 'resource' is only (re)set inside the harvester branch, so this
# re-adds the previous harvesting segment's haul (already banked at the end of
# that segment), and errors if the bee has never harvested -- confirm intent.
Nest_Resource = Nest_Resource + resource #store the amount of resource collected in this segment
BeeID[5] = BeeID[5]+sum(resource) #total amount of resource collected on this bout.
}
#Store the results of the Scouting segment
DATAX[locations] = new_x #the new x and y locations
DATAY[locations] = new_y
DATAbee_number[locations] = bee #which bee this is
DATAtype[locations] = Fl[1] #what resource type the bee ended the segment in
DATAcounter[locations] = diffusion_counter #how many times the bee has been a harvester so far
locations = locations + 1 #increase 'locations' after every movement of any type
Status[i] = BeeID[1]
#end of Scouting
}else if(BeeID[1]==1){ ## what to do if the bee is a harvester ##
walks = 0 #reset the number of scouting steps the bee has made
diffusion_counter = diffusion_counter+1 #how many times the bee has been a harvester
BeeID[7]=1 #bee has been a harvester now
harvested = sum(DATAharvested) #the number of flowers the bee has harvested from
resource = c(0,0) #the bee hasn't collected anything yet this harvesting segment -- maybe add this to DATA
#Check all the stuff for the current location to determine if the bee harvests here
distancex_indices = which(abs(DATAX[locations-1]-DATAXharvested)<depletion_dist) #just the points too close to the location in the x direction
harvestedy = DATAYharvested[distancex_indices] #the corresponding y points
distancey_indices=which(abs(DATAY[locations-1]-harvestedy)<depletion_dist) #which of those points is also too close in the y direction
depleted = length(distancey_indices) #how many depleted spots the bee is too close to
Fl = flowers(DATAX[locations-1],DATAY[locations-1]) #flower value at location
if(depleted==0){ #the flowers are undepleted
resource[Fl] = resource[Fl] + resource_unit #add resource unit
DATAXharvested[harvested+1] = DATAX[locations-1] #harvested+1 is the number of flowers harvested from including this one
DATAYharvested[harvested+1] = DATAY[locations-1] #locations-1 is the index of the location it landed
DATAbee_number_harvested[harvested+1] = bee
DATAtype_harvested[harvested+1] = Fl[1]
DATAharvested[locations-1] = 1
harvested = harvested+1 #the bee has harvested at one more flower
}else{ #the flowers are depleted
DATAharvested[locations-1] = 0 #have to reset this here, because it was set to 1 at the end of scouting
}
#end of checking the location the bee arrived to the flower patch at
#do the rest of the harvesting stuff
harvest_time = sum(rexp(2,gamma1)) #how long the bee will stay a harvester for
steps = max(1,floor(harvest_time/(handling_time+deltat))) #how many steps the bee will make
for(j in 2:(steps+1)){ ## the bee diffuses/random walks for a while ##
walk = rnorm(2,0,1) #random distances to travel in x and y directions
grad = flowers_gradient(DATAX[locations-1], DATAY[locations-1]) # "gradient" of flower field at current location
DATAX[locations] = DATAX[locations-1]+sqrt(2*D*deltat)*walk[1] + grad[1]*deltat #where the bee travels to
DATAY[locations] = DATAY[locations-1]+sqrt(2*D*deltat)*walk[2] + grad[2]*deltat #where the bee travels to
Fl = flowers(DATAX[locations], DATAY[locations]) #flower value at the new location
# NOTE(review): bare 'if(Fl>0)' assumes flowers() returns a scalar here; other
# call sites index Fl[1] -- confirm (a length > 1 condition errors on R >= 4.2).
if(Fl>0){ #the bee is in flowers, so check for depletion
distancex_indices = which(abs(DATAX[locations]-DATAXharvested)<depletion_dist) #just the points too close to the location in the x direction
harvestedy = DATAYharvested[distancex_indices] #the corresponding y points
distancey_indices=which(abs(DATAY[locations]-harvestedy)<depletion_dist) #which of those points is also too close in the y direction
depleted = length(distancey_indices) #how many depleted spots the bee is too close to
if(depleted==0){ #the flowers are undepleted
resource[Fl] = resource[Fl] + resource_unit #add resource unit
DATAXharvested[harvested+1] = DATAX[locations] #this is a location where the bee harvested
DATAYharvested[harvested+1] = DATAY[locations]
DATAharvested[locations] = 1 #harvested here
DATAbee_number_harvested[harvested+1] = bee
DATAtype_harvested[harvested+1] = Fl
harvested = harvested+1
}else{ #the flowers are depleted
DATAharvested[locations] = 0 #the bee did not collect resource here
}
}else{ #the bee is not in flowers
DATAharvested[locations] = 0 #the bee did not collect resource here
}
DATAbee_number[locations] = bee #which bee this is
DATAtype[locations] = Fl[1] #which type of flowers it just harvested
DATAcounter[locations] = diffusion_counter #how many times it's been a harvester
locations = locations + 1 #increase 'locations' after every movement of any type
} #end of harvesting movement loop
Nest_Resource = Nest_Resource + resource #store the amount of resource collected in this segment
BeeID[5] = BeeID[5]+sum(resource) #total amount of resource collected on this bout.
if(BeeID[5] >= full){ #the bee is full
BeeID[1] = 2 #become a returner
}else{ #the bee is not full
BeeID[1] = 0 #switch back to beeing a scout
BeeID[4] = Turning4(DATAX[locations-1], DATAY[locations-1],post_harvest_angle) #pick a new angle to travel at as scout
}
Status[i] = BeeID[1]
#done doing the harvesting stuff
}else if(BeeID[1] == 2){ ## what to do if the bee is a returner
#count the bouts complete/incomplete
total_bouts = total_bouts+1
total_collected = total_collected + BeeID[5]
walks = 0 #reset number of scouting steps bee has made
DATAX[locations] = nx #send bee back to nest
DATAY[locations] = ny #send bee back to nest
DATAharvested[locations] = 0 #the bee did not collect resource here
DATAbee_number[locations] = bee #which bee this is
DATAtype[locations] = 0 #no harvesting at the nest
DATAcounter[locations] = diffusion_counter #how many times it's been a harvester
locations = locations + 1 #increase 'locations' after every movement of any type
# Times[i] = sqrt(BeeID[1]^2+BeeID[2]^2)/a #calculate time to get back to nest
BeeID[5] = 0 #empty the resource
BeeID[1] = 0 #make the bee a scout
BeeID[4] = Turning2(nx,ny,Memory(nx,ny,mx,my),nx,ny,max_dist) #new angle based on memory
bouts_counter = bouts_counter+1 #the bee has completed another bout
BeeID[7] = 0 #reset tracker for whether or not bee has found flowers since leaving nest
Status[i] = BeeID[1]
}
#Store/update things
BeeID[2] = DATAX[locations-1] #bee's location
BeeID[3] = DATAY[locations-1] #bee's location
# Theta[i] = BeeID[4] #bee's direction of travel
i = i+1
} #end of movement loops
## --- End of this bee's bouts: record index ranges, extract its track, and ---
## --- mark which blueberry "bushes" (quadrats) it visited. --------------------
end[bee] = locations-1 #don't include the +1 from the very last return to nest
endh[bee] = sum(DATAharvested,na.rm=T)
## Make a matrix of the data for just this bee
start1 = start[bee]
end1 = end[bee]
starth1 = starth[bee]
endh1 = endh[bee]
# Columns: x, y, harvested flag, bee number, flower type (all positions);
# HARVESTED keeps only the harvested points (x, y, bee number, flower type).
DATA = matrix(c(DATAX[start1:end1],DATAY[start1:end1],DATAharvested[start1:end1],DATAbee_number[start1:end1],DATAtype[start1:end1]),ncol=5)
HARVESTED = matrix(c(DATAXharvested[starth1:endh1],DATAYharvested[starth1:endh1],DATAbee_number_harvested[starth1:endh1],DATAtype_harvested[starth1:endh1]),ncol=4)
HARVESTED_blueberry = matrix(HARVESTED[HARVESTED[,4]==2,],ncol=4) #just the rows that are blueberry
# ### Show a plot of where the bee went!
# Xp = DATA[,1]
# Yp = DATA[,2]
# xmin = min(0,min(Xp)) #so that the whole path will show on the plot
# ymin = min(0,min(Yp))
# xmax = max(153,max(Xp))
# ymax = max(225,max(Yp))
#
# s = 1:(end1-1)
# x_plot = seq(x_min,x_max,.1) #just in case
# y_plot = seq(y_min,y_max,.1)
#
# image(x_plot,y_plot,field, xlab = "x", ylab = "y", xlim = c(0,150), ylim = c(0,225),
# col=c("grey85","yellow1","lightcyan"),asp=TRUE)
# segments(Xp[s],Yp[s],Xp[s+1],Yp[s+1], lty=1,col="grey") #add lines for the foraging path
#
# points(HARVESTED[,1],HARVESTED[,2], col = "blue",pch=20,cex=.1) #add the flower visit points
#
# points(mx,my, col = "red",cex=1) #make the memory spot obvious
# points(nx,ny,pch=20,cex=1.5) #make the nest obvious
## Determine which bushes were visited by this bee
# ppp()/quadratcount() come from the spatstat package; W_b and the break vectors
# are defined in the landscape file (per the comment below).
PP <- ppp(x=HARVESTED_blueberry[,1], y=HARVESTED_blueberry[,2],window = W_b) #the planar point pattern of the blueberry visited points
if(length(PP$x)>0){ #if the bee visited any blueberries, count it up
Count = quadratcount(PP, xbreaks = sort(c(left_edges_b,right_edges_b)), ybreaks=ybreaks_b) #uses x and y breaks specific to the field, set in landscape file
Count[which(Count>=1)]=1 #just set the field to 1 if the bee visited it at all
Bees_on_bushes[[bee]] = Count #save the data for this bee
}
} #end of bumble bees loops
#Save the nest resource for this simulation
# NOTE(review): 'nsim' is not set in this file -- presumably the current
# simulation index from the wrapper loop; verify it stays within nrow(NEST_RESOURCE).
NEST_RESOURCE[nsim,] = Nest_Resource
##### Save the points of all bees into a single matrix
DATA_All_Bees_harvested = cbind(DATAXharvested,DATAYharvested,DATAbee_number_harvested,DATAtype_harvested) #only the points where resource was collected
# Subsetting with DATAtype_harvested==2 keeps NA rows (unused tail slots); they
# are removed by the na.omit() calls just below.
DATA_All_Bees_blueberry= DATA_All_Bees_harvested[DATAtype_harvested==2,] #only the points where blueberry was collected
DATA_All_Bees_harvested = na.omit(DATA_All_Bees_harvested) #remove all the extra NAs at the end
DATA_All_Bees_blueberry = na.omit(DATA_All_Bees_blueberry) #remove all the extra NAs at the end
##Count how many bees visited each "bush"
# NOTE(review): bees that never visited blueberries leave NULL gaps in
# Bees_on_bushes (only assigned inside an if above); Reduce("+") over a NULL
# element yields numeric(0) and would corrupt this total -- confirm every bee
# is guaranteed an entry, or filter NULLs before reducing.
Total_Bees_on_Bushes = Reduce("+",Bees_on_bushes) #add together all of the bee field visit counts
##Count how many blueberry flowers were visited (same as above, but blueberry only)
PP_blueberry_visits <- ppp(x=DATA_All_Bees_blueberry[,1], y=DATA_All_Bees_blueberry[,2],window = W_b) #the planar point pattern
# NOTE(review): ybreaks = 0:225 here vs ybreaks_b in the per-bee count above --
# confirm the two quadrat grids are meant to differ.
Count_blueberry_visits = quadratcount(PP_blueberry_visits, xbreaks = sort(c(left_edges_b,right_edges_b)), ybreaks = 0:225)
#the percent of bushes with at least i flower visits (thresholds i = 1 .. max+1);
#the last entry is always 0 because no quadrat exceeds the maximum count.
# Hoist the loop bound once; the original recomputed max() every iteration and
# preallocated numeric(max), so writing index max+1 silently grew the vector.
max_visits = max(Count_blueberry_visits)
proportion3 = numeric(max_visits + 1) #one slot per threshold
for(i in seq_len(max_visits + 1)){ #seq_len is safe even when max_visits + 1 could be 0
proportion3[i] = length(which(Count_blueberry_visits>(i-1)))/(n_bushes)
}
|
## Football data analysis: interactive console menu.
## Dispatch targets (search_list, see_list, compare_data, case4, twitter_graph,
## fifa_ranking) are defined elsewhere in this project.
## '<<-' at top level assigns in the global environment; these globals are
## shared state used by the dispatched functions.
compare_api<<-c()
compare_names<<-c()
compare_tweets<<-c()
twitter_names<<-c()
i<<-0
library(dplyr)
# Semicolon-separated input files; read relative to the working directory.
basic=read.csv("Football.csv",sep = ';')
attri=read.csv("Attributes_head.csv",sep=';')
# Infinite menu loop; option 7 reaches the trailing 'break' in switch() below,
# which exits the while loop.
while(1)
{
cat(" FOOTBALL DATA ANALYSIS\n\n")
cat(" MENU\n\n")
cat(" 1. SEARCH LIST\n")
cat(" 2. SEE LIST\n")
cat(" 3. COMPARE DATA\n")
cat(" 4. CLEAR LIST\n")
cat(" 5. PLAYER TWITTER COMPARISON\n")
cat(" 6. COUNTRY RANK COMPARISON\n")
cat(" 7. EXIT\n\n")
cat(" CHOICE : ")
# NOTE(review): scan() returns a numeric vector; entering more than one number
# (or a non-menu value) makes switch(ch, ...) error -- consider validating ch.
ch=scan()
# Numeric switch: selects the ch-th expression (1 -> search_list, ..., 7 -> break).
switch(ch,search_list(basic,attri),see_list(),compare_data(basic,attri),case4(),twitter_graph(),fifa_ranking(),break)
} | /main.R | no_license | shivambachhety/Data-Analysis--Football | R | false | false | 916 | r | compare_api<<-c()
compare_names<<-c()
compare_tweets<<-c()
twitter_names<<-c()
i<<-0
library(dplyr)
basic=read.csv("Football.csv",sep = ';')
attri=read.csv("Attributes_head.csv",sep=';')
while(1)
{
cat(" FOOTBALL DATA ANALYSIS\n\n")
cat(" MENU\n\n")
cat(" 1. SEARCH LIST\n")
cat(" 2. SEE LIST\n")
cat(" 3. COMPARE DATA\n")
cat(" 4. CLEAR LIST\n")
cat(" 5. PLAYER TWITTER COMPARISON\n")
cat(" 6. COUNTRY RANK COMPARISON\n")
cat(" 7. EXIT\n\n")
cat(" CHOICE : ")
ch=scan()
switch(ch,search_list(basic,attri),see_list(),compare_data(basic,attri),case4(),twitter_graph(),fifa_ranking(),break)
} |
library(shiny)
library(sp)
library(maptools)
library(tidyverse)
library(leaflet)
library(raster)
library(data.table)
library(DT)
# Define server logic required to draw a histogram
shinyServer(function(input, output){
# reactive datasets
sweden_reg <- reactive({
data <- swe_data2
if (input$region != "") data <- swe_data2[swe_data2$NAME_1 == input$region, ]
data
})
sweden_cities <- reactive({
cities_neg <- if (input$negVal){
cities[cities[[input$year]] < 0, ]
} else {
cities
}
cities_neg
})
text_cities <- reactive({
paste0("Locations: ", sweden_cities()$Locations, "<BR>",
input$year, ": ", sweden_cities()[[input$year]])
})
# data output
output$data_sweden <- renderDataTable({
data.frame(swe_data1_tab)
})
output$data_sweden_region <- renderDataTable({
data.frame(swe_data2_tab[swe_data2_tab$NAME_1 == input$region, ])
})
output$data_sweden_cities <- renderDataTable({
sweden_cities()[, c("Locations", "y_2012", "y_2013", "y_2014")]
})
# basic maps
output$maps <- renderLeaflet({
leaflet(swe_data1) %>%
addTiles() %>%
setView(lng = 16.31667, lat = 62.38333, zoom = 5)
})
output$maps_region <- renderLeaflet({
leaflet(swe_data2) %>%
addTiles() %>%
setView(lng = 16.31667, lat = 62.38333, zoom = 5)
})
output$map_cities <- renderLeaflet({
text <- paste0("Locations: ", sweden_cities()$Locations, "<BR>",
input$year, ": ", sweden_cities()[[input$year]])
leaflet(data = sweden_cities()) %>%
addTiles() %>%
setView(lng = 16.31667, lat = 62.38333, zoom = 5) %>%
addMarkers(~long, ~lat, popup = text_cities())
})
## modifying maps sweden
observe({
colorBy <- input$year
colorData <- swe_data1[[colorBy]]
pal <- colorNumeric("Blues", colorData)
text <- paste0("Län: ", swe_data1$NAME_1, "<BR>", input$year, ": ",
swe_data1[[input$year]])
leafletProxy("maps", data = swe_data1) %>%
clearShapes() %>%
clearControls() %>%
addPolygons(
stroke = FALSE, fillOpacity = 0.5, smoothFactor = 0.5,
fillColor = pal(colorData), popup = text
) %>%
addLegend("bottomright", pal = pal, values = colorData, title = colorBy)
})
## modifying maps region
observe({
colorBy <- input$year
colorData <- sweden_reg()[[colorBy]]
pal <- colorNumeric("Blues", colorData)
text <- paste0("Län: ", sweden_reg()$NAME_1, "<BR>",
"Kommuner: ", sweden_reg()$NAME_2, "<BR>",
input$year, ": ", sweden_reg()[[input$year]])
leafletProxy("maps_region", data = sweden_reg()) %>%
clearShapes() %>%
clearControls() %>%
# fitBounds(
# lng1 = min(sweden_reg()$coord1), lat1 = min(sweden_reg()$coord2),
# lng2 = max(sweden_reg()$coord1), lat2 = max(sweden_reg()$coord2)
# )
addPolygons(
stroke = FALSE, fillOpacity = 0.5, smoothFactor = 0.5,
fillColor = pal(colorData), popup = text
) %>%
addLegend("bottomright", pal = pal, values = colorData, title = colorBy) %>%
fitBounds(
lng1 = min(sweden_reg()$coord1), lat1 = min(sweden_reg()$coord2),
lng2 = max(sweden_reg()$coord1), lat2 = max(sweden_reg()$coord2)
)
})
## modifying maps cities
observe({
leafletProxy("map_cities", data = sweden_cities()) %>%
clearMarkers() %>%
addMarkers(~long, ~lat, popup = text_cities())
})
}) | /maps/server.R | no_license | reinholdsson/shiny-server | R | false | false | 3,742 | r | library(shiny)
# Package dependencies for the Shiny map server (library(shiny) precedes this
# block). sp/maptools provide the spatial polygon classes; leaflet the maps.
# NOTE(review): raster, data.table and DT are loaded but not referenced in this
# file -- presumably used by global.R / data preparation; confirm before removing.
library(sp)
library(maptools)
library(tidyverse)
library(leaflet)
library(raster)
library(data.table)
library(DT)
# Define server logic required to draw a histogram
# Shiny server: reactive data filters, data tables, and the initial (static)
# leaflet maps. Data objects (swe_data1, swe_data2, cities, *_tab) are expected
# to be loaded outside this function (e.g. in global.R).
shinyServer(function(input, output){
# reactive datasets
# Kommun-level polygons, optionally restricted to the selected län (region).
sweden_reg <- reactive({
data <- swe_data2
if (input$region != "") data <- swe_data2[swe_data2$NAME_1 == input$region, ]
data
})
# City points, optionally restricted to negative values for the selected year.
sweden_cities <- reactive({
cities_neg <- if (input$negVal){
cities[cities[[input$year]] < 0, ]
} else {
cities
}
cities_neg
})
# HTML popup text for the city markers (shared by render and proxy updates).
text_cities <- reactive({
paste0("Locations: ", sweden_cities()$Locations, "<BR>",
input$year, ": ", sweden_cities()[[input$year]])
})
# data output
output$data_sweden <- renderDataTable({
data.frame(swe_data1_tab)
})
output$data_sweden_region <- renderDataTable({
data.frame(swe_data2_tab[swe_data2_tab$NAME_1 == input$region, ])
})
output$data_sweden_cities <- renderDataTable({
sweden_cities()[, c("Locations", "y_2012", "y_2013", "y_2014")]
})
# basic maps
# These renders only set up the base tiles and view; polygons/legends are added
# afterwards by the observers below via leafletProxy().
output$maps <- renderLeaflet({
leaflet(swe_data1) %>%
addTiles() %>%
setView(lng = 16.31667, lat = 62.38333, zoom = 5)
})
output$maps_region <- renderLeaflet({
leaflet(swe_data2) %>%
addTiles() %>%
setView(lng = 16.31667, lat = 62.38333, zoom = 5)
})
# Initial render of the city-markers map. Popup text comes from the
# text_cities() reactive; the original also computed a local 'text' that
# duplicated text_cities() and was never used -- removed as dead code
# (reactive dependencies on input$year / sweden_cities() are preserved
# transitively through text_cities()).
output$map_cities <- renderLeaflet({
leaflet(data = sweden_cities()) %>%
addTiles() %>%
setView(lng = 16.31667, lat = 62.38333, zoom = 5) %>%
addMarkers(~long, ~lat, popup = text_cities())
})
## modifying maps sweden
# Re-colors the län polygons whenever input$year changes: rebuilds the palette,
# popup text and legend, and pushes them onto the existing map via leafletProxy.
observe({
colorBy <- input$year
colorData <- swe_data1[[colorBy]]
pal <- colorNumeric("Blues", colorData)
text <- paste0("Län: ", swe_data1$NAME_1, "<BR>", input$year, ": ",
swe_data1[[input$year]])
leafletProxy("maps", data = swe_data1) %>%
clearShapes() %>%
clearControls() %>%
addPolygons(
stroke = FALSE, fillOpacity = 0.5, smoothFactor = 0.5,
fillColor = pal(colorData), popup = text
) %>%
addLegend("bottomright", pal = pal, values = colorData, title = colorBy)
})
## modifying maps region
# Same as above for the kommun-level map, plus fitBounds() to zoom to the
# selected region. coord1/coord2 are expected columns on sweden_reg()'s data.
observe({
colorBy <- input$year
colorData <- sweden_reg()[[colorBy]]
pal <- colorNumeric("Blues", colorData)
text <- paste0("Län: ", sweden_reg()$NAME_1, "<BR>",
"Kommuner: ", sweden_reg()$NAME_2, "<BR>",
input$year, ": ", sweden_reg()[[input$year]])
leafletProxy("maps_region", data = sweden_reg()) %>%
clearShapes() %>%
clearControls() %>%
# fitBounds(
# lng1 = min(sweden_reg()$coord1), lat1 = min(sweden_reg()$coord2),
# lng2 = max(sweden_reg()$coord1), lat2 = max(sweden_reg()$coord2)
# )
addPolygons(
stroke = FALSE, fillOpacity = 0.5, smoothFactor = 0.5,
fillColor = pal(colorData), popup = text
) %>%
addLegend("bottomright", pal = pal, values = colorData, title = colorBy) %>%
fitBounds(
lng1 = min(sweden_reg()$coord1), lat1 = min(sweden_reg()$coord2),
lng2 = max(sweden_reg()$coord1), lat2 = max(sweden_reg()$coord2)
)
})
## modifying maps cities
# Refreshes the city markers when the filtered city set or popup text changes.
observe({
leafletProxy("map_cities", data = sweden_cities()) %>%
clearMarkers() %>%
addMarkers(~long, ~lat, popup = text_cities())
})
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\name{linear_regression_description}
\alias{linear_regression_description}
\title{adds the description of the RunTest_linear_regression app}
\usage{
linear_regression_description(language)
}
\arguments{
\item{language}{the language in which the app will be written}
}
\value{
the information line for the program RunTest_linear_regression
}
\description{
adds the description of the RunTest_linear_regression app
}
| /man/linear_regression_description.Rd | no_license | jaropis/shiny-tools | R | false | true | 504 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\name{linear_regression_description}
\alias{linear_regression_description}
\title{adds the description of the RunTest_linear_regression app}
\usage{
linear_regression_description(language)
}
\arguments{
\item{language}{the language in which the app will be written}
}
\value{
the information line for the program RunTest_linear_regression
}
\description{
adds the description of the RunTest_linear_regression app
}
|
HELPrct %>%
summarise(x.bar = mean(age), s = sd(age))
| /inst/snippet/summarise01.R | no_license | rpruim/fastR2 | R | false | false | 58 | r | HELPrct %>%
summarise(x.bar = mean(age), s = sd(age))
|
#' An S4 class for sparse matrix.
#'
#' A minimal coordinate-format (COO) sparse matrix: parallel vectors of row
#' indices, column indices and values, plus the matrix dimensions.
#'
#' @slot i row index
#' @slot j column index
#' @slot x value
#' @slot dims dimensions
#' @name sparse.matrix
#' @rdname sparse.matrix-methods
#' @export sparse.matrix
#' @docType methods
#' @import methods
sparse.matrix <- setClass("sparse.matrix",
                          slots = list(i = "numeric", j = "numeric",
                                       x = "numeric", dims = "numeric"))
# User-facing constructor; deliberately shadows the generator returned by
# setClass() so that `dims` can default to the largest observed indices.
sparse.matrix <- function(i, j, x, dims = NULL){
  # The three coordinate vectors must be parallel (previously a mismatch
  # silently produced an inconsistent object).
  if (length(i) != length(j) || length(i) != length(x))
    stop("`i`, `j` and `x` must have the same length")
  if (is.null(dims)) dims <- c(max(i), max(j))
  new("sparse.matrix", i = i, j = j, x = x, dims = dims)
}
#' t method for sparse.matrix
#'
#' Transpose: swap the row/column index vectors and flip the dimensions.
#'
#' @param x,y,e1,e2,i,j,dims a \code{sparse.matrix} object
#' @docType methods
#' @rdname sparse.matrix-methods
#' @aliases t,sparse.matrix,ANY-method
#' @usage \S4method{t}{sparse.matrix}(x)
#' @inheritParams x from t function
#'
setMethod("t", signature = "sparse.matrix", function(x){
  # New row index = old column index, and vice versa.
  swapped <- data.frame(i = x@j, j = x@i, x = x@x)
  # Keep entries sorted by the (new) row index, as before.
  swapped <- swapped[order(swapped$i), ]
  sparse.matrix(i = swapped$i, j = swapped$j, x = swapped$x,
                dims = c(x@dims[2], x@dims[1]))
})
#' + method for sparse.matrix
#'
#' Element-wise sum of two sparse matrices with identical dimensions,
#' computed via a full outer join on the (i, j) coordinates.
#'
#' @docType methods
#' @rdname sparse.matrix-methods
#' @aliases +,sparse.matrix,sparse.matrix-method
#' @usage \S4method{+}{sparse.matrix,sparse.matrix}(e1,e2)
#' @inheritParams e1,e2 from add function
setMethod("+",
          signature(e1 = "sparse.matrix", e2 = "sparse.matrix"),
          function(e1, e2){
  # Guard clause: dimensions must match exactly.
  if (sum(e1@dims != e2@dims) != 0) stop("Dimensions Error")
  lhs <- data.frame(i = e1@i, j = e1@j, x = e1@x)
  rhs <- data.frame(i = e2@i, j = e2@j, x = e2@x)
  # Outer join keeps coordinates present in either operand; a side that is
  # missing a coordinate contributes zero there.
  both <- merge(lhs, rhs, by = c("i", "j"), all = TRUE, suffixes = c("1", "2"))
  both$x1[is.na(both$x1)] <- 0
  both$x2[is.na(both$x2)] <- 0
  both$x <- both$x1 + both$x2
  both <- both[, c("i", "j", "x")]
  both <- both[order(both$j), ]
  rownames(both) <- (1:nrow(both))
  sparse.matrix(i = both$i, j = both$j, x = both$x,
                dims = c(e1@dims[1], e2@dims[2]))
})
#' %*% method for sparse.matrix
#'
#' Sparse matrix product: join the two coordinate tables on the inner
#' dimension (columns of `x` / rows of `y`), multiply matched values, and
#' sum the products per output cell.
#'
#' @docType methods
#' @rdname sparse.matrix-methods
#' @aliases %*%,sparse.matrix,sparse.matrix-method
#' @usage \S4method{%*%}{sparse.matrix}(x,y)
#' @inheritParams x,y from matrix multiplication function
#'
setMethod("%*%",
          signature(x = "sparse.matrix", y = "sparse.matrix"),
          function(x, y){
  # Inner dimensions must agree.
  if (x@dims[2] != y@dims[1]) stop("Dimensions Error")
  out_dims <- c(x@dims[1], y@dims[2])
  a <- data.frame(i = x@i, k = x@j, ax = x@x)
  b <- data.frame(k = y@i, j = y@j, bx = y@x)
  # All (row, inner-index, col) triples contributing to the product.
  # (The previous element-wise loop relied on vector recycling in
  # comparisons like `a$j == j_i`, which silently mis-aligned values
  # whenever a row/column held more than one non-zero entry.)
  m <- merge(a, b, by = "k")
  if (nrow(m) == 0)
    return(sparse.matrix(i = numeric(0), j = numeric(0), x = numeric(0),
                         dims = out_dims))
  m$xv <- m$ax * m$bx
  res <- aggregate(xv ~ i + j, data = m, FUN = sum)
  res <- res[order(res$j, res$i), ]
  rownames(res) <- seq_len(nrow(res))
  sparse.matrix(i = res$i, j = res$j, x = res$xv, dims = out_dims)
})
# args(getGeneric("t"))
## Another document, sparse-matrix2.R, can also pass the test,
## but it cannot be shown simultaneously with this file.
## Therefore, I will just show that the new S4 class works well.
## Actually, the new S3 class also works well.
| /R/sparse-matrix.R | no_license | daiw3/bis557 | R | false | false | 4,102 | r | #' An S4 class for sparse matrix.
#'
#' @slot i row index
#' @slot j colum index
#' @slot x value
#' @slot dims dimensions
#' @name sparse.matrix
#' @rdname sparse.matrix-methods
#' @export sparse.matrix
#' @docType methods
#' @import methods
sparse.matrix <- setClass("sparse.matrix",
slots=list(i="numeric", j="numeric",
x="numeric",dims = "numeric"))
sparse.matrix<-function(i,j,x,dims=NULL){
if(is.null(dims)){
dims<-c(max(i),max(j))
}
a1<-new("sparse.matrix",i = i, j = j, x = x, dims = dims)
return(a1)
}
#' t method for sparse.matrix
#'
#' @param x,y,e1,e2,i,j,dims a \code{sparse.matrix} object
#' @docType methods
#' @rdname sparse.matrix-methods
#' @aliases t,sparse.matrix,ANY-method
#' @usage \S4method{t}{sparse.matrix}(x)
#' @inheritParams x from t function
#'
setMethod("t", signature = "sparse.matrix",function(x){
a <- data.frame(i = x@i, j = x@j, x = x@x)
temp<-a$i
a$i<-a$j
a$j<-temp
a<-a[order(a$i),]
a <- sparse.matrix(i = a$i, j = a$j, x = a$x,
dims = c(x@dims[2], x@dims[1]))
return(a)
})
#' + method for sparse.matrix
#'
#' @docType methods
#' @rdname sparse.matrix-methods
#' @aliases +,sparse.matrix,sparse.matrix-method
#' @usage \S4method{+}{sparse.matrix,sparse.matrix}(e1,e2)
#' @inheritParams e1,e2 from add function
setMethod("+",
signature(e1= "sparse.matrix", e2 = "sparse.matrix"),
function(e1, e2){
if(sum(e1@dims != e2@dims) == 0){
a <- data.frame(i = e1@i, j = e1@j, x = e1@x)
b <- data.frame(i = e2@i, j = e2@j, x = e2@x)
c <- merge(a, b, by = c("i", "j"), all = TRUE, suffixes = c("1", "2"))
c$x1[is.na(c$x1)] <- 0
c$x2[is.na(c$x2)] <- 0
c$x <- c$x1 + c$x2
c <- c[, c("i", "j", "x")]
c <- c[order(c$j), ]
rownames(c) <- (1:nrow(c))
c <- sparse.matrix(i = c$i, j = c$j, x = c$x,
dims = c(e1@dims[1], e2@dims[2]))
return(c)
}else{
stop("Dimensions Error")
}
})
#' %*% method for sparse.matrix
#'
#' Sparse matrix product: join the two coordinate tables on the inner
#' dimension (columns of `x` / rows of `y`), multiply matched values, and
#' sum the products per output cell.
#'
#' @docType methods
#' @rdname sparse.matrix-methods
#' @aliases %*%,sparse.matrix,sparse.matrix-method
#' @usage \S4method{%*%}{sparse.matrix}(x,y)
#' @inheritParams x,y from matrix multiplication function
#'
setMethod("%*%",
          signature(x = "sparse.matrix", y = "sparse.matrix"),
          function(x, y){
  # Inner dimensions must agree.
  if (x@dims[2] != y@dims[1]) stop("Dimensions Error")
  out_dims <- c(x@dims[1], y@dims[2])
  a <- data.frame(i = x@i, k = x@j, ax = x@x)
  b <- data.frame(k = y@i, j = y@j, bx = y@x)
  # All (row, inner-index, col) triples contributing to the product.
  # (The previous element-wise loop relied on vector recycling in
  # comparisons like `a$j == j_i`, which silently mis-aligned values
  # whenever a row/column held more than one non-zero entry.)
  m <- merge(a, b, by = "k")
  if (nrow(m) == 0)
    return(sparse.matrix(i = numeric(0), j = numeric(0), x = numeric(0),
                         dims = out_dims))
  m$xv <- m$ax * m$bx
  res <- aggregate(xv ~ i + j, data = m, FUN = sum)
  res <- res[order(res$j, res$i), ]
  rownames(res) <- seq_len(nrow(res))
  sparse.matrix(i = res$i, j = res$j, x = res$xv, dims = out_dims)
})
# args(getGeneric("t"))
## Another document sparse-matrix2.R can also passed the test
## but it cannot be shown simultaneouly with this file.
## Therefore, I will just show that the new S4 class works well.
## Actually, the new S3 class also works well.
|
# Validity function for the PipelineDefinition class: checks that the
# per-step lists are consistent with `functions` and that step arguments do
# not clash. Returns TRUE when valid, otherwise a character vector of
# problem messages -- the contract expected by methods::setClass(validity=).
.validatePipelineDef <- function(object){
  e <- c()
  if(!is.list(object@functions) || !all(sapply(object@functions, is.function)))
    e <- c("`functions` should be a (named) list of functions!")
  if(!all(sapply(object@functions, FUN=function(x) "x" %in% names(formals(x)))))
    e <- c(e, "Each function should at least take the argument `x`.")
  isf <- function(x) is.null(x) || is.function(x)
  # Accumulated as messages: previously these two checks stop()ed, which is
  # inconsistent with the other checks and yields a less informative error
  # from validObject().
  if(!is.list(object@aggregation) || !all(sapply(object@aggregation, isf)))
    e <- c(e, "`aggregation` should be a list of functions and/or NULL slots!")
  if(!is.list(object@evaluation) || !all(sapply(object@evaluation, isf)))
    e <- c(e, "`evaluation` should be a list of functions and/or NULL slots!")
  if(!all(names(object@descriptions)==names(object@functions)))
    e <- c(e, "descriptions do not match functions.")
  if(!all(names(object@evaluation)==names(object@functions)))
    e <- c(e, "evaluation do not match functions.")
  if(!all(names(object@aggregation)==names(object@functions)))
    e <- c(e, "aggregation do not match functions.")
  # Arguments other than `x` must be unique across steps (they double as
  # pipeline-wide parameter names).
  args <- unlist( lapply( object@functions,
                          FUN=function(x){ setdiff(names(formals(x)), "x") }) )
  if(any(duplicated(args))) e <- c(e, paste("Some arguments (beside `x`) is",
    "used in more than one step, which is not currently supported."))
  if(length( wa <- setdiff(names(object@defaultArguments),args) )>0)
    e <- c(e, paste("The following default arguments are not in the pipeline's
functions:", paste(wa, collapse=", ")))
  if(length(e) == 0) TRUE else e
}
#' @import methods
#' @exportClass PipelineDefinition
# Formal class backing PipelineDefinition objects. The per-step slots
# (functions, descriptions, evaluation, aggregation) are parallel named
# lists; `initiation` is a single dataset-preparation function;
# `defaultArguments` and `misc` are free-form lists. Consistency between
# the slots is enforced by .validatePipelineDef().
setClass( "PipelineDefinition",
          slots=representation( functions="list", descriptions="list",
                                evaluation="list", aggregation="list",
                                initiation="function",
                                defaultArguments="list", misc="list" ),
          prototype=prototype( functions=list(), descriptions=list(),
                               evaluation=list(), aggregation=list(),
                               initiation=identity,
                               defaultArguments=list(), misc=list() ),
          validity=.validatePipelineDef )
#' PipelineDefinition
#'
#' Creates on object of class `PipelineDefinition` containing step functions,
#' as well as optionally step evaluation and aggregation functions.
#'
#' @param functions A list of functions for each step
#' @param descriptions A list of descriptions for each step
#' @param evaluation A list of optional evaluation functions for each step
#' @param aggregation A list of optional aggregation functions for each step
#' @param initiation A function ran when initiating a dataset
#' @param defaultArguments A list of optional default arguments
#' @param misc A list of whatever.
#' @param verbose Whether to output additional warnings (default TRUE).
#'
#' @return An object of class `PipelineDefinition`, with the slots functions,
#' descriptions, evaluation, aggregation, defaultArguments, and misc.
#'
#' @aliases PipelineDefinition-class
#' @seealso \code{\link{PipelineDefinition-methods}}, \code{\link{addPipelineStep}}.
#' For an example pipeline, see \code{\link{scrna_pipeline}}.
#' @export
#' @examples
#' PipelineDefinition(
#'   list( step1=function(x, meth1){ get(meth1)(x) },
#'         step2=function(x, meth2){ get(meth2)(x) } )
#' )
PipelineDefinition <- function( functions, descriptions=NULL, evaluation=NULL,
                                aggregation=NULL, initiation=identity,
                                defaultArguments=list(),
                                misc=list(), verbose=TRUE ){
  if(!is.list(functions) || !all(sapply(functions, is.function)))
    stop("`functions` should be a (named) list of functions!")
  # Unnamed pipelines get auto-generated step names (seq_along is safe for
  # an empty list, unlike 1:length()).
  n <- names(functions)
  if(is.null(n)) n <- names(functions) <- paste0("step", seq_along(functions))
  # Normalise the optional per-step lists to the same length/names as
  # `functions` (NULL expands to a list of NULLs).
  descriptions <- .checkInputList(descriptions, functions, FALSE)
  evaluation <- .checkInputList(evaluation, functions)
  aggregation2 <- .checkInputList(aggregation, functions)
  names(aggregation2)<-names(evaluation)<-names(descriptions)<-names(functions)
  # Steps with an evaluation but no user-specified aggregation fall back to
  # the package default aggregation.
  for(f in names(aggregation2)){
    if(is.null(aggregation2[[f]]) && !is.null(evaluation[[f]]) &&
       !(f %in% names(aggregation)))
      aggregation2[[f]] <- defaultStepAggregation
  }
  if(is.null(misc)) misc <- list()
  x <- new("PipelineDefinition", functions=functions, descriptions=descriptions,
           evaluation=evaluation, aggregation=aggregation2, initiation=initiation,
           defaultArguments=defaultArguments, misc=misc)
  # Warn about steps that aggregate without evaluating -- possibly the step
  # function itself produces the evaluation.
  w <- which( !sapply(x@aggregation,is.null) &
                sapply(x@evaluation,is.null) )
  if(verbose && length(w)>0){
    warning(paste("An aggregation is defined for some steps that do not have",
                  "a defined evaluation function: ",
                  paste(names(x@functions)[w], collapse=", "),
                  "It is possible that evaluation is performed by the step's",
                  "function itself.") )
  }
  x
}
# Normalise an optional per-step list `x` against the step list `fns`:
# NULL expands to a list of NULLs (one per step); a shorter named list is
# padded with NULLs and reordered to match `fns`; names, if present, must
# match the step names exactly. When `containsFns` is TRUE, every non-NULL
# element must be a function.
.checkInputList <- function( x, fns, containsFns=TRUE,
                             name=deparse(substitute(x)) ){
  name <- paste0("`",name,"`")
  if(is.null(x)){
    # Nothing supplied: one NULL slot per step.
    x <- lapply(fns, FUN=function(x) NULL)
  } else {
    if(length(x) != length(fns)){
      if(is.null(names(x)))
        stop("If ", name, " does not have the same length as the number of ",
             "steps, its slots should be named.")
      unknown <- setdiff(names(x), names(fns))
      if(length(unknown) > 0)
        stop("Some elements of ",name," (",paste(unknown,collapse=", "),")",
             "are unknown.")
      # Pad to full length, in step order.
      filled <- lapply(names(fns), FUN=function(f){
        if(is.null(x[[f]])) return(NULL)
        x[[f]]
      })
      names(filled) <- names(fns)
      x <- filled
    }
    if(!is.null(names(x))){
      if(!all(names(x) == names(fns)))
        stop("The names of ",name," should match those of `functions`")
    }
  }
  if( containsFns &&
      !all(sapply(x, FUN=function(el) is.null(el) || is.function(el))) )
    stop(name," should be a list of functions")
  x
}
#' Methods for \code{\link{PipelineDefinition}} class
#' @name PipelineDefinition-methods
#' @rdname PipelineDefinition-methods
#' @aliases PipelineDefinition-method
#' @seealso \code{\link{PipelineDefinition}}, \code{\link{addPipelineStep}}
#' @param object An object of class \code{\link{PipelineDefinition}}
NULL
#' @rdname PipelineDefinition-methods
#' @importMethodsFrom methods show
#' @importFrom knitr opts_current
# Pretty-printer: one line per step, showing the step's signature with any
# default arguments spelled out, a `*` when the step has an evaluation or
# aggregation, and the step description below it.
setMethod("show", signature("PipelineDefinition"), function(object){
  # colors and bold are going to trigger errors when rendered in a knit, so
  # we disable them when rendering
  isKnit <- tryCatch( isTRUE(getOption('knitr.in.progress')) ||
                        length(knitr::opts_current$get())>0,
                      error=function(e) FALSE)
  fns <- sapply(names(object@functions), FUN=function(x){
    x2 <- x
    # \033[1m / \033[22m: bold on/off (only outside knitr).
    if(!isKnit) x2 <- paste0("\033[1m",x,"\033[22m")
    # Render each argument, appending "=default" when a default is defined.
    y <- sapply( names(formals(object@functions[[x]])), FUN=function(n){
      if(!is.null(def <- object@defaultArguments[[n]]))
        n <- paste0(n,"=",deparse(def,100,FALSE))
      n
    })
    y <- paste0(" - ", x2, "(", paste(y, collapse=", "), ")")
    # Blue asterisk marks steps with an evaluation and/or aggregation.
    if(!is.null(object@evaluation[[x]]) || !is.null(object@aggregation[[x]]))
      y <- paste0(y, ifelse(isKnit, " * ", " \033[34m*\033[39m "))
    if(!is.null(object@descriptions[[x]])){
      x2 <- object@descriptions[[x]]
      # \033[3m / \033[23m: italics on/off.
      if(!isKnit) x2 <- paste0("\033[3m",x2,"\033[23m")
      y <- paste(y, x2, sep="\n")
    }
    y
  })
  cat("A PipelineDefinition object with the following steps:\n")
  cat(paste(fns,collapse="\n"))
  cat("\n")
})
#' @rdname PipelineDefinition-methods
#' @param x An object of class \code{\link{PipelineDefinition}}
# Step names are the names of the `functions` slot.
setMethod("names", signature("PipelineDefinition"), function(x){
  names(x@functions)
})
#' @rdname PipelineDefinition-methods
# Renaming renames every per-step slot in lockstep, then re-validates.
setMethod("names<-", signature("PipelineDefinition"), function(x, value){
  if(any(duplicated(value))) stop("Some step names are duplicated!")
  names(x@functions) <- value
  names(x@evaluation) <- value
  names(x@aggregation) <- value
  names(x@descriptions) <- value
  validObject(x)
  x
})
#' @rdname PipelineDefinition-methods
# `pd$step` returns that step's function.
setMethod("$", signature("PipelineDefinition"), function(x, name){
  x@functions[[name]]
})
#' @rdname PipelineDefinition-methods
# Number of steps.
setMethod("length", signature("PipelineDefinition"), function(x){
  length(x@functions)
})
#' @rdname PipelineDefinition-methods
# Subset the pipeline to a subset of steps.
# NOTE(review): the new object does not carry over `initiation` or
# `defaultArguments` (both reset to the class prototype) -- confirm whether
# this is intentional.
setMethod("[",signature("PipelineDefinition"), function(x, i){
  new("PipelineDefinition", functions=x@functions[i],
      descriptions=x@descriptions[i], evaluation=x@evaluation[i],
      aggregation=x@aggregation[i], misc=x@misc)
})
#' @rdname PipelineDefinition-methods
setMethod("as.list",signature("PipelineDefinition"), function(x){
  x@functions
})
#' @exportMethod arguments
setGeneric("arguments", function(object) args(object))
#' @rdname PipelineDefinition-methods
# Per-step argument names, excluding the mandatory `x`.
setMethod("arguments",signature("PipelineDefinition"), function(object){
  lapply(object@functions, FUN=function(x){ setdiff(names(formals(x)), "x") })
})
#' @exportMethod defaultArguments
setGeneric("defaultArguments", function(object) NULL)
#' @exportMethod defaultArguments<-
setGeneric("defaultArguments<-", function(object, value) NULL)
#' @rdname PipelineDefinition-methods
setMethod("defaultArguments",signature("PipelineDefinition"), function(object){
  object@defaultArguments
})
#' @rdname PipelineDefinition-methods
# Replacing the defaults re-validates (defaults must match step arguments).
setMethod( "defaultArguments<-",signature("PipelineDefinition"),
           function(object, value){
             object@defaultArguments <- value
             validObject(object)
             object
           })
#' @exportMethod stepFn
setGeneric("stepFn", function(object, step, type) standardGeneric("stepFn"))
#' @param step The name of the step for which to set or get the function
#'   (ignored when `type="initiation"`, which is pipeline-wide)
#' @param type The type of function to set/get, either `functions`,
#'   `evaluation`, `aggregation`, `descriptions`, or `initiation` (will parse
#'   partial matches)
#' @rdname PipelineDefinition-methods
setMethod("stepFn", signature("PipelineDefinition"), function(object, step, type){
  # "initiation" was documented (and accepted by `stepFn<-`) but missing
  # from the getter's choices; it is pipeline-wide, so no step lookup.
  type <- match.arg(type, c("functions","evaluation","aggregation",
                            "descriptions","initiation"))
  if(type=="initiation") return(slot(object, type))
  step <- match.arg(step, names(object))
  slot(object, type)[[step]]
})
#' @exportMethod stepFn<-
setGeneric("stepFn<-", function(object, step, type, value) standardGeneric("stepFn<-"))
#' @rdname PipelineDefinition-methods
# Replacement form: sets a step's function/evaluation/aggregation/description,
# or the pipeline-wide `initiation` function (for which `step` is ignored).
# NOTE(review): unlike `names<-`, the result is not re-validated with
# validObject() -- confirm whether that is intentional.
setMethod("stepFn<-", signature("PipelineDefinition"), function(object, step, type, value){
  type <- match.arg(type, c("functions","evaluation","aggregation","descriptions","initiation"))
  # Descriptions are plain text; everything else must be a function.
  if(type!="descriptions" && !is.function(value))
    stop("Replacement value should be a function.")
  if(type=="initiation"){
    slot(object, type) <- value
  }else{
    step <- match.arg(step, names(object))
    slot(object, type)[[step]] <- value
  }
  object
})
#' addPipelineStep
#'
#' Add a step to an existing \code{\link{PipelineDefinition}}
#'
#' @param object A \code{\link{PipelineDefinition}}
#' @param name The name of the step to add
#' @param after The name of the step after which to add the new step. If NULL, will
#' add the step at the beginning of the pipeline.
#' @param slots A optional named list with slots to fill for that step (i.e. `functions`,
#' `evaluation`, `aggregation`, `descriptions` - will be parsed)
#'
#' @return A \code{\link{PipelineDefinition}}
#' @seealso \code{\link{PipelineDefinition}}, \code{\link{PipelineDefinition-methods}}
#' @importFrom methods is slot
#' @export
#'
#' @examples
#' pd <- mockPipeline()
#' pd
#' pd <- addPipelineStep(pd, name="newstep", after="step1",
#'                       slots=list(description="Step that does nothing..."))
#' pd
addPipelineStep <- function(object, name, after=NULL, slots=list()){
  if(!is(object, "PipelineDefinition")) stop("object should be a PipelineDefinition")
  if(name %in% names(object)) stop("There is already a step with that name!")
  if(!is.null(after) && !(after %in% names(object)))
    stop("`after` should either be null or the name of a step.")
  n <- c("functions","evaluation","aggregation","descriptions")
  # Expand partially-specified slot names (e.g. "desc") to their full form.
  if(length(slots)>0) names(slots) <- sapply(names(slots), choices=n, FUN=match.arg)
  # (Error message fixed: it previously referred to `fns`, which is not an
  # argument of this function.)
  if(!all(names(slots) %in% n)) stop( paste("`slots` should be a list",
    "with one or more of the following names:\n", paste(n,collapse=", ")) )
  # i1/i2: indices of the steps before/after the insertion point.
  if(is.null(after)){
    i1 <- vector("integer")
    i2 <- seq_along(names(object))
  }else{
    w <- which(names(object)==after)
    i1 <- 1:w
    i2 <- (w+1):length(object)
    if(w==length(object)) i2 <- vector("integer")
  }
  ll <- list(NULL)
  names(ll) <- name
  # Insert an empty slot for the new step in every per-step list, then fill
  # in whatever the caller provided.
  for(f in n) slot(object,f) <- c(slot(object,f)[i1], ll, slot(object,f)[i2])
  for(f in names(slots)) stepFn(object, name, f) <- slots[[f]]
  # A step must at least have a function; default to identity.
  if(is.null(stepFn(object, name, "functions")))
    stepFn(object, name, "functions") <- identity
  validObject(object)
  object
}
#' mockPipeline
#'
#' A mock `PipelineDefinition` for use in examples.
#'
#' @return a `PipelineDefinition`
#' @export
#'
#' @examples
#' mockPipeline()
mockPipeline <- function(){
  PipelineDefinition(
    list( step1=function(x, meth1){ get(meth1)(x) },
          step2=function(x, meth2){ get(meth2)(x) } ),
    evaluation=list( step2=function(x) c(mean=mean(x), max=max(x)) ),
    # `descriptions` spelled out in full: the previous `description=` relied
    # on partial argument matching.
    descriptions=list( step1="This steps applies meth1 to x.",
                       step2="This steps applies meth2 to x."),
    defaultArguments=list(meth1=c("log","sqrt"), meth2="cumsum")
  )
}
| /R/PipelineDefinition.R | no_license | pythseq/pipeComp | R | false | false | 13,524 | r | .validatePipelineDef <- function(object){
e <- c()
if(!is.list(object@functions) || !all(sapply(object@functions, is.function)))
e <- c("`functions` should be a (named) list of functions!")
if(!all(sapply(object@functions, FUN=function(x) "x" %in% names(formals(x)))))
e <- c(e, "Each function should at least take the argument `x`.")
isf <- function(x) is.null(x) || is.function(x)
if(!is.list(object@aggregation) || !all(sapply(object@aggregation, isf)))
stop("`aggregation` should be a list of functions and/or NULL slots!")
if(!is.list(object@evaluation) || !all(sapply(object@evaluation, isf)))
stop("`evaluation` should be a list of functions and/or NULL slots!")
if(!all(names(object@descriptions)==names(object@functions)))
e <- c(e, "descriptions do not match functions.")
if(!all(names(object@evaluation)==names(object@functions)))
e <- c(e, "evaluation do not match functions.")
if(!all(names(object@aggregation)==names(object@functions)))
e <- c(e, "aggregation do not match functions.")
args <- unlist( lapply( object@functions,
FUN=function(x){ setdiff(names(formals(x)), "x") }) )
if(any(duplicated(args))) e <- c(e, paste("Some arguments (beside `x`) is",
"used in more than one step, which is not currently supported."))
if(length( wa <- setdiff(names(object@defaultArguments),args) )>0)
e <- c(e, paste("The following default arguments are not in the pipeline's
functions:", paste(wa, collapse=", ")))
if(length(e) == 0) TRUE else e
}
#' @import methods
#' @exportClass PipelineDefinition
setClass( "PipelineDefinition",
slots=representation( functions="list", descriptions="list",
evaluation="list", aggregation="list",
initiation="function",
defaultArguments="list", misc="list" ),
prototype=prototype( functions=list(), descriptions=list(),
evaluation=list(), aggregation=list(),
initiation=identity,
defaultArguments=list(), misc=list() ),
validity=.validatePipelineDef )
#' PipelineDefinition
#'
#' Creates on object of class `PipelineDefinition` containing step functions,
#' as well as optionally step evaluation and aggregation functions.
#'
#' @param functions A list of functions for each step
#' @param descriptions A list of descriptions for each step
#' @param evaluation A list of optional evaluation functions for each step
#' @param aggregation A list of optional aggregation functions for each step
#' @param initiation A function ran when initiating a dataset
#' @param defaultArguments A lsit of optional default arguments
#' @param misc A list of whatever.
#' @param verbose Whether to output additional warnings (default TRUE).
#'
#' @return An object of class `PipelineDefinition`, with the slots functions,
#' descriptions, evaluation, aggregation, defaultArguments, and misc.
#'
#' @aliases PipelineDefinition-class
#' @seealso \code{\link{PipelineDefinition-methods}}, \code{\link{addPipelineStep}}.
#' For an example pipeline, see \code{\link{scrna_pipeline}}.
#' @export
#' @examples
#' PipelineDefinition(
#' list( step1=function(x, meth1){ get(meth1)(x) },
#' step2=function(x, meth2){ get(meth2)(x) } )
#' )
PipelineDefinition <- function( functions, descriptions=NULL, evaluation=NULL,
aggregation=NULL, initiation=identity,
defaultArguments=list(),
misc=list(), verbose=TRUE ){
if(!is.list(functions) || !all(sapply(functions, is.function)))
stop("`functions` should be a (named) list of functions!")
n <- names(functions)
if(is.null(n)) n <- names(functions) <- paste0("step",1:length(functions))
descriptions <- .checkInputList(descriptions, functions, FALSE)
evaluation <- .checkInputList(evaluation, functions)
aggregation2 <- .checkInputList(aggregation, functions)
names(aggregation2)<-names(evaluation)<-names(descriptions)<-names(functions)
for(f in names(aggregation2)){
if(is.null(aggregation2[[f]]) && !is.null(evaluation[[f]]) &&
!(f %in% names(aggregation)))
aggregation2[[f]] <- defaultStepAggregation
}
if(is.null(misc)) misc <- list()
x <- new("PipelineDefinition", functions=functions, descriptions=descriptions,
evaluation=evaluation, aggregation=aggregation2, initiation=initiation,
defaultArguments=defaultArguments, misc=misc)
w <- which( !sapply(x@aggregation,is.null) &
sapply(x@evaluation,is.null) )
if(verbose && length(w)>0){
warning(paste("An aggregation is defined for some steps that do not have",
"a defined evaluation function: ",
paste(names(x@functions)[w], collapse=", "),
"It is possible that evaluation is performed by the step's",
"function itself.") )
}
x
}
.checkInputList <- function( x, fns, containsFns=TRUE,
name=deparse(substitute(x)) ){
name <- paste0("`",name,"`")
if(!is.null(x)){
if(length(x)!=length(fns)){
if(is.null(names(x)))
stop("If ", name, " does not have the same length as the number of ",
"steps, its slots should be named.")
if(length(unknown <- setdiff(names(x),names(fns)))>0)
stop("Some elements of ",name," (",paste(unknown,collapse=", "),")",
"are unknown.")
x <- lapply(names(fns), FUN=function(f){
if(is.null(x[[f]])) return(NULL)
x[[f]]
})
names(x) <- names(fns)
}
if( !is.null(names(x)) ){
if(!all(names(x)==names(fns)) )
stop("The names of ",name," should match those of `functions`")
}
}else{
x <- lapply(fns,FUN=function(x) NULL)
}
if( containsFns &&
!all(sapply(x, FUN=function(x) is.null(x) || is.function(x))) )
stop(name," should be a list of functions")
x
}
#' Methods for \code{\link{PipelineDefinition}} class
#' @name PipelineDefinition-methods
#' @rdname PipelineDefinition-methods
#' @aliases PipelineDefinition-method
#' @seealso \code{\link{PipelineDefinition}}, \code{\link{addPipelineStep}}
#' @param object An object of class \code{\link{PipelineDefinition}}
NULL
#' @rdname PipelineDefinition-methods
#' @importMethodsFrom methods show
#' @importFrom knitr opts_current
setMethod("show", signature("PipelineDefinition"), function(object){
# colors and bold are going to trigger errors when rendered in a knit, so
# we disable them when rendering
isKnit <- tryCatch( isTRUE(getOption('knitr.in.progress')) ||
length(knitr::opts_current$get())>0,
error=function(e) FALSE)
fns <- sapply(names(object@functions), FUN=function(x){
x2 <- x
if(!isKnit) x2 <- paste0("\033[1m",x,"\033[22m")
y <- sapply( names(formals(object@functions[[x]])), FUN=function(n){
if(!is.null(def <- object@defaultArguments[[n]]))
n <- paste0(n,"=",deparse(def,100,FALSE))
n
})
y <- paste0(" - ", x2, "(", paste(y, collapse=", "), ")")
if(!is.null(object@evaluation[[x]]) || !is.null(object@aggregation[[x]]))
y <- paste0(y, ifelse(isKnit, " * ", " \033[34m*\033[39m "))
if(!is.null(object@descriptions[[x]])){
x2 <- object@descriptions[[x]]
if(!isKnit) x2 <- paste0("\033[3m",x2,"\033[23m")
y <- paste(y, x2, sep="\n")
}
y
})
cat("A PipelineDefinition object with the following steps:\n")
cat(paste(fns,collapse="\n"))
cat("\n")
})
#' @rdname PipelineDefinition-methods
#' @param x An object of class \code{\link{PipelineDefinition}}
setMethod("names", signature("PipelineDefinition"), function(x){
names(x@functions)
})
#' @rdname PipelineDefinition-methods
setMethod("names<-", signature("PipelineDefinition"), function(x, value){
if(any(duplicated(value))) stop("Some step names are duplicated!")
names(x@functions) <- value
names(x@evaluation) <- value
names(x@aggregation) <- value
names(x@descriptions) <- value
validObject(x)
x
})
#' @rdname PipelineDefinition-methods
setMethod("$", signature("PipelineDefinition"), function(x, name){
x@functions[[name]]
})
#' @rdname PipelineDefinition-methods
setMethod("length", signature("PipelineDefinition"), function(x){
length(x@functions)
})
#' @rdname PipelineDefinition-methods
setMethod("[",signature("PipelineDefinition"), function(x, i){
new("PipelineDefinition", functions=x@functions[i],
descriptions=x@descriptions[i], evaluation=x@evaluation[i],
aggregation=x@aggregation[i], misc=x@misc)
})
#' @rdname PipelineDefinition-methods
setMethod("as.list",signature("PipelineDefinition"), function(x){
x@functions
})
#' @exportMethod arguments
setGeneric("arguments", function(object) args(object))
#' @rdname PipelineDefinition-methods
setMethod("arguments",signature("PipelineDefinition"), function(object){
lapply(object@functions, FUN=function(x){ setdiff(names(formals(x)), "x") })
})
#' @exportMethod defaultArguments
setGeneric("defaultArguments", function(object) NULL)
#' @exportMethod defaultArguments<-
setGeneric("defaultArguments<-", function(object, value) NULL)
#' @rdname PipelineDefinition-methods
setMethod("defaultArguments",signature("PipelineDefinition"), function(object){
object@defaultArguments
})
#' @rdname PipelineDefinition-methods
setMethod( "defaultArguments<-",signature("PipelineDefinition"),
function(object, value){
object@defaultArguments <- value
validObject(object)
object
})
#' @exportMethod stepFn
setGeneric("stepFn", function(object, step, type) standardGeneric("stepFn"))
#' @param step The name of the step for which to set or get the function
#'   (ignored when `type="initiation"`, which is pipeline-wide)
#' @param type The type of function to set/get, either `functions`,
#'   `evaluation`, `aggregation`, `descriptions`, or `initiation` (will parse
#'   partial matches)
#' @rdname PipelineDefinition-methods
setMethod("stepFn", signature("PipelineDefinition"), function(object, step, type){
  # "initiation" was documented (and accepted by `stepFn<-`) but missing
  # from the getter's choices; it is pipeline-wide, so no step lookup.
  type <- match.arg(type, c("functions","evaluation","aggregation",
                            "descriptions","initiation"))
  if(type=="initiation") return(slot(object, type))
  step <- match.arg(step, names(object))
  slot(object, type)[[step]]
})
#' @exportMethod stepFn<-
setGeneric("stepFn<-", function(object, step, type, value) standardGeneric("stepFn<-"))
#' @rdname PipelineDefinition-methods
setMethod("stepFn<-", signature("PipelineDefinition"), function(object, step, type, value){
type <- match.arg(type, c("functions","evaluation","aggregation","descriptions","initiation"))
if(type!="descriptions" && !is.function(value))
stop("Replacement value should be a function.")
if(type=="initiation"){
slot(object, type) <- value
}else{
step <- match.arg(step, names(object))
slot(object, type)[[step]] <- value
}
object
})
#' addPipelineStep
#'
#' Add a step to an existing \code{\link{PipelineDefinition}}
#'
#' @param object A \code{\link{PipelineDefinition}}
#' @param name The name of the step to add
#' @param after The name of the step after which to add the new step. If NULL, will
#' add the step at the beginning of the pipeline.
#' @param slots A optional named list with slots to fill for that step (i.e. `functions`,
#' `evaluation`, `aggregation`, `descriptions` - will be parsed)
#'
#' @return A \code{\link{PipelineDefinition}}
#' @seealso \code{\link{PipelineDefinition}}, \code{\link{PipelineDefinition-methods}}
#' @importFrom methods is slot
#' @export
#'
#' @examples
#' pd <- mockPipeline()
#' pd
#' pd <- addPipelineStep(pd, name="newstep", after="step1",
#' slots=list(description="Step that does nothing..."))
#' pd
addPipelineStep <- function(object, name, after=NULL, slots=list()){
if(!is(object, "PipelineDefinition")) stop("object should be a PipelineDefinition")
if(name %in% names(object)) stop("There is already a step with that name!")
if(!is.null(after) && !(after %in% names(object)))
stop("`after` should either be null or the name of a step.")
n <- c("functions","evaluation","aggregation","descriptions")
if(length(slots)>0) names(slots) <- sapply(names(slots), choices=n, FUN=match.arg)
if(!all(names(slots) %in% n)) stop( paste("fns should be a function or a list",
"with one or more of the following names:\n", paste(n,collapse=", ")) )
if(is.null(after)){
i1 <- vector("integer")
i2 <- seq_along(names(object))
}else{
w <- which(names(object)==after)
i1 <- 1:w
i2 <- (w+1):length(object)
if(w==length(object)) i2 <- vector("integer")
}
ll <- list(NULL)
names(ll) <- name
for(f in n) slot(object,f) <- c(slot(object,f)[i1], ll, slot(object,f)[i2])
for(f in names(slots)) stepFn(object, name, f) <- slots[[f]]
if(is.null(stepFn(object, name, "functions")))
stepFn(object, name, "functions") <- identity
validObject(object)
object
}
#' mockPipeline
#'
#' A mock `PipelineDefinition` for use in examples.
#'
#' @return a `PipelineDefinition`
#' @export
#'
#' @examples
#' mockPipeline()
mockPipeline <- function(){
  PipelineDefinition(
    list( step1=function(x, meth1){ get(meth1)(x) },
          step2=function(x, meth2){ get(meth2)(x) } ),
    evaluation=list( step2=function(x) c(mean=mean(x), max=max(x)) ),
    # `descriptions` spelled out in full: the previous `description=` relied
    # on partial argument matching.
    descriptions=list( step1="This steps applies meth1 to x.",
                       step2="This steps applies meth2 to x."),
    defaultArguments=list(meth1=c("log","sqrt"), meth2="cumsum")
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AI2-case.R
\name{f_protein}
\alias{f_protein}
\title{function for protein concentration}
\usage{
f_protein(para, extra)
}
\arguments{
\item{para}{numeric. unknown parameters}
\item{extra}{numeric, given parameters}
}
\value{
function. returns \eqn{y} when giving \eqn{x} as argument
}
\description{
function for protein concentration
}
\examples{
NULL
}
\seealso{
Other AI-2 case functions:
\code{\link{f_AI2_out}()}
}
\concept{AI-2 case functions}
| /man/f_protein.Rd | no_license | dongzhuoer/bisecpp | R | false | true | 530 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AI2-case.R
\name{f_protein}
\alias{f_protein}
\title{function for protein concentration}
\usage{
f_protein(para, extra)
}
\arguments{
\item{para}{numeric. unknown parameters}
\item{extra}{numeric, given parameters}
}
\value{
function. returns \eqn{y} when giving \eqn{x} as argument
}
\description{
function for protein concentration
}
\examples{
NULL
}
\seealso{
Other AI-2 case functions:
\code{\link{f_AI2_out}()}
}
\concept{AI-2 case functions}
|
# Deployment assets of the Microsoft "Product-Recommendations" (SAR) web app,
# fetched from the upstream GitHub repository.
# ARM deployment template (resources.json) for the recommendations web app:
sar_template <- "https://raw.githubusercontent.com/Microsoft/Product-Recommendations/master/saw/recommendationswebapp/core/arm/resources.json"
# Pre-built web-app package (Recommendations.WebApp.zip):
sar_dll <- "https://github.com/Microsoft/Product-Recommendations/raw/master/saw/recommendationswebapp/assets/Recommendations.WebApp.zip"
| /fuzzedpackages/SAR/R/az_uris.R | no_license | akhikolla/testpackages | R | false | false | 284 | r | sar_template <- "https://raw.githubusercontent.com/Microsoft/Product-Recommendations/master/saw/recommendationswebapp/core/arm/resources.json"
sar_dll <- "https://github.com/Microsoft/Product-Recommendations/raw/master/saw/recommendationswebapp/assets/Recommendations.WebApp.zip"
|
# One-off data-migration script for ea.fflch.usp.br (Drupal d6/d7 -> d8).
# Reads an exported CSV of "verbetes" (encyclopedia entries), reshapes the
# columns into the format expected by the Drupal importer, and writes the
# result to a new CSV.
rm(list=ls())
library(stringr)
library(splitstackshape)
setwd("~/repos/scripts/migracao-drupal-d6-d7-d8/ea.fflch.usp.br/")
# "Agenda de Defesas" content type (identifier: agenda_de_defesas)
df = read.csv("./exportverbetes.csv", stringsAsFactors = F)
nrow(df)
# trim whitespace in every column:
df <- data.frame(lapply(df, trimws), stringsAsFactors = FALSE)
# Could not find a way to migrate the field
# "references made in the entry", so drop it (column 10).
# NOTE(review): dropping by position is brittle; assumes a fixed CSV layout.
df = df[,-10]
# Rich-text fields: rename each to the "field|value" convention used by the
# importer and add a companion "field|format" column set to 'full_html'.
df['field_autoria|format']='full_html'
colnames(df)[names(df) == "field_autoria"] = 'field_autoria|value'
df['field_bibliografia|format']='full_html'
colnames(df)[names(df) == "field_bibliografia"] = 'field_bibliografia|value'
df['field_verbete|format']='full_html'
colnames(df)[names(df) == "field_verbete"] = 'field_verbete|value'
# Candidate multi-value fields: field_incial_do_verbete,field_palavras_chave,field_referencias_verbete
# split the delimited keyword field into one column per value
df = cSplit(df, "field_palavras_chave")
#df = cSplit(df, "field_referencias_verbete")
write.csv(df,"output_verbetes2.csv",row.names = F, na = "")
| /tratamento-de-dados/ea.fflch.usp.br/main.R | no_license | fflch/scripts | R | false | false | 998 | r | # ea.fflch.usp.br
rm(list=ls())
library(stringr)
library(splitstackshape)
setwd("~/repos/scripts/migracao-drupal-d6-d7-d8/ea.fflch.usp.br/")
# Agenda de Defesas (Identificador: agenda_de_defesas)
df = read.csv("./exportverbetes.csv", stringsAsFactors = F)
nrow(df)
# trim nas colunas:
df <- data.frame(lapply(df, trimws), stringsAsFactors = FALSE)
# Não achei uma forma de migrar o campo:
# referências feitas no verbete
df = df[,-10]
df['field_autoria|format']='full_html'
colnames(df)[names(df) == "field_autoria"] = 'field_autoria|value'
df['field_bibliografia|format']='full_html'
colnames(df)[names(df) == "field_bibliografia"] = 'field_bibliografia|value'
df['field_verbete|format']='full_html'
colnames(df)[names(df) == "field_verbete"] = 'field_verbete|value'
# field_incial_do_verbete,field_palavras_chave,field_referencias_verbete
df = cSplit(df, "field_palavras_chave")
#df = cSplit(df, "field_referencias_verbete")
write.csv(df,"output_verbetes2.csv",row.names = F, na = "")
|
.Random.seed <-
c(403L, 1L, 2085088349L, -796298668L, -622909318L, 1608362949L,
45301023L, 1334406310L, -951345616L, 866411731L, 8391409L, 1407990304L,
1074429118L, 2072045769L, 1579457147L, 56111178L, 833364140L,
-852879441L, -1941265563L, -1812735716L, 4546930L, 1631314413L,
1425737255L, -992008626L, 547785384L, 1429145771L, -532254263L,
617405912L, 548981894L, -272322207L, -1426041613L, -247874718L,
-349358636L, -1928281897L, -362007859L, 1840403044L, 1792632906L,
-1450811979L, 313090319L, -589316586L, -1772250944L, -1003180413L,
900467361L, -1153639280L, -1859897490L, 1510609177L, -2060635733L,
228724634L, -1830502564L, 1512558175L, 631881269L, 805649868L,
842882338L, -1573402755L, -710894473L, -1732673122L, 96598104L,
-1888401989L, 274705241L, 196212968L, 132765974L, 67512977L,
1416611651L, -150842798L, 9925860L, -156451033L, 278575997L,
-1008648588L, -974372774L, 118003877L, 1502857919L, 753070406L,
1627809744L, 661688819L, -157025007L, -693307072L, 1796147486L,
2129063401L, 563011355L, -2039314134L, -1632419252L, -1068368369L,
-1348644539L, 1221434108L, 1188704594L, -474720563L, 946687623L,
708065646L, 1756279624L, -1266112693L, -1799262551L, 1484493816L,
1500564262L, 1862196289L, 1959751123L, 513736578L, -17419340L,
-1384887497L, -1762670035L, 627485508L, 1739866218L, 1045027541L,
860554351L, 1588215158L, -1972705632L, 1089168227L, 1927940097L,
-220810384L, -564937970L, -995428999L, 674337035L, -327232966L,
2121402684L, -1256700289L, 2069499349L, -44792852L, 2125731138L,
848218781L, -70965481L, -1284553090L, 1622566456L, 1227628827L,
661128825L, 984629960L, -1252453642L, 348625713L, 1808231267L,
1425244466L, -696810876L, 2112237255L, -1690798691L, 75227412L,
-1217310790L, -1848079867L, 1952234463L, 1247310438L, 318795248L,
-1482687725L, -470973263L, 1952421728L, -1355123458L, -249310071L,
-397257797L, 687390090L, 796941420L, -1624558097L, 41651749L,
-1773565220L, 581656754L, 925758381L, -444979353L, -1380789106L,
1708151144L, 2099990891L, -1364155639L, 611833240L, -1129225018L,
-1787716063L, -65831245L, 2100962338L, 424025620L, -2125524329L,
1402457229L, -517619292L, -306402422L, -2075228171L, -1043364273L,
298369366L, -248116224L, -884701885L, 347941089L, 60850768L,
1548164782L, 1074154073L, -34483605L, -536053798L, 1252292764L,
911948447L, 1255528693L, -405192308L, 783772770L, 1630207037L,
302272183L, 793554270L, -347261800L, -198123525L, -2128624231L,
1295439400L, 189173462L, -1465668783L, -1839239677L, 1031573906L,
1495027492L, 397353447L, 1040895549L, 1775796L, 1055764250L,
234161253L, 987417855L, 207240582L, 1400419088L, 684181427L,
1254091857L, 221302784L, 102596830L, 202767145L, -189987109L,
996095466L, -486351476L, -1264692529L, 476417669L, -881906372L,
667545618L, -1092894963L, 1371012167L, -1753575890L, -742107256L,
195437195L, 2119832681L, -1657759688L, -1865838362L, 2018474113L,
-995663853L, 1447143362L, -1649770380L, 1853798007L, 1241687917L,
1177524228L, 2115605290L, 1088372122L, 1397741008L, -548605508L,
-705729900L, 1303156354L, -181290560L, -1469120708L, -1388018032L,
861442850L, -1796116984L, 422188812L, -1834475620L, 723601426L,
-1986964608L, 212844308L, 1050253608L, -197572550L, 165488592L,
99770988L, -1712892876L, 1962204306L, 139838816L, 188182860L,
-978128416L, -1432713310L, -602165336L, 528063628L, 1479252924L,
-2025152398L, 314594288L, -1507848124L, 401019992L, -1517823942L,
-1855303568L, 958818876L, 1212422932L, 757280450L, 1917542112L,
-1640344452L, 276586704L, 338883490L, 1438769704L, -2023621364L,
-1467044932L, -1022743246L, -755755840L, -957473228L, -1154287928L,
1383329466L, -1988802416L, 444336876L, 1588179540L, -765454702L,
110160672L, 223718348L, 1826653408L, 2037877442L, -221544408L,
1608156076L, -897875268L, 1200885234L, 983019056L, 1259268004L,
1175528888L, 333588314L, 423856272L, -1839217796L, -980493548L,
517456194L, -1476644096L, 862156732L, -749239472L, -901640926L,
1710663240L, 1633716236L, -503048100L, 100966866L, 1851425280L,
-503039660L, 840729832L, -1812128518L, -1693786416L, 707867948L,
290855156L, 560668050L, -1671360160L, 1864970252L, 1106413280L,
-1370191262L, -1442666840L, 1246721612L, -739941508L, 507237682L,
-1951779216L, -382211260L, 1129452504L, -328913030L, -1470625232L,
409541564L, -1702690220L, 252961858L, -250068832L, -1755067716L,
-95261232L, 679966562L, 1398016744L, -870697780L, 760528380L,
277506546L, 1179805312L, 839132660L, 195097224L, 1191964474L,
2103177040L, 861150316L, -1383410284L, -685239214L, 313948640L,
1407315148L, -902285152L, 563755394L, -1097922328L, -1916048660L,
-1196272324L, 1476842610L, -904795024L, -142183388L, -905576392L,
-447302886L, -1205631792L, -421936324L, 930181012L, 1317566338L,
-1535795776L, 2101375548L, 341154192L, -1763696094L, -22938360L,
-817528180L, 1413826332L, -1223305070L, -931886720L, 1630726676L,
150713128L, 293641786L, 12403152L, 2045183340L, -854418508L,
-2013590510L, 681826784L, 689978316L, -1733273376L, -1833332702L,
-2091037400L, -1721307636L, 101272380L, -1963088910L, -537465104L,
1238626884L, 152694360L, 770175802L, 1257972208L, -2046968132L,
35753236L, -452176190L, -1171105696L, -255596676L, 672328656L,
959663394L, -1050847576L, 570010508L, -1895482180L, -79108302L,
1180367424L, 243097780L, -1352013240L, 1332874682L, 1549279760L,
1441494764L, -393595564L, 1288336658L, -1449772640L, -1603070260L,
1743498848L, -323388478L, -1867355352L, 100432812L, 1309466428L,
-782795406L, -1919848656L, 419820836L, -743677128L, 1909563098L,
-1078908016L, 171924220L, -1381227628L, 1491804226L, -1024483072L,
1500396604L, -112290480L, 1074024610L, -908475064L, 246172428L,
-1667847460L, -1808666286L, -275076992L, -2057860012L, 1959027688L,
633453818L, 602701648L, 1307760428L, 1581882740L, 1566023570L,
384105568L, -924260980L, -1418308768L, 1808594402L, 283739048L,
-333371188L, -292006916L, -1472331342L, -1390391824L, -1622941756L,
-1189377704L, 670653818L, -2018097232L, 9682364L, -565354149L,
845477181L, 1734748074L, -1151246264L, -1652295567L, -1940908573L,
1685180644L, -616449006L, -352692249L, -1332756559L, -1144000010L,
-230982636L, -2018155739L, -139627473L, -1636194840L, 76288710L,
175889267L, 1411285973L, -1241953358L, -967881344L, -1855793815L,
468736475L, -747585236L, -708959430L, 541187567L, -1841993607L,
-987909362L, 962283228L, 29177165L, 1759406263L, 1901572672L,
896903966L, 519098859L, -992920211L, -1198324774L, 84690328L,
-1704506367L, 2114416051L, 1841411284L, 1663458L, -1910948745L,
439034305L, 817101638L, -26089852L, 1955999797L, -716201953L,
1907918712L, -400179626L, -1953991965L, -2114538331L, -1027596990L,
792062768L, 256446297L, 1796080843L, -566422212L, 288604522L,
-1572893793L, -1163934167L, -2545474L, -798510676L, -1801449731L,
1092205127L, -297715408L, 302613070L, -1965987205L, 548231645L,
494727178L, -1077980888L, -1607217263L, -1682218621L, -1665433980L,
-349528910L, 1710865415L, 1134192721L, -1579583530L, -2130635084L,
-2068142395L, 506746895L, 954919048L, 24172966L, 19136851L, -1859669323L,
-763598830L, 245590496L, -2025964471L, -751842693L, -1472113844L,
270178010L, 1488833231L, -342394215L, -299428178L, -223214660L,
-997106643L, -734153769L, 1945754144L, 1595276158L, 1080436043L,
-1517729971L, -26817158L, -1514762312L, -1306987167L, -1727575789L,
1885380916L, -1632896190L, -291816617L, 1657076385L, 681548646L,
960763236L, -1677959019L, 2129916351L, -832584360L, 1692416886L,
-182257277L, 412097477L, -1106263838L, -2112506544L, 359584889L,
1480009643L, -750514916L, -1479603638L, -136237505L, -655326583L,
206202526L, -650366132L, -1046097123L, -208182617L, 441878992L,
-800423570L, 1333062683L, -280140419L, 960295786L, 724633096L,
-653411663L, -253049565L, -800557532L, 1993604306L, 300809511L,
-1102915727L, 2143753654L, 1003183444L, 141941733L, -1897175569L,
-507859672L, 2017148294L, -1466484301L, 1691800853L, -711551118L,
-694897728L, 470021161L, 1067557019L, -158790292L, -786631302L,
2046883503L, -356508231L, 1624227790L, 2099515164L, 1612725901L,
-581837065L, -578579968L, 1246469214L, -29696213L, 1747495981L,
864869530L, 85954648L, 1837283265L, -1767499661L, 1405096340L,
2078105634L, -1820612809L, 645271641L)
| /R/Gmatools-internal.R | no_license | schantepie/Gmatools | R | false | false | 8,212 | r | .Random.seed <-
c(403L, 1L, 2085088349L, -796298668L, -622909318L, 1608362949L,
45301023L, 1334406310L, -951345616L, 866411731L, 8391409L, 1407990304L,
1074429118L, 2072045769L, 1579457147L, 56111178L, 833364140L,
-852879441L, -1941265563L, -1812735716L, 4546930L, 1631314413L,
1425737255L, -992008626L, 547785384L, 1429145771L, -532254263L,
617405912L, 548981894L, -272322207L, -1426041613L, -247874718L,
-349358636L, -1928281897L, -362007859L, 1840403044L, 1792632906L,
-1450811979L, 313090319L, -589316586L, -1772250944L, -1003180413L,
900467361L, -1153639280L, -1859897490L, 1510609177L, -2060635733L,
228724634L, -1830502564L, 1512558175L, 631881269L, 805649868L,
842882338L, -1573402755L, -710894473L, -1732673122L, 96598104L,
-1888401989L, 274705241L, 196212968L, 132765974L, 67512977L,
1416611651L, -150842798L, 9925860L, -156451033L, 278575997L,
-1008648588L, -974372774L, 118003877L, 1502857919L, 753070406L,
1627809744L, 661688819L, -157025007L, -693307072L, 1796147486L,
2129063401L, 563011355L, -2039314134L, -1632419252L, -1068368369L,
-1348644539L, 1221434108L, 1188704594L, -474720563L, 946687623L,
708065646L, 1756279624L, -1266112693L, -1799262551L, 1484493816L,
1500564262L, 1862196289L, 1959751123L, 513736578L, -17419340L,
-1384887497L, -1762670035L, 627485508L, 1739866218L, 1045027541L,
860554351L, 1588215158L, -1972705632L, 1089168227L, 1927940097L,
-220810384L, -564937970L, -995428999L, 674337035L, -327232966L,
2121402684L, -1256700289L, 2069499349L, -44792852L, 2125731138L,
848218781L, -70965481L, -1284553090L, 1622566456L, 1227628827L,
661128825L, 984629960L, -1252453642L, 348625713L, 1808231267L,
1425244466L, -696810876L, 2112237255L, -1690798691L, 75227412L,
-1217310790L, -1848079867L, 1952234463L, 1247310438L, 318795248L,
-1482687725L, -470973263L, 1952421728L, -1355123458L, -249310071L,
-397257797L, 687390090L, 796941420L, -1624558097L, 41651749L,
-1773565220L, 581656754L, 925758381L, -444979353L, -1380789106L,
1708151144L, 2099990891L, -1364155639L, 611833240L, -1129225018L,
-1787716063L, -65831245L, 2100962338L, 424025620L, -2125524329L,
1402457229L, -517619292L, -306402422L, -2075228171L, -1043364273L,
298369366L, -248116224L, -884701885L, 347941089L, 60850768L,
1548164782L, 1074154073L, -34483605L, -536053798L, 1252292764L,
911948447L, 1255528693L, -405192308L, 783772770L, 1630207037L,
302272183L, 793554270L, -347261800L, -198123525L, -2128624231L,
1295439400L, 189173462L, -1465668783L, -1839239677L, 1031573906L,
1495027492L, 397353447L, 1040895549L, 1775796L, 1055764250L,
234161253L, 987417855L, 207240582L, 1400419088L, 684181427L,
1254091857L, 221302784L, 102596830L, 202767145L, -189987109L,
996095466L, -486351476L, -1264692529L, 476417669L, -881906372L,
667545618L, -1092894963L, 1371012167L, -1753575890L, -742107256L,
195437195L, 2119832681L, -1657759688L, -1865838362L, 2018474113L,
-995663853L, 1447143362L, -1649770380L, 1853798007L, 1241687917L,
1177524228L, 2115605290L, 1088372122L, 1397741008L, -548605508L,
-705729900L, 1303156354L, -181290560L, -1469120708L, -1388018032L,
861442850L, -1796116984L, 422188812L, -1834475620L, 723601426L,
-1986964608L, 212844308L, 1050253608L, -197572550L, 165488592L,
99770988L, -1712892876L, 1962204306L, 139838816L, 188182860L,
-978128416L, -1432713310L, -602165336L, 528063628L, 1479252924L,
-2025152398L, 314594288L, -1507848124L, 401019992L, -1517823942L,
-1855303568L, 958818876L, 1212422932L, 757280450L, 1917542112L,
-1640344452L, 276586704L, 338883490L, 1438769704L, -2023621364L,
-1467044932L, -1022743246L, -755755840L, -957473228L, -1154287928L,
1383329466L, -1988802416L, 444336876L, 1588179540L, -765454702L,
110160672L, 223718348L, 1826653408L, 2037877442L, -221544408L,
1608156076L, -897875268L, 1200885234L, 983019056L, 1259268004L,
1175528888L, 333588314L, 423856272L, -1839217796L, -980493548L,
517456194L, -1476644096L, 862156732L, -749239472L, -901640926L,
1710663240L, 1633716236L, -503048100L, 100966866L, 1851425280L,
-503039660L, 840729832L, -1812128518L, -1693786416L, 707867948L,
290855156L, 560668050L, -1671360160L, 1864970252L, 1106413280L,
-1370191262L, -1442666840L, 1246721612L, -739941508L, 507237682L,
-1951779216L, -382211260L, 1129452504L, -328913030L, -1470625232L,
409541564L, -1702690220L, 252961858L, -250068832L, -1755067716L,
-95261232L, 679966562L, 1398016744L, -870697780L, 760528380L,
277506546L, 1179805312L, 839132660L, 195097224L, 1191964474L,
2103177040L, 861150316L, -1383410284L, -685239214L, 313948640L,
1407315148L, -902285152L, 563755394L, -1097922328L, -1916048660L,
-1196272324L, 1476842610L, -904795024L, -142183388L, -905576392L,
-447302886L, -1205631792L, -421936324L, 930181012L, 1317566338L,
-1535795776L, 2101375548L, 341154192L, -1763696094L, -22938360L,
-817528180L, 1413826332L, -1223305070L, -931886720L, 1630726676L,
150713128L, 293641786L, 12403152L, 2045183340L, -854418508L,
-2013590510L, 681826784L, 689978316L, -1733273376L, -1833332702L,
-2091037400L, -1721307636L, 101272380L, -1963088910L, -537465104L,
1238626884L, 152694360L, 770175802L, 1257972208L, -2046968132L,
35753236L, -452176190L, -1171105696L, -255596676L, 672328656L,
959663394L, -1050847576L, 570010508L, -1895482180L, -79108302L,
1180367424L, 243097780L, -1352013240L, 1332874682L, 1549279760L,
1441494764L, -393595564L, 1288336658L, -1449772640L, -1603070260L,
1743498848L, -323388478L, -1867355352L, 100432812L, 1309466428L,
-782795406L, -1919848656L, 419820836L, -743677128L, 1909563098L,
-1078908016L, 171924220L, -1381227628L, 1491804226L, -1024483072L,
1500396604L, -112290480L, 1074024610L, -908475064L, 246172428L,
-1667847460L, -1808666286L, -275076992L, -2057860012L, 1959027688L,
633453818L, 602701648L, 1307760428L, 1581882740L, 1566023570L,
384105568L, -924260980L, -1418308768L, 1808594402L, 283739048L,
-333371188L, -292006916L, -1472331342L, -1390391824L, -1622941756L,
-1189377704L, 670653818L, -2018097232L, 9682364L, -565354149L,
845477181L, 1734748074L, -1151246264L, -1652295567L, -1940908573L,
1685180644L, -616449006L, -352692249L, -1332756559L, -1144000010L,
-230982636L, -2018155739L, -139627473L, -1636194840L, 76288710L,
175889267L, 1411285973L, -1241953358L, -967881344L, -1855793815L,
468736475L, -747585236L, -708959430L, 541187567L, -1841993607L,
-987909362L, 962283228L, 29177165L, 1759406263L, 1901572672L,
896903966L, 519098859L, -992920211L, -1198324774L, 84690328L,
-1704506367L, 2114416051L, 1841411284L, 1663458L, -1910948745L,
439034305L, 817101638L, -26089852L, 1955999797L, -716201953L,
1907918712L, -400179626L, -1953991965L, -2114538331L, -1027596990L,
792062768L, 256446297L, 1796080843L, -566422212L, 288604522L,
-1572893793L, -1163934167L, -2545474L, -798510676L, -1801449731L,
1092205127L, -297715408L, 302613070L, -1965987205L, 548231645L,
494727178L, -1077980888L, -1607217263L, -1682218621L, -1665433980L,
-349528910L, 1710865415L, 1134192721L, -1579583530L, -2130635084L,
-2068142395L, 506746895L, 954919048L, 24172966L, 19136851L, -1859669323L,
-763598830L, 245590496L, -2025964471L, -751842693L, -1472113844L,
270178010L, 1488833231L, -342394215L, -299428178L, -223214660L,
-997106643L, -734153769L, 1945754144L, 1595276158L, 1080436043L,
-1517729971L, -26817158L, -1514762312L, -1306987167L, -1727575789L,
1885380916L, -1632896190L, -291816617L, 1657076385L, 681548646L,
960763236L, -1677959019L, 2129916351L, -832584360L, 1692416886L,
-182257277L, 412097477L, -1106263838L, -2112506544L, 359584889L,
1480009643L, -750514916L, -1479603638L, -136237505L, -655326583L,
206202526L, -650366132L, -1046097123L, -208182617L, 441878992L,
-800423570L, 1333062683L, -280140419L, 960295786L, 724633096L,
-653411663L, -253049565L, -800557532L, 1993604306L, 300809511L,
-1102915727L, 2143753654L, 1003183444L, 141941733L, -1897175569L,
-507859672L, 2017148294L, -1466484301L, 1691800853L, -711551118L,
-694897728L, 470021161L, 1067557019L, -158790292L, -786631302L,
2046883503L, -356508231L, 1624227790L, 2099515164L, 1612725901L,
-581837065L, -578579968L, 1246469214L, -29696213L, 1747495981L,
864869530L, 85954648L, 1837283265L, -1767499661L, 1405096340L,
2078105634L, -1820612809L, 645271641L)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Solver.R
\docType{class}
\name{Solver-class}
\alias{Solver-class}
\alias{.Solver}
\alias{Solver}
\alias{getAssayData,Solver-method}
\alias{getTarget,Solver-method}
\alias{getRegulators,Solver-method}
\title{Define an object of class Solver}
\usage{
Solver(mtx.assay = matrix(), targetGene, candidateRegulators, quiet = TRUE)
\S4method{getAssayData}{Solver}(obj)
\S4method{getTarget}{Solver}(obj)
\S4method{getRegulators}{Solver}(obj)
}
\arguments{
\item{mtx.assay}{An assay matrix of gene expression data}

\item{targetGene}{The target gene for the Solver}

\item{candidateRegulators}{The candidate regulators for the Solver}

\item{quiet}{A logical indicating whether or not the Solver object should print output}

\item{obj}{An object of class Solver}
}
\value{
An object of the Solver class
}
\description{
The Solver class is a generic class that governs the different solvers available in TReNA. A
Solver class object is constructed during creation of a TReNA object and resides within the
TReNA object. It is rarely called by itself; rather, interaction with a particular solver object
is achieved using the \code{\link{solve}} method on a TReNA object.
}
\section{Methods (by generic)}{
\itemize{
\item \code{getAssayData}: Retrieve the assay matrix of gene expression data
\item \code{getTarget}: Retrieve the target gene for a Solver
\item \code{getRegulators}: Retrieve the candidate regulators for a Solver
}}
\examples{
# Create a simple Solver object with default options
mtx <- matrix(rnorm(10000), nrow = 100)
solver <- Solver(mtx)
# Create a Solver object using the included Alzheimer's data and retrieve the matrix
load(system.file(package="TReNA", "extdata/ampAD.154genes.mef2cTFs.278samples.RData"))
solver <- Solver(mtx.sub)
mtx <- getAssayData(solver)
# Create a Solver object using the included Alzheimer's data and retrieve the target gene
load(system.file(package="TReNA", "extdata/ampAD.154genes.mef2cTFs.278samples.RData"))
solver <- Solver(mtx.sub)
target <- getTarget(solver)

# Create a Solver object using the included Alzheimer's data and retrieve the candidate regulators
load(system.file(package="TReNA", "extdata/ampAD.154genes.mef2cTFs.278samples.RData"))
solver <- Solver(mtx.sub)
regulators <- getRegulators(solver)
}
\seealso{
\code{\link{getAssayData}}, \code{\link{TReNA}}, \code{\link{solve}}
Other Solver class objects: \code{\link{BayesSpikeSolver}},
\code{\link{EnsembleSolver}},
\code{\link{LassoPVSolver}}, \code{\link{LassoSolver}},
\code{\link{PearsonSolver}},
\code{\link{RandomForestSolver}},
\code{\link{RidgeSolver}}, \code{\link{SpearmanSolver}},
\code{\link{SqrtLassoSolver}}
}
| /man/Solver-class.Rd | no_license | noahmclean1/TReNA | R | false | true | 2,676 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Solver.R
\docType{class}
\name{Solver-class}
\alias{Solver-class}
\alias{.Solver}
\alias{Solver}
\alias{Solver}
\alias{getAssayData,Solver-method}
\alias{getTarget,Solver-method}
\alias{getRegulators,Solver-method}
\title{Define an object of class Solver}
\usage{
Solver(mtx.assay = matrix(), targetGene, candidateRegulators, quiet = TRUE)
\S4method{getAssayData}{Solver}(obj)
\S4method{getTarget}{Solver}(obj)
\S4method{getRegulators}{Solver}(obj)
}
\arguments{
\item{mtx.assay}{An assay matrix of gene expression data}
\item{quiet}{A logical indicating whether or not the Solver object should print output}
\item{obj}{An object of class Solver}
\item{obj}{An object of class Solver}
\item{obj}{An object of class Solver}
}
\value{
An object of the Solver class
}
\description{
The Solver class is a generic class that governs the different solvers available in TReNA. A
Solver class object is constructed during creation of a TReNA object and resides within the
TReNA object. It is rarely called by itself; rather, interaction with a particular solver object
is achieved using the \code{\link{solve}} method on a TReNA object.
}
\section{Methods (by generic)}{
\itemize{
\item \code{getAssayData}: Retrieve the assay matrix of gene expression data
\item \code{getTarget}: Retrieve the target gene for a Solver
\item \code{getRegulators}: Retrieve the candidate regulators for a Solver
}}
\examples{
# Create a simple Solver object with default options
mtx <- matrix(rnorm(10000), nrow = 100)
solver <- Solver(mtx)
# Create a Solver object using the included Alzheimer's data and retrieve the matrix
load(system.file(package="TReNA", "extdata/ampAD.154genes.mef2cTFs.278samples.RData"))
solver <- Solver(mtx.sub)
mtx <- getAssayData(solver)
# Create a Solver object using the included Alzheimer's data and retrieve the matrix
load(system.file(package="TReNA", "extdata/ampAD.154genes.mef2cTFs.278samples.RData"))
solver <- Solver(mtx.sub)
mtx <- getTarget(solver)
# Create a Solver object using the included Alzheimer's data and retrieve the matrix
load(system.file(package="TReNA", "extdata/ampAD.154genes.mef2cTFs.278samples.RData"))
solver <- Solver(mtx.sub)
mtx <- getRegulators(solver)
}
\seealso{
\code{\link{getAssayData}}, \code{\link{TReNA}}, \code{\link{solve}}
Other Solver class objects: \code{\link{BayesSpikeSolver}},
\code{\link{EnsembleSolver}},
\code{\link{LassoPVSolver}}, \code{\link{LassoSolver}},
\code{\link{PearsonSolver}},
\code{\link{RandomForestSolver}},
\code{\link{RidgeSolver}}, \code{\link{SpearmanSolver}},
\code{\link{SqrtLassoSolver}}
}
|
# The expansion of (x1+x2+x3+x4)^4 in the Schur basis has known integer
# coefficients (see the linked math.SE question). Check the identity
# numerically, in exact rational (bigq) arithmetic, and symbolically.
test_that(
  # https://math.stackexchange.com/questions/3335885/expansion-of-sum-x-in-in-schur-polynomials
  "Schur expansion of (sum x_i)^n", {
    # numeric
    x <- c(3,4,5,6)
    e <- Schur(x, c(4)) + 3*Schur(x, c(3,1)) + 2*Schur(x, c(2,2)) +
      3*Schur(x, c(2,1,1)) + Schur(x, c(1,1,1,1))
    expect_equal(e, sum(x)^4)
    # gmp: same identity with exact rational arithmetic
    x <- as.bigq(c(3L,4L,5L,6L), c(4L,5L,6L,7L))
    e <- Schur(x, c(4)) + 3L*Schur(x, c(3,1)) + 2L*Schur(x, c(2,2)) +
      3L*Schur(x, c(2,1,1)) + Schur(x, c(1,1,1,1))
    expect_identical(e, sum(x)^4)
    # polynomial: the identity must hold symbolically in 4 variables
    n <- 4
    P <- SchurPol(n, c(4)) + 3*SchurPol(n, c(3, 1)) + 2*SchurPol(n, c(2, 2)) +
      3*SchurPol(n, c(2, 1, 1)) + SchurPol(n, c(1, 1, 1, 1))
    Q <- (mvp("x_1", 1, 1) + mvp("x_2", 1, 1) + mvp("x_3", 1, 1) +
            mvp("x_4", 1, 1))^4
    expect_true(as_mvp_qspray(P) == Q)
  }
)
# A Schur function vanishes when the partition has more parts than there
# are variables. Check the numeric, bigq and polynomial entry points, for
# each algorithm/basis combination exposed by the API.
test_that(
  "Schur = 0 if l(lambda)>l(x)", {
    # numeric
    expect_equal(Schur(c(1,2), c(3,2,1)), 0)
    expect_equal(Schur(c(1,2), c(3,2,1), algorithm = "naive"), 0)
    # gmp
    x <- as.bigq(c(1L,2L))
    lambda <- c(3,2,1)
    expect_identical(Schur(x, lambda), as.bigq(0L))
    expect_identical(Schur(x, lambda, algorithm = "naive"), as.bigq(0L))
    # polynomial: exact and inexact variants, in both output bases
    n <- 2
    lambda <- c(3,2,1)
    expect_true(SchurPol(n, lambda) == as.qspray(0))
    expect_identical(SchurPol(n, lambda, algorithm = "naive"),
                     as.qspray(0))
    expect_identical(SchurPol(n, lambda, exact = FALSE, algorithm = "naive"),
                     mvp::constant(0))
    expect_identical(SchurPol(n, lambda, algorithm = "naive",
                              basis = "MSF"),
                     as.qspray(0))
    expect_identical(SchurPol(n, lambda, exact = FALSE, algorithm = "naive",
                              basis = "MSF"),
                     mvp::constant(0))
  }
)
# Schur(x, c(3,2)) compared against its fully expanded monomial sum,
# with both available algorithms ("naive" and "DK"), first for exact
# bigq input and then for plain numeric input.
test_that(
  "Schur (3,2) - gmp", {
    x <- as.bigq(3L:5L, c(10L,2L,1L))
    # hand-expanded monomial form of s_(3,2)(x1, x2, x3)
    expected <- x[1]^3*x[2]^2 + x[1]^3*x[3]^2 + x[1]^3*x[2]*x[3] +
      x[1]^2*x[2]^3 + x[1]^2*x[3]^3 + 2*x[1]^2*x[2]*x[3]^2 +
      2*x[1]^2*x[2]^2*x[3] + x[1]*x[2]*x[3]^3 + 2*x[1]*x[2]^2*x[3]^2 +
      x[1]*x[2]^3*x[3] + x[2]^2*x[3]^3 + x[2]^3*x[3]^2
    naive <- Schur(x, c(3,2), algorithm = "naive")
    DK <- Schur(x, c(3,2), algorithm = "DK")
    expect_identical(naive, expected)
    expect_identical(DK, expected)
  }
)

test_that(
  "Schur (3,2) - numeric", {
    x <- c(3L:5L) / c(10L,2L,1L)
    # same expansion as above, in double-precision arithmetic
    expected <- x[1]^3*x[2]^2 + x[1]^3*x[3]^2 + x[1]^3*x[2]*x[3] +
      x[1]^2*x[2]^3 + x[1]^2*x[3]^3 + 2*x[1]^2*x[2]*x[3]^2 +
      2*x[1]^2*x[2]^2*x[3] + x[1]*x[2]*x[3]^3 + 2*x[1]*x[2]^2*x[3]^2 +
      x[1]*x[2]^3*x[3] + x[2]^2*x[3]^3 + x[2]^3*x[3]^2
    naive <- Schur(x, c(3,2), algorithm = "naive")
    DK <- Schur(x, c(3,2), algorithm = "DK")
    expect_equal(naive, expected)
    expect_equal(DK, expected)
  }
)
# SchurPol evaluated at a concrete bigq point must agree with Schur
# evaluated directly at that point.
test_that(
  "SchurPol is correct", {
    lambda <- c(3,2)
    pol <- SchurPol(4, lambda, algorithm = "naive")
    x <- as.bigq(c(6L,-7L,8L,9L), c(1L,2L,3L,4L))
    polEval <- qspray::evalQspray(pol, x)
    expect_identical(polEval, Schur(as.bigq(x), lambda))
  }
)

# Pieri-type identity: the elementary symmetric polynomial e_(2,2,1)
# expands into Schur polynomials with the coefficients used below;
# compare against qspray's direct construction.
test_that(
  "Pieri rule", {
    n <- 3
    P1 <- SchurPol(n, c(3, 2)) + 2 * SchurPol(n, c(2, 2, 1)) +
      SchurPol(n, c(3, 1, 1)) + 2 * SchurPol(n, c(2, 1, 1, 1)) +
      SchurPol(n, c(1, 1, 1, 1, 1))
    P2 <- qspray::ESFpoly(n, c(2, 2, 1))
    expect_true(P1 == P2)
  }
)

# The C++ implementations must agree with the R reference versions.
test_that(
  "SchurPolCPP is correct", {
    lambda <- c(3, 2)
    pol <- SchurPolCPP(4, lambda)
    x <- as.bigq(c(6L,-7L,8L,9L), c(1L,2L,3L,4L))
    polEval <- qspray::evalQspray(pol, x)
    expect_identical(polEval, Schur(as.bigq(x), lambda))
  }
)

test_that(
  "SchurCPP is correct", {
    # exact (bigq) input
    x <- as.bigq(c(6L, -7L, 8L, 9L), c(1L, 2L, 3L, 4L))
    lambda <- c(3, 2)
    res <- SchurCPP(x, lambda)
    expect_identical(res, Schur(x, lambda))
    # numeric (double) input
    x <- c(6, -7, 8, 9) / c(1, 2, 3, 4)
    lambda <- c(3, 2)
    res <- SchurCPP(x, lambda)
    expect_equal(res, Schur(x, lambda))
  }
)
# https://math.stackexchange.com/questions/3335885/expansion-of-sum-x-in-in-schur-polynomials
"Schur expansion of (sum x_i)^n", {
# numeric
x <- c(3,4,5,6)
e <- Schur(x, c(4)) + 3*Schur(x, c(3,1)) + 2*Schur(x, c(2,2)) +
3*Schur(x, c(2,1,1)) + Schur(x, c(1,1,1,1))
expect_equal(e, sum(x)^4)
# gmp
x <- as.bigq(c(3L,4L,5L,6L), c(4L,5L,6L,7L))
e <- Schur(x, c(4)) + 3L*Schur(x, c(3,1)) + 2L*Schur(x, c(2,2)) +
3L*Schur(x, c(2,1,1)) + Schur(x, c(1,1,1,1))
expect_identical(e, sum(x)^4)
# polynomial
n <- 4
P <- SchurPol(n, c(4)) + 3*SchurPol(n, c(3, 1)) + 2*SchurPol(n, c(2, 2)) +
3*SchurPol(n, c(2, 1, 1)) + SchurPol(n, c(1, 1, 1, 1))
Q <- (mvp("x_1", 1, 1) + mvp("x_2", 1, 1) + mvp("x_3", 1, 1) +
mvp("x_4", 1, 1))^4
expect_true(as_mvp_qspray(P) == Q)
}
)
test_that(
"Schur = 0 if l(lambda)>l(x)", {
# numeric
expect_equal(Schur(c(1,2), c(3,2,1)), 0)
expect_equal(Schur(c(1,2), c(3,2,1), algorithm = "naive"), 0)
# gmp
x <- as.bigq(c(1L,2L))
lambda <- c(3,2,1)
expect_identical(Schur(x, lambda), as.bigq(0L))
expect_identical(Schur(x, lambda, algorithm = "naive"), as.bigq(0L))
# polynomial
n <- 2
lambda <- c(3,2,1)
expect_true(SchurPol(n, lambda) == as.qspray(0))
expect_identical(SchurPol(n, lambda, algorithm = "naive"),
as.qspray(0))
expect_identical(SchurPol(n, lambda, exact = FALSE, algorithm = "naive"),
mvp::constant(0))
expect_identical(SchurPol(n, lambda, algorithm = "naive",
basis = "MSF"),
as.qspray(0))
expect_identical(SchurPol(n, lambda, exact = FALSE, algorithm = "naive",
basis = "MSF"),
mvp::constant(0))
}
)
test_that(
"Schur (3,2) - gmp", {
x <- as.bigq(3L:5L, c(10L,2L,1L))
expected <- x[1]^3*x[2]^2 + x[1]^3*x[3]^2 + x[1]^3*x[2]*x[3] +
x[1]^2*x[2]^3 + x[1]^2*x[3]^3 + 2*x[1]^2*x[2]*x[3]^2 +
2*x[1]^2*x[2]^2*x[3] + x[1]*x[2]*x[3]^3 + 2*x[1]*x[2]^2*x[3]^2 +
x[1]*x[2]^3*x[3] + x[2]^2*x[3]^3 + x[2]^3*x[3]^2
naive <- Schur(x, c(3,2), algorithm = "naive")
DK <- Schur(x, c(3,2), algorithm = "DK")
expect_identical(naive, expected)
expect_identical(DK, expected)
}
)
test_that(
"Schur (3,2) - numeric", {
x <- c(3L:5L) / c(10L,2L,1L)
expected <- x[1]^3*x[2]^2 + x[1]^3*x[3]^2 + x[1]^3*x[2]*x[3] +
x[1]^2*x[2]^3 + x[1]^2*x[3]^3 + 2*x[1]^2*x[2]*x[3]^2 +
2*x[1]^2*x[2]^2*x[3] + x[1]*x[2]*x[3]^3 + 2*x[1]*x[2]^2*x[3]^2 +
x[1]*x[2]^3*x[3] + x[2]^2*x[3]^3 + x[2]^3*x[3]^2
naive <- Schur(x, c(3,2), algorithm = "naive")
DK <- Schur(x, c(3,2), algorithm = "DK")
expect_equal(naive, expected)
expect_equal(DK, expected)
}
)
test_that(
"SchurPol is correct", {
lambda <- c(3,2)
pol <- SchurPol(4, lambda, algorithm = "naive")
x <- as.bigq(c(6L,-7L,8L,9L), c(1L,2L,3L,4L))
polEval <- qspray::evalQspray(pol, x)
expect_identical(polEval, Schur(as.bigq(x), lambda))
}
)
test_that(
"Pieri rule", {
n <- 3
P1 <- SchurPol(n, c(3, 2)) + 2 * SchurPol(n, c(2, 2, 1)) +
SchurPol(n, c(3, 1, 1)) + 2 * SchurPol(n, c(2, 1, 1, 1)) +
SchurPol(n, c(1, 1, 1, 1, 1))
P2 <- qspray::ESFpoly(n, c(2, 2, 1))
expect_true(P1 == P2)
}
)
test_that(
"SchurPolCPP is correct", {
lambda <- c(3, 2)
pol <- SchurPolCPP(4, lambda)
x <- as.bigq(c(6L,-7L,8L,9L), c(1L,2L,3L,4L))
polEval <- qspray::evalQspray(pol, x)
expect_identical(polEval, Schur(as.bigq(x), lambda))
}
)
test_that("SchurCPP is correct", {
  shape <- c(3, 2)
  # exact comparison on big rationals ...
  xq <- as.bigq(c(6L, -7L, 8L, 9L), c(1L, 2L, 3L, 4L))
  expect_identical(SchurCPP(xq, shape), Schur(xq, shape))
  # ... and tolerance-based comparison on doubles
  xd <- c(6, -7, 8, 9) / c(1, 2, 3, 4)
  expect_equal(SchurCPP(xd, shape), Schur(xd, shape))
})
|
#' @title Create individuals with reduced ploidy
#'
#' @description Creates new individuals from gametes. This function
#' was created to model the creation of diploid potatoes from
#' tetraploid potatoes. It can be used on any population with an
#' even ploidy level. The newly created individuals will have half
#' the ploidy level of the originals. The reduction can occur with
#' or without genetic recombination.
#'
#' @param pop an object of 'Pop' superclass
#' @param nProgeny total number of progeny per individual
#' @param useFemale should female recombination rates be used.
#' @param keepParents should previous parents be used for mother and
#' father.
#' @param simRecomb should genetic recombination be modeled.
#' @param simParam an object of 'SimParam' class
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create individuals with reduced ploidy
#' pop2 = reduceGenome(pop, simParam=SP)
#'
#' @export
reduceGenome = function(pop,nProgeny=1,useFemale=TRUE,keepParents=TRUE,
                        simRecomb=TRUE,simParam=NULL){
  if(is.null(simParam)){
    # fall back to the global simulation parameters object
    simParam = get("SP",envir=.GlobalEnv)
  }
  # halving the ploidy is only defined for even ploidy levels
  if(pop@ploidy%%2L){
    stop("You cannot reduce aneuploids")
  }
  if(simRecomb){
    # use the requested sex-specific genetic map for recombination
    if(useFemale){
      map = simParam$femaleMap
    }else{
      map = simParam$maleMap
    }
  }else{
    # Create dummy map with zero genetic distance
    # (zero distances mean no recombination is modeled)
    map = vector("list",pop@nChr)
    for(i in 1:pop@nChr){
      map[[i]] = rep(0,pop@nLoci[i])
    }
    map = as.matrix(map)
  }
  # gamete formation is delegated to compiled code
  tmp = createReducedGenome(pop@geno, nProgeny,
                            map,
                            simParam$v,
                            simParam$isTrackRec,
                            pop@ploidy,
                            simParam$femaleCentromere,
                            simParam$quadProb,
                            simParam$nThreads)
  # progeny carry half the ploidy of the source population
  rPop = new("RawPop",
             nInd=as.integer(pop@nInd*nProgeny),
             nChr=pop@nChr,
             ploidy=as.integer(pop@ploidy/2),
             nLoci=pop@nLoci,
             geno=tmp$geno)
  if(simParam$isTrackRec){
    simParam$addToRec(tmp$recHist)
  }
  if(keepParents){
    # each source individual is recorded as both mother and father of
    # its own gametes; the original parents are kept as origM/origF
    return(newPop(rawPop=rPop,
                  mother=rep(pop@id,each=nProgeny),
                  father=rep(pop@id,each=nProgeny),
                  origM=rep(pop@mother,each=nProgeny),
                  origF=rep(pop@father,each=nProgeny),
                  isDH=FALSE,
                  simParam=simParam))
  }else{
    return(newPop(rawPop=rPop,
                  mother=rep(pop@id,each=nProgeny),
                  father=rep(pop@id,each=nProgeny),
                  isDH=FALSE,
                  simParam=simParam))
  }
}
#' @title Double the ploidy of individuals
#'
#' @description Creates new individuals with twice the ploidy.
#' This function was created to model the formation of tetraploid
#' potatoes from diploid potatoes. This function will work on any
#' population.
#'
#' @param pop an object of 'Pop' superclass
#' @param keepParents should previous parents be used for mother and
#' father.
#' @param simParam an object of 'SimParam' class
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create individuals with doubled ploidy
#' pop2 = doubleGenome(pop, simParam=SP)
#'
#' @export
doubleGenome = function(pop, keepParents=TRUE,
                        simParam=NULL){
  if(is.null(simParam)){
    # fall back to the global simulation parameters object
    simParam = get("SP",envir=.GlobalEnv)
  }
  # Duplicate each haplotype: repeating every chromosome copy twice
  # doubles the ploidy while leaving allele content unchanged.
  geno = pop@geno
  for(i in 1:pop@nChr){
    # drop=FALSE keeps the 3-dimensional array structure even when the
    # population contains a single individual
    geno[[i]] = geno[[i]][,rep(1:pop@ploidy,each=2),,drop=FALSE]
  }
  rPop = new("RawPop",
             nInd=as.integer(pop@nInd),
             nChr=pop@nChr,
             ploidy=2L*pop@ploidy,
             nLoci=pop@nLoci,
             geno=geno)
  if(keepParents){
    origM=pop@mother
    origF=pop@father
  }else{
    origM=pop@id
    origF=pop@id
  }
  # Numeric ids are needed for pedigree lookup and for duplicating the
  # recombination histories; compute them once, unconditionally, so the
  # isTrackRec branch below works even when pedigree tracking is off.
  # (Previously `id` was only defined inside the isTrackPed branch.)
  id = as.numeric(pop@id)
  if(simParam$isTrackPed){
    # Extract actual parents
    ped = simParam$ped
    mother = ped[id,1]
    father = ped[id,2]
  }else{
    # Provide arbitrary parents (not actually used)
    mother = origM
    father = origF
  }
  if(simParam$isTrackRec){
    # Duplicate recombination histories: each original haplotype's
    # history is copied to both of its new copies.
    oldHist = simParam$recHist
    newHist = vector("list", 2*pop@ploidy)
    newHist = rep(list(newHist), pop@nChr)
    newHist = rep(list(newHist), pop@nInd)
    for(i in 1:pop@nInd){
      for(j in 1:pop@nChr){
        k = 0
        for(l in 1:pop@ploidy){
          for(m in 1:2){
            k = k+1
            newHist[[i]][[j]][[k]] =
              oldHist[[id[i]]][[j]][[l]]
          }
        }
      }
    }
    simParam$addToRec(newHist)
  }
  return(newPop(rawPop=rPop,
                mother=mother,
                father=father,
                origM=origM,
                origF=origF,
                isDH=TRUE,
                simParam=simParam))
}
#' @title Combine genomes of individuals
#'
#' @description
#' This function is designed to model the pairing of gametes. The male
#' and female individuals are treated as gametes, so the ploidy of newly
#' created individuals will be the sum of its parents.
#'
#' @param females an object of \code{\link{Pop-class}} for female parents.
#' @param males an object of \code{\link{Pop-class}} for male parents.
#' @param crossPlan a matrix with two columns representing
#' female and male parents. Either integers for the position in
#' population or character strings for the IDs.
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Cross individual 1 with individual 10
#' crossPlan = matrix(c(1,10), nrow=1, ncol=2)
#' pop2 = mergeGenome(pop, pop, crossPlan, simParam=SP)
#'
#' @export
mergeGenome = function(females,males,crossPlan,simParam=NULL){
  if(is.null(simParam)){
    # fall back to the global simulation parameters object
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.character(crossPlan)){ #Match by ID
    # translate character IDs into row positions within each population
    crossPlan = cbind(match(crossPlan[,1],females@id),
                      match(crossPlan[,2],males@id))
    if(any(is.na(crossPlan))){
      stop("Failed to match supplied IDs")
    }
  }
  # positions must index existing individuals in both populations
  if((max(crossPlan[,1])>nInd(females)) |
     (max(crossPlan[,2])>nInd(males)) |
     (min(crossPlan)<1L)){
    stop("Invalid crossPlan")
  }
  mother = as.integer(females@id[crossPlan[,1]])
  father = as.integer(males@id[crossPlan[,2]])
  # Merge genotype data: one raw array per chromosome, sized for the
  # combined ploidy, filled cross by cross
  geno = vector("list", females@nChr)
  for(i in 1:females@nChr){
    geno[[i]] = array(as.raw(0),
                      dim = c(dim(females@geno[[i]])[1],
                              females@ploidy+males@ploidy,
                              nrow(crossPlan)))
    for(j in 1:nrow(crossPlan)){
      # Add female gamete (first females@ploidy haplotype columns)
      geno[[i]][,1:females@ploidy,j] =
        females@geno[[i]][,,crossPlan[j,1]]
      # Add male gamete (remaining haplotype columns)
      geno[[i]][,(females@ploidy+1):(females@ploidy+males@ploidy),j] =
        males@geno[[i]][,,crossPlan[j,2]]
    }
  }
  rPop = new("RawPop",
             nInd=as.integer(nrow(crossPlan)),
             nChr=females@nChr,
             ploidy=females@ploidy+males@ploidy,
             nLoci=females@nLoci,
             geno=as.matrix(geno))
  if(simParam$isTrackRec){
    # Duplicate recombination histories: female haplotype histories
    # first, then male, matching the column order used for geno above
    oldHist = simParam$recHist
    newHist = vector("list", females@ploidy+males@ploidy)
    newHist = rep(list(newHist), females@nChr)
    newHist = rep(list(newHist), nrow(crossPlan))
    for(i in 1:nrow(crossPlan)){
      for(j in 1:females@nChr){
        k = 0
        for(l in 1:females@ploidy){
          k = k+1
          newHist[[i]][[j]][[k]] =
            oldHist[[mother[i]]][[j]][[l]]
        }
        for(l in 1:males@ploidy){
          k = k+1
          newHist[[i]][[j]][[k]] =
            oldHist[[father[i]]][[j]][[l]]
        }
      }
    }
    simParam$addToRec(newHist)
  }
  return(newPop(rawPop=rPop,
                mother=mother,
                father=father,
                isDH=FALSE,
                simParam=simParam))
}
| /fuzzedpackages/AlphaSimR/R/polyploids.R | no_license | akhikolla/testpackages | R | false | false | 8,932 | r | #' @title Create individuals with reduced ploidy
#'
#' @description Creates new individuals from gametes. This function
#' was created to model the creation of diploid potatoes from
#' tetraploid potatoes. It can be used on any population with an
#' even ploidy level. The newly created individuals will have half
#' the ploidy level of the originals. The reduction can occur with
#' or without genetic recombination.
#'
#' @param pop an object of 'Pop' superclass
#' @param nProgeny total number of progeny per individual
#' @param useFemale should female recombination rates be used.
#' @param keepParents should previous parents be used for mother and
#' father.
#' @param simRecomb should genetic recombination be modeled.
#' @param simParam an object of 'SimParam' class
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create individuals with reduced ploidy
#' pop2 = reduceGenome(pop, simParam=SP)
#'
#' @export
reduceGenome = function(pop,nProgeny=1,useFemale=TRUE,keepParents=TRUE,
                        simRecomb=TRUE,simParam=NULL){
  if(is.null(simParam)){
    # fall back to the global simulation parameters object
    simParam = get("SP",envir=.GlobalEnv)
  }
  # halving the ploidy is only defined for even ploidy levels
  if(pop@ploidy%%2L){
    stop("You cannot reduce aneuploids")
  }
  if(simRecomb){
    # use the requested sex-specific genetic map for recombination
    if(useFemale){
      map = simParam$femaleMap
    }else{
      map = simParam$maleMap
    }
  }else{
    # Create dummy map with zero genetic distance
    # (zero distances mean no recombination is modeled)
    map = vector("list",pop@nChr)
    for(i in 1:pop@nChr){
      map[[i]] = rep(0,pop@nLoci[i])
    }
    map = as.matrix(map)
  }
  # gamete formation is delegated to compiled code
  tmp = createReducedGenome(pop@geno, nProgeny,
                            map,
                            simParam$v,
                            simParam$isTrackRec,
                            pop@ploidy,
                            simParam$femaleCentromere,
                            simParam$quadProb,
                            simParam$nThreads)
  # progeny carry half the ploidy of the source population
  rPop = new("RawPop",
             nInd=as.integer(pop@nInd*nProgeny),
             nChr=pop@nChr,
             ploidy=as.integer(pop@ploidy/2),
             nLoci=pop@nLoci,
             geno=tmp$geno)
  if(simParam$isTrackRec){
    simParam$addToRec(tmp$recHist)
  }
  if(keepParents){
    # each source individual is recorded as both mother and father of
    # its own gametes; the original parents are kept as origM/origF
    return(newPop(rawPop=rPop,
                  mother=rep(pop@id,each=nProgeny),
                  father=rep(pop@id,each=nProgeny),
                  origM=rep(pop@mother,each=nProgeny),
                  origF=rep(pop@father,each=nProgeny),
                  isDH=FALSE,
                  simParam=simParam))
  }else{
    return(newPop(rawPop=rPop,
                  mother=rep(pop@id,each=nProgeny),
                  father=rep(pop@id,each=nProgeny),
                  isDH=FALSE,
                  simParam=simParam))
  }
}
#' @title Double the ploidy of individuals
#'
#' @description Creates new individuals with twice the ploidy.
#' This function was created to model the formation of tetraploid
#' potatoes from diploid potatoes. This function will work on any
#' population.
#'
#' @param pop an object of 'Pop' superclass
#' @param keepParents should previous parents be used for mother and
#' father.
#' @param simParam an object of 'SimParam' class
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create individuals with doubled ploidy
#' pop2 = doubleGenome(pop, simParam=SP)
#'
#' @export
doubleGenome = function(pop, keepParents=TRUE,
                        simParam=NULL){
  if(is.null(simParam)){
    # fall back to the global simulation parameters object
    simParam = get("SP",envir=.GlobalEnv)
  }
  # Duplicate each haplotype: repeating every chromosome copy twice
  # doubles the ploidy while leaving allele content unchanged.
  geno = pop@geno
  for(i in 1:pop@nChr){
    # drop=FALSE keeps the 3-dimensional array structure even when the
    # population contains a single individual
    geno[[i]] = geno[[i]][,rep(1:pop@ploidy,each=2),,drop=FALSE]
  }
  rPop = new("RawPop",
             nInd=as.integer(pop@nInd),
             nChr=pop@nChr,
             ploidy=2L*pop@ploidy,
             nLoci=pop@nLoci,
             geno=geno)
  if(keepParents){
    origM=pop@mother
    origF=pop@father
  }else{
    origM=pop@id
    origF=pop@id
  }
  # Numeric ids are needed for pedigree lookup and for duplicating the
  # recombination histories; compute them once, unconditionally, so the
  # isTrackRec branch below works even when pedigree tracking is off.
  # (Previously `id` was only defined inside the isTrackPed branch.)
  id = as.numeric(pop@id)
  if(simParam$isTrackPed){
    # Extract actual parents
    ped = simParam$ped
    mother = ped[id,1]
    father = ped[id,2]
  }else{
    # Provide arbitrary parents (not actually used)
    mother = origM
    father = origF
  }
  if(simParam$isTrackRec){
    # Duplicate recombination histories: each original haplotype's
    # history is copied to both of its new copies.
    oldHist = simParam$recHist
    newHist = vector("list", 2*pop@ploidy)
    newHist = rep(list(newHist), pop@nChr)
    newHist = rep(list(newHist), pop@nInd)
    for(i in 1:pop@nInd){
      for(j in 1:pop@nChr){
        k = 0
        for(l in 1:pop@ploidy){
          for(m in 1:2){
            k = k+1
            newHist[[i]][[j]][[k]] =
              oldHist[[id[i]]][[j]][[l]]
          }
        }
      }
    }
    simParam$addToRec(newHist)
  }
  return(newPop(rawPop=rPop,
                mother=mother,
                father=father,
                origM=origM,
                origF=origF,
                isDH=TRUE,
                simParam=simParam))
}
#' @title Combine genomes of individuals
#'
#' @description
#' This function is designed to model the pairing of gametes. The male
#' and female individuals are treated as gametes, so the ploidy of newly
#' created individuals will be the sum of its parents.
#'
#' @param females an object of \code{\link{Pop-class}} for female parents.
#' @param males an object of \code{\link{Pop-class}} for male parents.
#' @param crossPlan a matrix with two columns representing
#' female and male parents. Either integers for the position in
#' population or character strings for the IDs.
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Cross individual 1 with individual 10
#' crossPlan = matrix(c(1,10), nrow=1, ncol=2)
#' pop2 = mergeGenome(pop, pop, crossPlan, simParam=SP)
#'
#' @export
mergeGenome = function(females,males,crossPlan,simParam=NULL){
  if(is.null(simParam)){
    # fall back to the global simulation parameters object
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.character(crossPlan)){ #Match by ID
    # translate character IDs into row positions within each population
    crossPlan = cbind(match(crossPlan[,1],females@id),
                      match(crossPlan[,2],males@id))
    if(any(is.na(crossPlan))){
      stop("Failed to match supplied IDs")
    }
  }
  # positions must index existing individuals in both populations
  if((max(crossPlan[,1])>nInd(females)) |
     (max(crossPlan[,2])>nInd(males)) |
     (min(crossPlan)<1L)){
    stop("Invalid crossPlan")
  }
  mother = as.integer(females@id[crossPlan[,1]])
  father = as.integer(males@id[crossPlan[,2]])
  # Merge genotype data: one raw array per chromosome, sized for the
  # combined ploidy, filled cross by cross
  geno = vector("list", females@nChr)
  for(i in 1:females@nChr){
    geno[[i]] = array(as.raw(0),
                      dim = c(dim(females@geno[[i]])[1],
                              females@ploidy+males@ploidy,
                              nrow(crossPlan)))
    for(j in 1:nrow(crossPlan)){
      # Add female gamete (first females@ploidy haplotype columns)
      geno[[i]][,1:females@ploidy,j] =
        females@geno[[i]][,,crossPlan[j,1]]
      # Add male gamete (remaining haplotype columns)
      geno[[i]][,(females@ploidy+1):(females@ploidy+males@ploidy),j] =
        males@geno[[i]][,,crossPlan[j,2]]
    }
  }
  rPop = new("RawPop",
             nInd=as.integer(nrow(crossPlan)),
             nChr=females@nChr,
             ploidy=females@ploidy+males@ploidy,
             nLoci=females@nLoci,
             geno=as.matrix(geno))
  if(simParam$isTrackRec){
    # Duplicate recombination histories: female haplotype histories
    # first, then male, matching the column order used for geno above
    oldHist = simParam$recHist
    newHist = vector("list", females@ploidy+males@ploidy)
    newHist = rep(list(newHist), females@nChr)
    newHist = rep(list(newHist), nrow(crossPlan))
    for(i in 1:nrow(crossPlan)){
      for(j in 1:females@nChr){
        k = 0
        for(l in 1:females@ploidy){
          k = k+1
          newHist[[i]][[j]][[k]] =
            oldHist[[mother[i]]][[j]][[l]]
        }
        for(l in 1:males@ploidy){
          k = k+1
          newHist[[i]][[j]][[k]] =
            oldHist[[father[i]]][[j]][[l]]
        }
      }
    }
    simParam$addToRec(newHist)
  }
  return(newPop(rawPop=rPop,
                mother=mother,
                father=father,
                isDH=FALSE,
                simParam=simParam))
}
|
# MGMT methylation heatmaps for three genomic regions (exon 1, intron 1,
# upstream). Each section loads per-CpG methylation values, relabels the
# samples via sample_ids_new.csv, and writes a PNG + PDF heatmap.
# The exon-1 section also derives a sample ordering (ord) that the other
# two sections reuse so all heatmaps share the same row order.
# exon1
library(pheatmap)
cpg.anno <- read.csv("CpG_anno_e1.csv",header=T)
row.names(cpg.anno) <- cpg.anno$CpG
cpg.anno <- cpg.anno[,-1]
# annotation colors for the CpG assay/primer tracks
cols.rows <- list(Esteller=c(forward="#469990",reverse="#ffe119"),
                  Bady=c("Bady"="#800000"),
                  Pyromark=c(Pyromark="#4363d8"),
                  Felsberg=c(forward="#469990",reverse="#ffe119"),
                  EPIC=c(EPIC="#808000"),
                  A450k=c(A450k="#000075"))
s.anno <- read.csv('sample_ids_new.csv')
te.meth <- read.table("MGMTe1methallsamples.txt",sep="\t")
# reshape: transpose, then promote first row/column to dimnames
te.meth <- t(te.meth)
colnames(te.meth) <- te.meth[1,]
te.meth <- te.meth[-1,]
r.names <- te.meth[,1]
te.meth <- te.meth[,-1]
te.meth <- as.data.frame(apply(te.meth,2,as.numeric))
row.names(te.meth) <- r.names
# relabel samples using the ID -> Spalte1 mapping
matchi <- match(colnames(te.meth),s.anno$ID)
colnames(te.meth) <- s.anno$Spalte1[matchi]
# drop excluded samples #81-#83
te.meth <- te.meth[,!(colnames(te.meth)%in%c('#81','#82','#83'))]
#cpg.anno$Context <- gsub("[[:punct:]].","",row.names(te.meth))
#cpg.anno$Context <- gsub("[0-9]","",cpg.anno$Context)
row.names(cpg.anno) <- row.names(te.meth)
png("MGMTe1_heatmap.png")
pheatmap(t(te.meth), annotation_col = cpg.anno[,-c(4:6)], annotation_colors=cols.rows,
         cluster_cols=F,fontsize_col=10,fontsize_row=6)
dev.off()
pdf("MGMTe1_heatmap.pdf")
pheatmap(t(te.meth), annotation_col = cpg.anno[,-c(4:6)], annotation_colors=cols.rows,
         cluster_cols=F,fontsize_col=10,fontsize_row=6)
dev.off()
# sample (row) order from hierarchical clustering; reused below
te.meth <- te.meth[,sort(colnames(te.meth))]
clust <- hclust(dist(t(te.meth)))
ord <- clust$order
# intron1
library(pheatmap)
cpg.anno <- read.csv("CpG_anno_i1.csv",header=T)
row.names(cpg.anno) <- cpg.anno$CpG
cpg.anno <- cpg.anno[,-1]
cols.rows <- list(Esteller=c(forward="#469990",reverse="#ffe119"),
                  Bady=c("Bady"="#800000"),
                  Pyromark=c(Pyromark="#4363d8"),
                  Felsberg=c(forward="#469990",reverse="#ffe119"),
                  EPIC=c(EPIC="#808000"),
                  A450k=c(A450k="#000075"))
i.meth <- read.table("MGMTi1allsamples.txt",sep="\t")
i.meth <- t(i.meth)
colnames(i.meth) <- i.meth[1,]
i.meth <- i.meth[-1,]
r.names <- i.meth[,1]
i.meth <- i.meth[,-1]
i.meth <- as.data.frame(apply(i.meth,2,as.numeric))
row.names(i.meth) <- r.names
matchi <- match(colnames(i.meth),s.anno$ID)
colnames(i.meth) <- s.anno$Spalte1[matchi]
i.meth <- i.meth[,!(colnames(i.meth)%in%c('#81','#82','#83'))]
#cpg.anno$Context <- gsub("[[:punct:]].","",row.names(te.meth))
#cpg.anno$Context <- gsub("[0-9]","",cpg.anno$Context)
row.names(cpg.anno) <- row.names(i.meth)
# reuse the sample order (ord) computed from the exon-1 section
i.meth <- i.meth[,sort(colnames(i.meth))]
i.meth <- i.meth[,ord]
png("MGMTi1_heatmap.png")
pheatmap(t(i.meth), annotation_col = cpg.anno[,-3], annotation_colors=cols.rows,
         cluster_cols=F,cluster_rows=F,fontsize_col=10,fontsize_row=6)
dev.off()
pdf("MGMTi1_heatmap.pdf")
pheatmap(t(i.meth), annotation_col = cpg.anno[,-3], annotation_colors=cols.rows,
         cluster_cols=F,cluster_rows=F,fontsize_col=10,fontsize_row=6)
dev.off()
# upmeth
library(pheatmap)
cpg.anno <- read.csv("CpG_anno_up.csv",header=T)
row.names(cpg.anno) <- cpg.anno$CpG
cpg.anno <- cpg.anno[,-1]
cols.rows <- list(Esteller=c(forward="#469990",reverse="#ffe119"),
                  Bady=c("Bady"="#800000"),
                  Pyromark=c(Pyromark="#4363d8"),
                  Felsberg=c(forward="#469990",reverse="#ffe119"),
                  EPIC=c(EPIC="#808000"),
                  A450k=c(A450k="#000075"))
up.meth <- read.table("MGMTupmethallsamplesneu.txt",sep="\t")
up.meth <- t(up.meth)
colnames(up.meth) <- up.meth[1,]
up.meth <- up.meth[-1,]
r.names <- up.meth[,1]
up.meth <- up.meth[,-1]
up.meth <- as.data.frame(apply(up.meth,2,as.numeric))
row.names(up.meth) <- r.names
matchi <- match(colnames(up.meth),s.anno$ID)
colnames(up.meth) <- s.anno$Spalte1[matchi]
up.meth <- up.meth[,!(colnames(up.meth)%in%c('#81','#82','#83'))]
#cpg.anno$Context <- gsub("[[:punct:]].","",row.names(te.meth))
#cpg.anno$Context <- gsub("[0-9]","",cpg.anno$Context)
row.names(cpg.anno) <- row.names(up.meth)
# reuse the sample order (ord) computed from the exon-1 section
up.meth <- up.meth[,sort(colnames(up.meth))]
up.meth <- up.meth[,ord]
png("MGMTup_heatmap.png")
pheatmap(t(up.meth), annotation_col = cpg.anno, annotation_colors=cols.rows,
         cluster_cols=F,cluster_rows=F,fontsize_col=10,fontsize_row=5)
dev.off()
pdf("MGMTup_heatmap.pdf")
pheatmap(t(up.meth), annotation_col = cpg.anno, annotation_colors=cols.rows,
         cluster_cols=F,cluster_rows=F,fontsize_col=10,fontsize_row=5)
dev.off()
| /heatmaps/create_heatmaps.R | no_license | schmic05/MGMT_methylation | R | false | false | 4,191 | r | # exon1
# MGMT methylation heatmaps for three genomic regions (exon 1, intron 1,
# upstream). Each section loads per-CpG methylation values, relabels the
# samples via sample_ids_new.csv, and writes a PNG + PDF heatmap.
# The exon-1 section also derives a sample ordering (ord) that the other
# two sections reuse so all heatmaps share the same row order.
library(pheatmap)
cpg.anno <- read.csv("CpG_anno_e1.csv",header=T)
row.names(cpg.anno) <- cpg.anno$CpG
cpg.anno <- cpg.anno[,-1]
# annotation colors for the CpG assay/primer tracks
cols.rows <- list(Esteller=c(forward="#469990",reverse="#ffe119"),
                  Bady=c("Bady"="#800000"),
                  Pyromark=c(Pyromark="#4363d8"),
                  Felsberg=c(forward="#469990",reverse="#ffe119"),
                  EPIC=c(EPIC="#808000"),
                  A450k=c(A450k="#000075"))
s.anno <- read.csv('sample_ids_new.csv')
te.meth <- read.table("MGMTe1methallsamples.txt",sep="\t")
# reshape: transpose, then promote first row/column to dimnames
te.meth <- t(te.meth)
colnames(te.meth) <- te.meth[1,]
te.meth <- te.meth[-1,]
r.names <- te.meth[,1]
te.meth <- te.meth[,-1]
te.meth <- as.data.frame(apply(te.meth,2,as.numeric))
row.names(te.meth) <- r.names
# relabel samples using the ID -> Spalte1 mapping
matchi <- match(colnames(te.meth),s.anno$ID)
colnames(te.meth) <- s.anno$Spalte1[matchi]
# drop excluded samples #81-#83
te.meth <- te.meth[,!(colnames(te.meth)%in%c('#81','#82','#83'))]
#cpg.anno$Context <- gsub("[[:punct:]].","",row.names(te.meth))
#cpg.anno$Context <- gsub("[0-9]","",cpg.anno$Context)
row.names(cpg.anno) <- row.names(te.meth)
png("MGMTe1_heatmap.png")
pheatmap(t(te.meth), annotation_col = cpg.anno[,-c(4:6)], annotation_colors=cols.rows,
         cluster_cols=F,fontsize_col=10,fontsize_row=6)
dev.off()
pdf("MGMTe1_heatmap.pdf")
pheatmap(t(te.meth), annotation_col = cpg.anno[,-c(4:6)], annotation_colors=cols.rows,
         cluster_cols=F,fontsize_col=10,fontsize_row=6)
dev.off()
# sample (row) order from hierarchical clustering; reused below
te.meth <- te.meth[,sort(colnames(te.meth))]
clust <- hclust(dist(t(te.meth)))
ord <- clust$order
# intron1
library(pheatmap)
cpg.anno <- read.csv("CpG_anno_i1.csv",header=T)
row.names(cpg.anno) <- cpg.anno$CpG
cpg.anno <- cpg.anno[,-1]
cols.rows <- list(Esteller=c(forward="#469990",reverse="#ffe119"),
                  Bady=c("Bady"="#800000"),
                  Pyromark=c(Pyromark="#4363d8"),
                  Felsberg=c(forward="#469990",reverse="#ffe119"),
                  EPIC=c(EPIC="#808000"),
                  A450k=c(A450k="#000075"))
i.meth <- read.table("MGMTi1allsamples.txt",sep="\t")
i.meth <- t(i.meth)
colnames(i.meth) <- i.meth[1,]
i.meth <- i.meth[-1,]
r.names <- i.meth[,1]
i.meth <- i.meth[,-1]
i.meth <- as.data.frame(apply(i.meth,2,as.numeric))
row.names(i.meth) <- r.names
matchi <- match(colnames(i.meth),s.anno$ID)
colnames(i.meth) <- s.anno$Spalte1[matchi]
i.meth <- i.meth[,!(colnames(i.meth)%in%c('#81','#82','#83'))]
#cpg.anno$Context <- gsub("[[:punct:]].","",row.names(te.meth))
#cpg.anno$Context <- gsub("[0-9]","",cpg.anno$Context)
row.names(cpg.anno) <- row.names(i.meth)
# reuse the sample order (ord) computed from the exon-1 section
i.meth <- i.meth[,sort(colnames(i.meth))]
i.meth <- i.meth[,ord]
png("MGMTi1_heatmap.png")
pheatmap(t(i.meth), annotation_col = cpg.anno[,-3], annotation_colors=cols.rows,
         cluster_cols=F,cluster_rows=F,fontsize_col=10,fontsize_row=6)
dev.off()
pdf("MGMTi1_heatmap.pdf")
pheatmap(t(i.meth), annotation_col = cpg.anno[,-3], annotation_colors=cols.rows,
         cluster_cols=F,cluster_rows=F,fontsize_col=10,fontsize_row=6)
dev.off()
# upmeth
library(pheatmap)
cpg.anno <- read.csv("CpG_anno_up.csv",header=T)
row.names(cpg.anno) <- cpg.anno$CpG
cpg.anno <- cpg.anno[,-1]
cols.rows <- list(Esteller=c(forward="#469990",reverse="#ffe119"),
                  Bady=c("Bady"="#800000"),
                  Pyromark=c(Pyromark="#4363d8"),
                  Felsberg=c(forward="#469990",reverse="#ffe119"),
                  EPIC=c(EPIC="#808000"),
                  A450k=c(A450k="#000075"))
up.meth <- read.table("MGMTupmethallsamplesneu.txt",sep="\t")
up.meth <- t(up.meth)
colnames(up.meth) <- up.meth[1,]
up.meth <- up.meth[-1,]
r.names <- up.meth[,1]
up.meth <- up.meth[,-1]
up.meth <- as.data.frame(apply(up.meth,2,as.numeric))
row.names(up.meth) <- r.names
matchi <- match(colnames(up.meth),s.anno$ID)
colnames(up.meth) <- s.anno$Spalte1[matchi]
up.meth <- up.meth[,!(colnames(up.meth)%in%c('#81','#82','#83'))]
#cpg.anno$Context <- gsub("[[:punct:]].","",row.names(te.meth))
#cpg.anno$Context <- gsub("[0-9]","",cpg.anno$Context)
row.names(cpg.anno) <- row.names(up.meth)
# reuse the sample order (ord) computed from the exon-1 section
up.meth <- up.meth[,sort(colnames(up.meth))]
up.meth <- up.meth[,ord]
png("MGMTup_heatmap.png")
pheatmap(t(up.meth), annotation_col = cpg.anno, annotation_colors=cols.rows,
         cluster_cols=F,cluster_rows=F,fontsize_col=10,fontsize_row=5)
dev.off()
pdf("MGMTup_heatmap.pdf")
pheatmap(t(up.meth), annotation_col = cpg.anno, annotation_colors=cols.rows,
         cluster_cols=F,cluster_rows=F,fontsize_col=10,fontsize_row=5)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OmicsPLS.R
\name{loadings}
\alias{loadings}
\alias{loadings.o2m}
\title{Extract the loadings from an O2PLS fit}
\usage{
loadings(x, ...)
\method{loadings}{o2m}(x, loading_name = c("Xjoint", "Yjoint", "Xorth",
"Yorth"), subset = 0, sorted = FALSE, ...)
}
\arguments{
\item{x}{Object of class \code{o2m}}
\item{...}{For consistency}
\item{loading_name}{character string. One of the following: 'Xjoint', 'Yjoint', 'Xorth' or 'Yorth'.}
\item{subset}{subset of loading vectors to be extracted.}
\item{sorted}{Logical. Should the rows of the loadings be sorted according to the
absolute magnitude of the first column?}
}
\value{
Loading matrix
}
\description{
This function extracts loading parameters from an O2PLS fit
}
\examples{
loadings(o2m(scale(-2:2),scale(-2:2*4),1,0,0))
}
| /man/loadings.Rd | no_license | BioinformaticsMaterials/OmicsPLS | R | false | true | 862 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OmicsPLS.R
\name{loadings}
\alias{loadings}
\alias{loadings.o2m}
\title{Extract the loadings from an O2PLS fit}
\usage{
loadings(x, ...)
\method{loadings}{o2m}(x, loading_name = c("Xjoint", "Yjoint", "Xorth",
"Yorth"), subset = 0, sorted = FALSE, ...)
}
\arguments{
\item{x}{Object of class \code{o2m}}
\item{...}{For consistency}
\item{loading_name}{character string. One of the following: 'Xjoint', 'Yjoint', 'Xorth' or 'Yorth'.}
\item{subset}{subset of loading vectors to be extracted.}
\item{sorted}{Logical. Should the rows of the loadings be sorted according to the
absolute magnitude of the first column?}
}
\value{
Loading matrix
}
\description{
This function extracts loading parameters from an O2PLS fit
}
\examples{
loadings(o2m(scale(-2:2),scale(-2:2*4),1,0,0))
}
|
## Makespan comparison boxplots (Propose vs QL-HEFT vs HEFT) for task
## counts 20/50/100/200. All three algorithms' makespans are normalized
## by the QL-HEFT median at the same task count, so QL-HEFT centers at 1.
## Output: one EPS and one PNG figure.
## read data file
d1 <- read.table("./QLHEFT/20.txt")
d1_nume <- as.numeric(d1$V1)
d1_median <- median(d1_nume)
d1 <- d1 / d1_median # normalize by the QL-HEFT 20-task median
d2 <- read.table("./QLHEFT/50.txt")
d2_nume <- as.numeric(d2$V1)
d2_median <- median(d2_nume)
d2 <- d2 / d2_median # normalize by the QL-HEFT 50-task median
d3 <- read.table("./QLHEFT/100.txt")
d3_nume <- as.numeric(d3$V1)
d3_median <- median(d3_nume)
d3 <- d3 / d3_median # normalize by the QL-HEFT 100-task median
d4 <- read.table("./QLHEFT/200.txt")
d4_nume <- as.numeric(d4$V1)
d4_median <- median(d4_nume)
d4 <- d4 / d4_median # normalize by the QL-HEFT 200-task median
qlheft <- cbind(d1, d2, d3, d4) # bind data
## read data file (Propose results, normalized by the QL-HEFT medians)
d1 <- read.table("./Propose/20.txt")
d1 <- d1 / d1_median # QL-HEFT 20-task median
d2 <- read.table("./Propose/50.txt")
d2 <- d2 / d2_median # QL-HEFT 50-task median
d3 <- read.table("./Propose/100.txt")
d3 <- d3 / d3_median # QL-HEFT 100-task median
d4 <- read.table("./Propose/200.txt")
d4 <- d4 / d4_median # QL-HEFT 200-task median
propose <- cbind(d1, d2, d3, d4) # bind data
## read data file (HEFT results, normalized by the QL-HEFT medians)
d1 <- read.table("./HEFT/20.txt")
d1 <- d1 / d1_median # QL-HEFT 20-task median
d2 <- read.table("./HEFT/50.txt")
d2 <- d2 / d2_median # QL-HEFT 50-task median
d3 <- read.table("./HEFT/100.txt")
d3 <- d3 / d3_median # QL-HEFT 100-task median
d4 <- read.table("./HEFT/200.txt")
d4 <- d4 / d4_median # QL-HEFT 200-task median
heft <- cbind(d1, d2, d3, d4) # bind data
all_data <- list(propose, qlheft, heft) # merge the three data frames into a list
## define x-axis scale name
xaxis_scale <- c("20", "50", "100", "200")
box_cols <- c("pink", "lightcyan", "palegreen1") # box colors
## border_cols <- c("red", "blue") # box-line colors
border_cols <- c("red", "blue", "palegreen4") # box-line colors
## graphic function: grouped boxplots, one group of three boxes per
## x-axis category
## NOTE(review): the x-axis categories are task counts (20/50/100/200)
## but xlab is "CCR" - confirm the label is intended.
comparison_BoxPlot <- function(all_data) {
    ## set parameters for graph
    par(
        xaxs="i", # x-axis data span has no margin
        mar = c(5,6,2,2) # margin
    )
    ## prepare graph field
    plot(
        0, 0, type = "n",
        xlab = "CCR", ylab = "Makespan", # labels
        cex.lab = 1.8, # label font size
        font.lab = 1, # label font
        xlim = range(0:(ncol(propose) * 3)), # define large x-axis
        ylim = c(0.4, max(range(propose), range(qlheft), range(heft))), # y-axis data span
        font.axis = 1, # axis font
        xaxt = "n" # no x-axis
    )
    ## draw vertical separator lines between category groups
    abline(
        v = c(3, 6, 9, 12, 15, 18, 21), # position
        lwd = 1, # line width
        col = 8, # line color
        lty = 3 # line style
    )
    ## draw boxplot (one series per algorithm, offset within each group)
    for (i in 1:3){
        boxplot(
            all_data[[i]],
            at = c(1:ncol(propose)) * 3 + i - 3.5, # position of drawing boxplot
            border = border_cols[i], # box border color
            col = box_cols[i], # colors
            xaxt = "n", # no x-axis scale
            range = 0, # no plot outliers
            add = TRUE)
    }
    ## legend
    legend(
        0.1, 0.65, # position
        legend = c("Propose", "QL-HEFT", "HEFT"), # labels
        cex = 1.3, # labels font size
        pt.cex = 3, # marker size
        pch = 22, # kinds of marker
        col = border_cols, # box-line colors
        pt.bg = box_cols, # box colors
        lty = 0,
        lwd = 2, # box-line width
        bg = "white" # background color
    )
    ## x-axis scale
    axis(
        1,
        at = 1:length(xaxis_scale) * 3 - 1.5, # position of scale
        labels = xaxis_scale, # set scale name
        cex.axis=0.73, # axis font size
        tick = TRUE
    )
}
## output file as eps
postscript("normalization_qlheft_BoxPlot.eps", horizontal = F, onefile = FALSE, paper = "special", width = 8, height = 6)
comparison_BoxPlot(all_data)
dev.off()
## output file as png
png("normalization_qlheft_BoxPlot.png", width = 600, height =400)
comparison_BoxPlot(all_data)
dev.off() | /input_tgff/result/change_tasknum/normalization_qlheft_BoxPlot.R | no_license | atsushi421/AlgorithmSimulator | R | false | false | 4,445 | r | ## read data file
# Makespan comparison boxplots (Propose vs QL-HEFT vs HEFT) for task
# counts 20/50/100/200. All three algorithms' makespans are normalized
# by the QL-HEFT median at the same task count, so QL-HEFT centers at 1.
d1 <- read.table("./QLHEFT/20.txt")
d1_nume <- as.numeric(d1$V1)
d1_median <- median(d1_nume)
d1 <- d1 / d1_median # normalize by the QL-HEFT 20-task median
d2 <- read.table("./QLHEFT/50.txt")
d2_nume <- as.numeric(d2$V1)
d2_median <- median(d2_nume)
d2 <- d2 / d2_median # normalize by the QL-HEFT 50-task median
d3 <- read.table("./QLHEFT/100.txt")
d3_nume <- as.numeric(d3$V1)
d3_median <- median(d3_nume)
d3 <- d3 / d3_median # normalize by the QL-HEFT 100-task median
d4 <- read.table("./QLHEFT/200.txt")
d4_nume <- as.numeric(d4$V1)
d4_median <- median(d4_nume)
d4 <- d4 / d4_median # normalize by the QL-HEFT 200-task median
qlheft <- cbind(d1, d2, d3, d4) # bind data
## read data file (Propose results, normalized by the QL-HEFT medians)
d1 <- read.table("./Propose/20.txt")
d1 <- d1 / d1_median # QL-HEFT 20-task median
d2 <- read.table("./Propose/50.txt")
d2 <- d2 / d2_median # QL-HEFT 50-task median
d3 <- read.table("./Propose/100.txt")
d3 <- d3 / d3_median # QL-HEFT 100-task median
d4 <- read.table("./Propose/200.txt")
d4 <- d4 / d4_median # QL-HEFT 200-task median
propose <- cbind(d1, d2, d3, d4) # bind data
## read data file (HEFT results, normalized by the QL-HEFT medians)
d1 <- read.table("./HEFT/20.txt")
d1 <- d1 / d1_median # QL-HEFT 20-task median
d2 <- read.table("./HEFT/50.txt")
d2 <- d2 / d2_median # QL-HEFT 50-task median
d3 <- read.table("./HEFT/100.txt")
d3 <- d3 / d3_median # QL-HEFT 100-task median
d4 <- read.table("./HEFT/200.txt")
d4 <- d4 / d4_median # QL-HEFT 200-task median
heft <- cbind(d1, d2, d3, d4) # bind data
all_data <- list(propose, qlheft, heft) # merge the three data frames into a list
## define x-axis scale name
xaxis_scale <- c("20", "50", "100", "200")
box_cols <- c("pink", "lightcyan", "palegreen1") # box colors
## border_cols <- c("red", "blue") # box-line colors
border_cols <- c("red", "blue", "palegreen4") # box-line colors
## graphic function: grouped boxplots, one group of three boxes per
## x-axis category
## NOTE(review): the x-axis categories are task counts (20/50/100/200)
## but xlab is "CCR" - confirm the label is intended.
comparison_BoxPlot <- function(all_data) {
    ## set parameters for graph
    par(
        xaxs="i", # x-axis data span has no margin
        mar = c(5,6,2,2) # margin
    )
    ## prepare graph field
    plot(
        0, 0, type = "n",
        xlab = "CCR", ylab = "Makespan", # labels
        cex.lab = 1.8, # label font size
        font.lab = 1, # label font
        xlim = range(0:(ncol(propose) * 3)), # define large x-axis
        ylim = c(0.4, max(range(propose), range(qlheft), range(heft))), # y-axis data span
        font.axis = 1, # axis font
        xaxt = "n" # no x-axis
    )
    ## draw vertical separator lines between category groups
    abline(
        v = c(3, 6, 9, 12, 15, 18, 21), # position
        lwd = 1, # line width
        col = 8, # line color
        lty = 3 # line style
    )
    ## draw boxplot (one series per algorithm, offset within each group)
    for (i in 1:3){
        boxplot(
            all_data[[i]],
            at = c(1:ncol(propose)) * 3 + i - 3.5, # position of drawing boxplot
            border = border_cols[i], # box border color
            col = box_cols[i], # colors
            xaxt = "n", # no x-axis scale
            range = 0, # no plot outliers
            add = TRUE)
    }
    ## legend
    legend(
        0.1, 0.65, # position
        legend = c("Propose", "QL-HEFT", "HEFT"), # labels
        cex = 1.3, # labels font size
        pt.cex = 3, # marker size
        pch = 22, # kinds of marker
        col = border_cols, # box-line colors
        pt.bg = box_cols, # box colors
        lty = 0,
        lwd = 2, # box-line width
        bg = "white" # background color
    )
    ## x-axis scale
    axis(
        1,
        at = 1:length(xaxis_scale) * 3 - 1.5, # position of scale
        labels = xaxis_scale, # set scale name
        cex.axis=0.73, # axis font size
        tick = TRUE
    )
}
## output file as eps
postscript("normalization_qlheft_BoxPlot.eps", horizontal = F, onefile = FALSE, paper = "special", width = 8, height = 6)
comparison_BoxPlot(all_data)
dev.off()
## output file as png
png("normalization_qlheft_BoxPlot.png", width = 600, height =400)
comparison_BoxPlot(all_data)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simMST.R
\name{sim_mst}
\alias{sim_mst}
\title{Simulate multistage testing data}
\usage{
sim_mst(pars, theta, test_design, routing_rules, routing = c("last", "all"))
}
\arguments{
\item{pars}{item parameters, can be either:
a data.frame with columns item_id, item_score, beta or a dexter or dexterMST parameters object}
\item{theta}{vector of person abilities}
\item{test_design}{data.frame with columns item_id, module_id, item_position}
\item{routing_rules}{output of \code{\link{mst_rules}}}
\item{routing}{'all' or 'last' routing}
}
\description{
Simulates data from an extended nominal response model according to an mst design
}
| /fuzzedpackages/dexterMST/man/sim_mst.Rd | no_license | akhikolla/testpackages | R | false | true | 718 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simMST.R
\name{sim_mst}
\alias{sim_mst}
\title{Simulate multistage testing data}
\usage{
sim_mst(pars, theta, test_design, routing_rules, routing = c("last", "all"))
}
\arguments{
\item{pars}{item parameters, can be either:
a data.frame with columns item_id, item_score, beta or a dexter or dexterMST parameters object}
\item{theta}{vector of person abilities}
\item{test_design}{data.frame with columns item_id, module_id, item_position}
\item{routing_rules}{output of \code{\link{mst_rules}}}
\item{routing}{'all' or 'last' routing}
}
\description{
Simulates data from an extended nominal response model according to an mst design
}
|
# Load adjusted-close price histories (Yahoo Finance CSV exports) for a
# set of Indian bank stocks plus the NIFTY index. Missing prices appear
# as the literal string "null" in these files.
can = read.csv("CANBK.NS.csv")
BB = read.csv("BANKBARODA.csv")
axis = read.csv("AXISBANK.NS.csv")
hdfc = read.csv("HDFCBANK.NS.csv")
boi= read.csv("BANKINDIA.NS.csv")
#bandhan = read.csv("BANDHANBNK.NS.csv")
indus= read.csv("INDUSINDBK.NS.csv")
pnb= read.csv("PNB.NS.csv")
idbi = read.csv("IDBI.NS.csv")
cen = read.csv("CENTRALBK.NS.csv")
yes= read.csv("YESBANK.NS.csv")
icici=read.csv("ICICIBANK.NS.csv")
kotak = read.csv("KOTAKBANK.NS.csv")
#rbl = read.csv("RBLBANK.NS.csv")
sbi = read.csv("SBIN.NS.csv")
nifty = read.csv("NSEI.csv")
#niftyup=read.csv("NSEIup.csv")
# risk-free rate used by SHARPE() below; presumably 3 percent annualised
# to match the percentage-scaled returns - TODO confirm units
rfr <- 3
# rows whose Adj.Close is the string "null" (missing data)
dropped_n<-grep("null",nifty$Adj.Close)
#dropped_nup<-grep("null",niftyup$Adj.Close)
#niftyup<-niftyup[-dropped_nup,]
# NOTE(review): if grep() matches nothing, dropped_n is integer(0) and
# nifty[-integer(0),] drops EVERY row; guard with length(dropped_n) > 0
# before subsetting (see clean_null()).
nifty<-nifty[-dropped_n,]
clean_null <- function(x,dropped_n)
{
x<-x[-dropped_n,]
}
bank<- function(bank_name)
{
dropped=grep("null",bank_name$Adj.Close)
bank_name=bank_name[-dropped,]
returns = rep(0,nrow(bank_name))
for ( i in 1:nrow(bank_name)){
returns[i] <- ((as.numeric(paste(bank_name$Adj.Close[i+1]))/(as.numeric(paste(bank_name$Adj.Close[i]))))-1)
}
average= mean(as.double(returns),na.rm=TRUE)*250*100
volu=var(returns[1:nrow(bank_name)],na.rm=TRUE)*250*100
risk= sqrt(volu)
vect<-c(average,volu,risk)
return(vect)
}
RETURNS<-function(bank_name)
{
returns<-rep(0,nrow(bank_name))
for(i in 1:nrow(bank_name)){
returns[i] <- ((as.numeric(paste(bank_name$Adj.Close[i+1]))/(as.numeric(paste(bank_name$Adj.Close[i]))))-1)
}
complete<-complete.cases(returns)
return(returns[complete])
}
SHARPE <- function(ret,rfr,sd)
{
sharpe_ratio = rep(0,13)
sharpe_ratio = ((ret - rfr)/sd)
return(c(sharpe_ratio))
}
bank_names = list(can , BB, axis , hdfc, boi , indus , pnb , idbi, cen, yes, icici, kotak,sbi)
store = rep(0,length(bank_names))
data=data.frame()
for (j in bank_names)
{
store<-bank(j)
data<-rbind(data,store)
}
colnames(data)= c('Expected_Return', 'Volatility','Risk','Sharpe','Ratio_With_Nifty','BETA')
rownames(data)=c('Canara Bank','Bank Of Baroda','Axis','HDFC','Bank Of India','Indus','Punjab National Bank','IDBI','Central Bank OF India','Yes Bank','ICICI','Kotak Bank','SBI')
sharpe=SHARPE(data$Expected_Return,rfr,data$Risk)
data <- cbind(data,sharpe)
ratio = data$Expected_Return/bank(nifty)[1]
data <- cbind(data,ratio)
cov_matrix<-matrix(nrow=13,ncol=13)
for (i in seq_len(nrow(cov_matrix))){
for (j in seq_len(ncol(cov_matrix))){
cov_matrix[i,j]<- cov(RETURNS(bank_names[[i]]),RETURNS(bank_names[[j]]))*250
}
}
cor<-cov2cor(cov_matrix)
weights<-rep(1/13,13)
weights_mat <- matrix(weights,nrow=1,ncol=13)
return_port <- weights_mat %*% as.matrix(data$Expected_Return)
sd_port<- sqrt((weights_mat %*% matrix)%*%t(weights_mat))*100
sharpe_ratio<- (return_port- rfr)/(sd_port)
calc_beta <- function(bank_name)
{
beta=rep(0,13)
print(length(RETURNS(bank_name)))
beta = cov(RETURNS(bank_name),RETURNS(nifty))/ var(RETURNS(nifty))
return(c(beta))
}
#bank_names = list(can , BB, axis , hdfc, boi , indus , pnb , idbi, cen, yes, icici, kotak,sbi)
can<-clean_null(can,dropped_n)
BB<-clean_null(BB,dropped_n)
axis<-clean_null(axis,dropped_n)
hdfc<-clean_null(hdfc,dropped_n)
boi<-clean_null(boi,dropped_n)
indus<-clean_null(indus,dropped_n)
pnb<-clean_null(pnb,dropped_n)
idbi<-clean_null(idbi,dropped_n)
cen<-clean_null(cen,dropped_n)
yes<-clean_null(yes,dropped_n)
icici<-clean_null(icici,dropped_n)
kotak<-clean_null(kotak,dropped_n)
sbi<-clean_null(sbi,dropped_n)
for (k in 1:13)
{
beta[k]=calc_beta(bank_names[k])
}
beta[1]=calc_beta(can)
beta[2]=calc_beta(BB)
beta[3]=calc_beta(axis)
beta[4]=calc_beta(hdfc)
beta[5]=calc_beta(boi)
beta[6]=calc_beta(indus)
beta[7]=calc_beta(pnb)
beta[8]=calc_beta(idbi)
beta[9]=calc_beta(cen)
beta[10]=calc_beta(yes)
beta[11]=calc_beta(icici)
beta[12]=calc_beta(kotak)
beta[13]=calc_beta(sbi)
data<- cbind(data,beta)
# Method 2
# ---------------------
nifty= nifty[1:length(can$Date),]
can$Date <- ymd(can$Date)
nifty$Date <- as.Date(nifty$Date,"%d-%m-%Y")
rangehdfc <- hdfc$Date == nifty$Date
can$Adj.Close <-can$Adj.Close[range]
nifty$Adj.Close <- nifty$Adj.Close[range]
fit<-lm(RETURNS(can) ~ RETURNS(nifty))
result <- summary(fit)
betalm <- result$coefficients[2,1]
#--------------------------
store_ret <- rep(0,13)
portfolioReturns <- data.frame()
for(k in bank_names){
store_ret <- RETURNS(k)
portfolioReturns <- rbind(portfolioReturns,store_ret)
#portfolioReturns<-t(portfolioReturns)
}
portfolioReturns<- t(portfolioReturns)
portfolioReturns <- as.timeSeries(portfolioReturns)
ef <- portfolioFrontier(portfolioReturns,constraints = "LongOnly")
plot(ef,1) | /fintech_model (1).R | no_license | aditya9729/Nifty_Indian_banks | R | false | false | 4,780 | r | can = read.csv("CANBK.NS.csv")
BB = read.csv("BANKBARODA.csv")
axis = read.csv("AXISBANK.NS.csv")
hdfc = read.csv("HDFCBANK.NS.csv")
boi= read.csv("BANKINDIA.NS.csv")
#bandhan = read.csv("BANDHANBNK.NS.csv")
indus= read.csv("INDUSINDBK.NS.csv")
pnb= read.csv("PNB.NS.csv")
idbi = read.csv("IDBI.NS.csv")
cen = read.csv("CENTRALBK.NS.csv")
yes= read.csv("YESBANK.NS.csv")
icici=read.csv("ICICIBANK.NS.csv")
kotak = read.csv("KOTAKBANK.NS.csv")
#rbl = read.csv("RBLBANK.NS.csv")
sbi = read.csv("SBIN.NS.csv")
nifty = read.csv("NSEI.csv")
#niftyup=read.csv("NSEIup.csv")
rfr <- 3
dropped_n<-grep("null",nifty$Adj.Close)
#dropped_nup<-grep("null",niftyup$Adj.Close)
#niftyup<-niftyup[-dropped_nup,]
nifty<-nifty[-dropped_n,]
clean_null <- function(x,dropped_n)
{
x<-x[-dropped_n,]
}
bank<- function(bank_name)
{
dropped=grep("null",bank_name$Adj.Close)
bank_name=bank_name[-dropped,]
returns = rep(0,nrow(bank_name))
for ( i in 1:nrow(bank_name)){
returns[i] <- ((as.numeric(paste(bank_name$Adj.Close[i+1]))/(as.numeric(paste(bank_name$Adj.Close[i]))))-1)
}
average= mean(as.double(returns),na.rm=TRUE)*250*100
volu=var(returns[1:nrow(bank_name)],na.rm=TRUE)*250*100
risk= sqrt(volu)
vect<-c(average,volu,risk)
return(vect)
}
RETURNS<-function(bank_name)
{
returns<-rep(0,nrow(bank_name))
for(i in 1:nrow(bank_name)){
returns[i] <- ((as.numeric(paste(bank_name$Adj.Close[i+1]))/(as.numeric(paste(bank_name$Adj.Close[i]))))-1)
}
complete<-complete.cases(returns)
return(returns[complete])
}
SHARPE <- function(ret,rfr,sd)
{
sharpe_ratio = rep(0,13)
sharpe_ratio = ((ret - rfr)/sd)
return(c(sharpe_ratio))
}
bank_names = list(can , BB, axis , hdfc, boi , indus , pnb , idbi, cen, yes, icici, kotak,sbi)
store = rep(0,length(bank_names))
data=data.frame()
for (j in bank_names)
{
store<-bank(j)
data<-rbind(data,store)
}
colnames(data)= c('Expected_Return', 'Volatility','Risk','Sharpe','Ratio_With_Nifty','BETA')
rownames(data)=c('Canara Bank','Bank Of Baroda','Axis','HDFC','Bank Of India','Indus','Punjab National Bank','IDBI','Central Bank OF India','Yes Bank','ICICI','Kotak Bank','SBI')
sharpe=SHARPE(data$Expected_Return,rfr,data$Risk)
data <- cbind(data,sharpe)
ratio = data$Expected_Return/bank(nifty)[1]
data <- cbind(data,ratio)
cov_matrix<-matrix(nrow=13,ncol=13)
for (i in seq_len(nrow(cov_matrix))){
for (j in seq_len(ncol(cov_matrix))){
cov_matrix[i,j]<- cov(RETURNS(bank_names[[i]]),RETURNS(bank_names[[j]]))*250
}
}
cor<-cov2cor(cov_matrix)
weights<-rep(1/13,13)
weights_mat <- matrix(weights,nrow=1,ncol=13)
return_port <- weights_mat %*% as.matrix(data$Expected_Return)
sd_port<- sqrt((weights_mat %*% matrix)%*%t(weights_mat))*100
sharpe_ratio<- (return_port- rfr)/(sd_port)
calc_beta <- function(bank_name)
{
beta=rep(0,13)
print(length(RETURNS(bank_name)))
beta = cov(RETURNS(bank_name),RETURNS(nifty))/ var(RETURNS(nifty))
return(c(beta))
}
#bank_names = list(can , BB, axis , hdfc, boi , indus , pnb , idbi, cen, yes, icici, kotak,sbi)
can<-clean_null(can,dropped_n)
BB<-clean_null(BB,dropped_n)
axis<-clean_null(axis,dropped_n)
hdfc<-clean_null(hdfc,dropped_n)
boi<-clean_null(boi,dropped_n)
indus<-clean_null(indus,dropped_n)
pnb<-clean_null(pnb,dropped_n)
idbi<-clean_null(idbi,dropped_n)
cen<-clean_null(cen,dropped_n)
yes<-clean_null(yes,dropped_n)
icici<-clean_null(icici,dropped_n)
kotak<-clean_null(kotak,dropped_n)
sbi<-clean_null(sbi,dropped_n)
for (k in 1:13)
{
beta[k]=calc_beta(bank_names[k])
}
beta[1]=calc_beta(can)
beta[2]=calc_beta(BB)
beta[3]=calc_beta(axis)
beta[4]=calc_beta(hdfc)
beta[5]=calc_beta(boi)
beta[6]=calc_beta(indus)
beta[7]=calc_beta(pnb)
beta[8]=calc_beta(idbi)
beta[9]=calc_beta(cen)
beta[10]=calc_beta(yes)
beta[11]=calc_beta(icici)
beta[12]=calc_beta(kotak)
beta[13]=calc_beta(sbi)
data<- cbind(data,beta)
# Method 2
# ---------------------
nifty= nifty[1:length(can$Date),]
can$Date <- ymd(can$Date)
nifty$Date <- as.Date(nifty$Date,"%d-%m-%Y")
rangehdfc <- hdfc$Date == nifty$Date
can$Adj.Close <-can$Adj.Close[range]
nifty$Adj.Close <- nifty$Adj.Close[range]
fit<-lm(RETURNS(can) ~ RETURNS(nifty))
result <- summary(fit)
betalm <- result$coefficients[2,1]
#--------------------------
store_ret <- rep(0,13)
portfolioReturns <- data.frame()
for(k in bank_names){
store_ret <- RETURNS(k)
portfolioReturns <- rbind(portfolioReturns,store_ret)
#portfolioReturns<-t(portfolioReturns)
}
portfolioReturns<- t(portfolioReturns)
portfolioReturns <- as.timeSeries(portfolioReturns)
ef <- portfolioFrontier(portfolioReturns,constraints = "LongOnly")
plot(ef,1) |
# Prepare dummy data
dummy <- matrix(data = seq_len(16), nrow = 4, ncol = 4)
rownames(dummy) <- paste0("row_", seq_len(nrow(dummy)))
colnames(dummy) <- paste0("col_", seq_len(ncol(dummy)))
createLinkedMatrix <- function(class, nNodes) {
linkedBy <- ifelse(class == "ColumnLinkedMatrix", "columns", "rows")
linkedMatrix <- LinkedMatrix(nrow = nrow(dummy), ncol = ncol(dummy), nNodes = nNodes, linkedBy = linkedBy, nodeInitializer = "matrixNodeInitializer")
rownames(linkedMatrix) <- paste0("row_", seq_len(nrow(dummy)))
colnames(linkedMatrix) <- paste0("col_", seq_len(ncol(dummy)))
linkedMatrix[] <- dummy
return(linkedMatrix)
}
for (class in c("ColumnLinkedMatrix", "RowLinkedMatrix")) {
context(class)
linkedBy <- ifelse(class == "ColumnLinkedMatrix", "columns", "rows")
test_that("LinkedMatrix creation", {
for (nNodes in c(1, 2)) {
linkedMatrix <- LinkedMatrix(nrow = nrow(dummy), ncol = ncol(dummy), nNodes = nNodes, linkedBy = linkedBy, nodeInitializer = "matrixNodeInitializer")
expect_equal(nNodes(linkedMatrix), nNodes)
expect_is(linkedMatrix[[1]], "matrix")
if (requireNamespace("ff", quietly = TRUE)) {
linkedMatrix <- LinkedMatrix(nrow = nrow(dummy), ncol = ncol(dummy), nNodes = nNodes, linkedBy = linkedBy, nodeInitializer = "ffNodeInitializer", vmode = "integer")
expect_equal(nNodes(linkedMatrix), nNodes)
expect_is(linkedMatrix[[1]], "ff_matrix")
}
}
})
test_that(paste(class, "creation"), {
expect_error(new(class, c(1, 2, 3)), "*arguments need to be matrix-like*")
# No input
linkedMatrix <- new(class)
expect_equal(nNodes(linkedMatrix), 1)
expect_true(is.na(linkedMatrix[1, 1]))
# Single matrix input
linkedMatrix <- new(class, matrix(data = 0, nrow = 1, ncol = 1))
expect_equal(nNodes(linkedMatrix), 1)
expect_equal(dim(linkedMatrix), c(1, 1))
# Single LinkedMatrix input
linkedMatrix <- new(class, createLinkedMatrix(class, 2))
expect_equal(nNodes(linkedMatrix), 1)
expect_equal(dim(linkedMatrix), dim(dummy))
# Multiple matrix inputs of same order
linkedMatrix <- new(class, matrix(data = 0, nrow = 1, ncol = 1), matrix(data = 0, nrow = 1, ncol = 1))
expect_equal(nNodes(linkedMatrix), 2)
if (class == "ColumnLinkedMatrix") {
expect_equal(dim(linkedMatrix), c(1, 2))
} else {
expect_equal(dim(linkedMatrix), c(2, 1))
}
# Multiple LinkedMatrix inputs of same order
linkedMatrix <- new(class, createLinkedMatrix(class, 2), createLinkedMatrix(class, 2))
expect_equal(nNodes(linkedMatrix), 2)
if (class == "ColumnLinkedMatrix") {
expect_equal(dim(linkedMatrix), c(nrow(dummy), ncol(dummy) * 2))
} else {
expect_equal(dim(linkedMatrix), c(ncol(dummy) * 2, nrow(dummy)))
}
# Multiple conformable matrix inputs of different order
if (class == "ColumnLinkedMatrix") {
args <- list(matrix(data = 0, nrow = 1, ncol = 3), matrix(data = 0, nrow = 1, ncol = 5))
dims <- c(1, 8)
} else {
args <- list(matrix(data = 0, nrow = 3, ncol = 1), matrix(data = 0, nrow = 5, ncol = 1))
dims <- c(8, 1)
}
linkedMatrix <- do.call(class, args)
expect_equal(nNodes(linkedMatrix), 2)
expect_equal(dim(linkedMatrix), dims)
# Multiple unconformable matrix inputs
if (class == "ColumnLinkedMatrix") {
args <- list(matrix(data = 0, nrow = 3, ncol = 1), matrix(data = 0, nrow = 5, ncol = 1))
} else {
args <- list(matrix(data = 0, nrow = 1, ncol = 3), matrix(data = 0, nrow = 1, ncol = 5))
}
expect_error(do.call(class, args), "*arguments need the same number of*")
# Warning if dimnames do not match
dimnamesMismatches <- list(
list(regexp = NA, dimnames = list(NULL, NULL, NULL)),
list(regexp = NA, dimnames = list(letters[1:3], NULL, NULL)),
list(regexp = NULL, dimnames = list(letters[1:3], letters[4:6], NULL))
)
for (dimnamesMismatch in dimnamesMismatches) {
if (class == "ColumnLinkedMatrix") {
args <- list(
matrix(data = 0, nrow = 3, ncol = 1, dimnames = list(dimnamesMismatch$dimnames[[1]], NULL)),
matrix(data = 0, nrow = 3, ncol = 1, dimnames = list(dimnamesMismatch$dimnames[[2]], NULL)),
matrix(data = 0, nrow = 3, ncol = 1, dimnames = list(dimnamesMismatch$dimnames[[3]], NULL))
)
} else {
args <- list(
matrix(data = 0, nrow = 1, ncol = 3, dimnames = list(NULL, dimnamesMismatch$dimnames[[1]])),
matrix(data = 0, nrow = 1, ncol = 3, dimnames = list(NULL, dimnamesMismatch$dimnames[[2]])),
matrix(data = 0, nrow = 1, ncol = 3, dimnames = list(NULL, dimnamesMismatch$dimnames[[3]]))
)
}
expect_warning(do.call(class, args), regexp = dimnamesMismatch$regexp)
}
})
for (nNodes in seq_len(ifelse(class == "ColumnLinkedMatrix", ncol(dummy), nrow(dummy)))) {
context(paste0(class, " with ", nNodes, " nodes"))
# Prepare LinkedMatrix object
linkedMatrix <- createLinkedMatrix(class, nNodes)
test_that("subsetting", {
idx2 <- expand.grid(seq_len(nrow(dummy)), seq_len(ncol(dummy)))
idx4r <- expand.grid(seq_len(nrow(dummy)), seq_len(nrow(dummy)), seq_len(nrow(dummy)), seq_len(nrow(dummy)))
idx4c <- expand.grid(seq_len(ncol(dummy)), seq_len(ncol(dummy)), seq_len(ncol(dummy)), seq_len(ncol(dummy)))
expect_equal(linkedMatrix[], dummy)
for (i in seq_len(nrow(dummy))) {
expect_equal(linkedMatrix[i, ], dummy[i, ])
expect_equal(linkedMatrix[i, , drop = FALSE], dummy[i, , drop = FALSE])
}
for (i in seq_len(ncol(dummy))) {
expect_equal(linkedMatrix[, i], dummy[, i])
expect_equal(linkedMatrix[, i, drop = FALSE], dummy[, i, drop = FALSE])
}
for (i in seq_len(nrow(idx2))) {
expect_equal(linkedMatrix[idx2[i, 1], idx2[i, 2]], dummy[idx2[i, 1], idx2[i, 2]])
expect_equal(linkedMatrix[idx2[i, 1], idx2[i, 2], drop = FALSE], dummy[idx2[i, 1], idx2[i, 2], drop = FALSE])
}
for (i in seq_len(nrow(idx2))) {
expect_equal(linkedMatrix[idx2[i, 1]:idx2[i, 2], ], dummy[idx2[i, 1]:idx2[i, 2], ])
expect_equal(linkedMatrix[idx2[i, 1]:idx2[i, 2], , drop = FALSE], dummy[idx2[i, 1]:idx2[i, 2], , drop = FALSE])
expect_equal(linkedMatrix[, idx2[i, 1]:idx2[i, 2]], dummy[, idx2[i, 1]:idx2[i, 2]])
expect_equal(linkedMatrix[, idx2[i, 1]:idx2[i, 2], drop = FALSE], dummy[, idx2[i, 1]:idx2[i, 2], drop = FALSE])
expect_equal(linkedMatrix[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]], dummy[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]])
expect_equal(linkedMatrix[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2], drop = FALSE], dummy[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2], drop = FALSE])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), ], dummy[c(idx2[i, 1], idx2[i, 2]), ])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), , drop = FALSE], dummy[c(idx2[i, 1], idx2[i, 2]), , drop = FALSE])
expect_equal(linkedMatrix[, c(idx2[i, 1], idx2[i, 2])], dummy[, c(idx2[i, 1], idx2[i, 2])])
expect_equal(linkedMatrix[, c(idx2[i, 1], idx2[i, 2]), drop = FALSE], dummy[, c(idx2[i, 1], idx2[i, 2]), drop = FALSE])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])], dummy[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2]), drop = FALSE], dummy[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2]), drop = FALSE])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])], dummy[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2]), drop = FALSE], dummy[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2]), drop = FALSE])
}
for (i in seq_len(nrow(idx4r))) {
expect_equal(linkedMatrix[c(idx4r[i, 1], idx4r[i, 2], idx4r[i, 3], idx4r[i, 4]), ], dummy[c(idx4r[i, 1], idx4r[i, 2], idx4r[i, 3], idx4r[i, 4]), ], info = paste(idx4r[i, ], collapse = ", "))
expect_equal(linkedMatrix[c(idx4r[i, 1], idx4r[i, 2], idx4r[i, 3], idx4r[i, 4]), , drop = FALSE], dummy[c(idx4r[i, 1], idx4r[i, 2], idx4r[i, 3], idx4r[i, 4]), , drop = FALSE], info = paste(idx4r[i, ], collapse = ", "))
}
for (i in seq_len(nrow(idx4c))) {
expect_equal(linkedMatrix[, c(idx4c[i, 1], idx4c[i, 2], idx4c[i, 3], idx4c[i, 4])], dummy[, c(idx4c[i, 1], idx4c[i, 2], idx4c[i, 3], idx4c[i, 4])], info = paste(idx4r[i, ], collapse = ", "))
expect_equal(linkedMatrix[, c(idx4c[i, 1], idx4c[i, 2], idx4c[i, 3], idx4c[i, 4]), drop = FALSE], dummy[, c(idx4c[i, 1], idx4c[i, 2], idx4c[i, 3], idx4c[i, 4]), drop = FALSE], info = paste(idx4r[i, ], collapse = ", "))
}
expect_equal(linkedMatrix[c(TRUE, FALSE), ], dummy[c(TRUE, FALSE), ])
expect_equal(linkedMatrix[, c(TRUE, FALSE)], dummy[, c(TRUE, FALSE)])
expect_equal(linkedMatrix[c(TRUE, FALSE), c(TRUE, FALSE)], dummy[c(TRUE, FALSE), c(TRUE, FALSE)])
expect_equal(linkedMatrix[c(TRUE, FALSE), , drop = FALSE], dummy[c(TRUE, FALSE), , drop = FALSE])
expect_equal(linkedMatrix[, c(TRUE, FALSE), drop = FALSE], dummy[, c(TRUE, FALSE), drop = FALSE])
expect_equal(linkedMatrix[c(TRUE, FALSE), c(TRUE, FALSE), drop = FALSE], dummy[c(TRUE, FALSE), c(TRUE, FALSE), drop = FALSE])
expect_equal(linkedMatrix["row_1", ], dummy["row_1", ])
expect_equal(linkedMatrix[, "col_1"], dummy[, "col_1"])
expect_equal(linkedMatrix["row_1", "col_1"], dummy["row_1", "col_1"])
expect_equal(linkedMatrix["row_1", , drop = FALSE], dummy["row_1", , drop = FALSE])
expect_equal(linkedMatrix[, "col_1", drop = FALSE], dummy[, "col_1", drop = FALSE])
expect_equal(linkedMatrix["row_1", "col_1", drop = FALSE], dummy["row_1", "col_1", drop = FALSE])
expect_equal(linkedMatrix[c("row_1", "row_2"), ], dummy[c("row_1", "row_2"), ])
expect_equal(linkedMatrix[, c("col_1", "col_2")], dummy[, c("col_1", "col_2")])
expect_equal(linkedMatrix[c("row_1", "row_2"), c("col_1", "col_2")], dummy[c("row_1", "row_2"), c("col_1", "col_2")])
expect_equal(linkedMatrix[c("row_1", "row_2"), , drop = FALSE], dummy[c("row_1", "row_2"), , drop = FALSE])
expect_equal(linkedMatrix[, c("col_1", "col_2"), drop = FALSE], dummy[, c("col_1", "col_2"), drop = FALSE])
expect_equal(linkedMatrix[c("row_1", "row_2"), c("col_1", "col_2"), drop = FALSE], dummy[c("row_1", "row_2"), c("col_1", "col_2"), drop = FALSE])
expect_equal(linkedMatrix[c("row_2", "row_1"), ], dummy[c("row_2", "row_1"), ])
expect_equal(linkedMatrix[, c("col_2", "col_1")], dummy[, c("col_2", "col_1")])
expect_equal(linkedMatrix[c("row_2", "row_1"), c("col_2", "col_1")], dummy[c("row_2", "row_1"), c("col_2", "col_1")])
expect_equal(linkedMatrix[c("row_2", "row_1"), , drop = FALSE], dummy[c("row_2", "row_1"), , drop = FALSE])
expect_equal(linkedMatrix[, c("col_2", "col_1"), drop = FALSE], dummy[, c("col_2", "col_1"), drop = FALSE])
expect_equal(linkedMatrix[c("row_2", "row_1"), c("col_2", "col_1"), drop = FALSE], dummy[c("row_2", "row_1"), c("col_2", "col_1"), drop = FALSE])
expect_equal(linkedMatrix[c("row_3", "row_1"), ], dummy[c("row_3", "row_1"), ])
expect_equal(linkedMatrix[, c("col_3", "col_1")], dummy[, c("col_3", "col_1")])
expect_equal(linkedMatrix[c("row_3", "row_1"), c("col_3", "col_1")], dummy[c("row_3", "row_1"), c("col_3", "col_1")])
expect_equal(linkedMatrix[c("row_3", "row_1"), , drop = FALSE], dummy[c("row_3", "row_1"), , drop = FALSE])
expect_equal(linkedMatrix[, c("col_3", "col_1"), drop = FALSE], dummy[, c("col_3", "col_1"), drop = FALSE])
expect_equal(linkedMatrix[c("row_3", "row_1"), c("col_3", "col_1"), drop = FALSE], dummy[c("row_3", "row_1"), c("col_3", "col_1"), drop = FALSE])
# data frame subset
expect_equal(new(class, mtcars)[], as.matrix(mtcars))
# expect_equal(linkedMatrix[1], dummy[1]) Not implemented yet
# expect_equal(linkedMatrix[x:y], dummy[x:y]) Not implemented yet
# expect_equal(linkedMatrix[c(x, y)], dummy[c(x, y)]) Not implemented yet
# expect_equal(linkedMatrix[dummy > 1], dummy[dummy > 1]) Not implemented yet
})
test_that("replacement", {
# Generate new dummy for replacement
replacement <- matrix(data = seq_len(16) * 10, nrow = 4, ncol = 4)
rownames(replacement) <- paste0("row_", seq_len(nrow(replacement)))
colnames(replacement) <- paste0("col_", seq_len(ncol(replacement)))
comparison <- dummy
idx2 <- expand.grid(seq_len(nrow(dummy)), seq_len(ncol(dummy)))
testAndRestore <- function(info) {
expect_equal(linkedMatrix[], comparison, info = info)
linkedMatrix <- createLinkedMatrix(class, nNodes)
assign("linkedMatrix", linkedMatrix, parent.frame())
assign("comparison", dummy, parent.frame())
}
linkedMatrix[] <- replacement
comparison[] <- replacement
testAndRestore("[]")
for (i in seq_len(nrow(dummy))) {
linkedMatrix[i, ] <- replacement[i, ]
comparison[i, ] <- replacement[i, ]
testAndRestore(paste0("[", i, ", ]"))
linkedMatrix[i, ] <- NA
comparison[i, ] <- NA
testAndRestore(paste0("[", i, ", ] <- NA"))
}
for (i in seq_len(ncol(dummy))) {
linkedMatrix[, i] <- replacement[, i]
comparison[, i] <- replacement[, i]
testAndRestore(paste0("[, ", i, "]"))
linkedMatrix[, i] <- NA
comparison[, i] <- NA
testAndRestore(paste0("[, ", i, "] <- NA"))
}
for (i in seq_len(nrow(idx2))) {
linkedMatrix[idx2[i, 1], idx2[i, 2]] <- replacement[idx2[i, 1], idx2[i, 2]]
comparison[idx2[i, 1], idx2[i, 2]] <- replacement[idx2[i, 1], idx2[i, 2]]
testAndRestore(paste0("[", idx2[i, 1], ", ", idx2[i, 2], "]"))
linkedMatrix[idx2[i, 1]:idx2[i, 2], ] <- replacement[idx2[i, 1]:idx2[i, 2], ]
comparison[idx2[i, 1]:idx2[i, 2], ] <- replacement[idx2[i, 1]:idx2[i, 2], ]
testAndRestore(paste0("[", idx2[i, 1], ":", idx2[i, 2], ", ]"))
linkedMatrix[, idx2[i, 1]:idx2[i, 2]] <- replacement[, idx2[i, 1]:idx2[i, 2]]
comparison[, idx2[i, 1]:idx2[i, 2]] <- replacement[, idx2[i, 1]:idx2[i, 2]]
testAndRestore(paste0("[, ", idx2[i, 1], ":", idx2[i, 2], "]"))
linkedMatrix[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]] <- replacement[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]]
comparison[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]] <- replacement[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]]
testAndRestore(paste0("[", idx2[i, 1], ":", idx2[i, 2], ", ", idx2[i, 1], ":", idx2[i, 2], "]"))
linkedMatrix[c(idx2[i, 1], idx2[i, 2]), ] <- replacement[c(idx2[i, 1], idx2[i, 2]), ]
comparison[c(idx2[i, 1], idx2[i, 2]), ] <- replacement[c(idx2[i, 1], idx2[i, 2]), ]
testAndRestore(paste0("[c(", idx2[i, 1], ", ", idx2[i, 2], "), ]"))
linkedMatrix[, c(idx2[i, 1], idx2[i, 2])] <- replacement[, c(idx2[i, 1], idx2[i, 2])]
comparison[, c(idx2[i, 1], idx2[i, 2])] <- replacement[, c(idx2[i, 1], idx2[i, 2])]
testAndRestore(paste0("[, c(", idx2[i, 1], ", ", idx2[i, 2], ")]"))
linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])] <- replacement[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])]
comparison[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])] <- replacement[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])]
testAndRestore(paste0("[c(", idx2[i, 1], ", ", idx2[i, 2], "), c(", idx2[i, 1], ", ", idx2[i, 2], ")]"))
linkedMatrix[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]] <- NA
comparison[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]] <- NA
testAndRestore(paste0("[", idx2[i, 1], ", ", idx2[i, 2], "] <- NA"))
}
})
test_that("dim", {
expect_equal(dim(linkedMatrix), dim(dummy))
})
test_that("length", {
expect_equal(length(linkedMatrix), length(dummy))
})
test_that("nNodes", {
expect_equal(nNodes(linkedMatrix), nNodes)
})
test_that("bind", {
if (class == "RowLinkedMatrix") {
boundLinkedMatrix <- rbind(linkedMatrix, linkedMatrix)
expect_equal(dim(boundLinkedMatrix), c(nrow(dummy) * 2, ncol(dummy)))
expect_equal(nNodes(boundLinkedMatrix), nNodes * 2)
expect_error(cbind(linkedMatrix, linkedMatrix))
} else {
boundLinkedMatrix <- cbind(linkedMatrix, linkedMatrix)
expect_equal(dim(boundLinkedMatrix), c(nrow(dummy), ncol(dummy) * 2))
expect_equal(nNodes(boundLinkedMatrix), nNodes * 2)
expect_error(rbind(linkedMatrix, linkedMatrix))
}
})
}
}
| /tests/testthat/test-LinkedMatrix.R | no_license | minghao2016/LinkedMatrix | R | false | false | 18,503 | r | # Prepare dummy data
# 4x4 integer reference matrix (values 1..16, column-major) with named
# rows/columns; serves as the ground truth all LinkedMatrix tests compare against.
dummy <- matrix(seq_len(16), nrow = 4, ncol = 4)
dimnames(dummy) <- list(
    paste0("row_", seq_len(4)),
    paste0("col_", seq_len(4))
)
# Build a matrix-backed LinkedMatrix of the given class ("ColumnLinkedMatrix"
# or "RowLinkedMatrix") split over `nNodes` nodes, mirroring the dimensions,
# dimnames, and contents of the global `dummy` reference matrix.
createLinkedMatrix <- function(class, nNodes) {
    # Columns are the linked dimension for ColumnLinkedMatrix, rows otherwise.
    direction <- if (class == "ColumnLinkedMatrix") "columns" else "rows"
    out <- LinkedMatrix(nrow = nrow(dummy), ncol = ncol(dummy), nNodes = nNodes,
        linkedBy = direction, nodeInitializer = "matrixNodeInitializer")
    rownames(out) <- paste0("row_", seq_len(nrow(dummy)))
    colnames(out) <- paste0("col_", seq_len(ncol(dummy)))
    # Fill all nodes from the reference matrix in one whole-object assignment.
    out[] <- dummy
    out
}
# Run the full suite once per LinkedMatrix flavor.
for (class in c("ColumnLinkedMatrix", "RowLinkedMatrix")) {
    context(class)
    # Direction along which nodes of this class are joined.
    linkedBy <- ifelse(class == "ColumnLinkedMatrix", "columns", "rows")
test_that("LinkedMatrix creation", {
for (nNodes in c(1, 2)) {
linkedMatrix <- LinkedMatrix(nrow = nrow(dummy), ncol = ncol(dummy), nNodes = nNodes, linkedBy = linkedBy, nodeInitializer = "matrixNodeInitializer")
expect_equal(nNodes(linkedMatrix), nNodes)
expect_is(linkedMatrix[[1]], "matrix")
if (requireNamespace("ff", quietly = TRUE)) {
linkedMatrix <- LinkedMatrix(nrow = nrow(dummy), ncol = ncol(dummy), nNodes = nNodes, linkedBy = linkedBy, nodeInitializer = "ffNodeInitializer", vmode = "integer")
expect_equal(nNodes(linkedMatrix), nNodes)
expect_is(linkedMatrix[[1]], "ff_matrix")
}
}
})
    test_that(paste(class, "creation"), {
        # Non-matrix-like input is rejected outright.
        expect_error(new(class, c(1, 2, 3)), "*arguments need to be matrix-like*")
        # No input: a single 1x1 NA placeholder node is created.
        linkedMatrix <- new(class)
        expect_equal(nNodes(linkedMatrix), 1)
        expect_true(is.na(linkedMatrix[1, 1]))
        # Single matrix input
        linkedMatrix <- new(class, matrix(data = 0, nrow = 1, ncol = 1))
        expect_equal(nNodes(linkedMatrix), 1)
        expect_equal(dim(linkedMatrix), c(1, 1))
        # Single LinkedMatrix input: nested object becomes one node but keeps its dims.
        linkedMatrix <- new(class, createLinkedMatrix(class, 2))
        expect_equal(nNodes(linkedMatrix), 1)
        expect_equal(dim(linkedMatrix), dim(dummy))
        # Multiple matrix inputs of same order: one node each, stacked along
        # the linked dimension (columns vs rows depending on class).
        linkedMatrix <- new(class, matrix(data = 0, nrow = 1, ncol = 1), matrix(data = 0, nrow = 1, ncol = 1))
        expect_equal(nNodes(linkedMatrix), 2)
        if (class == "ColumnLinkedMatrix") {
            expect_equal(dim(linkedMatrix), c(1, 2))
        } else {
            expect_equal(dim(linkedMatrix), c(2, 1))
        }
        # Multiple LinkedMatrix inputs of same order
        linkedMatrix <- new(class, createLinkedMatrix(class, 2), createLinkedMatrix(class, 2))
        expect_equal(nNodes(linkedMatrix), 2)
        if (class == "ColumnLinkedMatrix") {
            expect_equal(dim(linkedMatrix), c(nrow(dummy), ncol(dummy) * 2))
        } else {
            expect_equal(dim(linkedMatrix), c(ncol(dummy) * 2, nrow(dummy)))
        }
        # Multiple conformable matrix inputs of different order: 3 + 5 along
        # the linked dimension yields 8.
        if (class == "ColumnLinkedMatrix") {
            args <- list(matrix(data = 0, nrow = 1, ncol = 3), matrix(data = 0, nrow = 1, ncol = 5))
            dims <- c(1, 8)
        } else {
            args <- list(matrix(data = 0, nrow = 3, ncol = 1), matrix(data = 0, nrow = 5, ncol = 1))
            dims <- c(8, 1)
        }
        linkedMatrix <- do.call(class, args)
        expect_equal(nNodes(linkedMatrix), 2)
        expect_equal(dim(linkedMatrix), dims)
        # Multiple unconformable matrix inputs: mismatch along the non-linked
        # dimension must error.
        if (class == "ColumnLinkedMatrix") {
            args <- list(matrix(data = 0, nrow = 3, ncol = 1), matrix(data = 0, nrow = 5, ncol = 1))
        } else {
            args <- list(matrix(data = 0, nrow = 1, ncol = 3), matrix(data = 0, nrow = 1, ncol = 5))
        }
        expect_error(do.call(class, args), "*arguments need the same number of*")
        # Warning if dimnames do not match across nodes.
        # regexp = NA means "expect no warning"; regexp = NULL means "expect
        # a warning with any message" (testthat semantics).
        dimnamesMismatches <- list(
            list(regexp = NA, dimnames = list(NULL, NULL, NULL)),
            list(regexp = NA, dimnames = list(letters[1:3], NULL, NULL)),
            list(regexp = NULL, dimnames = list(letters[1:3], letters[4:6], NULL))
        )
        for (dimnamesMismatch in dimnamesMismatches) {
            # Names are attached along the non-linked dimension of each node.
            if (class == "ColumnLinkedMatrix") {
                args <- list(
                    matrix(data = 0, nrow = 3, ncol = 1, dimnames = list(dimnamesMismatch$dimnames[[1]], NULL)),
                    matrix(data = 0, nrow = 3, ncol = 1, dimnames = list(dimnamesMismatch$dimnames[[2]], NULL)),
                    matrix(data = 0, nrow = 3, ncol = 1, dimnames = list(dimnamesMismatch$dimnames[[3]], NULL))
                )
            } else {
                args <- list(
                    matrix(data = 0, nrow = 1, ncol = 3, dimnames = list(NULL, dimnamesMismatch$dimnames[[1]])),
                    matrix(data = 0, nrow = 1, ncol = 3, dimnames = list(NULL, dimnamesMismatch$dimnames[[2]])),
                    matrix(data = 0, nrow = 1, ncol = 3, dimnames = list(NULL, dimnamesMismatch$dimnames[[3]]))
                )
            }
            expect_warning(do.call(class, args), regexp = dimnamesMismatch$regexp)
        }
    })
    # Exercise every node count from 1 up to the length of the linked dimension
    # (columns for ColumnLinkedMatrix, rows for RowLinkedMatrix).
    for (nNodes in seq_len(ifelse(class == "ColumnLinkedMatrix", ncol(dummy), nrow(dummy)))) {
        context(paste0(class, " with ", nNodes, " nodes"))
        # Prepare LinkedMatrix object mirroring `dummy`, split over nNodes nodes.
        linkedMatrix <- createLinkedMatrix(class, nNodes)
        test_that("subsetting", {
            # Index grids: idx2 covers all (row, col) pairs; idx4r/idx4c cover
            # every 4-tuple of row/column indices (repeats included).
            idx2 <- expand.grid(seq_len(nrow(dummy)), seq_len(ncol(dummy)))
            idx4r <- expand.grid(seq_len(nrow(dummy)), seq_len(nrow(dummy)), seq_len(nrow(dummy)), seq_len(nrow(dummy)))
            idx4c <- expand.grid(seq_len(ncol(dummy)), seq_len(ncol(dummy)), seq_len(ncol(dummy)), seq_len(ncol(dummy)))
            # Whole-object extraction must reproduce the reference matrix.
            expect_equal(linkedMatrix[], dummy)
            # Single-row extraction, with and without dimension dropping.
            for (i in seq_len(nrow(dummy))) {
                expect_equal(linkedMatrix[i, ], dummy[i, ])
                expect_equal(linkedMatrix[i, , drop = FALSE], dummy[i, , drop = FALSE])
            }
            # Single-column extraction, with and without dimension dropping.
            for (i in seq_len(ncol(dummy))) {
                expect_equal(linkedMatrix[, i], dummy[, i])
                expect_equal(linkedMatrix[, i, drop = FALSE], dummy[, i, drop = FALSE])
            }
            # Single-cell extraction for every (row, col) pair.
            for (i in seq_len(nrow(idx2))) {
                expect_equal(linkedMatrix[idx2[i, 1], idx2[i, 2]], dummy[idx2[i, 1], idx2[i, 2]])
                expect_equal(linkedMatrix[idx2[i, 1], idx2[i, 2], drop = FALSE], dummy[idx2[i, 1], idx2[i, 2], drop = FALSE])
            }
for (i in seq_len(nrow(idx2))) {
expect_equal(linkedMatrix[idx2[i, 1]:idx2[i, 2], ], dummy[idx2[i, 1]:idx2[i, 2], ])
expect_equal(linkedMatrix[idx2[i, 1]:idx2[i, 2], , drop = FALSE], dummy[idx2[i, 1]:idx2[i, 2], , drop = FALSE])
expect_equal(linkedMatrix[, idx2[i, 1]:idx2[i, 2]], dummy[, idx2[i, 1]:idx2[i, 2]])
expect_equal(linkedMatrix[, idx2[i, 1]:idx2[i, 2], drop = FALSE], dummy[, idx2[i, 1]:idx2[i, 2], drop = FALSE])
expect_equal(linkedMatrix[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]], dummy[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]])
expect_equal(linkedMatrix[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2], drop = FALSE], dummy[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2], drop = FALSE])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), ], dummy[c(idx2[i, 1], idx2[i, 2]), ])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), , drop = FALSE], dummy[c(idx2[i, 1], idx2[i, 2]), , drop = FALSE])
expect_equal(linkedMatrix[, c(idx2[i, 1], idx2[i, 2])], dummy[, c(idx2[i, 1], idx2[i, 2])])
expect_equal(linkedMatrix[, c(idx2[i, 1], idx2[i, 2]), drop = FALSE], dummy[, c(idx2[i, 1], idx2[i, 2]), drop = FALSE])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])], dummy[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2]), drop = FALSE], dummy[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2]), drop = FALSE])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])], dummy[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])])
expect_equal(linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2]), drop = FALSE], dummy[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2]), drop = FALSE])
}
            # Every 4-tuple of row indices (repeats included), with and without
            # dropping; `info` reports the tuple on failure.
            for (i in seq_len(nrow(idx4r))) {
                expect_equal(linkedMatrix[c(idx4r[i, 1], idx4r[i, 2], idx4r[i, 3], idx4r[i, 4]), ], dummy[c(idx4r[i, 1], idx4r[i, 2], idx4r[i, 3], idx4r[i, 4]), ], info = paste(idx4r[i, ], collapse = ", "))
                expect_equal(linkedMatrix[c(idx4r[i, 1], idx4r[i, 2], idx4r[i, 3], idx4r[i, 4]), , drop = FALSE], dummy[c(idx4r[i, 1], idx4r[i, 2], idx4r[i, 3], idx4r[i, 4]), , drop = FALSE], info = paste(idx4r[i, ], collapse = ", "))
            }
for (i in seq_len(nrow(idx4c))) {
expect_equal(linkedMatrix[, c(idx4c[i, 1], idx4c[i, 2], idx4c[i, 3], idx4c[i, 4])], dummy[, c(idx4c[i, 1], idx4c[i, 2], idx4c[i, 3], idx4c[i, 4])], info = paste(idx4r[i, ], collapse = ", "))
expect_equal(linkedMatrix[, c(idx4c[i, 1], idx4c[i, 2], idx4c[i, 3], idx4c[i, 4]), drop = FALSE], dummy[, c(idx4c[i, 1], idx4c[i, 2], idx4c[i, 3], idx4c[i, 4]), drop = FALSE], info = paste(idx4r[i, ], collapse = ", "))
}
expect_equal(linkedMatrix[c(TRUE, FALSE), ], dummy[c(TRUE, FALSE), ])
expect_equal(linkedMatrix[, c(TRUE, FALSE)], dummy[, c(TRUE, FALSE)])
expect_equal(linkedMatrix[c(TRUE, FALSE), c(TRUE, FALSE)], dummy[c(TRUE, FALSE), c(TRUE, FALSE)])
expect_equal(linkedMatrix[c(TRUE, FALSE), , drop = FALSE], dummy[c(TRUE, FALSE), , drop = FALSE])
expect_equal(linkedMatrix[, c(TRUE, FALSE), drop = FALSE], dummy[, c(TRUE, FALSE), drop = FALSE])
expect_equal(linkedMatrix[c(TRUE, FALSE), c(TRUE, FALSE), drop = FALSE], dummy[c(TRUE, FALSE), c(TRUE, FALSE), drop = FALSE])
expect_equal(linkedMatrix["row_1", ], dummy["row_1", ])
expect_equal(linkedMatrix[, "col_1"], dummy[, "col_1"])
expect_equal(linkedMatrix["row_1", "col_1"], dummy["row_1", "col_1"])
expect_equal(linkedMatrix["row_1", , drop = FALSE], dummy["row_1", , drop = FALSE])
expect_equal(linkedMatrix[, "col_1", drop = FALSE], dummy[, "col_1", drop = FALSE])
expect_equal(linkedMatrix["row_1", "col_1", drop = FALSE], dummy["row_1", "col_1", drop = FALSE])
expect_equal(linkedMatrix[c("row_1", "row_2"), ], dummy[c("row_1", "row_2"), ])
expect_equal(linkedMatrix[, c("col_1", "col_2")], dummy[, c("col_1", "col_2")])
expect_equal(linkedMatrix[c("row_1", "row_2"), c("col_1", "col_2")], dummy[c("row_1", "row_2"), c("col_1", "col_2")])
expect_equal(linkedMatrix[c("row_1", "row_2"), , drop = FALSE], dummy[c("row_1", "row_2"), , drop = FALSE])
expect_equal(linkedMatrix[, c("col_1", "col_2"), drop = FALSE], dummy[, c("col_1", "col_2"), drop = FALSE])
expect_equal(linkedMatrix[c("row_1", "row_2"), c("col_1", "col_2"), drop = FALSE], dummy[c("row_1", "row_2"), c("col_1", "col_2"), drop = FALSE])
expect_equal(linkedMatrix[c("row_2", "row_1"), ], dummy[c("row_2", "row_1"), ])
expect_equal(linkedMatrix[, c("col_2", "col_1")], dummy[, c("col_2", "col_1")])
expect_equal(linkedMatrix[c("row_2", "row_1"), c("col_2", "col_1")], dummy[c("row_2", "row_1"), c("col_2", "col_1")])
expect_equal(linkedMatrix[c("row_2", "row_1"), , drop = FALSE], dummy[c("row_2", "row_1"), , drop = FALSE])
expect_equal(linkedMatrix[, c("col_2", "col_1"), drop = FALSE], dummy[, c("col_2", "col_1"), drop = FALSE])
expect_equal(linkedMatrix[c("row_2", "row_1"), c("col_2", "col_1"), drop = FALSE], dummy[c("row_2", "row_1"), c("col_2", "col_1"), drop = FALSE])
expect_equal(linkedMatrix[c("row_3", "row_1"), ], dummy[c("row_3", "row_1"), ])
expect_equal(linkedMatrix[, c("col_3", "col_1")], dummy[, c("col_3", "col_1")])
expect_equal(linkedMatrix[c("row_3", "row_1"), c("col_3", "col_1")], dummy[c("row_3", "row_1"), c("col_3", "col_1")])
expect_equal(linkedMatrix[c("row_3", "row_1"), , drop = FALSE], dummy[c("row_3", "row_1"), , drop = FALSE])
expect_equal(linkedMatrix[, c("col_3", "col_1"), drop = FALSE], dummy[, c("col_3", "col_1"), drop = FALSE])
expect_equal(linkedMatrix[c("row_3", "row_1"), c("col_3", "col_1"), drop = FALSE], dummy[c("row_3", "row_1"), c("col_3", "col_1"), drop = FALSE])
# data frame subset
expect_equal(new(class, mtcars)[], as.matrix(mtcars))
# expect_equal(linkedMatrix[1], dummy[1]) Not implemented yet
# expect_equal(linkedMatrix[x:y], dummy[x:y]) Not implemented yet
# expect_equal(linkedMatrix[c(x, y)], dummy[c(x, y)]) Not implemented yet
# expect_equal(linkedMatrix[dummy > 1], dummy[dummy > 1]) Not implemented yet
})
# Exercise `[<-` replacement on the LinkedMatrix against a plain-matrix
# `comparison` kept in lockstep; after every assignment the two must agree.
test_that("replacement", {
# Generate new dummy for replacement
replacement <- matrix(data = seq_len(16) * 10, nrow = 4, ncol = 4)
rownames(replacement) <- paste0("row_", seq_len(nrow(replacement)))
colnames(replacement) <- paste0("col_", seq_len(ncol(replacement)))
comparison <- dummy
# All (row, column) index pairs of the dummy matrix.
idx2 <- expand.grid(seq_len(nrow(dummy)), seq_len(ncol(dummy)))
# Compare linkedMatrix against comparison, then reset both objects in the
# caller's frame via assign() so the next replacement starts from a clean
# state. `class` and `nNodes` come from the enclosing test-setup loop.
testAndRestore <- function(info) {
expect_equal(linkedMatrix[], comparison, info = info)
linkedMatrix <- createLinkedMatrix(class, nNodes)
assign("linkedMatrix", linkedMatrix, parent.frame())
assign("comparison", dummy, parent.frame())
}
# Whole-matrix replacement.
linkedMatrix[] <- replacement
comparison[] <- replacement
testAndRestore("[]")
# Single-row replacement: real values, then NA.
for (i in seq_len(nrow(dummy))) {
linkedMatrix[i, ] <- replacement[i, ]
comparison[i, ] <- replacement[i, ]
testAndRestore(paste0("[", i, ", ]"))
linkedMatrix[i, ] <- NA
comparison[i, ] <- NA
testAndRestore(paste0("[", i, ", ] <- NA"))
}
# Single-column replacement: real values, then NA.
for (i in seq_len(ncol(dummy))) {
linkedMatrix[, i] <- replacement[, i]
comparison[, i] <- replacement[, i]
testAndRestore(paste0("[, ", i, "]"))
linkedMatrix[, i] <- NA
comparison[, i] <- NA
testAndRestore(paste0("[, ", i, "] <- NA"))
}
# For every index pair: scalar, range (:) and c() style replacement on rows,
# columns, and both, plus an NA sub-block replacement. Note idx2[i, 1] may be
# greater than idx2[i, 2], so `:` also covers descending ranges.
for (i in seq_len(nrow(idx2))) {
linkedMatrix[idx2[i, 1], idx2[i, 2]] <- replacement[idx2[i, 1], idx2[i, 2]]
comparison[idx2[i, 1], idx2[i, 2]] <- replacement[idx2[i, 1], idx2[i, 2]]
testAndRestore(paste0("[", idx2[i, 1], ", ", idx2[i, 2], "]"))
linkedMatrix[idx2[i, 1]:idx2[i, 2], ] <- replacement[idx2[i, 1]:idx2[i, 2], ]
comparison[idx2[i, 1]:idx2[i, 2], ] <- replacement[idx2[i, 1]:idx2[i, 2], ]
testAndRestore(paste0("[", idx2[i, 1], ":", idx2[i, 2], ", ]"))
linkedMatrix[, idx2[i, 1]:idx2[i, 2]] <- replacement[, idx2[i, 1]:idx2[i, 2]]
comparison[, idx2[i, 1]:idx2[i, 2]] <- replacement[, idx2[i, 1]:idx2[i, 2]]
testAndRestore(paste0("[, ", idx2[i, 1], ":", idx2[i, 2], "]"))
linkedMatrix[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]] <- replacement[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]]
comparison[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]] <- replacement[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]]
testAndRestore(paste0("[", idx2[i, 1], ":", idx2[i, 2], ", ", idx2[i, 1], ":", idx2[i, 2], "]"))
linkedMatrix[c(idx2[i, 1], idx2[i, 2]), ] <- replacement[c(idx2[i, 1], idx2[i, 2]), ]
comparison[c(idx2[i, 1], idx2[i, 2]), ] <- replacement[c(idx2[i, 1], idx2[i, 2]), ]
testAndRestore(paste0("[c(", idx2[i, 1], ", ", idx2[i, 2], "), ]"))
linkedMatrix[, c(idx2[i, 1], idx2[i, 2])] <- replacement[, c(idx2[i, 1], idx2[i, 2])]
comparison[, c(idx2[i, 1], idx2[i, 2])] <- replacement[, c(idx2[i, 1], idx2[i, 2])]
testAndRestore(paste0("[, c(", idx2[i, 1], ", ", idx2[i, 2], ")]"))
linkedMatrix[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])] <- replacement[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])]
comparison[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])] <- replacement[c(idx2[i, 1], idx2[i, 2]), c(idx2[i, 1], idx2[i, 2])]
testAndRestore(paste0("[c(", idx2[i, 1], ", ", idx2[i, 2], "), c(", idx2[i, 1], ", ", idx2[i, 2], ")]"))
linkedMatrix[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]] <- NA
comparison[idx2[i, 1]:idx2[i, 2], idx2[i, 1]:idx2[i, 2]] <- NA
# NOTE(review): the info label below omits the ':' used in the assignment
# above, so failure messages read like a scalar index — cosmetic only.
testAndRestore(paste0("[", idx2[i, 1], ", ", idx2[i, 2], "] <- NA"))
}
})
# Dimensions must match the dense equivalent.
test_that("dim", {
expect_equal(dim(linkedMatrix), dim(dummy))
})
# length() counts all cells, as for a plain matrix.
test_that("length", {
expect_equal(length(linkedMatrix), length(dummy))
})
# The object was built from `nNodes` underlying nodes (see test setup).
test_that("nNodes", {
expect_equal(nNodes(linkedMatrix), nNodes)
})
# Binding along the linked dimension doubles that dimension and the node
# count; binding along the other dimension must raise an error.
test_that("bind", {
if (class == "RowLinkedMatrix") {
boundLinkedMatrix <- rbind(linkedMatrix, linkedMatrix)
expect_equal(dim(boundLinkedMatrix), c(nrow(dummy) * 2, ncol(dummy)))
expect_equal(nNodes(boundLinkedMatrix), nNodes * 2)
expect_error(cbind(linkedMatrix, linkedMatrix))
} else {
# ColumnLinkedMatrix: columns are the linked dimension.
boundLinkedMatrix <- cbind(linkedMatrix, linkedMatrix)
expect_equal(dim(boundLinkedMatrix), c(nrow(dummy), ncol(dummy) * 2))
expect_equal(nNodes(boundLinkedMatrix), nNodes * 2)
expect_error(rbind(linkedMatrix, linkedMatrix))
}
})
}
}
|
# Note workbooks includes invisible_workbooks, determined by the 'showInProfile' TRUE/FALSE flag
library(jsonlite)

#' Fetch a Tableau Public profile via the profile API and flatten it into a
#' one-row data frame.
#'
#' @param profile_name Profile slug as it appears in a public.tableau.com URL.
#' @return A one-row data.frame with columns: name, profile_url, api_call,
#'   followers, following, twitter, linkedin, last_publish,
#'   visible_workbooks. Fields missing from the API response fall back to 0
#'   (counts/dates) or "" (URLs).
get_tableau_profile_api_extract <- function(profile_name){
  # Keep an API field only when exactly one value came back; otherwise
  # (absent/NULL, or ambiguous multi-valued) use the default. Replaces the
  # original scalar ifelse() calls, which are an anti-pattern on length-1
  # conditions.
  scalar_or <- function(x, default) {
    if (length(x) == 1) x else default
  }

  profile_call <- paste0('https://public.tableau.com/profile/api/', profile_name)
  profile_data <- jsonlite::fromJSON(profile_call)

  # A profile may list several websites; pick out the twitter/linkedin URL
  # when exactly one matches.
  site_urls <- profile_data$websites$url

  data.frame(name = profile_name,
             profile_url = paste0('https://public.tableau.com/profile/', profile_name, '#!/'),
             api_call = profile_call,
             followers = scalar_or(profile_data$totalNumberOfFollowers, 0),
             following = scalar_or(profile_data$totalNumberOfFollowing, 0),
             twitter = scalar_or(site_urls[grepl('twitter', site_urls)], ''),
             linkedin = scalar_or(site_urls[grepl('linkedin', site_urls)], ''),
             last_publish = scalar_or(profile_data$lastPublishDate, 0),
             visible_workbooks = scalar_or(profile_data$visibleWorkbookCount, 0),
             stringsAsFactors = FALSE)  # spelled out: T/F are reassignable
}
| /functions/function_get_tableau_public_api_extract.R | permissive | jfontestad/datafam | R | false | false | 1,892 | r | # Note workbooks includes invisible_workbooks, determined by the 'showInProfile' TRUE/FALSE flag
library(jsonlite)
get_tableau_profile_api_extract <- function(profile_name){
profile_call <- paste0('https://public.tableau.com/profile/api/',profile_name)
profile_data <- jsonlite::fromJSON(profile_call)
profile_following <- profile_data$totalNumberOfFollowing
profile_followers <- profile_data$totalNumberOfFollowers
profile_twitter <- profile_data$websites$url[grepl('twitter',profile_data$websites$url)]
profile_linkedin <- profile_data$websites$url[grepl('linkedin',profile_data$websites$url)]
profile_last_publish <- profile_data$lastPublishDate
profile_visible_workbooks <- profile_data$visibleWorkbookCount
profile_following <- ifelse(length(profile_following)==1,profile_following,0)
profile_followers <- ifelse(length(profile_followers)==1,profile_followers,0)
profile_twitter <- ifelse(length(profile_twitter)==1,profile_twitter,'')
profile_linkedin <- ifelse(length(profile_linkedin)==1,profile_linkedin,'')
profile_last_publish <- ifelse(length(profile_last_publish)==1,profile_last_publish,0)
profile_visible_workbooks <- ifelse(length(profile_visible_workbooks)==1,profile_visible_workbooks,0)
profile_df <- data.frame(name=profile_name,
profile_url=paste0('https://public.tableau.com/profile/',profile_name,'#!/'),
api_call=profile_call,
followers=profile_followers,
following=profile_following,
twitter=profile_twitter,
linkedin=profile_linkedin,
last_publish=profile_last_publish,
visible_workbooks=profile_visible_workbooks,
stringsAsFactors = F)
return(profile_df)
}
|
context("Test occurrence-related functions")
## ala_reasons
thischeck=function() {
test_that("ala_reasons works as expected", {
expect_that(ala_reasons(),has_names(c("rkey","name","id")))
expect_that(nrow(ala_reasons()),equals(11))
expect_equal(sort(ala_reasons()$id),0:10)
expect_error(ala_reasons(TRUE)) ## this should throw and error because there is an unused argument
})
}
check_caching(thischeck)
thischeck=function() {
test_that("occurrences summary works when no qa are present", {
expect_output(summary(occurrences(taxon="Amblyornis newtonianus",download_reason_id=10,qa='none')),"no assertion issues")
})
}
check_caching(thischeck)
thischeck=function() {
test_that("occurrences summary gives something sensible", {
expect_output(summary(occurrences(taxon="Amblyornis newtonianus",download_reason_id=10)),"^number of names")
})
}
check_caching(thischeck)
thischeck=function() {
test_that("occurrences retrieves the fields specified", {
expect_equal(sort(names(occurrences(taxon="Eucalyptus gunnii",fields=c("latitude","longitude"),qa="none",fq="basis_of_record:LivingSpecimen",download_reason_id=10)$data)),c("latitude","longitude"))
expect_error(occurrences(taxon="Eucalyptus gunnii",fields=c("blahblahblah"),download_reason_id=10))
})
}
check_caching(thischeck)
| /tests/testthat/test-occurrences.R | no_license | robbriers/ALA4R | R | false | false | 1,374 | r | context("Test occurrence-related functions")
## ala_reasons
thischeck=function() {
test_that("ala_reasons works as expected", {
expect_that(ala_reasons(),has_names(c("rkey","name","id")))
expect_that(nrow(ala_reasons()),equals(11))
expect_equal(sort(ala_reasons()$id),0:10)
expect_error(ala_reasons(TRUE)) ## this should throw and error because there is an unused argument
})
}
check_caching(thischeck)
thischeck=function() {
test_that("occurrences summary works when no qa are present", {
expect_output(summary(occurrences(taxon="Amblyornis newtonianus",download_reason_id=10,qa='none')),"no assertion issues")
})
}
check_caching(thischeck)
thischeck=function() {
test_that("occurrences summary gives something sensible", {
expect_output(summary(occurrences(taxon="Amblyornis newtonianus",download_reason_id=10)),"^number of names")
})
}
check_caching(thischeck)
thischeck=function() {
test_that("occurrences retrieves the fields specified", {
expect_equal(sort(names(occurrences(taxon="Eucalyptus gunnii",fields=c("latitude","longitude"),qa="none",fq="basis_of_record:LivingSpecimen",download_reason_id=10)$data)),c("latitude","longitude"))
expect_error(occurrences(taxon="Eucalyptus gunnii",fields=c("blahblahblah"),download_reason_id=10))
})
}
check_caching(thischeck)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigatewayv2_operations.R
\name{apigatewayv2_get_authorizers}
\alias{apigatewayv2_get_authorizers}
\title{Gets the Authorizers for an API}
\usage{
apigatewayv2_get_authorizers(ApiId, MaxResults, NextToken)
}
\arguments{
\item{ApiId}{[required] The API identifier.}
\item{MaxResults}{The maximum number of elements to be returned for this resource.}
\item{NextToken}{The next page of elements from this collection. Not valid for the last
element of the collection.}
}
\description{
Gets the Authorizers for an API.
}
\section{Request syntax}{
\preformatted{svc$get_authorizers(
ApiId = "string",
MaxResults = "string",
NextToken = "string"
)
}
}
\keyword{internal}
| /cran/paws.networking/man/apigatewayv2_get_authorizers.Rd | permissive | johnnytommy/paws | R | false | true | 751 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigatewayv2_operations.R
\name{apigatewayv2_get_authorizers}
\alias{apigatewayv2_get_authorizers}
\title{Gets the Authorizers for an API}
\usage{
apigatewayv2_get_authorizers(ApiId, MaxResults, NextToken)
}
\arguments{
\item{ApiId}{[required] The API identifier.}
\item{MaxResults}{The maximum number of elements to be returned for this resource.}
\item{NextToken}{The next page of elements from this collection. Not valid for the last
element of the collection.}
}
\description{
Gets the Authorizers for an API.
}
\section{Request syntax}{
\preformatted{svc$get_authorizers(
ApiId = "string",
MaxResults = "string",
NextToken = "string"
)
}
}
\keyword{internal}
|
# Compute annual evapotranspiration (from latent-heat flux) and annual
# precipitation at the Harvard Forest AmeriFlux towers, derive P - ET and an
# ET/P aridity index per year, and write the result to PETP.HF.csv.
rm(list=ls())  # NOTE(review): clearing the global env inside a script is discouraged
options(stringsAsFactors = FALSE)
graphics.off()
setwd("~/Documents/git/proterant/sub_projs/")  # NOTE(review): machine-specific path
library(ape)
library(phytools)
library(brms)
library(tibble)
library(ggstance)
library(ggplot2)
library("dplyr")
library("jpeg")
library("phylolm")
options(scipen = 999)

# Ha2 half-hourly and Ha1 hourly AmeriFlux BASE files.
fluxy <- read.csv("HarvardForest/AMF_US-Ha2_BASE_HH_3-5.csv")
fluxy2 <- read.csv("HarvardForest/AMF_US-Ha1_BASE_HR_14-5.csv")

## flux data starts 2004-2019
# Keep latent heat (LE_1_1_1) and air temperature (TA_1_1_1); -9999 is the
# AmeriFlux missing-value code.
fluxy <- dplyr::select(fluxy, TIMESTAMP_START, TIMESTAMP_END, LE_1_1_1, TA_1_1_1)
fluxy <- filter(fluxy, LE_1_1_1 != -9999)

library("bigleaf")
# Convert latent energy to evapotranspiration; result is in kg m-2 s-1.
fluxy$ETP1 <- LE.to.ET(c(fluxy$LE_1_1_1), c(fluxy$TA_1_1_1))
# TIMESTAMP_START is YYYYMMDDHHMM, so integer division by 1e8 yields the year.
fluxy$year <- (fluxy$TIMESTAMP_START %/% 1e8)
fluxer <- fluxy %>% group_by(year) %>% summarise(meanETP = mean(ETP1))
# ETP1 is in kg m-2 s-1 and 1 kg/m2/s = 86400 mm/day, so *86400*365 gives
# mm/year (the original comment said mm/day, which was wrong for this product).
fluxer$ETP2 <- fluxer$meanETP * 86400 * 365

### now get precip
# P is precipitation per time step (mm); drop missing values and sum per year.
fluxy2 <- dplyr::select(fluxy2, TIMESTAMP_START, TIMESTAMP_END, P)
fluxy2 <- filter(fluxy2, P != -9999)
fluxy2$year <- (fluxy2$TIMESTAMP_START %/% 1e8)
fluxer2 <- fluxy2 %>% group_by(year) %>% summarise(TotalP = sum(P))

# Join the annual series and derive the water-balance columns.
joint <- left_join(fluxer, fluxer2)
joint$PTEP <- joint$TotalP - joint$ETP2       # P - ET (mm/year)
joint$aridindex <- joint$ETP2 / joint$TotalP  # ET/P aridity index
write.csv(joint, "PETP.HF.csv", row.names = FALSE)
| /sub_projs/HarvardForest/calculate_PETP.R | no_license | dbuona/proterant | R | false | false | 1,518 | r | rm(list=ls())
options(stringsAsFactors = FALSE)
graphics.off()
setwd("~/Documents/git/proterant/sub_projs/")
library(ape)
library(phytools)
library(brms)
library(tibble)
library(ggstance)
library(ggplot2)
library("dplyr")
library("jpeg")
library("phylolm")
library(ggstance)
options(scipen = 999)
fluxy<-read.csv("HarvardForest/AMF_US-Ha2_BASE_HH_3-5.csv")
fluxy2<-read.csv("HarvardForest/AMF_US-Ha1_BASE_HR_14-5.csv")
colnames(fluxy)
#colnames(fluxy2)
fluxy2<-dplyr::select(fluxy2,TIMESTAMP_START,TIMESTAMP_END,P)
#head(fluxy2)
##flux data starts 2004-2019
fluxy<-dplyr::select(fluxy,TIMESTAMP_START,TIMESTAMP_END,LE_1_1_1,TA_1_1_1)
fluxy<-filter(fluxy,LE_1_1_1!=-9999)
head(fluxy$TIMESTAMP_START)
head(fluxy$TIMESTAMP_END)
library("bigleaf")
fluxy$ETP1<-LE.to.ET(c(fluxy$LE_1_1_1),c(fluxy$TA_1_1_1))
fluxy$year<- (fluxy$TIMESTAMP_START %/% 1e8)
fluxer<- fluxy %>% group_by(year) %>% summarise(meanETP=mean(ETP1))
#ETP1 is in kg m-2 s-1
##1 kg/m2/s = 86400 mm/day
fluxer$ETP2<-fluxer$meanETP*86400*365 ### units mm/day#fluxy2<-read.csv("HarvardForest/AMF_US-Ha1_BASE_HR_14-5.csv")
###now get precip
#colnames(fluxy2)
fluxy2<-dplyr::select(fluxy2,TIMESTAMP_START,TIMESTAMP_END,P)
fluxy2<-filter(fluxy2,P!=-9999)
x <- 1293828893
fluxy2$year<- (fluxy2$TIMESTAMP_START %/% 1e8)
fluxer2<- fluxy2 %>% group_by(year) %>% summarise(TotalP=sum(P))
joint<-left_join(fluxer,fluxer2)
joint$PTEP<-joint$TotalP-joint$ETP2
joint$aridindex<-joint$ETP2/joint$TotalP
write.csv(joint,"PETP.HF.csv",row.names = FALSE)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 46330
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 46330
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query08_query49_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 8095
c no.of clauses 46330
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 46330
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query08_query49_1344n.qdimacs 8095 46330 E1 [] 0 180 7915 46330 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query08_query49_1344n/query08_query49_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 720 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 46330
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 46330
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query08_query49_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 8095
c no.of clauses 46330
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 46330
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query08_query49_1344n.qdimacs 8095 46330 E1 [] 0 180 7915 46330 NONE
|
##
### ---------------
###
### Create: Jianming Zeng
### Date: 2018-12-29 23:24:48
### Email: jmzeng1314@163.com
### Blog: http://www.bio-info-trainee.com/
### Forum: http://www.biotrainee.com/thread-1376-1-1.html
### CAFS/SUSTC/Eli Lilly/University of Macau
### Update Log: 2018-12-29 First version
###
### ---------------
rm(list = ls()) ## "magic" one-click workspace clear
options(stringsAsFactors = F)
load(file = '../input.Rdata')
a[1:4,1:4]
head(df)
## Load the expression matrix prepared in step 0, plus per-cell attributes
## (hclust cluster, plate batch, number of genes detected per cell).
# Note: variable `a` is the raw counts matrix; `dat` is the logCPM-normalised
# expression matrix.
group_list=df$g
plate=df$plate
table(plate)
rownames(dat)=toupper(rownames(dat)) ## toupper() converts lowercase characters to uppercase
dat[1:4,1:4]
# Run PAM50 molecular subtyping on the (samples x genes) expression matrix.
if(T){
ddata=t(dat)
ddata[1:4,1:4]
s=colnames(ddata);head(s);tail(s) ## assign the detected gene symbols to s
library(org.Hs.eg.db) ## human gene annotation package
s2g=toTable(org.Hs.egSYMBOL)
g=s2g[match(s,s2g$symbol),1];head(g) ## Entrez IDs matching the detected gene symbols
# match(x, y) returns, for each element of vector x, its position in vector y;
# elements of x absent from y yield NA.
# probe Gene.symbol Gene.ID
dannot=data.frame(probe=s,
"Gene.Symbol" =s,
"EntrezGene.ID"=g)
# Vector s holds the detected gene symbols and g the standard Entrez IDs;
# they correspond one-to-one here, so build an annotation data frame.
ddata=ddata[,!is.na(dannot$EntrezGene.ID)] # ID conversion
# Keep a samples-by-genes matrix restricted to detected genes that have a
# standard Entrez ID, i.e. drop columns with no matching gene ID.
# !is.na removes rows of dannot whose EntrezGene.ID is NA (detected gene
# names with no standard ID).
dannot=dannot[!is.na(dannot$EntrezGene.ID),] # drop NA rows, i.e. genes with no ID match
head(dannot)
library(genefu)
# Available models: c("scmgene", "scmod1", "scmod2","pam50", "ssp2006", "ssp2003", "intClust", "AIMS","claudinLow")
s<-molecular.subtyping(sbt.model = "pam50",data=ddata,
annot=dannot,do.mapping=TRUE)
table(s$subtype)
tmp=as.data.frame(s$subtype)
subtypes=as.character(s$subtype)
}
head(df)
# Attach the PAM50 call per cell and cross-tabulate against the hclust groups.
df$subtypes=subtypes
table(df[,c(1,5)])
library(genefu)
# PAM50 centroid probes; rename outdated symbols to their current HGNC names.
pam50genes=pam50$centroids.map[c(1,3)]
pam50genes[pam50genes$probe=='CDCA1',1]='NUF2'
pam50genes[pam50genes$probe=='KNTC2',1]='NDC80'
pam50genes[pam50genes$probe=='ORC6L',1]='ORC6'
x=dat
dim(x)
# Restrict to PAM50 genes present in the expression matrix.
x=x[pam50genes$probe[pam50genes$probe %in% rownames(x)] ,]
table(group_list)
tmp=data.frame(group=group_list,
subtypes=subtypes)
rownames(tmp)=colnames(x)
library(pheatmap)
# Heatmap of raw logCPM values, annotated by group and PAM50 subtype.
pheatmap(x,show_rownames = T,show_colnames = F,
annotation_col = tmp,
filename = 'ht_by_pam50_raw.png')
# Row-scale (z-score per gene) and clip to [-1.6, 1.6] for display.
x=t(scale(t(x)))
x[x>1.6]=1.6
x[x< -1.6]= -1.6
pheatmap(x,show_rownames = T,show_colnames = F,
annotation_col = tmp,
filename = 'ht_by_pam50_scale.png')
| /RNA-seq/step5-pam50.R | no_license | duanshumeng/scRNA_smart_seq2 | R | false | false | 3,146 | r | ##
### ---------------
###
### Create: Jianming Zeng
### Date: 2018-12-29 23:24:48
### Email: jmzeng1314@163.com
### Blog: http://www.bio-info-trainee.com/
### Forum: http://www.biotrainee.com/thread-1376-1-1.html
### CAFS/SUSTC/Eli Lilly/University of Macau
### Update Log: 2018-12-29 First version
###
### ---------------
rm(list = ls()) ## 魔幻操作,一键清空~
options(stringsAsFactors = F)
load(file = '../input.Rdata')
a[1:4,1:4]
head(df)
## 载入第0步准备好的表达矩阵,及细胞的一些属性(hclust分群,plate批次,检测到的细胞基因)
# 注意 变量a是原始的counts矩阵,变量 dat是logCPM后的表达量矩阵。
group_list=df$g
plate=df$plate
table(plate)
rownames(dat)=toupper(rownames(dat)) ##toupper()函数,把小写字符转换成大写字符
dat[1:4,1:4]
if(T){
ddata=t(dat)
ddata[1:4,1:4]
s=colnames(ddata);head(s);tail(s) ##把实验检测到的基因赋值给S
library(org.Hs.eg.db) ##人类基因信息的包
s2g=toTable(org.Hs.egSYMBOL)
g=s2g[match(s,s2g$symbol),1];head(g) ##取出实验检测到的基因所对应的基因名
# match(x, y)返回的是vector x中每个元素在vector y中对映的位置(positions in y),
# 如果vector x中存在不在vector y中的元素,该元素处返回的是NA
# probe Gene.symbol Gene.ID
dannot=data.frame(probe=s,
"Gene.Symbol" =s,
"EntrezGene.ID"=g)
#s向量是实验检测基因的基因名字,g向量是标准基因ID
# 这里s应该和g是一一对应的,制作一个数据框
ddata=ddata[,!is.na(dannot$EntrezGene.ID)] #ID转换
#制作行为样本,列为实验检测基因(这里的剩下的实验检测基因都有标准基因ID对应)的矩阵。
#即剔除无基因ID对应的列
# !is.na去除dannot数据框EntrezGene.ID列为NA的行(去除NA值即去除没有标准基因ID对应的实验检测基因名))
dannot=dannot[!is.na(dannot$EntrezGene.ID),] #去除有NA的行,即剔除无对应的基因
head(dannot)
library(genefu)
# c("scmgene", "scmod1", "scmod2","pam50", "ssp2006", "ssp2003", "intClust", "AIMS","claudinLow")
s<-molecular.subtyping(sbt.model = "pam50",data=ddata,
annot=dannot,do.mapping=TRUE)
table(s$subtype)
tmp=as.data.frame(s$subtype)
subtypes=as.character(s$subtype)
}
head(df)
df$subtypes=subtypes
table(df[,c(1,5)])
library(genefu)
pam50genes=pam50$centroids.map[c(1,3)]
pam50genes[pam50genes$probe=='CDCA1',1]='NUF2'
pam50genes[pam50genes$probe=='KNTC2',1]='NDC80'
pam50genes[pam50genes$probe=='ORC6L',1]='ORC6'
x=dat
dim(x)
x=x[pam50genes$probe[pam50genes$probe %in% rownames(x)] ,]
table(group_list)
tmp=data.frame(group=group_list,
subtypes=subtypes)
rownames(tmp)=colnames(x)
library(pheatmap)
pheatmap(x,show_rownames = T,show_colnames = F,
annotation_col = tmp,
filename = 'ht_by_pam50_raw.png')
x=t(scale(t(x)))
x[x>1.6]=1.6
x[x< -1.6]= -1.6
pheatmap(x,show_rownames = T,show_colnames = F,
annotation_col = tmp,
filename = 'ht_by_pam50_scale.png')
|
#################################################
# File Name:plot2.r
# Author: xingpengwei
# Mail: xingwei421@qq.com
# Created Time: Fri 12 Mar 2021 12:02:53 PM UTC
#################################################
library(ggplot2)
library(ggrepel)
library(ggpubr)
library(reshape2)
data = read.table("gene.cp.ecDNA.all.pheno",header = T)
CCNE1 = data[data$gene=="CCNE1",]
ERBB2 = data[data$gene=="ERBB2",]
EGFR = data[data$gene=="EGFR",]
row.names(CCNE1)=CCNE1$sample
row.names(ERBB2)=ERBB2$sample
row.names(EGFR)=EGFR$sample
#table_egfr = data.frame(CNV=c(0,0),ecDNA=c(0,0))
#row.names(table_egfr)=c("male","female")
#table_egfr[1,]=c(length(which(EGFR$type=="CNV"&EGFR$Sex=="male")),length(which(EGFR$type=="ecDNA"&EGFR$Sex=="male")))
#table_egfr[2,]=c(length(which(EGFR$type=="CNV"&EGFR$Sex=="female")),length(which(EGFR$type=="ecDNA"&EGFR$Sex=="female")))
#chis_egfr = chisq.test(table_egfr)
#corr1 <- paste("Chi-square test. = ", round(chis_egfr$p.value,2), sep="")
#p1 = ggplot(data = EGFR)+geom_point(aes(x = log(copy_number),y=Sex,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Sex,label=rownames(EGFR)),max.overlaps = 30)+geom_text(aes(3,"male",label=corr1))
#table_egfr$sex=c("male","female")
#table_egfr_bar = melt(table_egfr,id.vars = "sex")
#table_egfr_bar$variable=factor(table_egfr_bar$variable,levels=c("ecDNA","CNV"))
#p1_bar = ggplot(data=table_egfr_bar)+geom_bar(aes(x=sex,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
#
#table_erbb2 = data.frame(CNV=c(0,0),ecDNA=c(0,0))
#row.names(table_erbb2)=c("male","female")
#table_erbb2[1,]=c(length(which(ERBB2$type=="CNV"&ERBB2$Sex=="male")),length(which(ERBB2$type=="ecDNA"&ERBB2$Sex=="male")))
#table_erbb2[2,]=c(length(which(ERBB2$type=="CNV"&ERBB2$Sex=="female")),length(which(ERBB2$type=="ecDNA"&ERBB2$Sex=="female")))
#chis_erbb2 = chisq.test(table_erbb2)
#corr2 <- paste("Chi-square test. = ", round(chis_erbb2$p.value,2), sep="")
#p2 = ggplot(data = ERBB2)+geom_point(aes(x = log(copy_number),y=Sex,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Sex,label=rownames(ERBB2)),max.overlaps = 30)+geom_text(aes(2,"male",label=corr2))
#table_erbb2$sex=c("male","female")
#table_erbb2_bar = melt(table_erbb2,id.vars = "sex")
#table_erbb2_bar$variable=factor(table_erbb2_bar$variable,levels=c("ecDNA","CNV"))
#p2_bar = ggplot(data=table_erbb2_bar)+geom_bar(aes(x=sex,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
#
#table_ccne1 = data.frame(CNV=c(0,0),ecDNA=c(0,0))
#row.names(table_ccne1)=c("male","female")
#table_ccne1[1,]=c(length(which(CCNE1$type=="CNV"&CCNE1$Sex=="male")),length(which(CCNE1$type=="ecDNA"&CCNE1$Sex=="male")))
#table_ccne1[2,]=c(length(which(CCNE1$type=="CNV"&CCNE1$Sex=="female")),length(which(CCNE1$type=="ecDNA"&CCNE1$Sex=="female")))
#chis_erbb2 = chisq.test(table_ccne1)
#corr3 <- paste("Chi-square test. = ", round(chis_erbb2$p.value,2), sep="")
#p3 = ggplot(data = CCNE1)+geom_point(aes(x = log(copy_number),y=Sex,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Sex,label=rownames(CCNE1)),max.overlaps = 30)+geom_text(aes(3,"male",label=corr3))
#table_ccne1$sex=c("male","female")
#table_ccne1_bar = melt(table_ccne1,id.vars = "sex")
#table_ccne1_bar$variable=factor(table_ccne1_bar$variable,levels=c("ecDNA","CNV"))
#p3_bar = ggplot(data=table_ccne1_bar)+geom_bar(aes(x=sex,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
#
#ggsave('EGFR.sex.copynumber.pdf',plot=p1,width=6,height=4)
#ggsave('ERBB2.sex.copynumber.pdf',plot=p2,width=6,height=4)
#ggsave('CCNE1.sex.copynumber.pdf',plot=p3,width=6,height=4)
#ggsave('EGFR.sex.copynumber.bar.pdf',plot=p1_bar,width=6,height=4)
#ggsave('ERBB2.sex.copynumber.bar.pdf',plot=p2_bar,width=6,height=4)
#ggsave('CCNE1.sex.copynumber.bar.pdf',plot=p3_bar,width=6,height=4)
#
#compaired2=list(c("CNV","ecDNA"))
#p1 = ggplot(data = EGFR)+geom_point(aes(x = log(copy_number),y=Age_at_diagnosis,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Age_at_diagnosis,label=rownames(EGFR)),max.overlaps = 30)
#p1_box=ggboxplot(EGFR,x="type",y="Age_at_diagnosis",add="jitter",add.params=list(shape=21, fill="orange", size=3))+geom_signif(comparisons = compaired2,step_increase = 0.1,map_signif_level = F,test = t.test)
#p2 = ggplot(data = ERBB2)+geom_point(aes(x = log(copy_number),y=Age_at_diagnosis,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Age_at_diagnosis,label=rownames(ERBB2)),max.overlaps = 30)
#p2_box=ggboxplot(ERBB2,x="type",y="Age_at_diagnosis",add="jitter",add.params=list(shape=21, fill="orange", size=3))+geom_signif(comparisons = compaired2,step_increase = 0.1,map_signif_level = F,test = t.test)
#p3 = ggplot(data = CCNE1)+geom_point(aes(x = log(copy_number),y=Age_at_diagnosis,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Age_at_diagnosis,label=rownames(CCNE1)),max.overlaps = 30)
#p3_box=ggboxplot(CCNE1,x="type",y="Age_at_diagnosis",add="jitter",add.params=list(shape=21, fill="orange", size=3))+geom_signif(comparisons = compaired2,step_increase = 0.1,map_signif_level = F,test = t.test)
#ggsave('EGFR.Age.copynumber.pdf',plot=p1,width=6,height=4)
#ggsave('ERBB2.Age.copynumber.pdf',plot=p2,width=6,height=4)
#ggsave('CCNE1.Age.copynumber.pdf',plot=p3,width=6,height=4)
#ggsave('EGFR.Age.box.copynumber.pdf',plot=p1_box,width=6,height=4)
#ggsave('ERBB2.Age.box.copynumber.pdf',plot=p2_box,width=6,height=4)
#ggsave('CCNE1.Age.box.copynumber.pdf',plot=p3_box,width=6,height=4)
# ---- EGFR: amplification type (CNV vs ecDNA) vs UICC stage (6th edition) ----
# 2x2 contingency table: rows = stage III / II, columns = CNV / ecDNA counts.
table_uicc = data.frame(CNV=c(0,0),ecDNA=c(0,0))
row.names(table_uicc)=c("III","II")
# Fill each cell with the number of EGFR samples of that (type, stage) pair.
table_uicc[1,]=c(length(which(EGFR$type=="CNV"&EGFR$UICC_stage_6th=="III")),length(which(EGFR$type=="ecDNA"&EGFR$UICC_stage_6th=="III")))
table_uicc[2,]=c(length(which(EGFR$type=="CNV"&EGFR$UICC_stage_6th=="II")),length(which(EGFR$type=="ecDNA"&EGFR$UICC_stage_6th=="II")))
# Chi-square test of independence between type and stage; the rounded
# p-value is rendered as a text annotation on the scatter plot below.
chis_uicc1 = chisq.test(table_uicc)
uicc1 <- paste("Chi-square test. = ", round(chis_uicc1$p.value,2), sep="")
# Scatter plot: log copy number vs UICC stage (point size = year, color = type),
# sample labels via ggrepel, chi-square p-value annotated at x = 3 on row "III".
p1 = ggplot(data = EGFR)+geom_point(aes(x = log(copy_number),y = UICC_stage_6th, size=year, color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),UICC_stage_6th,label=rownames(EGFR)),max.overlaps = 30)+geom_text(aes(3,"III",label=uicc1))
# Reshape the counts to long form for a stacked bar chart (ecDNA drawn on top).
table_uicc$UICC_stage_6th=c("III","II")
table_uicc_bar = melt(table_uicc,id.vars = "UICC_stage_6th")
table_uicc_bar$variable=factor(table_uicc_bar$variable,levels=c("ecDNA","CNV"))
p1_bar = ggplot(data=table_uicc_bar)+geom_bar(aes(x=UICC_stage_6th,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
# ---- ERBB2: same contingency table / chi-square test / plots as for EGFR ----
table_uicc2 = data.frame(CNV=c(0,0),ecDNA=c(0,0))
row.names(table_uicc2)=c("III","II")
table_uicc2[1,]=c(length(which(ERBB2$type=="CNV"&ERBB2$UICC_stage_6th=="III")),length(which(ERBB2$type=="ecDNA"&ERBB2$UICC_stage_6th=="III")))
table_uicc2[2,]=c(length(which(ERBB2$type=="CNV"&ERBB2$UICC_stage_6th=="II")),length(which(ERBB2$type=="ecDNA"&ERBB2$UICC_stage_6th=="II")))
chis_uicc2 = chisq.test(table_uicc2)
uicc2 <- paste("Chi-square test. = ", round(chis_uicc2$p.value,2), sep="")
p2 = ggplot(data = ERBB2)+geom_point(aes(x = log(copy_number),y = UICC_stage_6th, size=year, color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),UICC_stage_6th,label=rownames(ERBB2)),max.overlaps = 30)+geom_text(aes(3,"III",label=uicc2))
table_uicc2$UICC_stage_6th=c("III","II")
table_uicc2_bar = melt(table_uicc2,id.vars = "UICC_stage_6th")
table_uicc2_bar$variable=factor(table_uicc2_bar$variable,levels=c("ecDNA","CNV"))
p2_bar = ggplot(data=table_uicc2_bar)+geom_bar(aes(x=UICC_stage_6th,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
# ---- CCNE1: same contingency table / chi-square test / plots as for EGFR ----
table_uicc3 = data.frame(CNV=c(0,0),ecDNA=c(0,0))
row.names(table_uicc3)=c("III","II")
table_uicc3[1,]=c(length(which(CCNE1$type=="CNV"&CCNE1$UICC_stage_6th=="III")),length(which(CCNE1$type=="ecDNA"&CCNE1$UICC_stage_6th=="III")))
table_uicc3[2,]=c(length(which(CCNE1$type=="CNV"&CCNE1$UICC_stage_6th=="II")),length(which(CCNE1$type=="ecDNA"&CCNE1$UICC_stage_6th=="II")))
chis_uicc3 = chisq.test(table_uicc3)
uicc3 <- paste("Chi-square test. = ", round(chis_uicc3$p.value,2), sep="")
p3 = ggplot(data = CCNE1)+geom_point(aes(x = log(copy_number),y = UICC_stage_6th, size=year, color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),UICC_stage_6th,label=rownames(CCNE1)),max.overlaps = 30)+geom_text(aes(3,"III",label=uicc3))
table_uicc3$UICC_stage_6th=c("III","II")
table_uicc3_bar = melt(table_uicc3,id.vars = "UICC_stage_6th")
table_uicc3_bar$variable=factor(table_uicc3_bar$variable,levels=c("ecDNA","CNV"))
p3_bar = ggplot(data=table_uicc3_bar)+geom_bar(aes(x = UICC_stage_6th,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
# Write the scatter and stacked-bar plots for all three genes to PDF.
ggsave('EGFR.uicc.copynumber.pdf',plot=p1,width=6,height=4)
ggsave('ERBB2.uicc.copynumber.pdf',plot=p2,width=6,height=4)
ggsave('CCNE1.uicc.copynumber.pdf',plot=p3,width=6,height=4)
ggsave('EGFR.uicc.copynumber.bar.pdf',plot=p1_bar,width=6,height=4)
ggsave('ERBB2.uicc.copynumber.bar.pdf',plot=p2_bar,width=6,height=4)
ggsave('CCNE1.uicc.copynumber.bar.pdf',plot=p3_bar,width=6,height=4)
| /plot_survival.r | no_license | chenlab2019/ecDNA-on-GCA | R | false | false | 11,397 | r | #################################################
# File Name:plot2.r
# Author: xingpengwei
# Mail: xingwei421@qq.com
# Created Time: Fri 12 Mar 2021 12:02:53 PM UTC
#################################################
# Plotting, repelled point labels, publication-style helpers, and reshaping.
library(ggplot2)
library(ggrepel)
library(ggpubr)
library(reshape2)
# Per-sample gene copy-number / ecDNA phenotype table (one row per sample-gene).
data = read.table("gene.cp.ecDNA.all.pheno",header = T)
# Split into one data frame per gene of interest.
CCNE1 = data[data$gene=="CCNE1",]
ERBB2 = data[data$gene=="ERBB2",]
EGFR = data[data$gene=="EGFR",]
# Use sample IDs as row names so the plots can label points by sample.
row.names(CCNE1)=CCNE1$sample
row.names(ERBB2)=ERBB2$sample
row.names(EGFR)=EGFR$sample
#table_egfr = data.frame(CNV=c(0,0),ecDNA=c(0,0))
#row.names(table_egfr)=c("male","female")
#table_egfr[1,]=c(length(which(EGFR$type=="CNV"&EGFR$Sex=="male")),length(which(EGFR$type=="ecDNA"&EGFR$Sex=="male")))
#table_egfr[2,]=c(length(which(EGFR$type=="CNV"&EGFR$Sex=="female")),length(which(EGFR$type=="ecDNA"&EGFR$Sex=="female")))
#chis_egfr = chisq.test(table_egfr)
#corr1 <- paste("Chi-square test. = ", round(chis_egfr$p.value,2), sep="")
#p1 = ggplot(data = EGFR)+geom_point(aes(x = log(copy_number),y=Sex,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Sex,label=rownames(EGFR)),max.overlaps = 30)+geom_text(aes(3,"male",label=corr1))
#table_egfr$sex=c("male","female")
#table_egfr_bar = melt(table_egfr,id.vars = "sex")
#table_egfr_bar$variable=factor(table_egfr_bar$variable,levels=c("ecDNA","CNV"))
#p1_bar = ggplot(data=table_egfr_bar)+geom_bar(aes(x=sex,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
#
#table_erbb2 = data.frame(CNV=c(0,0),ecDNA=c(0,0))
#row.names(table_erbb2)=c("male","female")
#table_erbb2[1,]=c(length(which(ERBB2$type=="CNV"&ERBB2$Sex=="male")),length(which(ERBB2$type=="ecDNA"&ERBB2$Sex=="male")))
#table_erbb2[2,]=c(length(which(ERBB2$type=="CNV"&ERBB2$Sex=="female")),length(which(ERBB2$type=="ecDNA"&ERBB2$Sex=="female")))
#chis_erbb2 = chisq.test(table_erbb2)
#corr2 <- paste("Chi-square test. = ", round(chis_erbb2$p.value,2), sep="")
#p2 = ggplot(data = ERBB2)+geom_point(aes(x = log(copy_number),y=Sex,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Sex,label=rownames(ERBB2)),max.overlaps = 30)+geom_text(aes(2,"male",label=corr2))
#table_erbb2$sex=c("male","female")
#table_erbb2_bar = melt(table_erbb2,id.vars = "sex")
#table_erbb2_bar$variable=factor(table_erbb2_bar$variable,levels=c("ecDNA","CNV"))
#p2_bar = ggplot(data=table_erbb2_bar)+geom_bar(aes(x=sex,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
#
#table_ccne1 = data.frame(CNV=c(0,0),ecDNA=c(0,0))
#row.names(table_ccne1)=c("male","female")
#table_ccne1[1,]=c(length(which(CCNE1$type=="CNV"&CCNE1$Sex=="male")),length(which(CCNE1$type=="ecDNA"&CCNE1$Sex=="male")))
#table_ccne1[2,]=c(length(which(CCNE1$type=="CNV"&CCNE1$Sex=="female")),length(which(CCNE1$type=="ecDNA"&CCNE1$Sex=="female")))
#chis_erbb2 = chisq.test(table_ccne1)
#corr3 <- paste("Chi-square test. = ", round(chis_erbb2$p.value,2), sep="")
#p3 = ggplot(data = CCNE1)+geom_point(aes(x = log(copy_number),y=Sex,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Sex,label=rownames(CCNE1)),max.overlaps = 30)+geom_text(aes(3,"male",label=corr3))
#table_ccne1$sex=c("male","female")
#table_ccne1_bar = melt(table_ccne1,id.vars = "sex")
#table_ccne1_bar$variable=factor(table_ccne1_bar$variable,levels=c("ecDNA","CNV"))
#p3_bar = ggplot(data=table_ccne1_bar)+geom_bar(aes(x=sex,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
#
#ggsave('EGFR.sex.copynumber.pdf',plot=p1,width=6,height=4)
#ggsave('ERBB2.sex.copynumber.pdf',plot=p2,width=6,height=4)
#ggsave('CCNE1.sex.copynumber.pdf',plot=p3,width=6,height=4)
#ggsave('EGFR.sex.copynumber.bar.pdf',plot=p1_bar,width=6,height=4)
#ggsave('ERBB2.sex.copynumber.bar.pdf',plot=p2_bar,width=6,height=4)
#ggsave('CCNE1.sex.copynumber.bar.pdf',plot=p3_bar,width=6,height=4)
#
#compaired2=list(c("CNV","ecDNA"))
#p1 = ggplot(data = EGFR)+geom_point(aes(x = log(copy_number),y=Age_at_diagnosis,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Age_at_diagnosis,label=rownames(EGFR)),max.overlaps = 30)
#p1_box=ggboxplot(EGFR,x="type",y="Age_at_diagnosis",add="jitter",add.params=list(shape=21, fill="orange", size=3))+geom_signif(comparisons = compaired2,step_increase = 0.1,map_signif_level = F,test = t.test)
#p2 = ggplot(data = ERBB2)+geom_point(aes(x = log(copy_number),y=Age_at_diagnosis,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Age_at_diagnosis,label=rownames(ERBB2)),max.overlaps = 30)
#p2_box=ggboxplot(ERBB2,x="type",y="Age_at_diagnosis",add="jitter",add.params=list(shape=21, fill="orange", size=3))+geom_signif(comparisons = compaired2,step_increase = 0.1,map_signif_level = F,test = t.test)
#p3 = ggplot(data = CCNE1)+geom_point(aes(x = log(copy_number),y=Age_at_diagnosis,size=year,color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),Age_at_diagnosis,label=rownames(CCNE1)),max.overlaps = 30)
#p3_box=ggboxplot(CCNE1,x="type",y="Age_at_diagnosis",add="jitter",add.params=list(shape=21, fill="orange", size=3))+geom_signif(comparisons = compaired2,step_increase = 0.1,map_signif_level = F,test = t.test)
#ggsave('EGFR.Age.copynumber.pdf',plot=p1,width=6,height=4)
#ggsave('ERBB2.Age.copynumber.pdf',plot=p2,width=6,height=4)
#ggsave('CCNE1.Age.copynumber.pdf',plot=p3,width=6,height=4)
#ggsave('EGFR.Age.box.copynumber.pdf',plot=p1_box,width=6,height=4)
#ggsave('ERBB2.Age.box.copynumber.pdf',plot=p2_box,width=6,height=4)
#ggsave('CCNE1.Age.box.copynumber.pdf',plot=p3_box,width=6,height=4)
# ---- EGFR: amplification type (CNV vs ecDNA) vs UICC stage (6th edition) ----
# 2x2 contingency table: rows = stage III / II, columns = CNV / ecDNA counts.
table_uicc = data.frame(CNV=c(0,0),ecDNA=c(0,0))
row.names(table_uicc)=c("III","II")
# Fill each cell with the number of EGFR samples of that (type, stage) pair.
table_uicc[1,]=c(length(which(EGFR$type=="CNV"&EGFR$UICC_stage_6th=="III")),length(which(EGFR$type=="ecDNA"&EGFR$UICC_stage_6th=="III")))
table_uicc[2,]=c(length(which(EGFR$type=="CNV"&EGFR$UICC_stage_6th=="II")),length(which(EGFR$type=="ecDNA"&EGFR$UICC_stage_6th=="II")))
# Chi-square test of independence between type and stage; the rounded
# p-value is rendered as a text annotation on the scatter plot below.
chis_uicc1 = chisq.test(table_uicc)
uicc1 <- paste("Chi-square test. = ", round(chis_uicc1$p.value,2), sep="")
# Scatter plot: log copy number vs UICC stage (point size = year, color = type),
# sample labels via ggrepel, chi-square p-value annotated at x = 3 on row "III".
p1 = ggplot(data = EGFR)+geom_point(aes(x = log(copy_number),y = UICC_stage_6th, size=year, color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),UICC_stage_6th,label=rownames(EGFR)),max.overlaps = 30)+geom_text(aes(3,"III",label=uicc1))
# Reshape the counts to long form for a stacked bar chart (ecDNA drawn on top).
table_uicc$UICC_stage_6th=c("III","II")
table_uicc_bar = melt(table_uicc,id.vars = "UICC_stage_6th")
table_uicc_bar$variable=factor(table_uicc_bar$variable,levels=c("ecDNA","CNV"))
p1_bar = ggplot(data=table_uicc_bar)+geom_bar(aes(x=UICC_stage_6th,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
# ---- ERBB2: same contingency table / chi-square test / plots as for EGFR ----
table_uicc2 = data.frame(CNV=c(0,0),ecDNA=c(0,0))
row.names(table_uicc2)=c("III","II")
table_uicc2[1,]=c(length(which(ERBB2$type=="CNV"&ERBB2$UICC_stage_6th=="III")),length(which(ERBB2$type=="ecDNA"&ERBB2$UICC_stage_6th=="III")))
table_uicc2[2,]=c(length(which(ERBB2$type=="CNV"&ERBB2$UICC_stage_6th=="II")),length(which(ERBB2$type=="ecDNA"&ERBB2$UICC_stage_6th=="II")))
chis_uicc2 = chisq.test(table_uicc2)
uicc2 <- paste("Chi-square test. = ", round(chis_uicc2$p.value,2), sep="")
p2 = ggplot(data = ERBB2)+geom_point(aes(x = log(copy_number),y = UICC_stage_6th, size=year, color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),UICC_stage_6th,label=rownames(ERBB2)),max.overlaps = 30)+geom_text(aes(3,"III",label=uicc2))
table_uicc2$UICC_stage_6th=c("III","II")
table_uicc2_bar = melt(table_uicc2,id.vars = "UICC_stage_6th")
table_uicc2_bar$variable=factor(table_uicc2_bar$variable,levels=c("ecDNA","CNV"))
p2_bar = ggplot(data=table_uicc2_bar)+geom_bar(aes(x=UICC_stage_6th,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
# ---- CCNE1: same contingency table / chi-square test / plots as for EGFR ----
table_uicc3 = data.frame(CNV=c(0,0),ecDNA=c(0,0))
row.names(table_uicc3)=c("III","II")
table_uicc3[1,]=c(length(which(CCNE1$type=="CNV"&CCNE1$UICC_stage_6th=="III")),length(which(CCNE1$type=="ecDNA"&CCNE1$UICC_stage_6th=="III")))
table_uicc3[2,]=c(length(which(CCNE1$type=="CNV"&CCNE1$UICC_stage_6th=="II")),length(which(CCNE1$type=="ecDNA"&CCNE1$UICC_stage_6th=="II")))
chis_uicc3 = chisq.test(table_uicc3)
uicc3 <- paste("Chi-square test. = ", round(chis_uicc3$p.value,2), sep="")
p3 = ggplot(data = CCNE1)+geom_point(aes(x = log(copy_number),y = UICC_stage_6th, size=year, color=type))+scale_color_manual(values=c("#666666","#FF0016"))+theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.background = element_blank())+theme(axis.text = element_text(color = "black"))+geom_text_repel(aes(log(copy_number),UICC_stage_6th,label=rownames(CCNE1)),max.overlaps = 30)+geom_text(aes(3,"III",label=uicc3))
table_uicc3$UICC_stage_6th=c("III","II")
table_uicc3_bar = melt(table_uicc3,id.vars = "UICC_stage_6th")
table_uicc3_bar$variable=factor(table_uicc3_bar$variable,levels=c("ecDNA","CNV"))
p3_bar = ggplot(data=table_uicc3_bar)+geom_bar(aes(x = UICC_stage_6th,y=value,fill=variable),stat="identity")+theme_bw()+scale_y_continuous(expand = c(0,0))+scale_fill_manual(values=c("#ED2026","#676767"))
# Write the scatter and stacked-bar plots for all three genes to PDF.
ggsave('EGFR.uicc.copynumber.pdf',plot=p1,width=6,height=4)
ggsave('ERBB2.uicc.copynumber.pdf',plot=p2,width=6,height=4)
ggsave('CCNE1.uicc.copynumber.pdf',plot=p3,width=6,height=4)
ggsave('EGFR.uicc.copynumber.bar.pdf',plot=p1_bar,width=6,height=4)
ggsave('ERBB2.uicc.copynumber.bar.pdf',plot=p2_bar,width=6,height=4)
ggsave('CCNE1.uicc.copynumber.bar.pdf',plot=p3_bar,width=6,height=4)
|
# example 5.7 of section 5.2.3
# (example 5.7 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Plotting the receiver operating characteristic curve

# install.packages('ROCR')
library('ROCR')

# Build an ROCR prediction object from the spam scores and true labels,
# then plot the ROC curve and print the area under it.
eval <- prediction(spamTest$pred, spamTest$spam)
plot(performance(eval, "tpr", "fpr"))
print(attributes(performance(eval, 'auc'))$y.values[[1]])
## [1] 0.9660072

# Manual ROC computation: sweep thresholds over (0, 1) in steps of 1e-5 and
# record the true/false positive rate at each cutoff.
n_thresh <- 99999
# Preallocate instead of growing the vectors from a scalar NA inside the
# loop, which copies the whole vector on every iteration (O(n^2)).
mitrFPR <- numeric(n_thresh)
mitrTPR <- numeric(n_thresh)
for (i in seq_len(n_thresh)) {
  # Coerce to a fixed-level factor so the 2x2 table always has both a FALSE
  # and a TRUE column; with a raw logical, a threshold that classifies every
  # observation the same way drops a column and mtb[2, 2] fails.
  pred_pos <- factor(spamTest$pred >= i / 100000, levels = c(FALSE, TRUE))
  mtb <- table(spamTest$spam, pred_pos)
  mitrTPR[i] <- mtb[2, 2] / (mtb[2, 2] + mtb[2, 1])  # TP / (TP + FN)
  mitrFPR[i] <- mtb[1, 2] / (mtb[1, 2] + mtb[1, 1])  # FP / (FP + TN)
}
plot(mitrFPR, mitrTPR)
myframe <- data.frame(FPR = mitrFPR, TPR = mitrTPR)
ggplot(myframe, aes(x = FPR, y = TPR)) + geom_line() + ylim(0, 1)
| /Code/charpter5/00063_example_5.7_of_section_5.2.3.R | no_license | smartactuary/learn_data_science | R | false | false | 750 | r | # example 5.7 of section 5.2.3
# example 5.7 of section 5.2.3
# (example 5.7 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Plotting the receiver operating characteristic curve

# install.packages('ROCR')
library('ROCR')

# Build an ROCR prediction object from the spam scores and true labels,
# then plot the ROC curve and print the area under it.
eval <- prediction(spamTest$pred, spamTest$spam)
plot(performance(eval, "tpr", "fpr"))
print(attributes(performance(eval, 'auc'))$y.values[[1]])
## [1] 0.9660072

# Manual ROC computation: sweep thresholds over (0, 1) in steps of 1e-5 and
# record the true/false positive rate at each cutoff.
n_thresh <- 99999
# Preallocate instead of growing the vectors from a scalar NA inside the
# loop, which copies the whole vector on every iteration (O(n^2)).
mitrFPR <- numeric(n_thresh)
mitrTPR <- numeric(n_thresh)
for (i in seq_len(n_thresh)) {
  # Coerce to a fixed-level factor so the 2x2 table always has both a FALSE
  # and a TRUE column; with a raw logical, a threshold that classifies every
  # observation the same way drops a column and mtb[2, 2] fails.
  pred_pos <- factor(spamTest$pred >= i / 100000, levels = c(FALSE, TRUE))
  mtb <- table(spamTest$spam, pred_pos)
  mitrTPR[i] <- mtb[2, 2] / (mtb[2, 2] + mtb[2, 1])  # TP / (TP + FN)
  mitrFPR[i] <- mtb[1, 2] / (mtb[1, 2] + mtb[1, 1])  # FP / (FP + TN)
}
plot(mitrFPR, mitrTPR)
myframe <- data.frame(FPR = mitrFPR, TPR = mitrTPR)
ggplot(myframe, aes(x = FPR, y = TPR)) + geom_line() + ylim(0, 1)
#' Get Query page url
#'
#' Build a Naver news search URL (without a pageNum parameter).
#'
#' @param query Required search keyword; percent-encoded before use.
#' @param st Default is news.all.
#' @param q_enc Default is EUC-KR.
#' @param r_enc Default is UTF-8.
#' @param r_format Default is xml.
#' @param rp Default is none.
#' @param sm Default is all.basic.
#' @param ic Default is all.
#' @param so Default is datetime.dsc.
#' @param startDate Default is 3 days before today.
#' @param endDate Default is today.
#' @param stPaper Default is exist:1.
#' @param detail Default is 1, meaning only the title is displayed.
#' @param pd Default is 1.
#' @param dnaSo Default is rel.dsc.
#' @return The assembled search URL as a character scalar.
#' @export
getQueryUrl <- function(query,st="news.all",
                        q_enc="EUC-KR",
                        r_enc="UTF-8",
                        r_format="xml",
                        rp="none",
                        sm="all.basic",
                        ic="all",
                        so="datetime.dsc",
                        startDate=as.Date(Sys.time())-3,
                        endDate=as.Date(Sys.time()),
                        stPaper="exist:1",
                        detail=1,
                        pd=1,
                        dnaSo="rel.dsc") {
  # Encode reserved characters too (&, =, +, spaces, ...): with the default
  # reserved = FALSE they would pass through unescaped and corrupt the
  # query-string parameter we append below.
  query <- utils::URLencode(query, reserved = TRUE)
  root <- "http://news.naver.com/main/search/search.nhn?"
  link <- paste0(root,"st=",st,
                 "&q_enc=",q_enc,
                 "&r_enc=",r_enc,
                 "&r_format=",r_format,
                 "&rp=",rp,
                 "&sm=",sm,
                 "&ic=",ic,
                 "&so=",so,
                 "&detail=",detail,
                 "&pd=",pd,
                 "&dnaSo=",dnaSo,
                 "&startDate=",startDate,
                 "&endDate=",endDate,
                 "&stPaper=",stPaper,
                 "&query=",query)
  return(link)
}
| /R/getQueryUrl.R | permissive | nanriblue/N2H4 | R | false | false | 1,837 | r | #' Get Query page url
#'
#' Get naver news query page url withput pageNum.
#'
#' @param query requred.
#' @param st Default is news.all.
#' @param q_enc Default is euc-kr.
#' @param r_enc Default is UTF-8.
#' @param r_format Default is xml.
#' @param rp Default is none.
#' @param sm Default is all.basic.
#' @param ic Default is all.
#' @param so Default is datetime.dsc.
#' @param detail Default is 1 means only display title.
#' @param startDate Dfault is 3 days before today.
#' @param endDate Default is today.
#' @param stPaper Default is exist:1.
#' @param pd Default is 1.
#' @param dnaSo Default is rel.dsc.
#' @return Get url.
#' @export
getQueryUrl <- function(query,st="news.all",
q_enc="EUC-KR",
r_enc="UTF-8",
r_format="xml",
rp="none",
sm="all.basic",
ic="all",
so="datetime.dsc",
startDate=as.Date(Sys.time())-3,
endDate=as.Date(Sys.time()),
stPaper="exist:1",
detail=1,
pd=1,
dnaSo="rel.dsc") {
query <- utils::URLencode(query)
root <- "http://news.naver.com/main/search/search.nhn?"
link <- paste0(root,"st=",st,
"&q_enc=",q_enc,
"&r_enc=",r_enc,
"&r_format=",r_format,
"&rp=",rp,
"&sm=",sm,
"&ic=",ic,
"&so=",so,
"&detail=",detail,
"&pd=",pd,
"&dnaSo=",dnaSo,
"&startDate=",startDate,
"&endDate=",endDate,
"&stPaper=",stPaper,
"&query=",query)
return(link)
}
|
install.packages("rtweet")
library (rtweet)
library(syuzhet)
library(ggplot2)
library(xlsx)
library (jsonlite)
library(dplyr)
library(syuzhet)
library(tidyr)
library(lubridate)
library(ggplot2)
library(tidyr)
library(reshape2)
library(reshape)
library(radarchart)
library(data.table)
#library(CASdatasets)
# NOTE(review): hard-coded API credentials committed to source. Move these to
# environment variables or a gitignored config file and rotate the keys.
api_key <- "2rxDrVunPNAcDia4xBQrLjEVy"
api_secret_key <- "4N2mIFJm2yUP271LunvYSYsXkDLm5y9V1MMTvyrH0m7Pyc8M8C"
access_token <- "1478793870-6VOfCpmjUYFLgLkqLOoKhKLYQTZWdPggzh8xEPj"
access_token_secret <- "mjvomHex3BMo49WySOb9HLomU3nT4LzsLYAf6JWtGnJYb"
## authenticate via web browser
token <- create_token(
  app = "Dublin City Bike",
  consumer_key = api_key,
  consumer_secret = api_secret_key,
  access_token = access_token,
  access_secret = access_token_secret)
# Fetch recent English tweets mentioning Dublin_Bikes (retweets excluded).
DublinBikes <- search_tweets("Dublin_Bikes", n=10, include_rts=FALSE, lang="en")
# Cleaning dataset: strip URLs, @mentions, "amp", newlines and punctuation.
DublinBikes$text <- gsub("https\\S*", "", DublinBikes$text)
DublinBikes$text <- gsub("@\\S*", "", DublinBikes$text)
DublinBikes$text <- gsub("amp", "", DublinBikes$text)
DublinBikes$text <- gsub("[\r\n]", "", DublinBikes$text)
DublinBikes$text <- gsub("[[:punct:]]", "", DublinBikes$text)
# Converting tweets to ASCII to tackle strange characters
tweets <- iconv(DublinBikes$text, from="UTF-8", to="ASCII", sub="")
# Gathering the newspaper data from the News API (Dublin AND Bikes query).
content <- fromJSON("http://newsapi.org/v2/everything?q=Dublin%20AND%20Bikes&from=2020-07-30&sortBy=publishedAt&apiKey=c9040948cf114c4cbd5a1c5d6727f23c",flatten = TRUE)
content <- as.data.frame(content)
#View(content)
NewsResponse <- content
#View(NewsResponse)
# Parse publication timestamps and keep the month for later aggregation.
NewsResponse$articles.publishedAt <- as.Date(NewsResponse$articles.publishedAt)
Newsyear <- month(NewsResponse$articles.publishedAt)
#str(NewsResponse)
# Strip HTML tags and surrounding whitespace from article descriptions.
NewsResponse$articles.description <- trimws((gsub("<.*?>","",NewsResponse$articles.description)))
# NRC emotion/sentiment scores for the newspaper descriptions.
mysentiment_Classification <- get_nrc_sentiment(NewsResponse$articles.description)
head(mysentiment_Classification)
#
#
# Combining both data sources and applying the NRC model to the tweets too.
ew_sentiment<-get_nrc_sentiment((DublinBikes$text))
head(ew_sentiment)
BothSentiments <- rbind(mysentiment_Classification,ew_sentiment)
head(BothSentiments)
# Total score per sentiment category across tweets + articles.
sentimentscores<-data.frame(colSums(BothSentiments[,]))
names(sentimentscores) <- "Score"
sentimentscores <- cbind("sentiment"=rownames(sentimentscores),sentimentscores)
rownames(sentimentscores) <- NULL
# Bar chart of total score per sentiment category.
ggplot(data=sentimentscores,aes(x=sentiment,y=Score))+
  geom_bar(aes(fill=sentiment),stat = "identity")+
  theme(legend.position="none")+
  xlab("Sentiments")+ylab("Scores")+
  ggtitle("Total sentiment based on scores")+
  theme_minimal()
#
#
# Newspaper sentiment scores paired with publication month for the radar chart.
mysentiment_Classification_Radar <- data.frame(Newsyear,mysentiment_Classification)
View(mysentiment_Classification_Radar)
#Sentiment_ID <- seq(1:nrow(mysentiment_Classification))
#View(mysentiment_Classification)
#mysentiment_Classification <- cbind(Sentiment_ID, mysentiment_Classification)
#head(mysentiment_Classification)
# Melt to long form, then sum scores per (sentiment, month) combination.
MoltenSentiments <- melt(mysentiment_Classification_Radar, id=c("Newsyear"))
head(MoltenSentiments,id=3)
abc <- aggregate(value ~ variable+Newsyear, MoltenSentiments, sum)
View(abc)
# Pivot back to wide form: one row per sentiment, one column per month.
RR <- reshape(data=abc,idvar="variable",
              v.names = "value",
              timevar = "Newsyear",
              direction="wide")
head(RR)
colnames(RR) <- c("Sentiments","July")
# Radar chart of the per-month sentiment totals.
RR %>%
  chartJSRadar(showToolTipLabel = TRUE,
               main = "NRC Years Radar")
| /Sentiment_Analysis.R | no_license | rohit5555/Dublin_Bike_Analysis | R | false | false | 3,532 | r | install.packages("rtweet")
library (rtweet)
library(syuzhet)
library(ggplot2)
library(xlsx)
library (jsonlite)
library(dplyr)
library(syuzhet)
library(tidyr)
library(lubridate)
library(ggplot2)
library(tidyr)
library(reshape2)
library(reshape)
library(radarchart)
library(data.table)
#library(CASdatasets)
api_key <- "2rxDrVunPNAcDia4xBQrLjEVy"
api_secret_key <- "4N2mIFJm2yUP271LunvYSYsXkDLm5y9V1MMTvyrH0m7Pyc8M8C"
access_token <- "1478793870-6VOfCpmjUYFLgLkqLOoKhKLYQTZWdPggzh8xEPj"
access_token_secret <- "mjvomHex3BMo49WySOb9HLomU3nT4LzsLYAf6JWtGnJYb"
## authenticate via web browser
token <- create_token(
app = "Dublin City Bike",
consumer_key = api_key,
consumer_secret = api_secret_key,
access_token = access_token,
access_secret = access_token_secret)
DublinBikes <- search_tweets("Dublin_Bikes", n=10, include_rts=FALSE, lang="en")
#Cleaning Dataset
DublinBikes$text <- gsub("https\\S*", "", DublinBikes$text)
DublinBikes$text <- gsub("@\\S*", "", DublinBikes$text)
DublinBikes$text <- gsub("amp", "", DublinBikes$text)
DublinBikes$text <- gsub("[\r\n]", "", DublinBikes$text)
DublinBikes$text <- gsub("[[:punct:]]", "", DublinBikes$text)
# Converting tweets to ASCII to trackle strange characters
tweets <- iconv(DublinBikes$text, from="UTF-8", to="ASCII", sub="")
# Gathering the Newspaper Data
content <- fromJSON("http://newsapi.org/v2/everything?q=Dublin%20AND%20Bikes&from=2020-07-30&sortBy=publishedAt&apiKey=c9040948cf114c4cbd5a1c5d6727f23c",flatten = TRUE)
content <- as.data.frame(content)
#View(content)
NewsResponse <- content
#View(NewsResponse)
NewsResponse$articles.publishedAt <- as.Date(NewsResponse$articles.publishedAt)
Newsyear <- month(NewsResponse$articles.publishedAt)
#str(NewsResponse)
NewsResponse$articles.description <- trimws((gsub("<.*?>","",NewsResponse$articles.description)))
mysentiment_Classification <- get_nrc_sentiment(NewsResponse$articles.description)
head(mysentiment_Classification)
#
#
#Combining the both Data and applying the NRC Model
ew_sentiment<-get_nrc_sentiment((DublinBikes$text))
head(ew_sentiment)
BothSentiments <- rbind(mysentiment_Classification,ew_sentiment)
head(BothSentiments)
sentimentscores<-data.frame(colSums(BothSentiments[,]))
names(sentimentscores) <- "Score"
sentimentscores <- cbind("sentiment"=rownames(sentimentscores),sentimentscores)
rownames(sentimentscores) <- NULL
ggplot(data=sentimentscores,aes(x=sentiment,y=Score))+
geom_bar(aes(fill=sentiment),stat = "identity")+
theme(legend.position="none")+
xlab("Sentiments")+ylab("Scores")+
ggtitle("Total sentiment based on scores")+
theme_minimal()
#
#
#Both sentiment(tweets+Newspaper) output assignment to a varaible
mysentiment_Classification_Radar <- data.frame(Newsyear,mysentiment_Classification)
View(mysentiment_Classification_Radar)
#Sentiment_ID <- seq(1:nrow(mysentiment_Classification))
#View(mysentiment_Classification)
#mysentiment_Classification <- cbind(Sentiment_ID, mysentiment_Classification)
#head(mysentiment_Classification)
# Applying the Molten model
MoltenSentiments <- melt(mysentiment_Classification_Radar, id=c("Newsyear"))
head(MoltenSentiments,id=3)
abc <- aggregate(value ~ variable+Newsyear, MoltenSentiments, sum)
View(abc)
RR <- reshape(data=abc,idvar="variable",
v.names = "value",
timevar = "Newsyear",
direction="wide")
head(RR)
colnames(RR) <- c("Sentiments","July")
RR %>%
chartJSRadar(showToolTipLabel = TRUE,
main = "NRC Years Radar")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dmbc_classes.R
\name{initialize,dmbc_fit-method}
\alias{initialize,dmbc_fit-method}
\alias{dmbc_fit-initialize}
\title{Create an instance of the \code{dmbc_fit} class using new/initialize.}
\usage{
\S4method{initialize}{dmbc_fit}(
.Object,
z.chain = array(),
z.chain.p = array(),
alpha.chain = matrix(),
eta.chain = matrix(),
sigma2.chain = matrix(),
lambda.chain = matrix(),
prob.chain = array(),
x.ind.chain = array(),
x.chain = matrix(),
accept = matrix(),
diss = list(),
dens = list(),
control = list(),
prior = list(),
dim = list(),
model = NA
)
}
\arguments{
\item{.Object}{Prototype object from the class \code{\link{dmbc_fit}}.}
\item{z.chain}{An object of class \code{array}; posterior draws from
the MCMC algorithm for the (untransformed) latent configuration \eqn{Z}.}
\item{z.chain.p}{An object of class \code{array}; posterior draws from
the MCMC algorithm for the (Procrustes-transformed) latent configuration
\eqn{Z}.}
\item{alpha.chain}{An object of class \code{matrix}; posterior draws
from the MCMC algorithm for the \eqn{\alpha} parameters.}
\item{eta.chain}{An object of class \code{matrix}; posterior draws
from the MCMC algorithm for the \eqn{\eta} parameters.}
\item{sigma2.chain}{An object of class \code{matrix}; posterior draws
from the MCMC algorithm for the \eqn{\sigma^2} parameters.}
\item{lambda.chain}{An object of class \code{matrix}; posterior draws
from the MCMC algorithm for the \eqn{\lambda} parameters.}
\item{prob.chain}{An object of class \code{array}; posterior draws
from the MCMC algorithm for the cluster membership probabilities.}
\item{x.ind.chain}{An object of class \code{array}; posterior draws
from the MCMC algorithm for the cluster membership indicators.}
\item{x.chain}{An object of class \code{matrix}; posterior draws from
the MCMC algorithm for the cluster membership labels.}
\item{accept}{An object of class \code{matrix}; final acceptance rates
for the MCMC algorithm.}
\item{diss}{An object of class \code{list}; list of observed
dissimilarity matrices.}
\item{dens}{An object of class \code{list}; list of log-likelihood,
log-prior and log-posterior values at each iteration of the MCMC simulation.}
\item{control}{An object of class \code{list}; list of the control
parameters (number of burnin and sample iterations, number of MCMC chains,
etc.). See \code{\link{dmbc_control}()} for more information.}
\item{prior}{An object of class \code{list}; list of the prior
hyperparameters. See \code{\link{dmbc_prior}()} for more information.}
\item{dim}{An object of class \code{list}; list of dimensions for
the estimated model, i.e. number of objects (\emph{n}), number of latent
dimensions (\emph{p}), number of clusters (\emph{G}), and number of
subjects (\emph{S}).}
\item{model}{An object of class \code{\link{dmbc_model}}.}
}
\description{
Create an instance of the \code{dmbc_fit} class using new/initialize.
}
\author{
Sergio Venturini \email{sergio.venturini@unicatt.it}
}
| /man/initialize-dmbc_fit-method.Rd | no_license | sergioventurini/dmbc | R | false | true | 3,069 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dmbc_classes.R
\name{initialize,dmbc_fit-method}
\alias{initialize,dmbc_fit-method}
\alias{dmbc_fit-initialize}
\title{Create an instance of the \code{dmbc_fit} class using new/initialize.}
\usage{
\S4method{initialize}{dmbc_fit}(
.Object,
z.chain = array(),
z.chain.p = array(),
alpha.chain = matrix(),
eta.chain = matrix(),
sigma2.chain = matrix(),
lambda.chain = matrix(),
prob.chain = array(),
x.ind.chain = array(),
x.chain = matrix(),
accept = matrix(),
diss = list(),
dens = list(),
control = list(),
prior = list(),
dim = list(),
model = NA
)
}
\arguments{
\item{.Object}{Prototype object from the class \code{\link{dmbc_fit}}.}
\item{z.chain}{An object of class \code{array}; posterior draws from
the MCMC algorithm for the (untransformed) latent configuration \eqn{Z}.}
\item{z.chain.p}{An object of class \code{array}; posterior draws from
the MCMC algorithm for the (Procrustes-transformed) latent configuration
\eqn{Z}.}
\item{alpha.chain}{An object of class \code{matrix}; posterior draws
from the MCMC algorithm for the \eqn{\alpha} parameters.}
\item{eta.chain}{An object of class \code{matrix}; posterior draws
from the MCMC algorithm for the \eqn{\eta} parameters.}
\item{sigma2.chain}{An object of class \code{matrix}; posterior draws
from the MCMC algorithm for the \eqn{\sigma^2} parameters.}
\item{lambda.chain}{An object of class \code{matrix}; posterior draws
from the MCMC algorithm for the \eqn{\lambda} parameters.}
\item{prob.chain}{An object of class \code{array}; posterior draws
from the MCMC algorithm for the cluster membership probabilities.}
\item{x.ind.chain}{An object of class \code{array}; posterior draws
from the MCMC algorithm for the cluster membership indicators.}
\item{x.chain}{An object of class \code{matrix}; posterior draws from
the MCMC algorithm for the cluster membership labels.}
\item{accept}{An object of class \code{matrix}; final acceptance rates
for the MCMC algorithm.}
\item{diss}{An object of class \code{list}; list of observed
dissimilarity matrices.}
\item{dens}{An object of class \code{list}; list of log-likelihood,
log-prior and log-posterior values at each iteration of the MCMC simulation.}
\item{control}{An object of class \code{list}; list of the control
parameters (number of burnin and sample iterations, number of MCMC chains,
etc.). See \code{\link{dmbc_control}()} for more information.}
\item{prior}{An object of class \code{list}; list of the prior
hyperparameters. See \code{\link{dmbc_prior}()} for more information.}
\item{dim}{An object of class \code{list}; list of dimensions for
the estimated model, i.e. number of objects (\emph{n}), number of latent
dimensions (\emph{p}), number of clusters (\emph{G}), and number of
subjects (\emph{S}).}
\item{model}{An object of class \code{\link{dmbc_model}}.}
}
\description{
Create an instance of the \code{dmbc_fit} class using new/initialize.
}
\author{
Sergio Venturini \email{sergio.venturini@unicatt.it}
}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
%
%
% on Wed Feb 08 14:37:54 2006.
%
% Generator was the Rdoc class, which is part of the R.oo package written
% by Henrik Bengtsson, 2001-2004.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{Galgo}
\docType{class}
\alias{Galgo}
\keyword{classes}
\title{The representation of a Genetic Algorithm}
\section{Class}{Package: galgo \cr
\bold{Class Galgo}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{Galgo}\cr
\bold{Directly known subclasses:}\cr
\cr
public static class \bold{Galgo}\cr
extends \link[R.oo]{Object}\cr
}
\description{
Represents a genetic algorithm (GA) itself. The basic GA uses
at least one population of chromosomes, a ``fitness'' function,
and a stopping rule (see references).
The Galgo object is not limited to a single population,
it implements a list of populations where any element in the list can be either
a \code{Niche} object or a \code{World} object. Nervertheless, any user-defined object
that implements \code{evolve, progeny, best, max, bestFitness, and maxFitness} methods
can be part of the \code{populations} list.
The ``fitness'' function is by far the most important part of a GA, it evaluates a \code{Chromosome} to determine
how good the chromosome is respect to a given goal. The function can
be sensitive to data stored in \code{.GlobalEnv} or any other object (see \code{\link[galgo:evaluate.Galgo]{*evaluate}()} for further details).
For this package and in the case of the microarray,
we have included several fitness functions to classify samples using different methods.
However, it is not limited for a classification problem for microarray data, because
you can create any fitness function in any given context.
The stopping rule has three options. First, it is simply a desired fitness
value implemented as a numeric \code{fitnessGoal}, and If the maximum fitness value of a population
is equal or higher than \code{fitnessGoal} the GA ends. Second, \code{maxGenerations} determine
the maximum number of generations a GA can evolve. The current generation is increased after
evaluating the fitness function to the entire population list. Thus, if the current
generation reach \code{maxGenerations} the GA stops. Third, if the result of the
user-defined \code{callBackFunc} is \code{NA} the GA stops. In addition, you can always break any
R program using \code{Ctrl-C} (or \code{Esc} in Windows).
When the GA ends many values are used for futher analysis.
Examples are the best chromosome (\code{best} method), its fitness (\code{bestFitness} method),
the final generation (\code{generation} variable), the evolution of the maximum fitness (\code{maxFitnesses} list variable),
the maximum chromosome in each generation (\code{maxChromosome} list variable), and the elapsed time (\code{elapsedTime} variable).
Moreover, flags like \code{goalScored}, \code{userCancelled}, and \code{running} are available.
}
\usage{Galgo(id=0,
populations=list(),
fitnessFunc=function(...) 1,
goalFitness=0.9,
minGenerations=1,
maxGenerations=100,
addGenerations=0,
verbose=20,
callBackFunc=function(...) 1,
data=NULL,
gcCall=0,
savePopulations=FALSE,
maxFitnesses=c(),
maxFitness=0,
maxChromosomes=list(),
maxChromosome=NULL,
bestFitness=0,
bestChromosome=NULL,
savedPopulations=list(),
generation=0,
elapsedTime=0,
initialTime=0,
userCancelled=FALSE,
goalScored=FALSE,
running=FALSE,
...)}
\arguments{
\item{id}{A way to identify the object.}
\item{populations}{A list of populations of any class \code{World}, \code{Niche}, or user-defined population.}
\item{fitnessFunc}{The function that will be evaluate any chromosome in the populations. This function should receive two parameteres, the \code{Chromosome} object and the \code{parent} object (defined as a parameter as well). The \code{parent} object is commonly a object of class \code{BigBang} when used combined. Theoretically, the fitness function may return a numeric non-negative finite value, but commonly in practice these values are limited from \code{0} to \code{1}. The \code{offspring} factors in class \code{Niche} where established using the \code{0-1} range assumption.}
\item{goalFitness}{The desired fitness. The GA will evolve until it reach this value or any other stopping rule is met. See description section.}
\item{minGenerations}{The minimum number of generations. A GA evolution will not ends before this generation number even that \code{fitnessGoal} has been reach.}
\item{maxGenerations}{The maximum number of generations that the GA could evolve.}
\item{addGenerations}{The number of generations to over-evolve once that \code{goalFitness} has been met. Some solutions reach the goal from a large ``jump'' (or quasi-random mutation) and some other from ``plateau''. \code{addGenerations} helps to ensure the solutions has been ``matured'' at least that number of generations.}
\item{verbose}{Instruct the GA to display the general information about the evolution. When \code{verbose==1} this information is printed every generation. In general every \code{verbose} number of generation would produce a line of output. Of course if \code{verbose==0} would not display a thing at all.}
\item{callBackFunc}{A user-function to be called after every generation. It should receive the \code{Galgo} object itself. If the result is \code{NA} the GA ends. For instance, if \code{callBackFunc} is \code{plot} the trace of all generations is nicely viewed in a plot; however, in long runs it can consume time and memory.}
\item{data}{Any user-data can be stored in this variable (but it is not limited to \code{data}, the user can insert any other like \code{myData}, \code{mama.mia} or \code{whatever} in the \code{...} argument).}
\item{gcCall}{How often 10 calls to garbage collection function gc(). This sometimes helps for memory issues.}
\item{savePopulations}{If TRUE, it save the population array in a savedPopulations variable of the galgo object.}
\item{maxFitnesses}{Internal object included for generality not inteded for final users.}
\item{maxFitness}{Internal object included for generality not inteded for final users.}
\item{maxChromosomes}{Internal object included for generality not inteded for final users.}
\item{maxChromosome}{Internal object included for generality not inteded for final users.}
\item{bestFitness}{Internal object included for generality not inteded for final users.}
\item{bestChromosome}{Internal object included for generality not inteded for final users.}
\item{savedPopulations}{Internal object included for generality not inteded for final users.}
\item{generation}{Internal object included for generality not inteded for final users.}
\item{elapsedTime}{Internal object included for generality not inteded for final users.}
\item{initialTime}{Internal object included for generality not inteded for final users.}
\item{userCancelled}{Internal object included for generality not inteded for final users.}
\item{goalScored}{Internal object included for generality not inteded for final users.}
\item{running}{Internal object included for generality not inteded for final users.}
\item{...}{Other user named values to include in the object (like pMutation, pCrossover or any other).}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{\link[galgo:best.Galgo]{best}} \tab Returns the best chromosome.\cr
\tab \code{\link[galgo:bestFitness.Galgo]{bestFitness}} \tab Returns the fitness of the best chromosome.\cr
\tab \code{\link[galgo:clone.Galgo]{clone}} \tab Clones itself and all its objects.\cr
\tab \code{\link[galgo:evaluate.Galgo]{evaluate}} \tab Evaluates all chromosomes with a fitness function.\cr
\tab \code{\link[galgo:evolve.Galgo]{evolve}} \tab Evolves the chromosomes populations of a Galgo (Genetic Algorithm).\cr
\tab \code{\link[galgo:generateRandom.Galgo]{generateRandom}} \tab Generates random values for all populations in the Galgo object.\cr
\tab \code{\link[galgo:length.Galgo]{length}} \tab Gets the number of populations defined in the Galgo object.\cr
\tab \code{\link[galgo:max.Galgo]{max}} \tab Returns the chromosome whose current fitness is maximum.\cr
\tab \code{\link[galgo:maxFitness.Galgo]{maxFitness}} \tab Returns the fitness of the maximum chromosome.\cr
\tab \code{\link[galgo:plot.Galgo]{plot}} \tab Plots information about the Galgo object.\cr
\tab \code{\link[galgo:print.Galgo]{print}} \tab Prints the representation of a Galgo object.\cr
\tab \code{\link[galgo:refreshStats.Galgo]{refreshStats}} \tab Updates the internal values from the current populations.\cr
\tab \code{\link[galgo:reInit.Galgo]{reInit}} \tab Erases all internal values in order to re-use the object.\cr
\tab \code{\link[galgo:summary.Galgo]{summary}} \tab Prints the representation and statistics of the galgo object.\cr
}
\bold{Methods inherited from Object}:\cr
as.list, unObject, $, $<-, [[, [[<-, as.character, attach, clone, detach, equals, extend, finalize, getFields, getInstanciationTime, getStaticInstance, hasField, hashCode, ll, load, objectSize, print, save
}
\examples{
cr <- Chromosome(genes=newCollection(Gene(shape1=1, shape2=100),5))
ni <- Niche(chromosomes = newRandomCollection(cr, 10))
wo <- World(niches=newRandomCollection(ni,2))
ga <- Galgo(populations=list(wo), goalFitness = 0.75, callBackFunc=plot,
fitnessFunc=function(chr, parent) 5/sd(as.numeric(chr)))
ga
evolve(ga)
# missing a classification example
}
\references{Goldberg, David E. 1989 \emph{Genetic Algorithms in Search, Optimization and Machine Learning}. Addison-Wesley Pub. Co. ISBN: 0201157675}
\author{Victor Trevino. Francesco Falciani Group. University of Birmingham, U.K. http://www.bip.bham.ac.uk/bioinf}
\seealso{
\code{\link{Gene}},
\code{\link{Chromosome}},
\code{\link{Niche}},
\code{\link{World}},
\code{\link{BigBang}},
\code{\link{configBB.VarSel}}(),
\code{\link{configBB.VarSelMisc}}().
}
\keyword{programming}
\keyword{methods}
| /man/Galgo.Rd | no_license | cran/galgo | R | false | false | 10,484 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
%
%
% on Wed Feb 08 14:37:54 2006.
%
% Generator was the Rdoc class, which is part of the R.oo package written
% by Henrik Bengtsson, 2001-2004.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{Galgo}
\docType{class}
\alias{Galgo}
\keyword{classes}
\title{The representation of a Genetic Algorithm}
\section{Class}{Package: galgo \cr
\bold{Class Galgo}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{Galgo}\cr
\bold{Directly known subclasses:}\cr
\cr
public static class \bold{Galgo}\cr
extends \link[R.oo]{Object}\cr
}
\description{
Represents a genetic algorithm (GA) itself. The basic GA uses
at least one population of chromosomes, a ``fitness'' function,
and a stopping rule (see references).
The Galgo object is not limited to a single population,
it implements a list of populations where any element in the list can be either
a \code{Niche} object or a \code{World} object. Nervertheless, any user-defined object
that implements \code{evolve, progeny, best, max, bestFitness, and maxFitness} methods
can be part of the \code{populations} list.
The ``fitness'' function is by far the most important part of a GA, it evaluates a \code{Chromosome} to determine
how good the chromosome is respect to a given goal. The function can
be sensitive to data stored in \code{.GlobalEnv} or any other object (see \code{\link[galgo:evaluate.Galgo]{*evaluate}()} for further details).
For this package and in the case of the microarray,
we have included several fitness functions to classify samples using different methods.
However, it is not limited for a classification problem for microarray data, because
you can create any fitness function in any given context.
The stopping rule has three options. First, it is simply a desired fitness
value implemented as a numeric \code{fitnessGoal}, and If the maximum fitness value of a population
is equal or higher than \code{fitnessGoal} the GA ends. Second, \code{maxGenerations} determine
the maximum number of generations a GA can evolve. The current generation is increased after
evaluating the fitness function to the entire population list. Thus, if the current
generation reach \code{maxGenerations} the GA stops. Third, if the result of the
user-defined \code{callBackFunc} is \code{NA} the GA stops. In addition, you can always break any
R program using \code{Ctrl-C} (or \code{Esc} in Windows).
When the GA ends many values are used for futher analysis.
Examples are the best chromosome (\code{best} method), its fitness (\code{bestFitness} method),
the final generation (\code{generation} variable), the evolution of the maximum fitness (\code{maxFitnesses} list variable),
the maximum chromosome in each generation (\code{maxChromosome} list variable), and the elapsed time (\code{elapsedTime} variable).
Moreover, flags like \code{goalScored}, \code{userCancelled}, and \code{running} are available.
}
\usage{Galgo(id=0,
populations=list(),
fitnessFunc=function(...) 1,
goalFitness=0.9,
minGenerations=1,
maxGenerations=100,
addGenerations=0,
verbose=20,
callBackFunc=function(...) 1,
data=NULL,
gcCall=0,
savePopulations=FALSE,
maxFitnesses=c(),
maxFitness=0,
maxChromosomes=list(),
maxChromosome=NULL,
bestFitness=0,
bestChromosome=NULL,
savedPopulations=list(),
generation=0,
elapsedTime=0,
initialTime=0,
userCancelled=FALSE,
goalScored=FALSE,
running=FALSE,
...)}
\arguments{
\item{id}{A way to identify the object.}
\item{populations}{A list of populations of any class \code{World}, \code{Niche}, or user-defined population.}
\item{fitnessFunc}{The function that will be evaluate any chromosome in the populations. This function should receive two parameteres, the \code{Chromosome} object and the \code{parent} object (defined as a parameter as well). The \code{parent} object is commonly a object of class \code{BigBang} when used combined. Theoretically, the fitness function may return a numeric non-negative finite value, but commonly in practice these values are limited from \code{0} to \code{1}. The \code{offspring} factors in class \code{Niche} where established using the \code{0-1} range assumption.}
\item{goalFitness}{The desired fitness. The GA will evolve until it reach this value or any other stopping rule is met. See description section.}
\item{minGenerations}{The minimum number of generations. A GA evolution will not ends before this generation number even that \code{fitnessGoal} has been reach.}
\item{maxGenerations}{The maximum number of generations that the GA could evolve.}
\item{addGenerations}{The number of generations to over-evolve once that \code{goalFitness} has been met. Some solutions reach the goal from a large ``jump'' (or quasi-random mutation) and some other from ``plateau''. \code{addGenerations} helps to ensure the solutions has been ``matured'' at least that number of generations.}
\item{verbose}{Instruct the GA to display the general information about the evolution. When \code{verbose==1} this information is printed every generation. In general every \code{verbose} number of generation would produce a line of output. Of course if \code{verbose==0} would not display a thing at all.}
\item{callBackFunc}{A user-function to be called after every generation. It should receive the \code{Galgo} object itself. If the result is \code{NA} the GA ends. For instance, if \code{callBackFunc} is \code{plot} the trace of all generations is nicely viewed in a plot; however, in long runs it can consume time and memory.}
\item{data}{Any user-data can be stored in this variable (but it is not limited to \code{data}, the user can insert any other like \code{myData}, \code{mama.mia} or \code{whatever} in the \code{...} argument).}
\item{gcCall}{How often 10 calls to garbage collection function gc(). This sometimes helps for memory issues.}
\item{savePopulations}{If TRUE, it save the population array in a savedPopulations variable of the galgo object.}
\item{maxFitnesses}{Internal object included for generality not inteded for final users.}
\item{maxFitness}{Internal object included for generality not inteded for final users.}
\item{maxChromosomes}{Internal object included for generality not inteded for final users.}
\item{maxChromosome}{Internal object included for generality not inteded for final users.}
\item{bestFitness}{Internal object included for generality not inteded for final users.}
\item{bestChromosome}{Internal object included for generality not inteded for final users.}
\item{savedPopulations}{Internal object included for generality not inteded for final users.}
\item{generation}{Internal object included for generality not inteded for final users.}
\item{elapsedTime}{Internal object included for generality not inteded for final users.}
\item{initialTime}{Internal object included for generality not inteded for final users.}
\item{userCancelled}{Internal object included for generality not inteded for final users.}
\item{goalScored}{Internal object included for generality not inteded for final users.}
\item{running}{Internal object included for generality not inteded for final users.}
\item{...}{Other user named values to include in the object (like pMutation, pCrossover or any other).}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{\link[galgo:best.Galgo]{best}} \tab Returns the best chromosome.\cr
\tab \code{\link[galgo:bestFitness.Galgo]{bestFitness}} \tab Returns the fitness of the best chromosome.\cr
\tab \code{\link[galgo:clone.Galgo]{clone}} \tab Clones itself and all its objects.\cr
\tab \code{\link[galgo:evaluate.Galgo]{evaluate}} \tab Evaluates all chromosomes with a fitness function.\cr
\tab \code{\link[galgo:evolve.Galgo]{evolve}} \tab Evolves the chromosomes populations of a Galgo (Genetic Algorithm).\cr
\tab \code{\link[galgo:generateRandom.Galgo]{generateRandom}} \tab Generates random values for all populations in the Galgo object.\cr
\tab \code{\link[galgo:length.Galgo]{length}} \tab Gets the number of populations defined in the Galgo object.\cr
\tab \code{\link[galgo:max.Galgo]{max}} \tab Returns the chromosome whose current fitness is maximum.\cr
\tab \code{\link[galgo:maxFitness.Galgo]{maxFitness}} \tab Returns the fitness of the maximum chromosome.\cr
\tab \code{\link[galgo:plot.Galgo]{plot}} \tab Plots information about the Galgo object.\cr
\tab \code{\link[galgo:print.Galgo]{print}} \tab Prints the representation of a Galgo object.\cr
\tab \code{\link[galgo:refreshStats.Galgo]{refreshStats}} \tab Updates the internal values from the current populations.\cr
\tab \code{\link[galgo:reInit.Galgo]{reInit}} \tab Erases all internal values in order to re-use the object.\cr
\tab \code{\link[galgo:summary.Galgo]{summary}} \tab Prints the representation and statistics of the galgo object.\cr
}
\bold{Methods inherited from Object}:\cr
as.list, unObject, $, $<-, [[, [[<-, as.character, attach, clone, detach, equals, extend, finalize, getFields, getInstanciationTime, getStaticInstance, hasField, hashCode, ll, load, objectSize, print, save
}
\examples{
cr <- Chromosome(genes=newCollection(Gene(shape1=1, shape2=100),5))
ni <- Niche(chromosomes = newRandomCollection(cr, 10))
wo <- World(niches=newRandomCollection(ni,2))
ga <- Galgo(populations=list(wo), goalFitness = 0.75, callBackFunc=plot,
fitnessFunc=function(chr, parent) 5/sd(as.numeric(chr)))
ga
evolve(ga)
# missing a classification example
}
\references{Goldberg, David E. 1989 \emph{Genetic Algorithms in Search, Optimization and Machine Learning}. Addison-Wesley Pub. Co. ISBN: 0201157675}
\author{Victor Trevino. Francesco Falciani Group. University of Birmingham, U.K. http://www.bip.bham.ac.uk/bioinf}
\seealso{
\code{\link{Gene}},
\code{\link{Chromosome}},
\code{\link{Niche}},
\code{\link{World}},
\code{\link{BigBang}},
\code{\link{configBB.VarSel}}(),
\code{\link{configBB.VarSelMisc}}().
}
\keyword{programming}
\keyword{methods}
|
#' get.combination.count
#'
#' @description
#' Calculate the number of possible combinations between baits and fragments,
#' excluding self-ligations and only counting bait-to-bait interactions once (e.g. a-b, not b-a)
#'
#' @param baits vector of bait IDs in form chrN:start-end
#' @param fragments vector of fragment IDs in form chrN:start-end
#' @param cis.only logical indicating whether cis-interactions only should be considered
#'
#' @return total number of possible combinations
#'
#' @export get.combination.count
get.combination.count <- function(baits, fragments, cis.only = FALSE) {
### INPUT TESTS ###########################################################
if( 1 == length(baits) && file.exists(baits) ) {
stop('baits should be a vector of bait IDs');
}
if( 1 == length(fragments) && file.exists(fragments) ) {
stop('fragments should be a vector of fragment IDs');
}
if( !all(baits %in% fragments) ) {
print(baits);
print(fragments);
stop('All baits must be in fragments');
}
### MAIN ##################################################################
if( cis.only ) {
baits.chr <- gsub('(.*):(.*)', '\\1', baits);
fragments.chr <- gsub('(.*):(.*)', '\\1', fragments);
unique.chromosomes <- unique( c(baits.chr, fragments.chr) );
# keep track of how many combinations per chromosome
combinations.per.chromosome <- c();
for( chr in unique.chromosomes ) {
# call this function in trans mode to calculate possible combinations
# within this chromosome
combinations.per.chromosome[ chr ] <- get.combination.count(
baits = baits[ baits.chr == chr ],
fragments = fragments[ fragments.chr == chr ],
cis.only = FALSE
);
}
possible.combinations <- sum( combinations.per.chromosome );
} else {
# convert to numeric in the process to avoid integer overflow
n.baits <- as.numeric( length(baits) );
n.fragments <- as.numeric( length(fragments) );
# total number of combinations,
# minus "reverse linked" bait-to-bait interactions and bait self-ligations
possible.combinations <- n.baits*n.fragments - choose(n.baits, 2) - n.baits;
}
return(possible.combinations);
} | /R/get.combination.count.R | no_license | cran/chicane | R | false | false | 2,168 | r | #' get.combination.count
#'
#' @description
#' Calculate the number of possible combinations between baits and fragments,
#' excluding self-ligations and only counting bait-to-bait interactions once (e.g. a-b, not b-a)
#'
#' @param baits vector of bait IDs in form chrN:start-end
#' @param fragments vector of fragment IDs in form chrN:start-end
#' @param cis.only logical indicating whether cis-interactions only should be considered
#'
#' @return total number of possible combinations
#'
#' @export get.combination.count
get.combination.count <- function(baits, fragments, cis.only = FALSE) {
### INPUT TESTS ###########################################################
if( 1 == length(baits) && file.exists(baits) ) {
stop('baits should be a vector of bait IDs');
}
if( 1 == length(fragments) && file.exists(fragments) ) {
stop('fragments should be a vector of fragment IDs');
}
if( !all(baits %in% fragments) ) {
print(baits);
print(fragments);
stop('All baits must be in fragments');
}
### MAIN ##################################################################
if( cis.only ) {
baits.chr <- gsub('(.*):(.*)', '\\1', baits);
fragments.chr <- gsub('(.*):(.*)', '\\1', fragments);
unique.chromosomes <- unique( c(baits.chr, fragments.chr) );
# keep track of how many combinations per chromosome
combinations.per.chromosome <- c();
for( chr in unique.chromosomes ) {
# call this function in trans mode to calculate possible combinations
# within this chromosome
combinations.per.chromosome[ chr ] <- get.combination.count(
baits = baits[ baits.chr == chr ],
fragments = fragments[ fragments.chr == chr ],
cis.only = FALSE
);
}
possible.combinations <- sum( combinations.per.chromosome );
} else {
# convert to numeric in the process to avoid integer overflow
n.baits <- as.numeric( length(baits) );
n.fragments <- as.numeric( length(fragments) );
# total number of combinations,
# minus "reverse linked" bait-to-bait interactions and bait self-ligations
possible.combinations <- n.baits*n.fragments - choose(n.baits, 2) - n.baits;
}
return(possible.combinations);
} |
#' ewascatalog
#'
#' ewascatalog queries the EWAS Catalog from R.
#' @param query Query text.
#' @param type Type of query, either 'cpg', 'region', 'gene', 'trait', 'efo', 'study' (Default: 'cpg').
#' @param url url of website to query - default is http://www.ewascatalog.org
#'
#' @return Data frame of EWAS Catalog results.
#' @examples
#' # CpG
#' res <- ewascatalog("cg00029284", "cpg")
#'
#' # Region
#' res <- ewascatalog("6:15000000-25000000", "region")
#'
#' # Gene
#' res <- ewascatalog("FTO", "gene")
#'
#' # Trait
#' res <- ewascatalog("Alzheimers disease", "trait")
#'
#' # EFO
#' res <- ewascatalog("EFO_0002950", "efo")
#'
#' # Study
#' res <- ewascatalog("27040690", "study")
#' @author James R Staley <js16174@bristol.ac.uk>
#' @author Thomas Battram <thomas.battram@bristol.ac.uk>
#' @export
ewascatalog <- function(query, type = c("cpg", "loc", "region", "gene", "trait", "efo", "study"),
url = "http://www.ewascatalog.org") {
type <- match.arg(type)
if (type == "region") {
ub <- as.numeric(sub(".*-", "", sub(".*:", "", query)))
lb <- as.numeric(sub("-.*", "", sub(".*:", "", query)))
dist <- ub - lb
if (any(dist > 10000000)) stop("region query can be maximum of 10mb in size")
}
else if (type == "trait") {
query <- gsub(" ", "+", tolower(query))
}
json_file <- paste0(url, "/api/?", type, "=", query)
json_data <- rjson::fromJSON(file = json_file)
if (length(json_data) == 0) {
return(NULL)
}
fields <- json_data$fields
results <- as.data.frame(matrix(unlist(json_data$results), ncol = length(fields), byrow = T), stringsAsFactors = F)
names(results) <- fields
for (field in c("n","n_studies","pos")) {
results[[field]] <- as.integer(results[[field]])
}
for (field in c("beta","p", "se")) {
results[[field]] <- as.numeric(results[[field]])
}
return(results)
}
| /R/ewascatalog.R | permissive | MRCIEU/ewascatalog-r | R | false | false | 1,873 | r | #' ewascatalog
#'
#' ewascatalog queries the EWAS Catalog from R.
#' @param query Query text.
#' @param type Type of query, either 'cpg', 'region', 'gene', 'trait', 'efo', 'study' (Default: 'cpg').
#' @param url url of website to query - default is http://www.ewascatalog.org
#'
#' @return Data frame of EWAS Catalog results.
#' @examples
#' # CpG
#' res <- ewascatalog("cg00029284", "cpg")
#'
#' # Region
#' res <- ewascatalog("6:15000000-25000000", "region")
#'
#' # Gene
#' res <- ewascatalog("FTO", "gene")
#'
#' # Trait
#' res <- ewascatalog("Alzheimers disease", "trait")
#'
#' # EFO
#' res <- ewascatalog("EFO_0002950", "efo")
#'
#' # Study
#' res <- ewascatalog("27040690", "study")
#' @author James R Staley <js16174@bristol.ac.uk>
#' @author Thomas Battram <thomas.battram@bristol.ac.uk>
#' @export
# Query the EWAS Catalog (http://www.ewascatalog.org) web API.
#
# query: query text -- a CpG id, "chr:start-end" region, gene symbol, trait
#        name, EFO id or PubMed study id, matching `type`.
# type:  which catalog index to query against; defaults to "cpg".
# url:   base URL of the catalog instance.
#
# Returns a data.frame of association results, or NULL if nothing matched.
ewascatalog <- function(query, type = c("cpg", "loc", "region", "gene", "trait", "efo", "study"),
                        url = "http://www.ewascatalog.org") {
  type <- match.arg(type)
  if (type == "region") {
    # Parse "chr:lb-ub" coordinates; the API caps region queries at 10 Mb.
    ub <- as.numeric(sub(".*-", "", sub(".*:", "", query)))
    lb <- as.numeric(sub("-.*", "", sub(".*:", "", query)))
    dist <- ub - lb
    if (any(dist > 10000000)) stop("region query can be maximum of 10mb in size")
  }
  else if (type == "trait") {
    # Trait names are matched lower-case; spaces become '+' in the URL.
    query <- gsub(" ", "+", tolower(query))
  }
  json_file <- paste0(url, "/api/?", type, "=", query)
  json_data <- rjson::fromJSON(file = json_file)
  if (length(json_data) == 0) {
    # No matches: the API returns an empty payload.
    return(NULL)
  }
  fields <- json_data$fields
  # Results arrive as one flat list; reshape row-wise into a data frame.
  results <- as.data.frame(matrix(unlist(json_data$results), ncol = length(fields), byrow = T), stringsAsFactors = F)
  names(results) <- fields
  # Coerce the known integer and numeric columns.
  # NOTE(review): this assumes all six fields are present in every payload;
  # a missing field would make the column assignment fail -- confirm.
  for (field in c("n","n_studies","pos")) {
    results[[field]] <- as.integer(results[[field]])
  }
  for (field in c("beta","p", "se")) {
    results[[field]] <- as.numeric(results[[field]])
  }
  return(results)
}
|
# Manual smoke-test script for DatabaseConnector against several back ends
# (MySQL, PDW, SQL Server, Oracle, PostgreSQL, Redshift).
# NOTE(review): intended for interactive use; `pw` must already be defined
# in the session for the sections that do not read it from Sys.getenv().
library(DatabaseConnector)
# Test MySQL:
connectionDetails <- createConnectionDetails(dbms = "mysql",
                                             server = "localhost",
                                             user = "root",
                                             password = pw,
                                             schema = "fake_data")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# Test PDW with integrated security:
connectionDetails <- createConnectionDetails(dbms = "pdw",
                                             server = "JRDUSAPSCTL01",
                                             port = 17001,
                                             schema = "CDM_Truven_MDCR_V415")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# CTAS hack stuff:
# NOTE(review): this section reuses `conn` after the dbDisconnect() above;
# it assumes a live connection when stepped through interactively.
n <- 5000
data <- data.frame(x = 1:n, y = runif(n))
insertTable(conn, "#temp", data, TRUE, TRUE, TRUE)
querySql(conn, "SELECT * FROM #temp")
data <- querySql(conn, "SELECT TOP 10000 * FROM condition_occurrence")
data <- data[, c("PERSON_ID",
                 "CONDITION_CONCEPT_ID",
                 "CONDITION_START_DATE",
                 "CONDITION_END_DATE",
                 "CONDITION_TYPE_CONCEPT_ID",
                 "CONDITION_SOURCE_VALUE")]
insertTable(conn, "#temp", data, TRUE, TRUE, TRUE)
# insertTable arguments laid out as variables for stepping through manually:
tableName <- "#temp"
dropTableIfExists <- TRUE
createTable <- TRUE
tempTable <- TRUE
oracleTempSchema <- NULL
connection <- conn
x <- querySql(conn, "SELECT * FROM #temp")
str(x)
# Test PDW without integrated security:
connectionDetails <- createConnectionDetails(dbms = "pdw",
                                             server = "JRDUSAPSCTL01",
                                             port = 17001,
                                             schema = "CDM_Truven_MDCR",
                                             user = "hix_writer",
                                             password = pw)
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# Test SQL Server without integrated security:
connectionDetails <- createConnectionDetails(dbms = "sql server",
                                             server = "RNDUSRDHIT06.jnj.com",
                                             user = "mschuemi",
                                             domain = "eu",
                                             password = pw,
                                             schema = "cdm_hcup",
                                             port = 1433)
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
x <- querySql.ffdf(conn, "SELECT TOP 1000000 * FROM person")
dbDisconnect(conn)
# Test SQL Server with integrated security:
connectionDetails <- createConnectionDetails(dbms = "sql server",
                                             server = "RNDUSRDHIT06.jnj.com",
                                             schema = "cdm_hcup")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
querySql.ffdf(conn, "SELECT TOP 100 * FROM person")
executeSql(conn, "CREATE TABLE #temp (x int)")
querySql(conn, "SELECT COUNT(*) FROM #temp")
# x <- querySql.ffdf(conn,'SELECT * FROM person')
# Round-trip a data frame containing an NA date to check date handling.
data <- data.frame(id = c(1, 2, 3),
                   date = as.Date(c("2000-01-01", "2001-01-31", "2004-12-31")),
                   text = c("asdf", "asdf", "asdf"))
data$date[2] <- NA
insertTable(connection = conn,
            tableName = "test",
            data = data,
            dropTableIfExists = TRUE,
            createTable = TRUE,
            tempTable = TRUE)
d2 <- querySql(conn, "SELECT * FROM test")
str(d2)
is.na(d2$DATE)
dbDisconnect(conn)
# Test Oracle:
connectionDetails <- createConnectionDetails(dbms = "oracle",
                                             server = "xe",
                                             user = "system",
                                             password = pw,
                                             schema = "cdm_truven_ccae_6k")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# Test PostgreSQL:
pw <- Sys.getenv("pwPostgres")
connectionDetails <- createConnectionDetails(dbms = "postgresql",
                                             server = "localhost/ohdsi",
                                             user = "postgres",
                                             password = pw,
                                             schema = "cdm4_sim")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# Test Redshift:
pw <- Sys.getenv("pwRedShift")
connectionDetails <- createConnectionDetails(dbms = "redshift",
                                             server = "hicoe.cldcoxyrkflo.us-east-1.redshift.amazonaws.com/truven_mdcr",
                                             port = "5439",
                                             user = "mschuemi",
                                             password = pw,
                                             schema = "cdm",
                                             extraSettings = "ssl=true&sslfactory=com.amazon.redshift.ssl.NonValidatingFactory")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
executeSql(conn, "CREATE TABLE scratch.test (x INT)")
person <- querySql.ffdf(conn, "SELECT * FROM person")
data <- data.frame(id = c(1, 2, 3),
                   date = as.Date(c("2000-01-01", "2001-01-31", "2004-12-31")),
                   text = c("asdf", "asdf", "asdf"))
insertTable(connection = conn,
            tableName = "test",
            data = data,
            dropTableIfExists = TRUE,
            createTable = TRUE,
            tempTable = TRUE)
d2 <- querySql(conn, "SELECT * FROM test")
str(d2)
options(fftempdir = "s:/fftemp")
d2 <- querySql.ffdf(conn, "SELECT * FROM test")
d2
dbDisconnect(conn)
### Tests for dbInsertTable ###
# Daily date sequence (1900-2012, ~41k values) used as bulk test data below.
day.start <- "1900/01/01"
day.end <- "2012/12/31"
dayseq <- seq.Date(as.Date(day.start), as.Date(day.end), by = "day")
# Generate `n` random alphanumeric strings, each `lenght` characters long.
# (The misspelled `lenght` argument name is kept for backward compatibility.)
# Fix: the original grew a vector seeded with c(1:n) and iterated `1:n`,
# which yields garbage for n = 0; seq_len() + vapply() returns character(0)
# in that case and preallocates the result otherwise.
makeRandomStrings <- function(n = 1, lenght = 12) {
  alphabet <- c(0:9, letters, LETTERS)
  vapply(seq_len(n),
         function(i) paste(sample(alphabet, lenght, replace = TRUE), collapse = ""),
         character(1))
}
# Build a moderately large test table (one row per day in `dayseq`).
data <- data.frame(start_date = dayseq,
                   person_id = as.integer(round(runif(length(dayseq), 1, 1e+07))),
                   value = runif(length(dayseq)),
                   id = makeRandomStrings(length(dayseq)))
str(data)
tableName <- "#temp"
connectionDetails <- createConnectionDetails(dbms = "sql server",
                                             server = "RNDUSRDHIT06.jnj.com",
                                             schema = "cdm_hcup")
connection <- connect(connectionDetails)
dbInsertTable(connection, tableName, data, dropTableIfExists = TRUE)
d <- querySql(connection, "SELECT * FROM #temp")
d <- querySql.ffdf(connection, "SELECT * FROM #temp")
library(ffbase)
data <- as.ffdf(data)
dbDisconnect(connection)
### Test OHDSI RedShift:
# Credentials are taken from environment variables rather than hard-coded.
details <- createConnectionDetails(dbms = "redshift",
                                   user = Sys.getenv("userOhdsiRedshift"),
                                   password = Sys.getenv("pwOhdsiRedshift"),
                                   server = paste0(Sys.getenv("serverOhdsiRedshift"),"/synpuf"),
                                   schema = "cdm")
connection <- connect(details)
querySql(connection, "SELECT COUNT(*) FROM person")
dbDisconnect(connection)
| /extras/TestCode.R | permissive | ltscomputingllc/DatabaseConnector | R | false | false | 7,626 | r | library(DatabaseConnector)
# Manual smoke tests for DatabaseConnector against several back ends.
# NOTE(review): intended for interactive use; `pw` must already be defined
# in the session for the sections that do not read it from Sys.getenv().
# Test MySQL:
connectionDetails <- createConnectionDetails(dbms = "mysql",
                                             server = "localhost",
                                             user = "root",
                                             password = pw,
                                             schema = "fake_data")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# Test PDW with integrated security:
connectionDetails <- createConnectionDetails(dbms = "pdw",
                                             server = "JRDUSAPSCTL01",
                                             port = 17001,
                                             schema = "CDM_Truven_MDCR_V415")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# CTAS hack stuff:
# NOTE(review): this section reuses `conn` after the dbDisconnect() above;
# it assumes a live connection when stepped through interactively.
n <- 5000
data <- data.frame(x = 1:n, y = runif(n))
insertTable(conn, "#temp", data, TRUE, TRUE, TRUE)
querySql(conn, "SELECT * FROM #temp")
data <- querySql(conn, "SELECT TOP 10000 * FROM condition_occurrence")
data <- data[, c("PERSON_ID",
                 "CONDITION_CONCEPT_ID",
                 "CONDITION_START_DATE",
                 "CONDITION_END_DATE",
                 "CONDITION_TYPE_CONCEPT_ID",
                 "CONDITION_SOURCE_VALUE")]
insertTable(conn, "#temp", data, TRUE, TRUE, TRUE)
# insertTable arguments laid out as variables for stepping through manually:
tableName <- "#temp"
dropTableIfExists <- TRUE
createTable <- TRUE
tempTable <- TRUE
oracleTempSchema <- NULL
connection <- conn
x <- querySql(conn, "SELECT * FROM #temp")
str(x)
# Test PDW without integrated security:
connectionDetails <- createConnectionDetails(dbms = "pdw",
                                             server = "JRDUSAPSCTL01",
                                             port = 17001,
                                             schema = "CDM_Truven_MDCR",
                                             user = "hix_writer",
                                             password = pw)
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# Test SQL Server without integrated security:
connectionDetails <- createConnectionDetails(dbms = "sql server",
                                             server = "RNDUSRDHIT06.jnj.com",
                                             user = "mschuemi",
                                             domain = "eu",
                                             password = pw,
                                             schema = "cdm_hcup",
                                             port = 1433)
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
x <- querySql.ffdf(conn, "SELECT TOP 1000000 * FROM person")
dbDisconnect(conn)
# Test SQL Server with integrated security:
connectionDetails <- createConnectionDetails(dbms = "sql server",
                                             server = "RNDUSRDHIT06.jnj.com",
                                             schema = "cdm_hcup")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
querySql.ffdf(conn, "SELECT TOP 100 * FROM person")
executeSql(conn, "CREATE TABLE #temp (x int)")
querySql(conn, "SELECT COUNT(*) FROM #temp")
# x <- querySql.ffdf(conn,'SELECT * FROM person')
# Round-trip a data frame containing an NA date to check date handling.
data <- data.frame(id = c(1, 2, 3),
                   date = as.Date(c("2000-01-01", "2001-01-31", "2004-12-31")),
                   text = c("asdf", "asdf", "asdf"))
data$date[2] <- NA
insertTable(connection = conn,
            tableName = "test",
            data = data,
            dropTableIfExists = TRUE,
            createTable = TRUE,
            tempTable = TRUE)
d2 <- querySql(conn, "SELECT * FROM test")
str(d2)
is.na(d2$DATE)
dbDisconnect(conn)
# Test Oracle:
connectionDetails <- createConnectionDetails(dbms = "oracle",
                                             server = "xe",
                                             user = "system",
                                             password = pw,
                                             schema = "cdm_truven_ccae_6k")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# Test PostgreSQL:
pw <- Sys.getenv("pwPostgres")
connectionDetails <- createConnectionDetails(dbms = "postgresql",
                                             server = "localhost/ohdsi",
                                             user = "postgres",
                                             password = pw,
                                             schema = "cdm4_sim")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
dbDisconnect(conn)
# Test Redshift:
pw <- Sys.getenv("pwRedShift")
connectionDetails <- createConnectionDetails(dbms = "redshift",
                                             server = "hicoe.cldcoxyrkflo.us-east-1.redshift.amazonaws.com/truven_mdcr",
                                             port = "5439",
                                             user = "mschuemi",
                                             password = pw,
                                             schema = "cdm",
                                             extraSettings = "ssl=true&sslfactory=com.amazon.redshift.ssl.NonValidatingFactory")
conn <- connect(connectionDetails)
querySql(conn, "SELECT COUNT(*) FROM person")
executeSql(conn, "CREATE TABLE scratch.test (x INT)")
person <- querySql.ffdf(conn, "SELECT * FROM person")
data <- data.frame(id = c(1, 2, 3),
                   date = as.Date(c("2000-01-01", "2001-01-31", "2004-12-31")),
                   text = c("asdf", "asdf", "asdf"))
insertTable(connection = conn,
            tableName = "test",
            data = data,
            dropTableIfExists = TRUE,
            createTable = TRUE,
            tempTable = TRUE)
d2 <- querySql(conn, "SELECT * FROM test")
str(d2)
options(fftempdir = "s:/fftemp")
d2 <- querySql.ffdf(conn, "SELECT * FROM test")
d2
dbDisconnect(conn)
### Tests for dbInsertTable ###
# Daily date sequence (1900-2012, ~41k values) used as bulk test data below.
day.start <- "1900/01/01"
day.end <- "2012/12/31"
dayseq <- seq.Date(as.Date(day.start), as.Date(day.end), by = "day")
# Generate `n` random alphanumeric identifiers, each `lenght` characters long.
# (Argument name `lenght` is a historical misspelling kept for compatibility.)
makeRandomStrings <- function(n = 1, lenght = 12) {
  char_pool <- c(0:9, letters, LETTERS)
  out <- c(1:n)  # preallocate as in the original; coerced to character on first assignment
  for (i in 1:n) {
    out[i] <- paste(sample(char_pool, lenght, replace = TRUE), collapse = "")
  }
  return(out)
}
# Build a moderately large test table (one row per day in `dayseq`).
data <- data.frame(start_date = dayseq,
                   person_id = as.integer(round(runif(length(dayseq), 1, 1e+07))),
                   value = runif(length(dayseq)),
                   id = makeRandomStrings(length(dayseq)))
str(data)
tableName <- "#temp"
connectionDetails <- createConnectionDetails(dbms = "sql server",
                                             server = "RNDUSRDHIT06.jnj.com",
                                             schema = "cdm_hcup")
connection <- connect(connectionDetails)
dbInsertTable(connection, tableName, data, dropTableIfExists = TRUE)
d <- querySql(connection, "SELECT * FROM #temp")
d <- querySql.ffdf(connection, "SELECT * FROM #temp")
library(ffbase)
data <- as.ffdf(data)
dbDisconnect(connection)
### Test OHDSI RedShift:
# Credentials are taken from environment variables rather than hard-coded.
details <- createConnectionDetails(dbms = "redshift",
                                   user = Sys.getenv("userOhdsiRedshift"),
                                   password = Sys.getenv("pwOhdsiRedshift"),
                                   server = paste0(Sys.getenv("serverOhdsiRedshift"),"/synpuf"),
                                   schema = "cdm")
connection <- connect(details)
querySql(connection, "SELECT COUNT(*) FROM person")
dbDisconnect(connection)
|
# Capstone modeling script: PCA screening, collinearity checks and data prep
# ahead of elastic net / random forest model selection.
library(glmnet)
library(randomForest)
library(gbm)
library(ROCR)
library(ggfortify)
# NOTE(review): `%>%`, ungroup(), mutate() and if_else() below require
# dplyr/magrittr to be attached; `final_base_table` is assumed to exist in
# the session -- confirm both before running non-interactively.
#After iterations of prin component analysis
base.pca <- prcomp(final_base_table[,c( 15:23, 26:35)], center = TRUE,scale. = TRUE)
summary(base.pca)
# Biplot of the first two principal components with loading vectors.
autoplot(base.pca, data = final_base_table[,c( 15:23, 26:35)],
         loadings = TRUE, loadings.colour = 'blue',
         loadings.label = TRUE, loadings.label.size = 3)
#Check for collinearity
data.frame(colnames(final_base_table))
cor_cols = c(15:35)
res <- cor(final_base_table[,cor_cols])
# Drop identifiers and outcome components that must not be predictors.
final_base_table_mod_prep1 =
  final_base_table %>%
  ungroup() %>%
  dplyr::select(-patient_id,
                -stroke_dt,
                -index_dt,
                -had_death,
                -had_stroke,
                -death,
                -min_nyha_dt
  )
# Coerce column types: counts/BMI numeric; collapse region level, refactor.
final_base_table_mod_prep1$ino_cnt = as.numeric(final_base_table_mod_prep1$ino_cnt)
final_base_table_mod_prep1$bmi = as.numeric(final_base_table_mod_prep1$bmi)
final_base_table_mod_prep1$region = as.character(final_base_table_mod_prep1$region)
final_base_table_mod_prep1$region[which(final_base_table_mod_prep1$region == "Other/Unknown")] = "Unknown"
final_base_table_mod_prep1$region = as.factor(final_base_table_mod_prep1$region)
final_base_table_mod_prep1 %>% dplyr::select(race) %>% distinct()
data.frame(colnames(final_base_table_mod_prep1))
# Convert the categorical columns (by position) to factors.
cols <- c(3,seq(6,28))
final_base_table_mod_prep1[,cols] <- lapply(final_base_table_mod_prep1[,cols], factor)
sapply(final_base_table_mod_prep1, class)
#################################################
#Begin Double Cross Validation
#
#Testing Random Forests and Elastic Net regression
#
#Model selection to choose best 1. # of predictor vars
# and 2. alpha and lamda values
#
#################################################
# Candidate hyperparameter grids for the inner model-selection loop.
lambdalist = 0:200/2000
alphalist = c(0, 0.05, 0.1, 0.15,.2,.4)
alpha_count = length(alphalist)
#Default mtry for classification is sqrt(predictors)
mtrylist = c(2,3,4,5,6,7,8,9)
m_count = length(mtrylist)
# Design matrix (intercept column dropped) and numeric 0/1 outcome.
x.matrix = model.matrix(had_strk_or_dth~., data=final_base_table_mod_prep1)[,-1]
y = as.numeric(as.matrix(final_base_table_mod_prep1[,18]))
n = dim(x.matrix)[1]
# define the cross-validation splits
# NOTE(review): `1:(n%%ncv)` misbehaves when n is an exact multiple of ncv
# (1:0 yields c(1, 0)) -- confirm n %% 10 != 0 for this data.
set.seed(15)
ncv = 10
groups = c(rep(1:ncv,floor(n/ncv)), 1:(n%%ncv))
cvgroups = sample(groups,n)
allpredictedCV = matrix(rep(0,n*2), ncol=2)
# Outer loop of the double (nested) cross-validation: for each outer fold j,
# an inner 10-fold CV on the training portion selects the elastic net
# (alpha, lambda) and the random forest mtry; the winners are refit on the
# full training portion and used to predict the held-out fold.
for(j in 1:ncv){
  print(j)
  #Choose validation set
  group_assesm = (cvgroups == j)
  #Assign training data
  trainx = x.matrix[!group_assesm,]
  trainy = as.factor(y[!group_assesm])
  #Assign testing data
  testx = x.matrix[group_assesm,]
  testy = as.factor(y[group_assesm])
  #Assign groups within training data
  trainx.n = dim(trainx)[1]
  if ((trainx.n%%ncv) == 0) {
    groups.select = rep(1:ncv,floor(trainx.n/ncv))
  } else {
    #account for different-sized input matrices
    groups.select = c(rep(1:ncv,floor(trainx.n / ncv)),(1:(trainx.n%%ncv)))
  }
  cvgroups.selection = sample(groups.select, trainx.n)
  #END- Assign groups within training data
  alllambdabest = rep(NA,alpha_count)
  allcvbest.net = rep(NA,alpha_count)
  rfor_predictions = matrix(rep(0,trainx.n*m_count), ncol=m_count)
  rfor.cv = rep(0,m_count)
  #################################################
  #Begin model selection
  #################################################
  for (a in 1:alpha_count) {
    #First, elastic net: pick the best lambda at each candidate alpha
    cvfit.net = cv.glmnet(trainx, trainy, lambda=lambdalist, alpha = alphalist[a],
                          nfolds=ncv, foldid=cvgroups.selection, family = "binomial")
    #Cool best lambda plot
    plot(cvfit.net$lambda, cvfit.net$cvm)
    abline(v=cvfit.net$lambda[order(cvfit.net$cvm)[1]], col = "red")
    allcvbest.net[a] = cvfit.net$cvm[order(cvfit.net$cvm)[1]]
    alllambdabest[a] = cvfit.net$lambda[order(cvfit.net$cvm)[1]]
  }
  #Second, random forests, choosing best number of predictor vars
  for(m in 1:m_count){
    for(h in 1:ncv){
      rfgroup = (cvgroups.selection == h)
      rforest = randomForest(trainy[!rfgroup] ~ ., data = trainx[!rfgroup,],
                             mtry = mtrylist[m], importance = T, ntree = 500)
      rfor_predictions[rfgroup,m] = predict(rforest, newdata = trainx[rfgroup,],type="prob")[,2]
    }
  }
  #For Enet model assessment
  #Choose best alpha and best lambda for elastic net
  whichmodel = order(allcvbest.net)[1]
  bestalpha = alphalist[whichmodel]
  bestlambda = alllambdabest[whichmodel]
  #Run the best model on all training data
  bestmodel.enet = glmnet(trainx, trainy, alpha = bestalpha, lambda=bestlambda, family = "binomial")
  #For Random Forests model assessment
  #Get CV for each mtry value in random forests (mean binomial deviance).
  # (fix: a duplicated copy of the deviance.contribs line was removed; it
  # recomputed the identical value and had no effect)
  # NOTE(review): predicted probabilities of exactly 0 or 1 make log()
  # infinite here and, unlike the assessment section below, are not zeroed
  # out -- confirm predictions stay strictly inside (0, 1).
  for (rf in 1:m_count) {
    y_i <- as.numeric(as.character(trainy))
    u_i <- rfor_predictions[,rf]
    deviance.contribs <- (y_i * log(u_i)) + ((1-y_i) * log(1 - u_i))
    deviance <- -2 * sum(deviance.contribs)
    rfor.cv[rf] = deviance/length(rfor_predictions[,rf])
  }
  #Find mtry with lowest cv
  whichmodel2 = order(rfor.cv)[1]
  bestmtry = mtrylist[whichmodel2]
  #Run the best model on all training data
  bestmodel.rf = randomForest(trainy ~ ., data = trainx,
                              mtry = bestmtry, importance = T, ntree = 500)
  #################################################
  #End model selection
  #################################################
  #Store predictions on test set from best enent model
  allpredictedCV[group_assesm,1] = predict(bestmodel.enet, newx = testx, s = bestlambda, type= "response")
  #Store predictions on test set from random forest model
  allpredictedCV[group_assesm,2] = predict(bestmodel.rf, newdata = testx, type= "prob")[,2]
}
rfor.cv
allcvbest.net
#Assess elastic net model
# Mean binomial deviance of the out-of-fold elastic net predictions.
# (fix: a duplicated copy of the deviance.contribs line was removed; it
# recomputed the identical value and had no effect)
y_i <- y
u_i <- allpredictedCV[,1]
deviance.contribs <- (y_i * log(u_i)) + ((1-y_i) * log(1 - u_i))
# Predictions of exactly 0 or 1 give NA/Inf contributions; treat them as 0.
deviance.contribs[which(is.na(deviance.contribs))] <- 0
deviance <- -2 * sum(deviance.contribs)
CV.enet <- deviance/length(y_i)
CV.enet
#Assess random forest model
# Same mean-deviance assessment for the out-of-fold random forest predictions.
# (fix: duplicated deviance.contribs line removed here as well)
y_i <- as.numeric(y)
u_i <- allpredictedCV[,2]
deviance.contribs <- (y_i * log(u_i)) + ((1-y_i) * log(1 - u_i))
deviance.contribs[which(is.na(deviance.contribs))] <- 0
deviance <- -2 * sum(deviance.contribs)
CV.rfor <- deviance/length(y_i)
CV.rfor
#################################################
#Elastic net is winner based on deviance, but it is close enough
#that we will proceed with both models
#
#We will use ten-fold cross validation to assess both models
#
#################################################
#################################################
#Testing Elastic Net
#################################################
#Default mtry for classification is sqrt(predictors)
lambdalist = 0:200/2000
# define the cross-validation splits
set.seed(10)
ncv = 10
groups = c(rep(1:ncv,floor(n/ncv)), 1:(n%%ncv))
cvgroups = sample(groups,n)
factor_y = as.factor(y)
predicted_enet = matrix(rep(0,n), ncol=1)
#Run CV for all values of mtry
# NOTE(review): this loop refits the elastic net at the (bestalpha,
# bestlambda) left over from the last outer fold above -- i.e. a plain
# 10-fold CV of one fixed model, not a fresh hyperparameter search.
for(j in 1:ncv){
  print(j)
  #Choose validation set
  group_assesm = (cvgroups == j)
  #Assign training data
  trainx = x.matrix[!group_assesm,]
  trainy = as.factor(y[!group_assesm])
  #Assign testing data
  testx = x.matrix[group_assesm,]
  testy = as.factor(y[group_assesm])
  #Assign groups within training data
  trainx.n = dim(trainx)[1]
  #First, elastic net choosing best alpha and lamda
  bestmodel.enet = glmnet(trainx, trainy, alpha = bestalpha, lambda=bestlambda, family = "binomial")
  predicted_enet[group_assesm] = predict(bestmodel.enet, newx = testx, s = bestlambda, type= "response")
}
# ROC curve and AUC on the out-of-fold elastic net predictions.
pred <- prediction(predicted_enet ,factor_y)
roc.perf = performance(pred, measure = "tpr", x.measure = "fpr")
auc.perf = performance(pred, measure = "auc")
auc.perf@y.values
plot(roc.perf)
#################################################
#Testing Random Forest
#################################################
mtrylist = c(5)
m_count = length(mtrylist)
# define the cross-validation splits
set.seed(10)
ncv = 10
groups = c(rep(1:ncv,floor(n/ncv)), 1:(n%%ncv))
cvgroups = sample(groups,n)
factor_y = as.factor(y)
rfor_predictions = matrix(rep(0,n*m_count), ncol=m_count)
rfor.cv = rep(0,1)
#Run CV for a single mtry value of 5
for(b in 1:m_count){
  for(c in 1:ncv){
    rfgroup = (cvgroups == c)
    finalforest = randomForest(factor_y[!rfgroup] ~ ., data = x.matrix[!rfgroup,],
                               mtry = mtrylist[b], importance = T, ntree = 1000)
    rfor_predictions[rfgroup,b] = predict(finalforest, newdata = x.matrix[rfgroup,], type= "prob")[,2]
  }
}
# ROC curve and AUC on the out-of-fold random forest predictions.
pred <- prediction(rfor_predictions[,1],factor_y)
roc.perf = performance(pred, measure = "tpr", x.measure = "fpr")
auc.perf = performance(pred, measure = "auc")
auc.perf@y.values
plot(roc.perf)
# Confusion matrix at a 0.27 probability cutoff (requires dplyr attached).
pred_df= as.data.frame(rfor_predictions[,1])
names(pred_df)[1] = 'pred_val'
conf_pred = pred_df %>% mutate(class_prod = if_else(pred_val>= .27,1,0)) %>% dplyr::select(class_prod)
table(predicted = conf_pred$class_prod, actual = y)
#Calculate optimal cutoff value
# opt.cut: for each ROC curve in `perf` (x = FPR, y = TPR) find the point
# closest to the ideal (0, 1) corner and report its sensitivity, specificity
# and the corresponding probability cutoff taken from `pred`.
# NOTE(review): the result is returned invisibly (the function body ends in
# an assignment), hence the explicit print() below.
opt.cut = function(perf, pred){
  cut.ind = mapply(FUN=function(x, y, p){
    # squared distance to the perfect-classifier corner (FPR 0, TPR 1)
    d = (x - 0)^2 + (y-1)^2
    ind = which(d == min(d))
    c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
      cutoff = p[[ind]])},
    perf@x.values, perf@y.values, pred@cutoffs)
}
print(opt.cut(roc.perf, pred))
#Last, but not least, fit the final model
bestmodel_final = randomForest(factor_y ~ ., data = x.matrix,
                               mtry = 5, importance = T, ntree = 5000, cutoff = c(.5,.5))
#Plot variable importance
varImpPlot(bestmodel_final, main = "Random Forests- Variable Importance")
| /Capstone Modeling.R | no_license | kostickl/DS785 | R | false | false | 9,949 | r |
# Capstone modeling script: PCA screening, collinearity checks and data prep
# ahead of elastic net / random forest model selection.
library(glmnet)
library(randomForest)
library(gbm)
library(ROCR)
library(ggfortify)
# NOTE(review): `%>%`, ungroup(), mutate() and if_else() below require
# dplyr/magrittr to be attached; `final_base_table` is assumed to exist in
# the session -- confirm both before running non-interactively.
#After iterations of prin component analysis
base.pca <- prcomp(final_base_table[,c( 15:23, 26:35)], center = TRUE,scale. = TRUE)
summary(base.pca)
# Biplot of the first two principal components with loading vectors.
autoplot(base.pca, data = final_base_table[,c( 15:23, 26:35)],
         loadings = TRUE, loadings.colour = 'blue',
         loadings.label = TRUE, loadings.label.size = 3)
#Check for collinearity
data.frame(colnames(final_base_table))
cor_cols = c(15:35)
res <- cor(final_base_table[,cor_cols])
# Drop identifiers and outcome components that must not be predictors.
final_base_table_mod_prep1 =
  final_base_table %>%
  ungroup() %>%
  dplyr::select(-patient_id,
                -stroke_dt,
                -index_dt,
                -had_death,
                -had_stroke,
                -death,
                -min_nyha_dt
  )
# Coerce column types: counts/BMI numeric; collapse region level, refactor.
final_base_table_mod_prep1$ino_cnt = as.numeric(final_base_table_mod_prep1$ino_cnt)
final_base_table_mod_prep1$bmi = as.numeric(final_base_table_mod_prep1$bmi)
final_base_table_mod_prep1$region = as.character(final_base_table_mod_prep1$region)
final_base_table_mod_prep1$region[which(final_base_table_mod_prep1$region == "Other/Unknown")] = "Unknown"
final_base_table_mod_prep1$region = as.factor(final_base_table_mod_prep1$region)
final_base_table_mod_prep1 %>% dplyr::select(race) %>% distinct()
data.frame(colnames(final_base_table_mod_prep1))
# Convert the categorical columns (by position) to factors.
cols <- c(3,seq(6,28))
final_base_table_mod_prep1[,cols] <- lapply(final_base_table_mod_prep1[,cols], factor)
sapply(final_base_table_mod_prep1, class)
#################################################
#Begin Double Cross Validation
#
#Testing Random Forests and Elastic Net regression
#
#Model selection to choose best 1. # of predictor vars
# and 2. alpha and lamda values
#
#################################################
# Candidate hyperparameter grids for the inner model-selection loop.
lambdalist = 0:200/2000
alphalist = c(0, 0.05, 0.1, 0.15,.2,.4)
alpha_count = length(alphalist)
#Default mtry for classification is sqrt(predictors)
mtrylist = c(2,3,4,5,6,7,8,9)
m_count = length(mtrylist)
# Design matrix (intercept column dropped) and numeric 0/1 outcome.
x.matrix = model.matrix(had_strk_or_dth~., data=final_base_table_mod_prep1)[,-1]
y = as.numeric(as.matrix(final_base_table_mod_prep1[,18]))
n = dim(x.matrix)[1]
# define the cross-validation splits
# NOTE(review): `1:(n%%ncv)` misbehaves when n is an exact multiple of ncv
# (1:0 yields c(1, 0)) -- confirm n %% 10 != 0 for this data.
set.seed(15)
ncv = 10
groups = c(rep(1:ncv,floor(n/ncv)), 1:(n%%ncv))
cvgroups = sample(groups,n)
allpredictedCV = matrix(rep(0,n*2), ncol=2)
# Outer loop of the double (nested) cross-validation: for each outer fold j,
# an inner 10-fold CV on the training portion selects the elastic net
# (alpha, lambda) and the random forest mtry; the winners are refit on the
# full training portion and used to predict the held-out fold.
for(j in 1:ncv){
  print(j)
  #Choose validation set
  group_assesm = (cvgroups == j)
  #Assign training data
  trainx = x.matrix[!group_assesm,]
  trainy = as.factor(y[!group_assesm])
  #Assign testing data
  testx = x.matrix[group_assesm,]
  testy = as.factor(y[group_assesm])
  #Assign groups within training data
  trainx.n = dim(trainx)[1]
  if ((trainx.n%%ncv) == 0) {
    groups.select = rep(1:ncv,floor(trainx.n/ncv))
  } else {
    #account for different-sized input matrices
    groups.select = c(rep(1:ncv,floor(trainx.n / ncv)),(1:(trainx.n%%ncv)))
  }
  cvgroups.selection = sample(groups.select, trainx.n)
  #END- Assign groups within training data
  alllambdabest = rep(NA,alpha_count)
  allcvbest.net = rep(NA,alpha_count)
  rfor_predictions = matrix(rep(0,trainx.n*m_count), ncol=m_count)
  rfor.cv = rep(0,m_count)
  #################################################
  #Begin model selection
  #################################################
  for (a in 1:alpha_count) {
    #First, elastic net: pick the best lambda at each candidate alpha
    cvfit.net = cv.glmnet(trainx, trainy, lambda=lambdalist, alpha = alphalist[a],
                          nfolds=ncv, foldid=cvgroups.selection, family = "binomial")
    #Cool best lambda plot
    plot(cvfit.net$lambda, cvfit.net$cvm)
    abline(v=cvfit.net$lambda[order(cvfit.net$cvm)[1]], col = "red")
    allcvbest.net[a] = cvfit.net$cvm[order(cvfit.net$cvm)[1]]
    alllambdabest[a] = cvfit.net$lambda[order(cvfit.net$cvm)[1]]
  }
  #Second, random forests, choosing best number of predictor vars
  for(m in 1:m_count){
    for(h in 1:ncv){
      rfgroup = (cvgroups.selection == h)
      rforest = randomForest(trainy[!rfgroup] ~ ., data = trainx[!rfgroup,],
                             mtry = mtrylist[m], importance = T, ntree = 500)
      rfor_predictions[rfgroup,m] = predict(rforest, newdata = trainx[rfgroup,],type="prob")[,2]
    }
  }
  #For Enet model assessment
  #Choose best alpha and best lambda for elastic net
  whichmodel = order(allcvbest.net)[1]
  bestalpha = alphalist[whichmodel]
  bestlambda = alllambdabest[whichmodel]
  #Run the best model on all training data
  bestmodel.enet = glmnet(trainx, trainy, alpha = bestalpha, lambda=bestlambda, family = "binomial")
  #For Random Forests model assessment
  #Get CV for each mtry value in random forests (mean binomial deviance).
  # (fix: a duplicated copy of the deviance.contribs line was removed; it
  # recomputed the identical value and had no effect)
  # NOTE(review): predicted probabilities of exactly 0 or 1 make log()
  # infinite here and, unlike the assessment section below, are not zeroed
  # out -- confirm predictions stay strictly inside (0, 1).
  for (rf in 1:m_count) {
    y_i <- as.numeric(as.character(trainy))
    u_i <- rfor_predictions[,rf]
    deviance.contribs <- (y_i * log(u_i)) + ((1-y_i) * log(1 - u_i))
    deviance <- -2 * sum(deviance.contribs)
    rfor.cv[rf] = deviance/length(rfor_predictions[,rf])
  }
  #Find mtry with lowest cv
  whichmodel2 = order(rfor.cv)[1]
  bestmtry = mtrylist[whichmodel2]
  #Run the best model on all training data
  bestmodel.rf = randomForest(trainy ~ ., data = trainx,
                              mtry = bestmtry, importance = T, ntree = 500)
  #################################################
  #End model selection
  #################################################
  #Store predictions on test set from best enent model
  allpredictedCV[group_assesm,1] = predict(bestmodel.enet, newx = testx, s = bestlambda, type= "response")
  #Store predictions on test set from random forest model
  allpredictedCV[group_assesm,2] = predict(bestmodel.rf, newdata = testx, type= "prob")[,2]
}
rfor.cv
allcvbest.net
#Assess elastic net model
# Mean binomial deviance of the out-of-fold elastic net predictions.
# (fix: a duplicated copy of the deviance.contribs line was removed; it
# recomputed the identical value and had no effect)
y_i <- y
u_i <- allpredictedCV[,1]
deviance.contribs <- (y_i * log(u_i)) + ((1-y_i) * log(1 - u_i))
# Predictions of exactly 0 or 1 give NA/Inf contributions; treat them as 0.
deviance.contribs[which(is.na(deviance.contribs))] <- 0
deviance <- -2 * sum(deviance.contribs)
CV.enet <- deviance/length(y_i)
CV.enet
#Assess random forest model
# Same mean-deviance assessment for the out-of-fold random forest predictions.
# (fix: duplicated deviance.contribs line removed here as well)
y_i <- as.numeric(y)
u_i <- allpredictedCV[,2]
deviance.contribs <- (y_i * log(u_i)) + ((1-y_i) * log(1 - u_i))
deviance.contribs[which(is.na(deviance.contribs))] <- 0
deviance <- -2 * sum(deviance.contribs)
CV.rfor <- deviance/length(y_i)
CV.rfor
#################################################
#Elastic net is winner based on deviance, but it is close enough
#that we will proceed with both models
#
#We will use ten-fold cross validation to assess both models
#
#################################################
#################################################
#Testing Elastic Net
#################################################
#Default mtry for classification is sqrt(predictors)
lambdalist = 0:200/2000
# define the cross-validation splits
set.seed(10)
ncv = 10
groups = c(rep(1:ncv,floor(n/ncv)), 1:(n%%ncv))
cvgroups = sample(groups,n)
factor_y = as.factor(y)
predicted_enet = matrix(rep(0,n), ncol=1)
#Run CV for all values of mtry
# NOTE(review): this loop refits the elastic net at the (bestalpha,
# bestlambda) left over from the last outer fold above -- i.e. a plain
# 10-fold CV of one fixed model, not a fresh hyperparameter search.
for(j in 1:ncv){
  print(j)
  #Choose validation set
  group_assesm = (cvgroups == j)
  #Assign training data
  trainx = x.matrix[!group_assesm,]
  trainy = as.factor(y[!group_assesm])
  #Assign testing data
  testx = x.matrix[group_assesm,]
  testy = as.factor(y[group_assesm])
  #Assign groups within training data
  trainx.n = dim(trainx)[1]
  #First, elastic net choosing best alpha and lamda
  bestmodel.enet = glmnet(trainx, trainy, alpha = bestalpha, lambda=bestlambda, family = "binomial")
  predicted_enet[group_assesm] = predict(bestmodel.enet, newx = testx, s = bestlambda, type= "response")
}
# ROC curve and AUC on the out-of-fold elastic net predictions.
pred <- prediction(predicted_enet ,factor_y)
roc.perf = performance(pred, measure = "tpr", x.measure = "fpr")
auc.perf = performance(pred, measure = "auc")
auc.perf@y.values
plot(roc.perf)
#################################################
#Testing Random Forest
#################################################
mtrylist = c(5)
m_count = length(mtrylist)
# define the cross-validation splits
set.seed(10)
ncv = 10
groups = c(rep(1:ncv,floor(n/ncv)), 1:(n%%ncv))
cvgroups = sample(groups,n)
factor_y = as.factor(y)
rfor_predictions = matrix(rep(0,n*m_count), ncol=m_count)
rfor.cv = rep(0,1)
#Run CV for a single mtry value of 5
for(b in 1:m_count){
  for(c in 1:ncv){
    rfgroup = (cvgroups == c)
    finalforest = randomForest(factor_y[!rfgroup] ~ ., data = x.matrix[!rfgroup,],
                               mtry = mtrylist[b], importance = T, ntree = 1000)
    rfor_predictions[rfgroup,b] = predict(finalforest, newdata = x.matrix[rfgroup,], type= "prob")[,2]
  }
}
# ROC curve and AUC on the out-of-fold random forest predictions.
pred <- prediction(rfor_predictions[,1],factor_y)
roc.perf = performance(pred, measure = "tpr", x.measure = "fpr")
auc.perf = performance(pred, measure = "auc")
auc.perf@y.values
plot(roc.perf)
# Confusion matrix at a 0.27 probability cutoff (requires dplyr attached).
pred_df= as.data.frame(rfor_predictions[,1])
names(pred_df)[1] = 'pred_val'
conf_pred = pred_df %>% mutate(class_prod = if_else(pred_val>= .27,1,0)) %>% dplyr::select(class_prod)
table(predicted = conf_pred$class_prod, actual = y)
#Calculate optimal cutoff value
# opt.cut: for each ROC curve in `perf` (x = FPR, y = TPR) find the point
# closest to the ideal (0, 1) corner and report its sensitivity, specificity
# and the corresponding probability cutoff taken from `pred`.
# NOTE(review): the result is returned invisibly (the function body ends in
# an assignment), hence the explicit print() below.
opt.cut = function(perf, pred){
  cut.ind = mapply(FUN=function(x, y, p){
    # squared distance to the perfect-classifier corner (FPR 0, TPR 1)
    d = (x - 0)^2 + (y-1)^2
    ind = which(d == min(d))
    c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
      cutoff = p[[ind]])},
    perf@x.values, perf@y.values, pred@cutoffs)
}
print(opt.cut(roc.perf, pred))
#Last, but not least, fit the final model
bestmodel_final = randomForest(factor_y ~ ., data = x.matrix,
                               mtry = 5, importance = T, ntree = 5000, cutoff = c(.5,.5))
#Plot variable importance
varImpPlot(bestmodel_final, main = "Random Forests- Variable Importance")
|
# Twitter sentiment-analysis setup: authenticate against the Twitter API,
# pull @narendramodi's timeline and save it to CSV for the wordcloud /
# sentiment steps below.
# NOTE(review): install.packages() inside a script reinstalls on every run;
# consider moving package installation out of the analysis script.
#devtools::install_github("jrowen/twitteR", ref = "oauth_httr_1_0")
install.packages("base64enc")
install.packages("httpuv")
install.packages("twitteR")
install.packages("RCurl")
install.packages("httr")
install.packages("syuzhet")
library(twitteR)
library(ROAuth)
library(base64enc)
library(httpuv)
library(rtweet)
library(rtweet)
library(RCurl)
library(httr)
library(tm)
library(wordcloud)
library(syuzhet)
# OAuth credentials are placeholders; supply real keys before running.
cred <- OAuthFactory$new(consumerKey='Provide Your Consumer API key', # Consumer Key (API Key) ### Due to security violation of Github removing it
                         consumerSecret='Provide Your Consumer API Secret key', #Consumer Secret (API Secret) ### Due to security violation of Github removing it
                         requestURL='https://api.twitter.com/oauth/request_token',
                         accessURL='https://api.twitter.com/oauth/access_token',
                         authURL='https://api.twitter.com/oauth/authorize')
#cred$handshake(cainfo="cacert.pem")
save(cred, file="twitter authentication.Rdata")
load("twitter authentication.Rdata")
setup_twitter_oauth("Provide Your Consumer API key", # Consumer Key (API Key) ### Due to security violation of Github removing it
                    "Provide Your Consumer API Secret key", #Consumer Secret ### Due to security violation of Github removing it
                    "Provide Your Access Token", # Access Token ### Due to security violation of Github removing it
                    "Provide Your Access Token Secret key") ###Access Token Secret ####Due to security violation of Github removing it
# Fetch up to 3200 tweets (the API maximum) from the user's timeline.
Tweets <- userTimeline("narendramodi", n = 3200)
# store the tweets into dataframe
tweets.df = twListToDF(Tweets)
write.csv(tweets.df, "Tweets_modi.csv",row.names = F)
getwd()
################################################################################################################################################
# Plot a word cloud of the `max_words` most frequent terms of a
# term-document matrix `x` (terms in rows) in a new graphics window.
# `max_words` defaults to 120, preserving the original behaviour.
# NOTE: windows() makes this Windows-only.
makewordc = function(x, max_words = 120){
  # Row sums give the corpus-wide frequency of each term.
  freq = sort(rowSums(as.matrix(x)),decreasing = TRUE)
  freq.df = data.frame(word=names(freq), freq=freq)
  # Guard: with fewer terms than max_words, [1:120] would inject NA rows.
  n = min(max_words, nrow(freq.df))
  windows()
  wordcloud(freq.df$word[1:n], freq.df$freq[1:n],scale = c(4,.5),random.order = F, colors=1:10)
}
# Making positive wordcloud function
# Word cloud restricted to terms found in the positive lexicon `pos.words`
# (a character vector expected in the calling environment); "approvals" is
# appended as an extra domain-specific positive term.
makeposwordc = function(x){
  freq = sort(rowSums(as.matrix(x)),decreasing = TRUE)
  # matching positive words
  pos.matches = match(names(freq), c(pos.words,"approvals"))
  pos.matches = !is.na(pos.matches)
  freq_pos <- freq[pos.matches]
  # Guard: wordcloud() errors on empty input; warn and skip instead.
  if (length(freq_pos) == 0) {
    warning("no positive words found; skipping word cloud", call. = FALSE)
    return(invisible(NULL))
  }
  windows()
  wordcloud(names(freq_pos),freq_pos,scale=c(4,.5),colors = brewer.pal(8,"Dark2"))
}
# Making negative wordcloud function
# Word cloud of terms that occur in the negative lexicon `neg.words`
# (expected in the calling environment); only the top 120 matches are drawn.
makenegwordc = function(x){
  # Corpus-wide term frequencies, most frequent first.
  term_freq = sort(rowSums(as.matrix(x)), decreasing = TRUE)
  # Restrict to terms present in the negative-word lexicon.
  is_negative = names(term_freq) %in% neg.words
  neg_freq <- term_freq[is_negative]
  neg_terms <- names(neg_freq)
  windows()
  wordcloud(neg_terms[1:120], neg_freq[1:120], scale = c(4, .5),
            colors = brewer.pal(8, "Dark2"))
}
# Horizontal bar chart of the 50 most frequent terms of a term-document
# matrix `x` (terms in rows), drawn in a new graphics window.
words_bar_plot <- function(x){
  freq = sort(rowSums(as.matrix(x)),decreasing = TRUE)
  freq.df = data.frame(word=names(freq), freq=freq)
  # (Removed a dead `head(freq.df, 20)` whose result was discarded.)
  # ggplot2 is attached here; the *_bar_plot helpers below rely on it
  # being on the search path.
  library(ggplot2)
  windows()
  ggplot(head(freq.df,50), aes(reorder(word,freq), freq)) +
    geom_bar(stat = "identity") + coord_flip() +
    xlab("Words") + ylab("Frequency") +
    ggtitle("Most frequent words")
}
# Bar chart of the 30 most frequent positive-lexicon terms of a
# document-term matrix `x` (terms in columns). Assumes ggplot2 is already
# attached (words_bar_plot() loads it) and `pos.words` is in scope.
pos_words_bar_plot <- function(x){
  # Keep only the columns whose term appears in the positive lexicon.
  in_lexicon = match(colnames(x), pos.words)
  in_lexicon = !is.na(in_lexicon)
  pos_words_freq = as.data.frame(apply(x, 2, sum)[in_lexicon])
  colnames(pos_words_freq) <- "freq"
  pos_words_freq["word"] <- rownames(pos_words_freq)
  # Most frequent first.
  pos_words_freq <- pos_words_freq[order(pos_words_freq$freq, decreasing = TRUE), ]
  windows()
  ggplot(head(pos_words_freq, 30), aes(reorder(word, freq), freq)) +
    geom_bar(stat = "identity") + coord_flip() +
    xlab("Positive words") + ylab("Frequency") +
    ggtitle("Most frequent positive words")
}
# Bar chart of the 30 most frequent negative-lexicon terms of a
# document-term matrix `x` (terms in columns). Mirrors pos_words_bar_plot();
# assumes ggplot2 is already attached and `neg.words` is in scope.
neg_words_bar_plot <- function(x){
  neg.matches = match(colnames(x), neg.words)
  neg.matches = !is.na(neg.matches)
  neg_words_freq = as.data.frame(apply(x, 2, sum)[neg.matches])
  colnames(neg_words_freq)<-"freq"
  neg_words_freq["word"] <- rownames(neg_words_freq)
  # Sorting the words in decreasing order of their frequency
  neg_words_freq <- neg_words_freq[order(neg_words_freq$freq,decreasing=T),]
  windows()
  ggplot(head(neg_words_freq,30), aes(reorder(word,freq), freq)) +
    geom_bar(stat = "identity") + coord_flip() +
    # "Negative words" for consistency with pos_words_bar_plot()
    # (the original label said just "words").
    xlab("Negative words") + ylab("Frequency") +
    ggtitle("Most frequent negative words")
}
##### function to make cluster dendograms ##################################################################
# Collocation dendrogram: hierarchically cluster the (up to) 40 most
# frequent terms of a document-term matrix `a` by the distance between
# their per-document frequency profiles.
clusdend = function(a){ # writing func clusdend()
  # as.matrix() converts the tm matrix directly; the original used
  # inspect(), which also prints the whole matrix to the console.
  mydata.df = as.data.frame(as.matrix(a));
  # Order columns (terms) by total frequency, most frequent first.
  mydata1.df = mydata.df[, order(-colSums(mydata.df))];
  min1 = min(ncol(mydata.df), 40) # minimum dimn of dist matrix
  test1 = matrix(0,min1,min1)
  for(i1 in 1:(min1-1)){
    for(i2 in i1:min1){
      # Squared Euclidean distance between the two term profiles.
      # Bug fix: the original computed sum(x - y)^2 (square of the sum),
      # not sum((x - y)^2).
      test = sum((mydata1.df[ ,i1]-mydata1.df[ ,i2])^2)
      test1[i1,i2] = test; test1[i2, i1] = test1[i1, i2] }
  }
  # making dissimilarity matrix out of the freq one
  test2 = test1
  rownames(test2) = colnames(mydata1.df)[1:min1]
  # now plot collocation dendogram
  d <- dist(test2, method = "euclidean") # distance matrix
  # "ward" was removed from hclust(); "ward.D" is its modern equivalent.
  fit <- hclust(d, method="ward.D")
  windows()
  plot(fit) # display dendogram
} # clusdend() func ends
# lOADING +VE AND -VE words
pos.words=scan("C:\\Users\\sanu\\Downloads\\Desktop\\Documents\\Excelr\\Text Mining\\positive-words.txt", what="character", comment.char=";") # read-in positive-words.txt
neg.words=scan("C:\\Users\\sanu\\Downloads\\Desktop\\Documents\\Excelr\\Text Mining\\negative-words.txt", what="character", comment.char=";") # read-in negative-words.txt
pos.words=c(pos.words,"wow", "kudos", "hurray","superb","good") # including our own positive words to the existing list
neg.words = c(neg.words)
# Custom stop-word list used alongside tm's built-in English stop words.
stopwords = readLines("C:\\Users\\sanu\\Downloads\\Desktop\\Documents\\Excelr\\Text Mining\\stop.txt")
#########################################################################################################################################
####We will remove hashtags, junk characters, other twitter handles and URLs
####from the tags using gsub function so we have tweets for further analysis
# CLEANING TWEETS
# Remove ampersands; the second line is a duplicate of the first.
tweets.df$text=gsub("&", "", tweets.df$text)
tweets.df$text = gsub("&", "", tweets.df$text)
# Drop retweet markers ("RT"/"via") together with the quoted handle(s).
tweets.df$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", tweets.df$text)
# Then @mentions, punctuation, digits, URLs, runs of spaces/tabs, and
# leading/trailing whitespace, in that order.
tweets.df$text = gsub("@\\w+", "", tweets.df$text)
tweets.df$text = gsub("[[:punct:]]", "", tweets.df$text)
tweets.df$text = gsub("[[:digit:]]", "", tweets.df$text)
tweets.df$text = gsub("http\\w+", "", tweets.df$text)
tweets.df$text = gsub("[ \t]{2,}", "", tweets.df$text)
tweets.df$text = gsub("^\\s+|\\s+$", "", tweets.df$text)
# Drop non-ASCII characters (emoji etc.) that trip up tm/syuzhet downstream.
tweets.df$text <- iconv(tweets.df$text, "UTF-8", "ASCII", sub="")
####Getting sentiments for each tweet
####Syuzhet breaks emotion into 10 different categories
# Emotions for each tweet using NRC dictionary
emotions <- get_nrc_sentiment(tweets.df$text)
emotions
emo_bar = colSums(emotions)##anger - 18,anticipation-116, disgust-7, fear-19,joy-146,
###sadness=40, surprise-48, trust-110,negative-43,positive-317
emo_sum = data.frame(count=emo_bar, emotion=names(emo_bar))
# Order factor levels by count so plots show emotions most-to-least frequent.
emo_sum$emotion = factor(emo_sum$emotion, levels=emo_sum$emotion[order(emo_sum$count, decreasing = TRUE)])
####We are ready to visualize the emotions from NRC sentiments
library(plotly)
# Interactive bar chart of the total count per emotion category.
p <- plot_ly(emo_sum, x=~emotion, y=~count, type="bar", color=~emotion) %>%
  layout(xaxis=list(title=""), showlegend=FALSE,
         title="Emotion Type for hashtag: narendramodi")
# Publishes the chart to the plotly account configured in this session.
api_create(p,filename="Sentimentanalysis")
####Lets see which word contributes which emotion
# Create comparison word cloud data
# One pseudo-document per emotion: all tweets scoring > 0 on that emotion,
# concatenated. This order fixes the column order of the matrices below.
wordcloud_tweet = c(
  paste(tweets.df$text[emotions$anger > 0], collapse=" "),
  paste(tweets.df$text[emotions$anticipation > 0], collapse=" "),
  paste(tweets.df$text[emotions$disgust > 0], collapse=" "),
  paste(tweets.df$text[emotions$fear > 0], collapse=" "),
  paste(tweets.df$text[emotions$joy > 0], collapse=" "),
  paste(tweets.df$text[emotions$sadness > 0], collapse=" "),
  paste(tweets.df$text[emotions$surprise > 0], collapse=" "),
  paste(tweets.df$text[emotions$trust > 0], collapse=" "),
  paste(tweets.df$text[emotions$positive > 0], collapse=" "),
  paste(tweets.df$text[emotions$negative > 0], collapse=" ")
)
wordcloud_tweet
# create corpus
corpus = Corpus(VectorSource(wordcloud_tweet))
# remove whitespace,punctuation, convert every word in lower case and remove stop words
##corpus = tm_map(corpus, stripwhitespace) ### removes white space
corpus = tm_map(corpus, tolower) ### converts to lower case
corpus = tm_map(corpus, removePunctuation) ### removes punctuation marks
corpus = tm_map(corpus, removeNumbers) ### removes numbers in the documents
corpus = tm_map(corpus, removeWords, c(stopwords("english"),stopwords))
corpus = tm_map(corpus, stemDocument)
# create term document frequency matrix
tdm0 = TermDocumentMatrix(corpus)
inspect(tdm0)
# Term document matrix with inverse frequency
tdm1 <- TermDocumentMatrix(corpus,control = list(weighting = function(p) weightTfIdf(p,normalize = T)))#,stemming=T))
inspect(tdm1)
a0 <- NULL
a1 <- NULL
# getting the indexes of documents having count of words = 0
for (i1 in 1:ncol(tdm0))
{ if (sum(tdm0[, i1]) == 0) {a0 = c(a0, i1)} }
for (i1 in 1:ncol(tdm1))
{ if (sum(tdm1[, i1]) == 0) {a1 = c(a1, i1)} }
# Removing empty docs
# NOTE(review): if a0/a1 stays NULL (no empty docs) `-a0` errors on NULL;
# this code implicitly assumes at least one empty document -- confirm.
tdm0 <- tdm0[,-a0]
tdm1 <- tdm1[,-a1]
# convert as matrix
tdm0 = as.matrix(tdm0)
tdm1 = as.matrix(tdm1)
# Keep only terms shorter than 11 chars so the comparison clouds stay legible.
tdm0new <- tdm0[nchar(rownames(tdm0)) < 11,]
tdm1new <- tdm1[nchar(rownames(tdm1)) < 11,]
# column name binding
# Labels the 10 emotion columns in the order used to build wordcloud_tweet;
# assumes no columns were dropped above -- confirm.
colnames(tdm0) = c('anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust','positive','negative')
colnames(tdm0new) <- colnames(tdm0)
comparison.cloud(tdm0new, random.order=FALSE,
                 colors = c("#00B2FF", "red", "#FF0099", "#6600CC", "green", "orange", "blue", "brown","purple","maroon"),
                 title.size=1, max.words=250, scale=c(2.5, 0.4),rot.per=0.4)
colnames(tdm1) = c('anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust','positive','negative')
colnames(tdm1new) <- colnames(tdm1)
comparison.cloud(tdm1new, random.order=FALSE,
                 colors = c("#00B2FF", "red", "#FF0099", "#6600CC", "green", "orange", "blue", "brown","purple","maroon"),
                 title.size=1, max.words=250, scale=c(2.5, 0.4),rot.per=0.4)
##########################################################################################################
# Document term matrix
# TDMs (terms in rows) feed the word-cloud helpers; their transposes, the
# DTMs (terms in columns), feed the *_bar_plot helpers.
dtm0 <- t(tdm0)
dtm1 <- t(tdm1)
# Word cloud - TF - Uni gram
makewordc(tdm0)
title(sub = "UNIGRAM - Wordcloud using TF")
# Frequency Bar plot - TF - Uni gram
words_bar_plot(tdm0)
# Word cloud - TFIDF - Unigram
makewordc(tdm1)
# Frequency Barplot - TFIDF - Unigram
words_bar_plot(tdm1)
# Positive word cloud - TF - Unigram
makeposwordc(tdm0)
title(sub = "UNIGRAM - POSITIVE Wordcloud using TF")
# Frequency Barplot - Positive words - Unigram
pos_words_bar_plot(dtm0)
# Positive word cloud - Unigram - TFIDF
makeposwordc(tdm1)
title(sub = "UNIGRAM - POSITIVE Wordcloud using TFIDF")
# Frequency Barplot - Positive words - TFIDF - Unigram
pos_words_bar_plot(dtm1)
# Negative word cloud - TF - unigam
makenegwordc(tdm0)
title(sub = "UNIGRAM - NEGATIVE Wordcloud using TF")
# Frequency Barplot -negative words - Unigram - TF
neg_words_bar_plot(dtm0)
# Negative word cloud - Unigram - TFIDF
makenegwordc(tdm1)
title(sub = "UNIGRAM - NEGATIVE Wordcloud using TFIDF")
# Frequency Barplot - Negative words - TFIDF
neg_words_bar_plot(dtm1)
# Bi gram word clouds
library(quanteda)
library(Matrix)
# Bi gram document term frequency
# NOTE(review): ngrams=3 actually builds TRIgrams even though every comment
# in this section says "Bi gram" -- confirm which was intended.
dtm0_2 <- dfm(unlist(corpus),ngrams=3,verbose = F)
tdm0_2 <- t(dtm0_2)
a0 = NULL
# Collect indexes of documents whose n-gram counts are all zero ...
for (i1 in 1:ncol(tdm0_2)){ if (sum(tdm0_2[, i1]) == 0) {a0 = c(a0, i1)} }
length(a0) # no. of empty docs in the corpus
# ... and drop them (no-op when there are none).
if (length(a0) >0) { tdm0_2 = tdm0_2[, -a0]} else {tdm0_2 = tdm0_2}; dim(tdm0_2) # under TF weighing
a0 <- NULL;i1 <- NULL
dtm0_2 <- t(tdm0_2)
# Bi gram word cloud
makewordc(tdm0_2) # We have too see warnings to edit few words
title(sub = "BIGRAM - Wordcloud using TF")
# Bi gram barplot on TF
words_bar_plot(tdm0_2)
## Bi gram on TFIDF
# NOTE(review): quanteda's tfidf() is long deprecated in favour of dfm_tfidf().
dtm1_2 <- tfidf(dtm0_2)
tdm1_2 <- t(dtm1_2)
a0 = NULL
for (i1 in 1:ncol(tdm1_2)){ if (sum(tdm1_2[, i1]) == 0) {a0 = c(a0, i1)} }
length(a0) # no. of empty docs in the corpus
if (length(a0) >0) { tdm1_2 = tdm1_2[, -a0]} else {tdm1_2 = tdm1_2}; dim(tdm1_2) # under TF weighing
a0 <- NULL;i1 <- NULL
dtm1_2 <- t(tdm1_2)
# Bi gram word cloud for TFIDF
makewordc(tdm1_2) # We have too see warnings to edit few words
title(sub = "BIGRAM - Wordcloud using TFIDF")
# Bigram barplot on TFIDF
words_bar_plot(tdm1_2)
# Cluster dendrogram on Uni gram - TF
# Bug fix: the original read `clusdend=function(dtm0)`, which silently
# REDEFINED clusdend() as a stub whose body was the title() call below, so
# no TF dendrogram was drawn and every later clusdend() call was broken.
clusdend(dtm0)
title(sub = "Dendrogram using TF")
# Cluster dendrogram on Uni gram - TFIDF
clusdend(dtm1)
title(sub = "Dendrogram using TFIDF")
# --- Can we segment the respondents (or cluster the documents) based on term usage? --- #
### --- kmeans proc ---- ###
# better cluster on TF dtm rather than tfidf dtm for solution stability #
# Scree plot: total within-group sum of squares for k = 1..8 clusters.
wss = (nrow(dtm0)-1)*sum(apply(dtm0, 2, var)) # Determine number of clusters by scree-plot
for (i in 2:8) wss[i] = sum(kmeans(dtm0, centers=i)$withinss)
windows()
plot(1:8, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares") # Look for an "elbow" in the scree plot #
title(sub = "K-Means Clustering Scree-Plot")
k1 = 4 # based on the scree elbow plot
a3 = kmeans(dtm0, k1); a3$size
# NOTE(review): a4 starts as a kmeans fit on the TF-IDF dtm but is
# overwritten element-by-element in the loop below -- it is only used as a
# plain list of segment matrices afterwards; confirm that is intended.
a4 = kmeans(dtm1, k1)
round(a3$size/sum(a3$size), 2) # segmt-sizes as proportions
# -- analyze each segment for what they're saying... --- #
# For every k-means segment, store the transposed TF sub-matrix of the
# documents assigned to it (a4 is reused as a plain list of term matrices).
for (i1 in 1:max(a3$cluster)) {
  a4[[i1]] = t(dtm0[(a3$cluster == i1),])
} # loop ends
# Removed the stray `a4[[i2]] = t(dtm1[(a4$cluster == i2), ])` line: at this
# point i2 is undefined and a4$cluster has just been overwritten above, so
# the statement could only error.
# now plot wordclouds for by segment and see
par(ask = TRUE)  # pause between plots until the user confirms
for (i2 in 1:max(a3$cluster)){
  makewordc(a4[[i2]])
  # Subtitle: cluster number plus that segment's share of documents (%).
  sub=paste("wordcloud-Clustering-",as.character(i2),"-",as.character(format(round(ncol(a4[[i2]])*100/nrow(dtm0),2),nsmall=3)),"%",collapse = " ")
  title(sub = sub)
} # loop ends
i2 <- NULL
par(ask = FALSE) # close ask facility for graph making
# cluster dendograms cluster terms *within* documents
# in contrast, kmeans clusters documents themselves using word freqs across documents
# now try these examples:
# Dendrogram per segment (transpose back to documents x terms for clusdend).
for (i in 1:4){
  clusdend(t(a4[[i]]))
  title(sub = as.character(i))
}
| /Text Mining/TwitterProblem.R | no_license | hardikvora200/R-Github | R | false | false | 14,555 | r | #devtools::install_github("jrowen/twitteR", ref = "oauth_httr_1_0")
install.packages("base64enc")
install.packages("httpuv")
install.packages("twitteR")
install.packages("RCurl")
install.packages("httr")
install.packages("syuzhet")
library(twitteR)
library(ROAuth)
library(base64enc)
library(httpuv)
library(rtweet)
library(RCurl)
library(httr)
library(tm)
library(wordcloud)
library(syuzhet)
cred <- OAuthFactory$new(consumerKey='Provide Your Consumer API key', # Consumer Key (API Key) ### Due to security violation of Github removing it
consumerSecret='Provide Your Consumer API Secret key', #Consumer Secret (API Secret) ### Due to security violation of Github removing it
requestURL='https://api.twitter.com/oauth/request_token',
accessURL='https://api.twitter.com/oauth/access_token',
authURL='https://api.twitter.com/oauth/authorize')
#cred$handshake(cainfo="cacert.pem")
save(cred, file="twitter authentication.Rdata")
load("twitter authentication.Rdata")
setup_twitter_oauth("Provide Your Consumer API key", # Consumer Key (API Key) ### Due to security violation of Github removing it
"Provide Your Consumer API Secret key", #Consumer Secret ### Due to security violation of Github removing it
"Provide Your Access Token", # Access Token ### Due to security violation of Github removing it
"Provide Your Access Token Secret key") ###Access Token Secret ####Due to security violation of Github removing it
Tweets <- userTimeline("narendramodi", n = 3200)
# store the tweets into dataframe
tweets.df = twListToDF(Tweets)
write.csv(tweets.df, "Tweets_modi.csv",row.names = F)
getwd()
################################################################################################################################################
# Plot a word cloud of the `max_words` most frequent terms of a
# term-document matrix `x` (terms in rows) in a new graphics window.
# `max_words` defaults to 120, preserving the original behaviour.
# NOTE: windows() makes this Windows-only.
makewordc = function(x, max_words = 120){
  # Row sums give the corpus-wide frequency of each term.
  freq = sort(rowSums(as.matrix(x)),decreasing = TRUE)
  freq.df = data.frame(word=names(freq), freq=freq)
  # Guard: with fewer terms than max_words, [1:120] would inject NA rows.
  n = min(max_words, nrow(freq.df))
  windows()
  wordcloud(freq.df$word[1:n], freq.df$freq[1:n],scale = c(4,.5),random.order = F, colors=1:10)
}
# Making positive wordcloud function
# Word cloud restricted to terms found in the positive lexicon `pos.words`
# (a character vector expected in the calling environment); "approvals" is
# appended as an extra domain-specific positive term.
makeposwordc = function(x){
  freq = sort(rowSums(as.matrix(x)),decreasing = TRUE)
  # matching positive words
  pos.matches = match(names(freq), c(pos.words,"approvals"))
  pos.matches = !is.na(pos.matches)
  freq_pos <- freq[pos.matches]
  # Guard: wordcloud() errors on empty input; warn and skip instead.
  if (length(freq_pos) == 0) {
    warning("no positive words found; skipping word cloud", call. = FALSE)
    return(invisible(NULL))
  }
  windows()
  wordcloud(names(freq_pos),freq_pos,scale=c(4,.5),colors = brewer.pal(8,"Dark2"))
}
# Making negative wordcloud function
# Word cloud of terms that occur in the negative lexicon `neg.words`
# (expected in the calling environment); only the top 120 matches are drawn.
makenegwordc = function(x){
  # Corpus-wide term frequencies, most frequent first.
  term_freq = sort(rowSums(as.matrix(x)), decreasing = TRUE)
  # Restrict to terms present in the negative-word lexicon.
  is_negative = names(term_freq) %in% neg.words
  neg_freq <- term_freq[is_negative]
  neg_terms <- names(neg_freq)
  windows()
  wordcloud(neg_terms[1:120], neg_freq[1:120], scale = c(4, .5),
            colors = brewer.pal(8, "Dark2"))
}
# Horizontal bar chart of the 50 most frequent terms of a term-document
# matrix `x` (terms in rows), drawn in a new graphics window.
words_bar_plot <- function(x){
  freq = sort(rowSums(as.matrix(x)),decreasing = TRUE)
  freq.df = data.frame(word=names(freq), freq=freq)
  # (Removed a dead `head(freq.df, 20)` whose result was discarded.)
  # ggplot2 is attached here; the *_bar_plot helpers below rely on it
  # being on the search path.
  library(ggplot2)
  windows()
  ggplot(head(freq.df,50), aes(reorder(word,freq), freq)) +
    geom_bar(stat = "identity") + coord_flip() +
    xlab("Words") + ylab("Frequency") +
    ggtitle("Most frequent words")
}
# Bar chart of the 30 most frequent positive-lexicon terms of a
# document-term matrix `x` (terms in columns). Assumes ggplot2 is already
# attached (words_bar_plot() loads it) and `pos.words` is in scope.
pos_words_bar_plot <- function(x){
  # Keep only the columns whose term appears in the positive lexicon.
  in_lexicon = match(colnames(x), pos.words)
  in_lexicon = !is.na(in_lexicon)
  pos_words_freq = as.data.frame(apply(x, 2, sum)[in_lexicon])
  colnames(pos_words_freq) <- "freq"
  pos_words_freq["word"] <- rownames(pos_words_freq)
  # Most frequent first.
  pos_words_freq <- pos_words_freq[order(pos_words_freq$freq, decreasing = TRUE), ]
  windows()
  ggplot(head(pos_words_freq, 30), aes(reorder(word, freq), freq)) +
    geom_bar(stat = "identity") + coord_flip() +
    xlab("Positive words") + ylab("Frequency") +
    ggtitle("Most frequent positive words")
}
# Bar chart of the 30 most frequent negative-lexicon terms of a
# document-term matrix `x` (terms in columns). Mirrors pos_words_bar_plot();
# assumes ggplot2 is already attached and `neg.words` is in scope.
neg_words_bar_plot <- function(x){
  neg.matches = match(colnames(x), neg.words)
  neg.matches = !is.na(neg.matches)
  neg_words_freq = as.data.frame(apply(x, 2, sum)[neg.matches])
  colnames(neg_words_freq)<-"freq"
  neg_words_freq["word"] <- rownames(neg_words_freq)
  # Sorting the words in decreasing order of their frequency
  neg_words_freq <- neg_words_freq[order(neg_words_freq$freq,decreasing=T),]
  windows()
  ggplot(head(neg_words_freq,30), aes(reorder(word,freq), freq)) +
    geom_bar(stat = "identity") + coord_flip() +
    # "Negative words" for consistency with pos_words_bar_plot()
    # (the original label said just "words").
    xlab("Negative words") + ylab("Frequency") +
    ggtitle("Most frequent negative words")
}
##### function to make cluster dendograms ##################################################################
# Collocation dendrogram: hierarchically cluster the (up to) 40 most
# frequent terms of a document-term matrix `a` by the distance between
# their per-document frequency profiles.
clusdend = function(a){ # writing func clusdend()
  # as.matrix() converts the tm matrix directly; the original used
  # inspect(), which also prints the whole matrix to the console.
  mydata.df = as.data.frame(as.matrix(a));
  # Order columns (terms) by total frequency, most frequent first.
  mydata1.df = mydata.df[, order(-colSums(mydata.df))];
  min1 = min(ncol(mydata.df), 40) # minimum dimn of dist matrix
  test1 = matrix(0,min1,min1)
  for(i1 in 1:(min1-1)){
    for(i2 in i1:min1){
      # Squared Euclidean distance between the two term profiles.
      # Bug fix: the original computed sum(x - y)^2 (square of the sum),
      # not sum((x - y)^2).
      test = sum((mydata1.df[ ,i1]-mydata1.df[ ,i2])^2)
      test1[i1,i2] = test; test1[i2, i1] = test1[i1, i2] }
  }
  # making dissimilarity matrix out of the freq one
  test2 = test1
  rownames(test2) = colnames(mydata1.df)[1:min1]
  # now plot collocation dendogram
  d <- dist(test2, method = "euclidean") # distance matrix
  # "ward" was removed from hclust(); "ward.D" is its modern equivalent.
  fit <- hclust(d, method="ward.D")
  windows()
  plot(fit) # display dendogram
} # clusdend() func ends
# lOADING +VE AND -VE words
pos.words=scan("C:\\Users\\sanu\\Downloads\\Desktop\\Documents\\Excelr\\Text Mining\\positive-words.txt", what="character", comment.char=";") # read-in positive-words.txt
neg.words=scan("C:\\Users\\sanu\\Downloads\\Desktop\\Documents\\Excelr\\Text Mining\\negative-words.txt", what="character", comment.char=";") # read-in negative-words.txt
pos.words=c(pos.words,"wow", "kudos", "hurray","superb","good") # including our own positive words to the existing list
neg.words = c(neg.words)
stopwords = readLines("C:\\Users\\sanu\\Downloads\\Desktop\\Documents\\Excelr\\Text Mining\\stop.txt")
#########################################################################################################################################
####We will remove hashtags, junk characters, other twitter handles and URLs
####from the tags using gsub function so we have tweets for further analysis
# CLEANING TWEETS
tweets.df$text=gsub("&", "", tweets.df$text)
tweets.df$text = gsub("&", "", tweets.df$text)
tweets.df$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", tweets.df$text)
tweets.df$text = gsub("@\\w+", "", tweets.df$text)
tweets.df$text = gsub("[[:punct:]]", "", tweets.df$text)
tweets.df$text = gsub("[[:digit:]]", "", tweets.df$text)
tweets.df$text = gsub("http\\w+", "", tweets.df$text)
tweets.df$text = gsub("[ \t]{2,}", "", tweets.df$text)
tweets.df$text = gsub("^\\s+|\\s+$", "", tweets.df$text)
tweets.df$text <- iconv(tweets.df$text, "UTF-8", "ASCII", sub="")
####Getting sentiments for each tweet
####Syuzhet breaks emotion into 10 different categories
# Emotions for each tweet using NRC dictionary
emotions <- get_nrc_sentiment(tweets.df$text)
emotions
emo_bar = colSums(emotions)##anger - 18,anticipation-116, disgust-7, fear-19,joy-146,
###sadness=40, surprise-48, trust-110,negative-43,positive-317
emo_sum = data.frame(count=emo_bar, emotion=names(emo_bar))
emo_sum$emotion = factor(emo_sum$emotion, levels=emo_sum$emotion[order(emo_sum$count, decreasing = TRUE)])
####We are ready to visualize the emotions from NRC sentiments
library(plotly)
p <- plot_ly(emo_sum, x=~emotion, y=~count, type="bar", color=~emotion) %>%
layout(xaxis=list(title=""), showlegend=FALSE,
title="Emotion Type for hashtag: narendramodi")
api_create(p,filename="Sentimentanalysis")
####Lets see which word contributes which emotion
# Create comparison word cloud data
wordcloud_tweet = c(
paste(tweets.df$text[emotions$anger > 0], collapse=" "),
paste(tweets.df$text[emotions$anticipation > 0], collapse=" "),
paste(tweets.df$text[emotions$disgust > 0], collapse=" "),
paste(tweets.df$text[emotions$fear > 0], collapse=" "),
paste(tweets.df$text[emotions$joy > 0], collapse=" "),
paste(tweets.df$text[emotions$sadness > 0], collapse=" "),
paste(tweets.df$text[emotions$surprise > 0], collapse=" "),
paste(tweets.df$text[emotions$trust > 0], collapse=" "),
paste(tweets.df$text[emotions$positive > 0], collapse=" "),
paste(tweets.df$text[emotions$negative > 0], collapse=" ")
)
wordcloud_tweet
# create corpus
corpus = Corpus(VectorSource(wordcloud_tweet))
# remove whitespace,punctuation, convert every word in lower case and remove stop words
##corpus = tm_map(corpus, stripwhitespace) ### removes white space
corpus = tm_map(corpus, tolower) ### converts to lower case
corpus = tm_map(corpus, removePunctuation) ### removes punctuation marks
corpus = tm_map(corpus, removeNumbers) ### removes numbers in the documents
corpus = tm_map(corpus, removeWords, c(stopwords("english"),stopwords))
corpus = tm_map(corpus, stemDocument)
# create term document frequency matrix
tdm0 = TermDocumentMatrix(corpus)
inspect(tdm0)
# Term document matrix with inverse frequency
tdm1 <- TermDocumentMatrix(corpus,control = list(weighting = function(p) weightTfIdf(p,normalize = T)))#,stemming=T))
inspect(tdm1)
a0 <- NULL
a1 <- NULL
# getting the indexes of documents having count of words = 0
for (i1 in 1:ncol(tdm0))
{ if (sum(tdm0[, i1]) == 0) {a0 = c(a0, i1)} }
for (i1 in 1:ncol(tdm1))
{ if (sum(tdm1[, i1]) == 0) {a1 = c(a1, i1)} }
# Removing empty docs
tdm0 <- tdm0[,-a0]
tdm1 <- tdm1[,-a1]
# convert as matrix
tdm0 = as.matrix(tdm0)
tdm1 = as.matrix(tdm1)
tdm0new <- tdm0[nchar(rownames(tdm0)) < 11,]
tdm1new <- tdm1[nchar(rownames(tdm1)) < 11,]
# column name binding
colnames(tdm0) = c('anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust','positive','negative')
colnames(tdm0new) <- colnames(tdm0)
comparison.cloud(tdm0new, random.order=FALSE,
colors = c("#00B2FF", "red", "#FF0099", "#6600CC", "green", "orange", "blue", "brown","purple","maroon"),
title.size=1, max.words=250, scale=c(2.5, 0.4),rot.per=0.4)
colnames(tdm1) = c('anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust','positive','negative')
colnames(tdm1new) <- colnames(tdm1)
comparison.cloud(tdm1new, random.order=FALSE,
colors = c("#00B2FF", "red", "#FF0099", "#6600CC", "green", "orange", "blue", "brown","purple","maroon"),
title.size=1, max.words=250, scale=c(2.5, 0.4),rot.per=0.4)
##########################################################################################################
# Document term matrix
dtm0 <- t(tdm0)
dtm1 <- t(tdm1)
# Word cloud - TF - Uni gram
makewordc(tdm0)
title(sub = "UNIGRAM - Wordcloud using TF")
# Frequency Bar plot - TF - Uni gram
words_bar_plot(tdm0)
# Word cloud - TFIDF - Unigram
makewordc(tdm1)
# Frequency Barplot - TFIDF - Unigram
words_bar_plot(tdm1)
# Positive word cloud - TF - Unigram
makeposwordc(tdm0)
title(sub = "UNIGRAM - POSITIVE Wordcloud using TF")
# Frequency Barplot - Positive words - Unigram
pos_words_bar_plot(dtm0)
# Positive word cloud - Unigram - TFIDF
makeposwordc(tdm1)
title(sub = "UNIGRAM - POSITIVE Wordcloud using TFIDF")
# Frequency Barplot - Positive words - TFIDF - Unigram
pos_words_bar_plot(dtm1)
# Negative word cloud - TF - unigam
makenegwordc(tdm0)
title(sub = "UNIGRAM - NEGATIVE Wordcloud using TF")
# Frequency Barplot -negative words - Unigram - TF
neg_words_bar_plot(dtm0)
# Negative word cloud - Unigram - TFIDF
makenegwordc(tdm1)
title(sub = "UNIGRAM - NEGATIVE Wordcloud using TFIDF")
# Frequency Barplot - Negative words - TFIDF
neg_words_bar_plot(dtm1)
# Bi gram word clouds
library(quanteda)
library(Matrix)
# Bi gram document term frequency
dtm0_2 <- dfm(unlist(corpus),ngrams=3,verbose = F)
tdm0_2 <- t(dtm0_2)
a0 = NULL
for (i1 in 1:ncol(tdm0_2)){ if (sum(tdm0_2[, i1]) == 0) {a0 = c(a0, i1)} }
length(a0) # no. of empty docs in the corpus
if (length(a0) >0) { tdm0_2 = tdm0_2[, -a0]} else {tdm0_2 = tdm0_2}; dim(tdm0_2) # under TF weighing
a0 <- NULL;i1 <- NULL
dtm0_2 <- t(tdm0_2)
# Bi gram word cloud
makewordc(tdm0_2) # We have too see warnings to edit few words
title(sub = "BIGRAM - Wordcloud using TF")
# Bi gram barplot on TF
words_bar_plot(tdm0_2)
## Bi gram on TFIDF
dtm1_2 <- tfidf(dtm0_2)
tdm1_2 <- t(dtm1_2)
a0 = NULL
for (i1 in 1:ncol(tdm1_2)){ if (sum(tdm1_2[, i1]) == 0) {a0 = c(a0, i1)} }
length(a0) # no. of empty docs in the corpus
if (length(a0) >0) { tdm1_2 = tdm1_2[, -a0]} else {tdm1_2 = tdm1_2}; dim(tdm1_2) # under TF weighing
a0 <- NULL;i1 <- NULL
dtm1_2 <- t(tdm1_2)
# Bi gram word cloud for TFIDF
makewordc(tdm1_2) # We have too see warnings to edit few words
title(sub = "BIGRAM - Wordcloud using TFIDF")
# Bigram barplot on TFIDF
words_bar_plot(tdm1_2)
# Cluster dendrogram on Uni gram - TF
# Bug fix: the original read `clusdend=function(dtm0)`, which silently
# REDEFINED clusdend() as a stub whose body was the title() call below, so
# no TF dendrogram was drawn and every later clusdend() call was broken.
clusdend(dtm0)
title(sub = "Dendrogram using TF")
# Cluster dendrogram on Uni gram - TFIDF
clusdend(dtm1)
title(sub = "Dendrogram using TFIDF")
# --- Can we segment the respondents (or cluster the documents) based on term usage? --- #
### --- kmeans proc ---- ###
# better cluster on TF dtm rather than tfidf dtm for solution stability #
wss = (nrow(dtm0)-1)*sum(apply(dtm0, 2, var)) # Determine number of clusters by scree-plot
for (i in 2:8) wss[i] = sum(kmeans(dtm0, centers=i)$withinss)
windows()
plot(1:8, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares") # Look for an "elbow" in the scree plot #
title(sub = "K-Means Clustering Scree-Plot")
k1 = 4 # based on the scree elbow plot
a3 = kmeans(dtm0, k1); a3$size
a4 = kmeans(dtm1, k1)
round(a3$size/sum(a3$size), 2) # segmt-sizes as proportions
# -- analyze each segment for what they're saying... --- #
# For every k-means segment, store the transposed TF sub-matrix of the
# documents assigned to it (a4 is reused as a plain list of term matrices).
for (i1 in 1:max(a3$cluster)) {
  a4[[i1]] = t(dtm0[(a3$cluster == i1),])
} # loop ends
# Removed the stray `a4[[i2]] = t(dtm1[(a4$cluster == i2), ])` line: at this
# point i2 is undefined and a4$cluster has just been overwritten above, so
# the statement could only error.
# now plot wordclouds for by segment and see
par(ask = TRUE)
for (i2 in 1:max(a3$cluster)){
makewordc(a4[[i2]])
sub=paste("wordcloud-Clustering-",as.character(i2),"-",as.character(format(round(ncol(a4[[i2]])*100/nrow(dtm0),2),nsmall=3)),"%",collapse = " ")
title(sub = sub)
} # loop ends
i2 <- NULL
par(ask = FALSE) # close ask facility for graph making
# cluster dendograms cluster terms *within* documents
# in contrast, kmeans clusters documents themselves using word freqs across documents
# now try these examples:
for (i in 1:4){
clusdend(t(a4[[i]]))
title(sub = as.character(i))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{stations}
\alias{stations}
\title{stations}
\format{A tibble with 2,322 rows and 9 variables:
\describe{
\item{station_id}{The station's ID from the domain's database}
\item{name}{The station's name}
\item{water_basin}{The station's Water Basin}
\item{water_division}{The station's Water Division}
\item{owner}{The station's owner}
\item{longitude}{The station's longitude in decimal degrees, ETRS89}
\item{latitude}{The station's latitude in decimal degrees, ETRS89}
\item{altitude}{The station's altitude, meters above sea level}
\item{subdomain}{The corresponding Hydroscope's database}
}}
\usage{
stations
}
\description{
Stations' data from the Greek National Data Bank for
Hydrological and Meteorological Information. This dataset is a comprehensive
look-up table with geographical and ownership information of the available
stations in all Hydroscope's databases.
}
\keyword{datasets}
| /man/stations.Rd | permissive | firefoxxy8/hydroscoper | R | false | true | 1,034 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{stations}
\alias{stations}
\title{stations}
\format{A tibble with 2,322 rows and 9 variables:
\describe{
\item{station_id}{The station's ID from the domain's database}
\item{name}{The station's name}
\item{water_basin}{The station's Water Basin}
\item{water_division}{The station's Water Division}
\item{owner}{The station's owner}
\item{longitude}{The station's longitude in decimal degrees, ETRS89}
\item{latitude}{The station's latitude in decimal degrees, ETRS89}
\item{altitude}{The station's altitude, meters above sea level}
\item{subdomain}{The corresponding Hydroscope's database}
}}
\usage{
stations
}
\description{
Stations' data from the Greek National Data Bank for
Hydrological and Meteorological Information. This dataset is a comprehensive
look-up table with geographical and ownership information of the available
stations in all Hydroscope's databases.
}
\keyword{datasets}
|
library(devEMF)
### Name: emf
### Title: Enhanced Metafile Graphics Device
### Aliases: emf devEMF
### Keywords: device
### ** Examples
require(devEMF)
## Not run:
##D # open file "bar.emf" for graphics output
##D emf("bar.emf")
##D # produce the desired graph(s)
##D plot(1,1)
##D dev.off() #turn off device and finalize file
## End(Not run)
| /data/genthat_extracted_code/devEMF/examples/emf.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 351 | r | library(devEMF)
### Name: emf
### Title: Enhanced Metafile Graphics Device
### Aliases: emf devEMF
### Keywords: device
### ** Examples
require(devEMF)
## Not run:
##D # open file "bar.emf" for graphics output
##D emf("bar.emf")
##D # produce the desired graph(s)
##D plot(1,1)
##D dev.off() #turn off device and finalize file
## End(Not run)
|
#' boot_f_cor
#'
#' Statistic function passed to boot::boot(): computes the correlation and
#' column medians for one bootstrap resample of a two-column data set.
#'
#' @param data The data as a matrix or data frame; each row is one
#'   bivariate observation (only the first two columns are used).
#' @param indices Row indices defining the bootstrap resample (supplied by
#'   boot::boot on each replicate).
#' @param cor.type a character string indicating which correlation
#'   coefficient is to be computed. One of "pearson" (default), "kendall", or
#'   "spearman".
#'
#' @return A numeric vector of length 3: the correlation between the first
#'   two columns of the resample, the median of column 1, and the median of
#'   column 2. (boot() stacks these into a matrix across replicates; the
#'   original docs wrongly described the return as a matrix and documented a
#'   nonexistent `fun` argument.)
#'
#' @references https://www.datacamp.com/community/tutorials/bootstrap-r
#' @keywords internal
#'
boot_f_cor <- function(data, indices, cor.type){
  # Subset the rows for this replicate, then compute the three statistics.
  dt <- data[indices,]
  c(
    cor(dt[,1], dt[,2], method = cor.type),
    median(dt[,1]),
    median(dt[,2])
  )
}
| /R/boot_f_cor.R | no_license | npp97/dendroTools | R | false | false | 869 | r | #' boot_f_cor
#'
#' Statistic function passed to boot::boot(): computes the correlation and
#' column medians for one bootstrap resample of a two-column data set.
#'
#' @param data The data as a matrix or data frame; each row is one
#'   bivariate observation (only the first two columns are used).
#' @param indices Row indices defining the bootstrap resample (supplied by
#'   boot::boot on each replicate).
#' @param cor.type a character string indicating which correlation
#'   coefficient is to be computed. One of "pearson" (default), "kendall", or
#'   "spearman".
#'
#' @return A numeric vector of length 3: the correlation between the first
#'   two columns of the resample, the median of column 1, and the median of
#'   column 2. (boot() stacks these into a matrix across replicates; the
#'   original docs wrongly described the return as a matrix and documented a
#'   nonexistent `fun` argument.)
#'
#' @references https://www.datacamp.com/community/tutorials/bootstrap-r
#' @keywords internal
#'
boot_f_cor <- function(data, indices, cor.type){
  # Subset the rows for this replicate, then compute the three statistics.
  dt <- data[indices,]
  c(
    cor(dt[,1], dt[,2], method = cor.type),
    median(dt[,1]),
    median(dt[,2])
  )
}
|
# Report and total the fleet size across all vehicle states at time step
# `time`. Reads the global per-state lists (IdleVehicle, AssignVehicle,
# OprtVehicle, InterReloVehicle, IntraReloVehicle), each indexed by time
# step and holding a data frame with one row per vehicle.
TotalNumVeh <- function(time){
  counts <- c(
    "Idle" = nrow(IdleVehicle[[time]]),
    "Assign" = nrow(AssignVehicle[[time]]),
    "Oprt" = nrow(OprtVehicle[[time]]),
    "Inter-Relocate" = nrow(InterReloVehicle[[time]]),
    "Intra-Relocate" = nrow(IntraReloVehicle[[time]])
  )
  # Same console output as the original: one "<state> is <n>" line per state.
  for (state in names(counts)) {
    print(paste(state, "is", counts[[state]]))
  }
  # Grand total over every state.
  sum(counts)
}
| /module/utils/TotalNumVeh.R | no_license | jihoyeo/taxi-relocation | R | false | false | 643 | r | # Calculate the number of vehicles at all states (idle;assign;oprt;etc)
# Derive each number of vehicles
TotalNumVeh <- function(time){
print (paste("Idle is", nrow(IdleVehicle[[time]])))
print (paste("Assign is", nrow(AssignVehicle[[time]])))
print (paste("Oprt is", nrow(OprtVehicle[[time]])))
print (paste("Inter-Relocate is", nrow(InterReloVehicle[[time]])))
print (paste("Intra-Relocate is", nrow(IntraReloVehicle[[time]])))
return(nrow(IdleVehicle[[time]])+
nrow(AssignVehicle[[time]])+
nrow(OprtVehicle[[time]])+
nrow(InterReloVehicle[[time]])+
nrow(IntraReloVehicle[[time]]))
}
|
#################################################################################
##
## R package rugarch by Alexios Ghalanos Copyright (C) 2008-2013.
## This file is part of the R package rugarch.
##
## The R package rugarch is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## The R package rugarch is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
#################################################################################
# Test 1a: ARFIMA simulated parameter distribution.
#
# Simulates from a fixed ARMA(2,2) + mean + sigma specification, estimates
# the parameter distribution recursively over increasing simulation lengths
# (2000 up to 10000 in steps of 1000), writes the summaries to test1a.txt,
# saves the distribution object to test1a.rda and plots the actual vs the
# expected (sqrt(N)-consistent) RMSE reduction per parameter to test1a.eps.
# Returns the elapsed run time.
rugarch.test1a = function(cluster = NULL){
	tic = Sys.time()
	# simulated parameter distribution
	spec = arfimaspec( mean.model = list(armaOrder = c(2,2), include.mean = TRUE, arfima = FALSE),
			distribution.model = "norm", fixed.pars = list(ar1=0.6, ar2=0.21, ma1=-0.7, ma2=0.3, mu = 0.02,
					sigma = 0.02))
	dist = arfimadistribution(spec, n.sim = 2000, n.start = 100, m.sim = 100, recursive = TRUE,
			recursive.length = 10000, recursive.window = 1000, cluster = cluster)
	save(dist, file = "test1a.rda")
	options(width=150)
	zz <- file("test1a.txt", open="wt")
	sink(zz)
	# slots:
	slotNames(dist)
	# methods:
	# summary
	show(dist)
	# as.data.frame(...., window, which=c("rmse", "stats", "coef", "coefse"))
	# default
	as.data.frame(dist)
	as.data.frame(dist, window = 1, which = "rmse")
	as.data.frame(dist, window = 1, which = "stats")
	as.data.frame(dist, window = 1, which = "coef")
	as.data.frame(dist, window = 1, which = "coefse")
	as.data.frame(dist, window = 8, which = "rmse")
	as.data.frame(dist, window = 8, which = "stats")
	as.data.frame(dist, window = 8, which = "coef")
	as.data.frame(dist, window = 8, which = "coefse")
	sink(type="message")
	sink()
	close(zz)
	# create some plots
	nwindows = dist@dist$details$nwindows
	# 2000/3000/4000/5000/6000/7000/8000/9000/10000
	# expected reduction factor in RMSE for sqrt(N) consistency
	expected.rmsegr = sqrt(2000/seq(3000,10000,by=1000))
	# actual RMSE reduction
	actual.rmsegr = matrix(NA, ncol = 8, nrow = 6)
	# FIX: the row labels previously listed "ma2" twice and omitted "ma1";
	# the model is ARMA(2,2) with mean and sigma, so the six parameters are
	# mu, ar1, ar2, ma1, ma2 and sigma.
	rownames(actual.rmsegr) = c("mu", "ar1", "ar2", "ma1", "ma2", "sigma")
	# start at 2000 (window 1)
	rmse.start = as.data.frame(dist, window = 1, which = "rmse")
	for(i in 2:nwindows) actual.rmsegr[,i-1] = as.numeric(as.data.frame(dist, window = i, which = "rmse")/rmse.start)
	# NOTE(review): postscript() interprets width/height in inches, so 800
	# looks copied from a pixel-based device -- confirm the intended size.
	postscript("test1a.eps", bg = "white", width = 800, height = 800)
	par(mfrow = c(2,3))
	for(i in 1:6){
		plot(seq(3000,10000,by=1000), actual.rmsegr[i,], type = "l", lty = 2, ylab = "RMSE Reduction", xlab = "N (sim)",
				main = rownames(actual.rmsegr)[i])
		lines(seq(3000,10000,by=1000), expected.rmsegr, col = 2)
		legend("topright", legend = c("Actual", "Expected"), col = 1:2, bty = "m", lty = c(2,1))
	}
	dev.off()
	toc = Sys.time()-tic
	cat("Elapsed:", toc, "\n")
	return(toc)
}
# Test 1b: ARFIMA fit/filter equivalence across the 9 supported conditional
# distributions ("norm" ... "jsu").
# Fits an ARMA(1,1) model to the SP500 returns under each distribution,
# re-filters the same data with the fitted parameters fixed, and writes a
# comparison of coefficients, implied higher moments, residuals and the
# extractor methods of both objects to test1b.txt. Returns the elapsed time.
rugarch.test1b = function(cluster=NULL){
	# fit/filter
	tic = Sys.time()
	data(sp500ret)
	fit = vector(mode = "list", length = 9)
	dist = c("norm", "snorm", "std", "sstd", "ged", "sged", "nig", "ghyp", "jsu")
	# One ARMA(1,1) fit per candidate distribution
	for(i in 1:9){
		spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
				distribution.model = dist[i])
		fit[[i]] = arfimafit(spec = spec, data = sp500ret, solver = "solnp", fit.control = list(scale = 1))
	}
	# Collect the estimated coefficients into a 9 x 7 matrix; entries stay NA
	# where a parameter is not part of the given distribution
	cfmatrix = matrix(NA, nrow = 9, ncol = 7)
	colnames(cfmatrix) = c("mu", "ar1", "ma1", "sigma", "skew", "shape", "ghlambda")
	rownames(cfmatrix) = dist
	for(i in 1:9){
		cf = coef(fit[[i]])
		cfmatrix[i, match(names(cf), colnames(cfmatrix))] = cf
	}
	# Implied distribution skewness / excess kurtosis where defined
	# (modelinc[16]/[17] flag the presence of the skew/shape parameters)
	sk = ku = rep(0, 9)
	for(i in 1:9){
		cf = coef(fit[[i]])
		if(fit[[i]]@model$modelinc[16]>0) sk[i] = dskewness(distribution = dist[i],
					skew = cf["skew"], shape = cf["shape"], lambda = cf["ghlambda"])
		if(fit[[i]]@model$modelinc[17]>0) ku[i] = dkurtosis(distribution = dist[i],
					skew = cf["skew"], shape = cf["shape"], lambda = cf["ghlambda"])
	}
	# Hannan-Quinn criterion per fit (4th row of infocriteria)
	hq = sapply(fit, FUN = function(x) infocriteria(x)[4])
	cfmatrix = cbind(cfmatrix, sk, ku, hq)
	colnames(cfmatrix) = c(colnames(cfmatrix[,1:7]), "skewness", "ex.kurtosis","HQIC")
	# filter the data to check results
	filt = vector(mode = "list", length = 9)
	for(i in 1:9){
		spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
				distribution.model = dist[i])
		setfixed(spec) = as.list(coef(fit[[i]]))
		filt[[i]] = arfimafilter(spec = spec, data = sp500ret)
	}
	# All comparison output goes to test1b.txt
	options(width = 120)
	zz <- file("test1b.txt", open="wt")
	sink(zz)
	print(cfmatrix, digits = 4)
	cat("\nARFIMAfit and ARFIMAfilter residuals check:\n")
	print(head(sapply(filt, FUN = function(x) residuals(x))) == head(sapply(fit, FUN = function(x) residuals(x))))
	cat("\ncoef method:\n")
	print(cbind(coef(filt[[1]]), coef(fit[[1]])))
	cat("\nfitted method:\n")
	print(cbind(head(fitted(filt[[1]])), head(fitted(fit[[1]]))))
	cat("\ninfocriteria method:\n")
	# For filter, it assumes estimation of parameters else does not make sense!
	print(cbind(infocriteria(filt[[1]]), infocriteria(fit[[1]])))
	cat("\nlikelihood method:\n")
	print(cbind(likelihood(filt[[1]]), likelihood(fit[[1]])))
	cat("\nresiduals method:\n")
	print(cbind(head(residuals(filt[[1]])), head(residuals(fit[[1]]))))
	cat("\nuncmean method:\n")
	print(cbind(uncmean(filt[[1]]), uncmean(fit[[1]])))
	cat("\nuncmean method (by simulation):\n")
	# For spec and fit
	spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE,
					arfima = FALSE), distribution.model = dist[1])
	setfixed(spec) = as.list(coef(fit[[1]]))
	print(cbind(uncmean(spec, method = "simulation", n.sim = 100000, rseed = 100),
					uncmean(fit[[1]], method = "simulation", n.sim = 100000, rseed = 100)))
	cat("\nsummary method:\n")
	print(show(filt[[1]]))
	print(show(fit[[1]]))
	sink(type="message")
	sink()
	close(zz)
	toc = Sys.time()-tic
	cat("Elapsed:", toc, "\n")
	return(toc)
}
# Test 1c: ARFIMA unconditional (long-run) forecasting.
# Fits an ARMA(1,1) model to the SP500 returns under each of the 9
# distributions, produces 100-step-ahead forecasts from both the fitted
# object and an equivalent fixed-parameter spec (the two must agree),
# compares the 40-step forecast against the unconditional mean in
# test1c.txt, and plots all forecast paths to test1c.eps. Returns the
# elapsed run time.
rugarch.test1c = function(cluster=NULL){
	# unconditional forecasting
	tic = Sys.time()
	data(sp500ret)
	fit = vector(mode = "list", length = 9)
	dist = c("norm", "snorm", "std", "sstd", "ged", "sged", "nig", "ghyp", "jsu")
	for(i in 1:9){
		spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
				distribution.model = dist[i])
		fit[[i]] = arfimafit(spec = spec, data = sp500ret, solver = "solnp", fit.control = list(scale = 1))
	}
	# Coefficient matrix (NA where a parameter is absent for a distribution)
	cfmatrix = matrix(NA, nrow = 9, ncol = 7)
	colnames(cfmatrix) = c("mu", "ar1", "ma1", "sigma", "skew", "shape", "ghlambda")
	rownames(cfmatrix) = dist
	for(i in 1:9){
		cf = coef(fit[[i]])
		cfmatrix[i, match(names(cf), colnames(cfmatrix))] = cf
	}
	# Unconditional mean per fitted model
	umean = rep(0, 9)
	for(i in 1:9){
		umean[i] = uncmean(fit[[i]])
	}
	forc = vector(mode = "list", length = 9)
	for(i in 1:9){
		forc[[i]] = arfimaforecast(fit[[i]], n.ahead = 100)
	}
	# 40-step-ahead point forecast per model (should be near uncmean)
	lmean40 = sapply(forc, FUN = function(x) as.numeric(fitted(x)[40,1]))
	cfmatrix1 = cbind(cfmatrix, umean, lmean40)
	colnames(cfmatrix1) = c(colnames(cfmatrix1[,1:7]), "uncmean", "forecast40")
	# forecast with spec to check results
	forc2 = vector(mode = "list", length = 9)
	for(i in 1:9){
		spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
				distribution.model = dist[i])
		setfixed(spec) = as.list(coef(fit[[i]]))
		forc2[[i]] = arfimaforecast(spec, data = sp500ret, n.ahead = 100)
	}
	lmean240 = sapply(forc2, FUN = function(x) as.numeric(fitted(x)[40,1]))
	cfmatrix2 = cbind(cfmatrix, umean, lmean240)
	colnames(cfmatrix2) = c(colnames(cfmatrix2[,1:7]), "uncmean", "forecast40")
	# Test Methods on object
	options(width = 120)
	zz <- file("test1c.txt", open="wt")
	sink(zz)
	cat("\nARFIMAforecast from ARFIMAfit and ARFIMAspec check:")
	cat("\nFit\n")
	print(cfmatrix1, digits = 4)
	cat("\nSpec\n")
	print(cfmatrix2, digits = 4)
	slotNames(forc[[1]])
	# summary
	print(show(forc[[1]]))
	sink(type="message")
	sink()
	close(zz)
	# One column of 100 forecasts per distribution, for the plot below
	nforc = sapply(forc, FUN = function(x) t(as.numeric(fitted(x))))
	postscript("test1c.eps", width = 12, height = 5)
	# generate FWD dates:
	dx = as.POSIXct(tail(rownames(sp500ret),50))
	df = generatefwd(tail(dx, 1), length.out = 100+1, by = forc[[1]]@model$modeldata$period)[-1]
	dd = c(dx, df)
	clrs = rainbow(9, alpha = 1, start = 0.4, end = 0.95)
	plot(xts::xts(c(tail(sp500ret[,1], 50), nforc[,1]), dd), type = "l", ylim = c(-0.02, 0.02), col = "lightgrey",
			ylab = "", xlab = "", main = "100-ahead Unconditional Forecasts",
			minor.ticks=FALSE, auto.grid=FALSE)
	for(i in 1:9){
		tmp = c(tail(sp500ret[,1], 50), rep(NA, 100))
		tmp[51:150] = nforc[1:100,i]
		lines(xts::xts(c(rep(NA, 50), tmp[-(1:50)]),dd), col = clrs[i])
	}
	legend("topright", legend = dist, col = clrs, fill = clrs, bty = "n")
	dev.off()
	toc = Sys.time()-tic
	cat("Elapsed:", toc, "\n")
	return(toc)
}
# Test 1d: ARFIMA rolling 1-step-ahead forecasting.
# Fits an ARMA(1,1) model (1000 observations held out) under each of the 9
# distributions, rolls 1000 one-step forecasts, plots forecasts vs actuals
# and the absolute-deviation loss to test1d.eps, and writes the forecast
# performance measures (MSE/MAD/DAC) and method checks to test1d.txt.
# Returns the elapsed run time.
rugarch.test1d = function(cluster=NULL){
	# rolling forecast
	tic = Sys.time()
	data(sp500ret)
	fit = vector(mode = "list", length = 9)
	dist = c("norm", "snorm", "std", "sstd", "ged", "sged", "nig", "ghyp", "jsu")
	for(i in 1:9){
		spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
				distribution.model = dist[i])
		fit[[i]] = arfimafit(spec = spec, data = sp500ret, solver = "solnp",
				out.sample = 1000, fit.control = list(scale = 1))
	}
	# Coefficient matrix (NA where a parameter is absent for a distribution)
	cfmatrix = matrix(NA, nrow = 9, ncol = 7)
	colnames(cfmatrix) = c("mu", "ar1", "ma1", "sigma", "skew", "shape", "ghlambda")
	rownames(cfmatrix) = dist
	for(i in 1:9){
		cf = coef(fit[[i]])
		cfmatrix[i, match(names(cf), colnames(cfmatrix))] = cf
	}
	# 1-ahead forecasts rolled over the 1000 out-of-sample points
	forc = vector(mode = "list", length = 9)
	for(i in 1:9){
		forc[[i]] = arfimaforecast(fit[[i]], n.ahead = 1, n.roll = 999)
	}
	rollforc = sapply(forc, FUN = function(x) t(fitted(x)))
	# forecast performance measures:
	fpmlist = vector(mode = "list", length = 9)
	for(i in 1:9){
		fpmlist[[i]] = fpm(forc[[i]], summary = FALSE)
	}
	postscript("test1d.eps", width = 16, height = 5)
	par(mfrow = c(1,2))
	dd = as.POSIXct(tail(rownames(sp500ret), 1250))
	clrs = rainbow(9, alpha = 1, start = 0.4, end = 0.95)
	plot(xts::xts(tail(sp500ret[,1], 1250), dd), type = "l", ylim = c(-0.02, 0.02),
			col = "lightgrey", ylab = "", xlab = "",
			main = "Rolling 1-ahead Forecasts\nvs Actual", minor.ticks=FALSE,
			auto.grid=FALSE)
	for(i in 1:9){
		tmp = tail(sp500ret[,1], 1250)
		tmp[251:1250] = rollforc[1:1000,i]
		lines(xts::xts(c(rep(NA, 250), tmp[-(1:250)]), dd), col = clrs[i])
	}
	legend("topleft", legend = dist, col = clrs, fill = clrs, bty = "n")
	# plot deviation measures and range
	tmp = vector(mode = "list", length = 9)
	for(i in 1:9){
		tmp[[i]] = fpmlist[[i]][,"AE"]
		names(tmp[[i]]) = dist[i]
	}
	boxplot(tmp, col = clrs, names = dist, range = 6, notch = TRUE,
			main = "Rolling 1-ahead Forecasts\nAbsolute Deviation Loss")
	dev.off()
	# fpm comparison
	# FIX: removed a dead preallocation of compm (matrix of NA) that was
	# immediately overwritten by the sapply result below.
	compm = sapply(fpmlist, FUN = function(x) c(mean(x[,"SE"]), mean(x[,"AE"]), mean(x[,"DAC"])))
	colnames(compm) = dist
	rownames(compm) = c("MSE", "MAD", "DAC")
	zz <- file("test1d.txt", open="wt")
	sink(zz)
	cat("\nRolling Forecast FPM\n")
	print(compm, digits = 4)
	cat("\nMethods Check\n")
	print(fitted(forc[[1]])[,1:10,drop=FALSE])
	print(fpm(forc[[1]], summary = TRUE))
	print(show(forc[[1]]))
	sink(type="message")
	sink()
	close(zz)
	toc = Sys.time()-tic
	cat("Elapsed:", toc, "\n")
	return(toc)
}
# Test 1e: multi- methods (multispec/multifit/multifilter/multiforecast).
# Exercises the multi-object interface on the first three DJIA return
# series in three configurations: unequal specs, identical specs, and
# identical data with different specs. Writes the extractor output of the
# first two configurations to test1e.txt and returns the elapsed run time.
rugarch.test1e = function(cluster=NULL){
	# Multi-Methods
	tic = Sys.time()
	data(dji30ret)
	Dat = dji30ret[, 1:3, drop = FALSE]
	#------------------------------------------------
	# Unequal Spec
	# Fit
	spec1 = arfimaspec(mean.model = list(armaOrder = c(2,1)))
	spec2 = arfimaspec(mean.model = list(armaOrder = c(2,2)))
	spec3 = arfimaspec(mean.model = list(armaOrder = c(1,1)),
			distribution.model = "sstd")
	speclist = as.list(c(spec1, spec2, spec3))
	mspec = multispec( speclist )
	mfit1 = multifit(multispec = mspec, data = Dat, fit.control = list(stationarity=1),
			cluster = cluster)
	# Filter
	# Re-filter with the fitted parameters fixed (coef(mfit1) is a list here
	# because the specs differ)
	fspec = vector(mode = "list", length = 3)
	fspec[[1]] = spec1
	fspec[[2]] = spec2
	fspec[[3]] = spec3
	for(i in 1:3){
		setfixed(fspec[[i]])<-as.list(coef(mfit1)[[i]])
	}
	mspec1 = multispec( fspec )
	mfilt1 = multifilter(multifitORspec = mspec1, data = Dat, cluster = cluster)
	# Forecast from Fit
	mforc1 = multiforecast(mfit1, n.ahead = 10, cluster = cluster)
	# Forecast from Spec
	mforc11 = multiforecast(mspec1, data = Dat, n.ahead = 10, cluster = cluster)
	#------------------------------------------------
	#------------------------------------------------
	# Equal Spec
	# Fit
	spec1 = arfimaspec(mean.model = list(armaOrder = c(1,1)))
	mspec = multispec( replicate(3, spec1) )
	mfit2 = multifit(multispec = mspec, data = Dat, cluster = cluster)
	# Filter
	# With equal specs coef(mfit2) is a matrix, hence the column indexing
	fspec = vector(mode = "list", length = 3)
	fspec = replicate(3, spec1)
	for(i in 1:3){
		setfixed(fspec[[i]])<-as.list(coef(mfit2)[,i])
	}
	mspec2 = multispec( fspec )
	mfilt2 = multifilter(multifitORspec = mspec2, data = Dat, cluster = cluster)
	# Forecast From Fit
	mforc2 = multiforecast(mfit2, n.ahead = 10)
	# Forecast From Spec
	mforc21 = multiforecast(mspec2, data = Dat, n.ahead = 10, cluster = cluster)
	#------------------------------------------------
	#------------------------------------------------
	# Equal Spec/Same Data
	# Fit
	spec1 = arfimaspec(mean.model = list(armaOrder = c(1,1)))
	spec2 = arfimaspec(mean.model = list(armaOrder = c(2,1)))
	spec3 = arfimaspec(mean.model = list(armaOrder = c(3,1)))
	speclist = as.list(c(spec1, spec2, spec3))
	mspec = multispec( speclist )
	mfit3 = multifit(multispec = mspec, data = cbind(Dat[,1], Dat[,1], Dat[,1]),
			cluster = cluster)
	# Forecast
	# NOTE(review): mfit3/mforc3 are run for their side effects only; their
	# output is not written to test1e.txt below.
	mforc3 = multiforecast(mfit3, n.ahead = 10, cluster = cluster)
	#------------------------------------------------
	zz <- file("test1e.txt", open="wt")
	sink(zz)
	cat("\nMultifit Evaluation\n")
	cat("\nUnequal Spec\n")
	print(mfit1)
	print(likelihood(mfit1))
	print(coef(mfit1))
	print(head(fitted(mfit1)))
	print(head(residuals(mfit1)))
	print(mfilt1)
	print(likelihood(mfilt1))
	print(coef(mfilt1))
	print(head(fitted(mfilt1)))
	print(head(residuals(mfilt1)))
	print(mforc1)
	print(fitted(mforc1))
	print(mforc11)
	print(fitted(mforc11))
	cat("\nEqual Spec\n")
	print(mfit2)
	print(likelihood(mfit2))
	print(coef(mfit2))
	print(head(fitted(mfit2)))
	print(head(residuals(mfit2)))
	print(mfilt2)
	print(likelihood(mfilt2))
	print(coef(mfilt2))
	print(head(fitted(mfilt2)))
	print(head(residuals(mfilt2)))
	print(mforc2)
	print(fitted(mforc2))
	print(mforc21)
	print(fitted(mforc21))
	sink(type="message")
	sink()
	close(zz)
	toc = Sys.time()-tic
	cat("Elapsed:", toc, "\n")
	return(toc)
}
# Test 1f: rolling ARFIMA estimation (arfimaroll).
# Runs a rolling fit/forecast on the SP500 returns -- 1-step-ahead
# forecasts over the last 500 observations, refitting every 25 points with
# a moving window -- then writes the VaR/fpm reports and the extractor
# output to test1f.txt. Returns the elapsed run time.
rugarch.test1f = function(cluster=NULL){
	start.time = Sys.time()
	data(sp500ret)
	default.spec = arfimaspec()
	roll = arfimaroll(default.spec, data = sp500ret, n.ahead = 1, forecast.length = 500,
			refit.every = 25, refit.window = "moving", cluster = cluster,
			solver = "hybrid", fit.control = list(), solver.control = list(),
			calculate.VaR = TRUE, VaR.alpha = c(0.01, 0.05))
	# Divert all diagnostic output to test1f.txt
	outcon <- file("test1f.txt", open="wt")
	sink(outcon)
	cat("\nForecast Evaluation\n")
	report(roll, "VaR")
	report(roll, "fpm")
	# Extractor functions: density / VaR data frames, per-window
	# coefficients and forecast performance measures
	dens.df = as.data.frame(roll, which = "density")
	var.df = as.data.frame(roll, which = "VaR")
	print(head(dens.df, 25))
	print(tail(dens.df, 25))
	print(head(var.df, 25))
	print(tail(var.df, 25))
	cf.list = coef(roll)
	print(cf.list[[1]])
	print(cf.list[[20]])
	print(head(fpm(roll, summary=FALSE)))
	sink(type="message")
	sink()
	close(outcon)
	elapsed = Sys.time()-start.time
	cat("Elapsed:", elapsed, "\n")
	return(elapsed)
}
# Test 1g: ARFIMA/ARMA simulation cross-checks.
# Verifies that arfimapath with d=0 (ARFIMA), a plain ARMA arfimapath, a
# GARCH(0,0) ugarchpath, arima.sim and fracdiff.sim all reproduce the same
# paths when fed identical innovations, across ARMA(2,2), ARMA(2,1),
# AR(2) and MA(2) models. Also checks arfimasim with external regressors
# and custom bootstrap innovations against arfimaforecast. Results are
# written to test1g-1.txt ... test1g-6.txt. Returns the elapsed run time.
rugarch.test1g = function(cluster=NULL){
	# simulation
	tic = Sys.time()
	require(fracdiff)
	# ARFIMA(2,0,1) with arfima=0 fixed should equal the pure ARMA(2,1) path
	spec1 = arfimaspec( mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(mu = 0.02, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7, arfima = 0,
					shape = 5, sigma = 0.0123))
	spec2 = arfimaspec( mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = FALSE),
			distribution.model = "std", fixed.pars = list(mu = 0.02, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7,
					shape = 5, sigma = 0.0123))
	sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			n.start=1)
	sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100)
	sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, n.start=1)
	sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, n.start=1)
	zz <- file("test1g-1.txt", open="wt")
	sink(zz)
	cat("\nARFIMA and ARMA simulation tests:\n")
	print(tail(fitted(sim1)), digits = 5)
	print(tail(fitted(sim2)), digits = 5)
	sink(type="message")
	sink()
	close(zz)
	# Now the rugarch simulation of ARFIMA/ARMA with arima.sim of R
	# Note that arima.sim simulates the residuals (i.e no mean):
	# ARMA(2,2)
	set.seed(33)
	inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
	spec1 = arfimaspec( mean.model = list(armaOrder = c(2,2), include.mean = FALSE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, arfima = 0, ma1 = -0.7,
					ma2 = 0.3, shape = 5, sigma = 0.0123))
	spec2 = arfimaspec( mean.model = list(armaOrder = c(2,2), include.mean = FALSE, arfima = FALSE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, ma1 = -0.7,
					ma2 = 0.3, shape = 5,sigma = 0.0123))
	# Notice the warning...it would be an error had we not added 2 extra zeros to the custom distribution
	# equal to the MA order since n.start >= MA order in arfima model
	sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			custom.dist = list(name = "sample",
					distfit = matrix(c(0,0,inn), ncol = 1), type = "z"))
	sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			custom.dist = list(name = "sample",
					distfit = matrix(inn, ncol = 1), type = "z"))
	# Test with a GARCH specification as well (with alpha=beta=0)
	specx = ugarchspec( mean.model = list(armaOrder = c(2,2), include.mean = FALSE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, ma1 = -0.7,
					ma2 = 0.3, arfima=0, shape = 5, omega = 0.0123^2, alpha1 = 0, beta1=0))
	simx = ugarchpath(specx, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			presigma = c(0,0), custom.dist = list(name = "sample",
					distfit = matrix(c(0,0,inn), ncol = 1), type = "z"))
	# Note that we pass the non-standardized innovations to arima.sim (i.e. multiply by sigma)
	sim3 = arima.sim(model = list(ar = c(0.6, 0.21), ma = c(-0.7, 0.3)), n = 1000,
			n.start = 4, start.innov = c(0,0,0,0), innov = inn*0.0123)
	# set fracdiff setting to n.start=0 and allow.0.nstart=TRUE
	# (fracdiff uses the opposite MA sign convention, hence the sign flips)
	sim4 = fracdiff.sim(n=1000, ar = c(0.6, 0.21), ma = c(0.7, -0.3), d = 0,
			innov = c(0,0,inn*0.0123), n.start = 0, backComp = TRUE, allow.0.nstart = TRUE,
			mu = 0)
	tst1 = cbind(head(fitted(sim1)), head(fitted(sim2)), head(sim3), head(sim4$series), head(fitted(simx)))
	tst2 = cbind(tail(fitted(sim1)), tail(fitted(sim2)), tail(sim3), tail(sim4$series), tail(fitted(simx)))
	colnames(tst1) = colnames(tst2) = c("ARFIMA(d = 0)", "ARMA", "arima.sim", "fracdiff", "GARCH(0,0)")
	zz <- file("test1g-2.txt", open="wt")
	sink(zz)
	cat("\nARFIMA, ARMA arima.sim simulation tests:\n")
	print(tst1, digits = 6)
	print(tst2, digits = 6)
	sink(type="message")
	sink()
	close(zz)
	# ARMA(2,1)
	set.seed(33)
	inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
	spec1 = arfimaspec( mean.model = list(armaOrder = c(2,1), include.mean = FALSE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, arfima = 0, ma1 = -0.7,
					shape = 5, sigma = 0.0123))
	spec2 = arfimaspec( mean.model = list(armaOrder = c(2,1), include.mean = FALSE, arfima = FALSE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, ma1 = -0.7,
					shape = 5,sigma = 0.0123))
	sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			custom.dist = list(name = "sample",
					distfit = matrix(c(0,inn), ncol = 1), type = "z"))
	sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			custom.dist = list(name = "sample",
					distfit = matrix(inn, ncol = 1), type = "z"))
	# Test with a GARCH specification as well (with alpha=beta=0)
	specx = ugarchspec( mean.model = list(armaOrder = c(2,1), include.mean = FALSE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, ma1 = -0.7,
					arfima=0, shape = 5, omega = 0.0123^2, alpha1 = 0, beta1=0))
	simx = ugarchpath(specx, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			presigma = c(0,0), custom.dist = list(name = "sample",
					distfit = matrix(c(0,inn), ncol = 1), type = "z"))
	# Note that we pass the non-standardized innovations to arima.sim (i.e. multiply by sigma)
	sim3 = arima.sim(model = list(ar = c(0.6, 0.21), ma = c(-0.7)), n = 1000,
			n.start = 3, start.innov = c(0,0,0), innov = inn*0.0123)
	tst1 = cbind(head(fitted(sim1)), head(fitted(sim2)), head(sim3), head(fitted(simx)))
	tst2 = cbind(tail(fitted(sim1)), tail(fitted(sim2)), tail(sim3), tail(fitted(simx)))
	colnames(tst1) = colnames(tst2) = c("ARFIMA(d = 0)", "ARMA", "arima.sim", "GARCH(0,0)")
	zz <- file("test1g-3.txt", open="wt")
	sink(zz)
	cat("\nARFIMA, ARMA arima.sim simulation tests:\n")
	print(tst1, digits = 6)
	print(tst2, digits = 6)
	sink(type="message")
	sink()
	close(zz)
	# Pure AR
	set.seed(33)
	inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
	spec1 = arfimaspec( mean.model = list(armaOrder = c(2,0), include.mean = FALSE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, arfima = 0, ma1 = -0.7,
					shape = 5, sigma = 0.0123))
	spec2 = arfimaspec( mean.model = list(armaOrder = c(2,0), include.mean = FALSE, arfima = FALSE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, ma1 = -0.7,
					shape = 5,sigma = 0.0123))
	sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			custom.dist = list(name = "sample",
					distfit = matrix(inn, ncol = 1), type = "z"))
	sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			custom.dist = list(name = "sample",
					distfit = matrix(inn, ncol = 1), type = "z"))
	specx = ugarchspec( mean.model = list(armaOrder = c(2,0), include.mean = FALSE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21,
					arfima=0, shape = 5, omega = 0.0123^2, alpha1 = 0, beta1=0))
	simx = ugarchpath(specx, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			presigma = c(0,0), custom.dist = list(name = "sample",
					distfit = matrix(c(inn), ncol = 1), type = "z"))
	# Note that we pass the non-standardized innovations to arima.sim (i.e. multiply by sigma)
	sim3 = arima.sim(model = list(ar = c(0.6, 0.21), ma = NULL), n = 1000,
			n.start = 2, start.innov = c(0,0), innov = inn*0.0123)
	tst1 = cbind(head(fitted(sim1)), head(fitted(sim2)), head(sim3), head(fitted(simx)))
	tst2 = cbind(tail(fitted(sim1)), tail(fitted(sim2)), tail(sim3), tail(fitted(simx)))
	colnames(tst1) = colnames(tst2) = c("ARFIMA(d = 0)", "ARMA", "arima.sim", "GARCH(0,0)")
	zz <- file("test1g-4.txt", open="wt")
	sink(zz)
	cat("\nARFIMA, ARMA arima.sim simulation tests:\n")
	print(tst1, digits = 6)
	print(tst2, digits = 6)
	sink(type="message")
	sink()
	close(zz)
	# Pure MA
	set.seed(33)
	inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
	spec1 = arfimaspec( mean.model = list(armaOrder = c(0,2), include.mean = FALSE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(ma1 = 0.6, ma2 = -0.21, arfima = 0,
					shape = 5, sigma = 0.0123))
	spec2 = arfimaspec( mean.model = list(armaOrder = c(0,2), include.mean = FALSE, arfima = FALSE),
			distribution.model = "std", fixed.pars = list(ma1 = 0.6, ma2 = -0.21,
					shape = 5,sigma = 0.0123))
	sim1 = arfimapath(spec = spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			custom.dist = list(name = "sample",
					distfit = matrix(c(0,0,inn), ncol = 1), type = "z"))
	sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			custom.dist = list(name = "sample",
					distfit = matrix(inn, ncol = 1), type = "z"))
	specx = ugarchspec( mean.model = list(armaOrder = c(0,2), include.mean = FALSE, arfima = TRUE),
			distribution.model = "std", fixed.pars = list(ma1 = 0.6, ma2 = -0.21,
					arfima=0, shape = 5, omega = 0.0123^2, alpha1 = 0, beta1=0))
	simx = ugarchpath(specx, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
			presigma = c(0,0), custom.dist = list(name = "sample",
					distfit = matrix(c(0,0,inn), ncol = 1), type = "z"))
	# Note that we pass the non-standardized innovations to arima.sim (i.e. multiply by sigma)
	set.seed(33)
	inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
	sim3 = arima.sim(model = list(ar = NULL, ma = c(0.6, -0.21)), n = 1000,
			n.start = 2, start.innov = c(0,0), innov = inn*0.0123)
	tst1 = cbind(head(fitted(sim1)), head(fitted(sim2)), head(sim3), head(fitted(simx)))
	tst2 = cbind(tail(fitted(sim1)), tail(fitted(sim2)), tail(sim3), tail(fitted(simx)))
	colnames(tst1) = colnames(tst2) = c("ARFIMA(d = 0)", "ARMA", "arima.sim", "GARCH(0,0)")
	zz <- file("test1g-5.txt", open="wt")
	sink(zz)
	cat("\nARFIMA, ARMA arima.sim simulation tests:\n")
	print(tst1, digits = 6)
	print(tst2, digits = 6)
	sink(type="message")
	sink()
	close(zz)
	# arfimasim + exogenous regressors + custom innovations
	# Fit an ARMA(1,1)-X model on the first DJIA series with two benchmark
	# regressors, then compare the 10-step forecast with the mean of 10000
	# simulated paths driven by bootstrapped residuals
	data(dji30ret)
	Dat = dji30ret[,1, drop = FALSE]
	T = dim(Dat)[1]
	Bench = as.matrix(cbind(apply(dji30ret[,2:10], 1, "mean"), apply(dji30ret[,11:20], 1, "mean")))
	spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE,
					external.regressors = Bench), distribution.model = "std")
	fit = arfimafit(spec = spec, data = Dat, solver = "solnp", out.sample = 500)
	# lag1 Benchmark
	BenchF = Bench[(T-500):(T-500+9), , drop = FALSE]
	exsim = vector(mode = "list", length = 10000)
	for(i in 1:10000) exsim[[i]] = as.matrix(BenchF)
	# simulated residuals
	res = residuals(fit)
	ressim = matrix(NA, ncol = 10000, nrow = 10)
	set.seed(10000)
	for(i in 1:10000) ressim[,i] = sample(res, 10, replace = TRUE)
	sim = arfimasim(fit, n.sim = 10, m.sim = 10000, startMethod="sample",
			custom.dist = list(name = "sample", distfit = ressim, type = "res"), mexsimdata = exsim)
	forc = fitted(arfimaforecast(fit, n.ahead = 10, external.forecasts = list(mregfor = BenchF)))
	simx = fitted(sim)
	actual10 = Dat[(T-500+1):(T-500+10), 1, drop = FALSE]
	simm = apply(simx, 1 ,"mean")
	simsd = apply(simx, 1 ,"sd")
	zz <- file("test1g-6.txt", open="wt")
	sink(zz)
	print(round(cbind(actual10, forc, simm, simsd),5), digits = 4)
	sink(type="message")
	sink()
	close(zz)
	toc = Sys.time()-tic
	cat("Elapsed:", toc, "\n")
	return(toc)
}
# ARFIMA benchmark tests
rugarch.test1h = function(cluster=NULL){
tic = Sys.time()
# ARFIMA(2,d,1)
require(fracdiff)
truecoef1 = list(mu = 0.005, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7, arfima = 0.3, sigma = 0.0123)
spec1 = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm",
fixed.pars = truecoef1)
sim1 = arfimapath(spec1, n.sim = 5000, n.start = 100, m.sim = 1, rseed = 101)
data1 = fitted(sim1)
#write.csv(data1[,1], file = "D:/temp1.csv")
spec1 = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
fit1 = arfimafit(spec1, data = data1)
fit1.fd = fracdiff(as.numeric(data1[,1])-coef(fit1)["mu"], nar = 2, nma = 1)
# Commercial Implementation Program Fit (NLS-with imposed stationarity):
commcheck1 = c(0.00488381, 0.537045, 0.0319251, -0.721266, 0.348604, 0.0122415)
fdcheck1 = c(NA, coef(fit1.fd)[2:3], -coef(fit1.fd)[4], coef(fit1.fd)[1], fit1.fd$sigma)
chk1 = cbind(coef(fit1), commcheck1, fdcheck1, unlist(truecoef1))
colnames(chk1) = c("rugarch", "commercial", "fracdiff", "true")
chk1lik = c(likelihood(fit1), 14920.4279, fit1.fd$log.likelihood)
# ARFIMA(2,d,0)
truecoef2 = list(mu = 0.005, ar1 = 0.6, ar2 = 0.01, arfima = 0.1, sigma = 0.0123)
spec2 = arfimaspec(
mean.model = list(armaOrder = c(2,0), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm",
fixed.pars = truecoef2)
sim2 = arfimapath(spec2, n.sim = 5000, n.start = 100, m.sim = 1, rseed = 102)
data2 = fitted(sim2)
#write.csv(data2[,1], file = "D:/temp2.csv")
spec2 = arfimaspec(
mean.model = list(armaOrder = c(2,0), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
fit2 = arfimafit(spec2, data = data2)
fit2.fd = fracdiff(as.numeric(data2[,1])-coef(fit2)["mu"], nar = 2, nma = 0)
fdcheck2 = c(NA, coef(fit2.fd)[2:3], coef(fit2.fd)[1], fit2.fd$sigma)
commcheck2 = c( 0.00585040, 0.692693, 0.000108778,0.00466664,0.0122636)
chk2 = cbind(coef(fit2), commcheck2, fdcheck2, unlist(truecoef2))
colnames(chk2) = c("rugarch", "commercial", "fracdiff", "true")
chk2lik = c(likelihood(fit2), 14954.5702, fit2.fd$log.likelihood)
# ARFIMA(0,d,2)
truecoef3 = list(mu = 0.005, ma1 = 0.3, ma2 = 0.2, arfima = 0.1, sigma = 0.0123)
spec3 = arfimaspec(
mean.model = list(armaOrder = c(0,2), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm",
fixed.pars = truecoef3)
sim3 = arfimapath(spec3, n.sim = 5000, n.start = 100, m.sim = 1, rseed = 103)
data3 = fitted(sim3)
#write.csv(data3[,1], file = "D:/temp3.csv")
spec3 = arfimaspec(
mean.model = list(armaOrder = c(0,2), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
fit3 = arfimafit(spec3, data = data3, solver="hybrid")
fit3.fd = fracdiff(as.numeric(data3[,1])-coef(fit3)["mu"], nar = 0, nma = 2)
fdcheck3 = c(NA, -coef(fit3.fd)[2:3], coef(fit3.fd)[1], fit3.fd$sigma)
commcheck3 = c( 0.00580941, 0.320205, 0.206786, 0.0546052, 0.0120114)
chk3 = cbind(coef(fit3), commcheck3, fdcheck3, unlist(truecoef3))
colnames(chk3) = c("rugarch", "commercial", "fracdiff", "true")
chk3lik = c(likelihood(fit3), 15015.2957, fit3.fd$log.likelihood)
# ARFIMA(2,d,1) simulation (using rugarch path)
truecoef = list(mu = 0.005, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7, arfima = 0.45, sigma = 0.0123)
spec = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm", fixed.pars = truecoef)
sim = arfimapath(spec, n.sim = 5000, n.start = 100, m.sim = 50, rseed = 1:50)
Data = fitted(sim)
spec = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
coefx = matrix(NA, ncol = 6, nrow = 50)
coefy = matrix(NA, ncol = 6, nrow = 50)
if(!is.null(cluster)){
parallel::clusterEvalQ(cluster, require(rugarch))
parallel::clusterEvalQ(cluster, require(fracdiff))
parallel::clusterExport(cluster, c("Data", "spec"), envir = environment())
sol = parallel::parLapply(cluster, as.list(1:50), fun = function(i){
fit = arfimafit(spec, data = Data[,i], solver="hybrid")
if(fit@fit$convergence == 0) coefx = coef(fit) else coefx = rep(NA, 6)
if(fit@fit$convergence == 0){
fit = fracdiff(as.numeric(Data[,i]) - coef(fit)["mu"], nar = 2, nma = 1)
} else{
fit = fracdiff(scale(as.numeric(Data[,i]), scale=F), nar = 2, nma = 1)
}
coefy = c(NA, coef(fit)[2:3], -coef(fit)[4], coef(fit)[1], fit$sigma)
return(list(coefx = coefx, coefy = coefy))
})
coefx = t(sapply(sol, FUN = function(x) x$coefx))
coefy = t(sapply(sol, FUN = function(x) x$coefy))
} else{
for(i in 1:50){
fit = arfimafit(spec, data = Data[,i], solver="hybrid")
if(fit@fit$convergence == 0) coefx[i,] = coef(fit)
fit = fracdiff(scale(as.numeric(Data[,i]), scale=F), nar = 2, nma = 1)
coefy[i,] = c(NA, coef(fit)[2:3], -coef(fit)[4], coef(fit)[1], fit$sigma)
}
}
zz <- file("test1h-1.txt", open="wt")
sink(zz)
cat("\nARFIMA(2,d,1)\n")
print(chk1)
print(chk1lik)
cat("\nARFIMA(2,d,0)\n")
print(chk2)
print(chk2lik)
cat("\nARFIMA(0,d,2)\n")
print(chk3)
print(chk3lik)
cat("\nARFIMA(2,d,1) mini-simulation/fit\n")
# small sample/simulation also use median:
cat("\nMedian (rugarch, fracdiff)\n")
print( data.frame(rugarch=round(apply(coefx, 2, "median"),5), fracdiff = round(apply(coefy, 2, "median"),5), true=unlist(truecoef) ) )
cat("\nMean (rugarch, fracdiff)\n")
print( data.frame(rugarch=round(apply(coefx, 2, "mean"),5), fracdiff = round(apply(coefy, 2, "mean"),5), true=unlist(truecoef) ) )
print( data.frame(rugarch.sd =round(apply(coefx, 2, "sd"),5), fracdiff.sd = round(apply(coefy, 2, "sd"),5) ) )
sink(type="message")
sink()
close(zz)
# ARFIMA(2,d,1) simulation (using fracdiff path)
truecoef = list(mu = 0.005, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7, arfima = 0.45, sigma = 0.0123)
Data = matrix(NA, ncol = 50, nrow = 5000)
for(i in 1:50){
set.seed(i)
sim = fracdiff.sim(n=5000, ar = c(0.6, 0.01), ma = c(0.7), d = 0.45,
rand.gen = rnorm, n.start = 100, backComp = TRUE, sd = 0.0123, mu = 0.005)
Data[,i] = sim$series
}
spec = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
coefx = matrix(NA, ncol = 6, nrow = 50)
coefy = matrix(NA, ncol = 6, nrow = 50)
if(!is.null(cluster)){
parallel::clusterEvalQ(cluster, require(rugarch))
parallel::clusterEvalQ(cluster, require(fracdiff))
parallel::clusterExport(cluster, c("Data", "spec"), envir = environment())
sol = parallel::parLapply(cluster, as.list(1:50), fun = function(i){
fit = arfimafit(spec, data = Data[,i], solver="hybrid")
if(fit@fit$convergence == 0) coefx = coef(fit) else coefx = rep(NA, 6)
if(fit@fit$convergence == 0){
fit = fracdiff(as.numeric(Data[,i]) - coef(fit)["mu"], nar = 2, nma = 1)
} else{
fit = fracdiff(scale(as.numeric(Data[,i]), scale=F), nar = 2, nma = 1)
}
coefy = c(NA, coef(fit)[2:3], -coef(fit)[4], coef(fit)[1], fit$sigma)
return(list(coefx = coefx, coefy = coefy))
})
coefx = t(sapply(sol, FUN = function(x) x$coefx))
coefy = t(sapply(sol, FUN = function(x) x$coefy))
} else{
for(i in 1:50){
fit = arfimafit(spec, data = Data[,i], solver="hybrid")
if(fit@fit$convergence == 0) coefx[i,] = coef(fit)
fit = fracdiff(scale(as.numeric(Data[,i]), scale=F), nar = 2, nma = 1)
coefy[i,] = c(NA, coef(fit)[2:3], -coef(fit)[4], coef(fit)[1], fit$sigma)
}
}
zz <- file("test1h-2.txt", open="wt")
sink(zz)
cat("\nARFIMA(2,d,1) mini-simulation/fit2 (simulation from fracdiff.sim)\n")
# small sample/simulation also use median:
cat("\nMedian (rugarch, fracdiff)\n")
print( data.frame(rugarch=round(apply(coefx, 2, "median"),5), fracdiff = round(apply(coefy, 2, "median"),5), true=unlist(truecoef) ) )
cat("\nMean (rugarch, fracdiff)\n")
print( data.frame(rugarch=round(apply(coefx, 2, "mean"),5), fracdiff = round(apply(coefy, 2, "mean"),5), true=unlist(truecoef) ) )
print( data.frame(rugarch.sd =round(apply(coefx, 2, "sd"),5), fracdiff.sd = round(apply(coefy, 2, "sd"),5) ) )
sink(type="message")
sink()
close(zz)
toc = Sys.time()-tic
cat("Elapsed:", toc, "\n")
return(toc)
} | /inst/rugarch.tests/rugarch.test1.R | no_license | samedii/rugarch | R | false | false | 36,957 | r | #################################################################################
##
## R package rugarch by Alexios Ghalanos Copyright (C) 2008-2013.
## This file is part of the R package rugarch.
##
## The R package rugarch is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## The R package rugarch is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
#################################################################################
# Simulated parameter distribution test for an ARFIMA (here ARMA(2,2), d = 0)
# model. Simulates from a fixed-parameter spec, re-estimates over recursively
# expanding windows (N = 2000 to 10000 by 1000, 100 paths each) and checks
# that the parameter RMSE decays at the sqrt(N) rate expected of a consistent
# estimator. Writes a text summary (test1a.txt), the distribution object
# (test1a.rda) and a diagnostic plot (test1a.eps); returns the elapsed time.
#
# @param cluster optional parallel cluster passed through to rugarch.
# @return elapsed time (difftime).
rugarch.test1a = function(cluster = NULL){
    tic = Sys.time()
    # True DGP: ARMA(2,2) with normal innovations and fixed parameters.
    spec = arfimaspec(mean.model = list(armaOrder = c(2, 2), include.mean = TRUE, arfima = FALSE),
        distribution.model = "norm",
        fixed.pars = list(ar1 = 0.6, ar2 = 0.21, ma1 = -0.7, ma2 = 0.3, mu = 0.02,
            sigma = 0.02))
    dist = arfimadistribution(spec, n.sim = 2000, n.start = 100, m.sim = 100, recursive = TRUE,
        recursive.length = 10000, recursive.window = 1000, cluster = cluster)
    save(dist, file = "test1a.rda")
    options(width = 150)
    zz <- file("test1a.txt", open = "wt")
    sink(zz)
    # BUGFIX: inside a function, expression values are not auto-printed (that
    # only happens at top level), so the bare extractor calls wrote nothing to
    # the sink. Wrap them in print().
    # slots:
    print(slotNames(dist))
    # methods: show() prints its own summary
    show(dist)
    # as.data.frame(...., window, which=c("rmse", "stats", "coef", "coefse"))
    # default
    print(as.data.frame(dist))
    print(as.data.frame(dist, window = 1, which = "rmse"))
    print(as.data.frame(dist, window = 1, which = "stats"))
    print(as.data.frame(dist, window = 1, which = "coef"))
    print(as.data.frame(dist, window = 1, which = "coefse"))
    print(as.data.frame(dist, window = 8, which = "rmse"))
    print(as.data.frame(dist, window = 8, which = "stats"))
    print(as.data.frame(dist, window = 8, which = "coef"))
    print(as.data.frame(dist, window = 8, which = "coefse"))
    sink(type = "message")
    sink()
    close(zz)
    # create some plots
    nwindows = dist@dist$details$nwindows
    # 2000/3000/4000/5000/6000/7000/8000/9000/10000
    # expected reduction factor in RMSE for sqrt(N) consistency
    expexcted.rmsegr = sqrt(2000/seq(3000, 10000, by = 1000))
    # actual RMSE reduction relative to the first (N = 2000) window
    actual.rmsegr = matrix(NA, ncol = 8, nrow = 6)
    # BUGFIX: labels previously read c(..., "ma2", "ma2", ...); the model
    # parameters are mu, ar1, ar2, ma1, ma2, sigma.
    rownames(actual.rmsegr) = c("mu", "ar1", "ar2", "ma1", "ma2", "sigma")
    # start at 2000 (window 1)
    rmse.start = as.data.frame(dist, window = 1, which = "rmse")
    for(i in 2:nwindows) actual.rmsegr[, i-1] = as.numeric(as.data.frame(dist, window = i, which = "rmse")/rmse.start)
    # BUGFIX: postscript() width/height are in inches, not pixels; 800x800
    # produced an absurdly sized device region (cf. test1c which uses 12x5).
    postscript("test1a.eps", bg = "white", width = 8, height = 8)
    par(mfrow = c(2, 3))
    for(i in 1:6){
        plot(seq(3000, 10000, by = 1000), actual.rmsegr[i, ], type = "l", lty = 2,
            ylab = "RMSE Reduction", xlab = "N (sim)",
            main = rownames(actual.rmsegr)[i])
        lines(seq(3000, 10000, by = 1000), expexcted.rmsegr, col = 2)
        legend("topright", legend = c("Actual", "Expected"), col = 1:2, bty = "m", lty = c(2, 1))
    }
    dev.off()
    toc = Sys.time() - tic
    cat("Elapsed:", toc, "\n")
    return(toc)
}
# Fit/filter consistency test. Estimates an ARFIMA(1,0,1)-type ARMA(1,1)
# model on the sp500ret dataset under all 9 supported conditional
# distributions, then re-filters the data with the estimated parameters held
# fixed and checks that the ARFIMAfit and ARFIMAfilter methods (coef, fitted,
# residuals, likelihood, infocriteria, uncmean) agree. Output goes to
# test1b.txt; returns the elapsed time.
#
# @param cluster optional parallel cluster (unused here; kept for a uniform
#   test-function signature).
# @return elapsed time (difftime).
rugarch.test1b = function(cluster=NULL){
# fit/filter
tic = Sys.time()
data(sp500ret)
# One fit per conditional distribution.
fit = vector(mode = "list", length = 9)
dist = c("norm", "snorm", "std", "sstd", "ged", "sged", "nig", "ghyp", "jsu")
for(i in 1:9){
spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
distribution.model = dist[i])
fit[[i]] = arfimafit(spec = spec, data = sp500ret, solver = "solnp", fit.control = list(scale = 1))
}
# Collect estimated coefficients into a distributions x parameters matrix;
# match() aligns each model's (possibly shorter) coefficient vector into the
# full column set, leaving NA where a parameter is absent.
cfmatrix = matrix(NA, nrow = 9, ncol = 7)
colnames(cfmatrix) = c("mu", "ar1", "ma1", "sigma", "skew", "shape", "ghlambda")
rownames(cfmatrix) = dist
for(i in 1:9){
cf = coef(fit[[i]])
cfmatrix[i, match(names(cf), colnames(cfmatrix))] = cf
}
# Implied skewness/excess kurtosis, computed only where the distribution has
# a skew (modelinc[16]) or shape (modelinc[17]) parameter; NOTE(review):
# modelinc index semantics come from rugarch internals — confirm against the
# package's model specification code.
sk = ku = rep(0, 9)
for(i in 1:9){
cf = coef(fit[[i]])
if(fit[[i]]@model$modelinc[16]>0) sk[i] = dskewness(distribution = dist[i],
skew = cf["skew"], shape = cf["shape"], lambda = cf["ghlambda"])
if(fit[[i]]@model$modelinc[17]>0) ku[i] = dkurtosis(distribution = dist[i],
skew = cf["skew"], shape = cf["shape"], lambda = cf["ghlambda"])
}
# Hannan-Quinn information criterion (4th element of infocriteria).
hq = sapply(fit, FUN = function(x) infocriteria(x)[4])
cfmatrix = cbind(cfmatrix, sk, ku, hq)
colnames(cfmatrix) = c(colnames(cfmatrix[,1:7]), "skewness", "ex.kurtosis","HQIC")
# filter the data to check results
filt = vector(mode = "list", length = 9)
for(i in 1:9){
spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
distribution.model = dist[i])
setfixed(spec) = as.list(coef(fit[[i]]))
filt[[i]] = arfimafilter(spec = spec, data = sp500ret)
}
options(width = 120)
zz <- file("test1b.txt", open="wt")
sink(zz)
print(cfmatrix, digits = 4)
# Filtering with the fitted parameters must reproduce the fit residuals.
cat("\nARFIMAfit and ARFIMAfilter residuals check:\n")
print(head(sapply(filt, FUN = function(x) residuals(x))) == head(sapply(fit, FUN = function(x) residuals(x))))
cat("\ncoef method:\n")
print(cbind(coef(filt[[1]]), coef(fit[[1]])))
cat("\nfitted method:\n")
print(cbind(head(fitted(filt[[1]])), head(fitted(fit[[1]]))))
cat("\ninfocriteria method:\n")
# For filter, it assumes estimation of parameters else does not make sense!
print(cbind(infocriteria(filt[[1]]), infocriteria(fit[[1]])))
cat("\nlikelihood method:\n")
print(cbind(likelihood(filt[[1]]), likelihood(fit[[1]])))
cat("\nresiduals method:\n")
print(cbind(head(residuals(filt[[1]])), head(residuals(fit[[1]]))))
cat("\nuncmean method:\n")
print(cbind(uncmean(filt[[1]]), uncmean(fit[[1]])))
cat("\nuncmean method (by simulation):\n")
# For spec and fit
spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE,
arfima = FALSE), distribution.model = dist[1])
setfixed(spec) = as.list(coef(fit[[1]]))
# Same rseed so the spec- and fit-based simulated unconditional means match.
print(cbind(uncmean(spec, method = "simulation", n.sim = 100000, rseed = 100),
uncmean(fit[[1]], method = "simulation", n.sim = 100000, rseed = 100)))
cat("\nsummary method:\n")
print(show(filt[[1]]))
print(show(fit[[1]]))
sink(type="message")
sink()
close(zz)
toc = Sys.time()-tic
cat("Elapsed:", toc, "\n")
return(toc)
}
# Unconditional forecasting test. Fits ARMA(1,1) models on sp500ret under all
# 9 conditional distributions, produces 100-step-ahead forecasts both from the
# fitted object and from a fixed-parameter spec (which must agree), and
# compares the long-horizon forecast against the model's unconditional mean.
# Output goes to test1c.txt and a plot to test1c.eps; returns elapsed time.
#
# @param cluster optional parallel cluster (unused; uniform signature).
# @return elapsed time (difftime).
rugarch.test1c = function(cluster=NULL){
    # unconditional forecasting
    tic = Sys.time()
    data(sp500ret)
    fit = vector(mode = "list", length = 9)
    dist = c("norm", "snorm", "std", "sstd", "ged", "sged", "nig", "ghyp", "jsu")
    for(i in 1:9){
        spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
            distribution.model = dist[i])
        fit[[i]] = arfimafit(spec = spec, data = sp500ret, solver = "solnp", fit.control = list(scale = 1))
    }
    # Coefficient matrix (NA where a parameter is absent for a distribution).
    cfmatrix = matrix(NA, nrow = 9, ncol = 7)
    colnames(cfmatrix) = c("mu", "ar1", "ma1", "sigma", "skew", "shape", "ghlambda")
    rownames(cfmatrix) = dist
    for(i in 1:9){
        cf = coef(fit[[i]])
        cfmatrix[i, match(names(cf), colnames(cfmatrix))] = cf
    }
    # Unconditional (long-run) mean of each fitted model.
    umean = rep(0, 9)
    for(i in 1:9){
        umean[i] = uncmean(fit[[i]])
    }
    # 100-step-ahead forecast from the fitted objects.
    forc = vector(mode = "list", length = 9)
    for(i in 1:9){
        forc[[i]] = arfimaforecast(fit[[i]], n.ahead = 100)
    }
    lmean40 = sapply(forc, FUN = function(x) as.numeric(fitted(x)[40,1]))
    cfmatrix1 = cbind(cfmatrix, umean, lmean40)
    colnames(cfmatrix1) = c(colnames(cfmatrix1[,1:7]), "uncmean", "forecast40")
    # Repeat the forecast from a spec with the fitted parameters fixed; the
    # results must match the fit-based forecasts.
    forc2 = vector(mode = "list", length = 9)
    for(i in 1:9){
        spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
            distribution.model = dist[i])
        setfixed(spec) = as.list(coef(fit[[i]]))
        forc2[[i]] = arfimaforecast(spec, data = sp500ret, n.ahead = 100)
    }
    lmean240 = sapply(forc2, FUN = function(x) as.numeric(fitted(x)[40,1]))
    cfmatrix2 = cbind(cfmatrix, umean, lmean240)
    colnames(cfmatrix2) = c(colnames(cfmatrix2[,1:7]), "uncmean", "forecast40")
    # Test Methods on object
    options(width = 120)
    zz <- file("test1c.txt", open="wt")
    sink(zz)
    cat("\nARFIMAforecast from ARFIMAfit and ARFIMAspec check:")
    cat("\nFit\n")
    print(cfmatrix1, digits = 4)
    cat("\nSpec\n")
    print(cfmatrix2, digits = 4)
    # BUGFIX: bare slotNames() inside a function is not auto-printed, so it
    # wrote nothing to the sink; wrap in print().
    print(slotNames(forc[[1]]))
    # summary
    print(show(forc[[1]]))
    sink(type="message")
    sink()
    close(zz)
    # Plot the 9 forecast paths appended to the last 50 observations.
    nforc = sapply(forc, FUN = function(x) t(as.numeric(fitted(x))))
    postscript("test1c.eps", width = 12, height = 5)
    # generate FWD dates:
    dx = as.POSIXct(tail(rownames(sp500ret),50))
    df = generatefwd(tail(dx, 1), length.out = 100+1, by = forc[[1]]@model$modeldata$period)[-1]
    dd = c(dx, df)
    clrs = rainbow(9, alpha = 1, start = 0.4, end = 0.95)
    plot(xts::xts(c(tail(sp500ret[,1], 50), nforc[,1]), dd), type = "l", ylim = c(-0.02, 0.02), col = "lightgrey",
        ylab = "", xlab = "", main = "100-ahead Unconditional Forecasts",
        minor.ticks=FALSE, auto.grid=FALSE)
    for(i in 1:9){
        tmp = c(tail(sp500ret[,1], 50), rep(NA, 100))
        tmp[51:150] = nforc[1:100,i]
        lines(xts::xts(c(rep(NA, 50), tmp[-(1:50)]),dd), col = clrs[i])
    }
    legend("topright", legend = dist, col = clrs, fill = clrs, bty = "n")
    dev.off()
    toc = Sys.time()-tic
    cat("Elapsed:", toc, "\n")
    return(toc)
}
# Rolling 1-step-ahead forecast test. Fits ARMA(1,1) models on sp500ret
# (holding out 1000 observations) under all 9 conditional distributions, rolls
# 1-ahead forecasts through the out-of-sample period, and compares forecast
# performance measures (MSE, MAD, directional accuracy) across distributions.
# Output goes to test1d.txt and plots to test1d.eps; returns elapsed time.
#
# @param cluster optional parallel cluster (unused; uniform signature).
# @return elapsed time (difftime).
rugarch.test1d = function(cluster=NULL){
    # rolling forecast
    tic = Sys.time()
    data(sp500ret)
    fit = vector(mode = "list", length = 9)
    dist = c("norm", "snorm", "std", "sstd", "ged", "sged", "nig", "ghyp", "jsu")
    for(i in 1:9){
        spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE),
            distribution.model = dist[i])
        fit[[i]] = arfimafit(spec = spec, data = sp500ret, solver = "solnp",
            out.sample = 1000, fit.control = list(scale = 1))
    }
    # Coefficient matrix (NA where a parameter is absent for a distribution).
    cfmatrix = matrix(NA, nrow = 9, ncol = 7)
    colnames(cfmatrix) = c("mu", "ar1", "ma1", "sigma", "skew", "shape", "ghlambda")
    rownames(cfmatrix) = dist
    for(i in 1:9){
        cf = coef(fit[[i]])
        cfmatrix[i, match(names(cf), colnames(cfmatrix))] = cf
    }
    # 1-ahead forecasts rolled over the 1000 out-of-sample points.
    forc = vector(mode = "list", length = 9)
    for(i in 1:9){
        forc[[i]] = arfimaforecast(fit[[i]], n.ahead = 1, n.roll = 999)
    }
    rollforc = sapply(forc, FUN = function(x) t(fitted(x)))
    # forecast performance measures:
    fpmlist = vector(mode = "list", length = 9)
    for(i in 1:9){
        fpmlist[[i]] = fpm(forc[[i]], summary = FALSE)
    }
    postscript("test1d.eps", width = 16, height = 5)
    par(mfrow = c(1,2))
    dd = as.POSIXct(tail(rownames(sp500ret), 1250))
    clrs = rainbow(9, alpha = 1, start = 0.4, end = 0.95)
    # Left panel: actual returns (grey) with the 9 rolling forecast paths.
    plot(xts::xts(tail(sp500ret[,1], 1250), dd), type = "l", ylim = c(-0.02, 0.02),
        col = "lightgrey", ylab = "", xlab = "",
        main = "Rolling 1-ahead Forecasts\nvs Actual", minor.ticks=FALSE,
        auto.grid=FALSE)
    for(i in 1:9){
        tmp = tail(sp500ret[,1], 1250)
        tmp[251:1250] = rollforc[1:1000,i]
        lines(xts::xts(c(rep(NA, 250), tmp[-(1:250)]), dd), col = clrs[i])
    }
    legend("topleft", legend = dist, col = clrs, fill = clrs, bty = "n")
    # Right panel: boxplot of absolute deviation loss per distribution.
    tmp = vector(mode = "list", length = 9)
    for(i in 1:9){
        tmp[[i]] = fpmlist[[i]][,"AE"]
        names(tmp[[i]]) = dist[i]
    }
    boxplot(tmp, col = clrs, names = dist, range = 6, notch = TRUE,
        main = "Rolling 1-ahead Forecasts\nAbsolute Deviation Loss")
    dev.off()
    # fpm comparison
    # BUGFIX: removed a dead preallocation (compm = matrix(NA, 3, 9)) that was
    # immediately overwritten by the sapply() result below.
    compm = sapply(fpmlist, FUN = function(x) c(mean(x[,"SE"]), mean(x[,"AE"]), mean(x[,"DAC"])))
    colnames(compm) = dist
    rownames(compm) = c("MSE", "MAD", "DAC")
    zz <- file("test1d.txt", open="wt")
    sink(zz)
    cat("\nRolling Forecast FPM\n")
    print(compm, digits = 4)
    cat("\nMethods Check\n")
    print(fitted(forc[[1]])[,1:10,drop=FALSE])
    print(fpm(forc[[1]], summary = TRUE))
    print(show(forc[[1]]))
    sink(type="message")
    sink()
    close(zz)
    toc = Sys.time()-tic
    cat("Elapsed:", toc, "\n")
    return(toc)
}
# Multi-spec methods test. Exercises multifit/multifilter/multiforecast on the
# first 3 Dow Jones return series under three setups: (1) a different spec per
# series, (2) the same spec replicated across series, and (3) different specs
# on replicated copies of the same series. Filter and spec-based forecasts use
# the fitted coefficients held fixed, so they must agree with the fit-based
# results. Output goes to test1e.txt; returns elapsed time.
#
# @param cluster optional parallel cluster passed through to the multi-methods.
# @return elapsed time (difftime).
rugarch.test1e = function(cluster=NULL){
    # Multi-Methods
    tic = Sys.time()
    data(dji30ret)
    Dat = dji30ret[, 1:3, drop = FALSE]
    #------------------------------------------------
    # Unequal Spec
    # Fit
    spec1 = arfimaspec(mean.model = list(armaOrder = c(2,1)))
    spec2 = arfimaspec(mean.model = list(armaOrder = c(2,2)))
    spec3 = arfimaspec(mean.model = list(armaOrder = c(1,1)),
        distribution.model = "sstd")
    speclist = as.list(c(spec1, spec2, spec3))
    mspec = multispec( speclist )
    mfit1 = multifit(multispec = mspec, data = Dat, fit.control = list(stationarity=1),
        cluster = cluster)
    # Filter: fix each spec at the fitted coefficients (coef() returns a list
    # here because the specs are unequal).
    fspec = vector(mode = "list", length = 3)
    fspec[[1]] = spec1
    fspec[[2]] = spec2
    fspec[[3]] = spec3
    for(i in 1:3){
        setfixed(fspec[[i]])<-as.list(coef(mfit1)[[i]])
    }
    mspec1 = multispec( fspec )
    mfilt1 = multifilter(multifitORspec = mspec1, data = Dat, cluster = cluster)
    # Forecast from Fit
    mforc1 = multiforecast(mfit1, n.ahead = 10, cluster = cluster)
    # Forecast from Spec
    mforc11 = multiforecast(mspec1, data = Dat, n.ahead = 10, cluster = cluster)
    #------------------------------------------------
    #------------------------------------------------
    # Equal Spec
    # Fit
    spec1 = arfimaspec(mean.model = list(armaOrder = c(1,1)))
    mspec = multispec( replicate(3, spec1) )
    mfit2 = multifit(multispec = mspec, data = Dat, cluster = cluster)
    # Filter (coef() returns a matrix here because the specs are equal).
    # BUGFIX: removed a dead preallocation (fspec = vector("list", 3)) that was
    # immediately overwritten by replicate() below.
    fspec = replicate(3, spec1)
    for(i in 1:3){
        setfixed(fspec[[i]])<-as.list(coef(mfit2)[,i])
    }
    mspec2 = multispec( fspec )
    mfilt2 = multifilter(multifitORspec = mspec2, data = Dat, cluster = cluster)
    # Forecast From Fit
    mforc2 = multiforecast(mfit2, n.ahead = 10)
    # Forecast From Spec
    mforc21 = multiforecast(mspec2, data = Dat, n.ahead = 10, cluster = cluster)
    #------------------------------------------------
    #------------------------------------------------
    # Equal Spec/Same Data (execution check only; not written to the report)
    # Fit
    spec1 = arfimaspec(mean.model = list(armaOrder = c(1,1)))
    spec2 = arfimaspec(mean.model = list(armaOrder = c(2,1)))
    spec3 = arfimaspec(mean.model = list(armaOrder = c(3,1)))
    speclist = as.list(c(spec1, spec2, spec3))
    mspec = multispec( speclist )
    mfit3 = multifit(multispec = mspec, data = cbind(Dat[,1], Dat[,1], Dat[,1]),
        cluster = cluster)
    # Forecast
    mforc3 = multiforecast(mfit3, n.ahead = 10, cluster = cluster)
    #------------------------------------------------
    zz <- file("test1e.txt", open="wt")
    sink(zz)
    cat("\nMultifit Evaluation\n")
    cat("\nUnequal Spec\n")
    print(mfit1)
    print(likelihood(mfit1))
    print(coef(mfit1))
    print(head(fitted(mfit1)))
    print(head(residuals(mfit1)))
    print(mfilt1)
    print(likelihood(mfilt1))
    print(coef(mfilt1))
    print(head(fitted(mfilt1)))
    print(head(residuals(mfilt1)))
    print(mforc1)
    print(fitted(mforc1))
    print(mforc11)
    print(fitted(mforc11))
    cat("\nEqual Spec\n")
    print(mfit2)
    print(likelihood(mfit2))
    print(coef(mfit2))
    print(head(fitted(mfit2)))
    print(head(residuals(mfit2)))
    print(mfilt2)
    print(likelihood(mfilt2))
    print(coef(mfilt2))
    print(head(fitted(mfilt2)))
    print(head(residuals(mfilt2)))
    print(mforc2)
    print(fitted(mforc2))
    print(mforc21)
    print(fitted(mforc21))
    sink(type="message")
    sink()
    close(zz)
    toc = Sys.time()-tic
    cat("Elapsed:", toc, "\n")
    return(toc)
}
# Rolling estimation/forecast test. Runs arfimaroll() on sp500ret with the
# default ARFIMA spec (re-estimating every 25 observations over a moving
# window across the last 500 points, with 1% and 5% VaR), then exercises the
# report and extractor methods into test1f.txt. Returns the elapsed time.
#
# @param cluster optional parallel cluster passed through to arfimaroll.
# @return elapsed time (difftime).
rugarch.test1f = function(cluster=NULL){
    tic = Sys.time()
    data(sp500ret)
    spec = arfimaspec()
    roll = arfimaroll(spec, data = sp500ret, n.ahead = 1,
        forecast.length = 500, refit.every = 25, refit.window = "moving",
        cluster = cluster, solver = "hybrid", fit.control = list(),
        solver.control = list(), calculate.VaR = TRUE,
        VaR.alpha = c(0.01, 0.05))
    # Write the VaR/forecast-performance reports and extractor output.
    con <- file("test1f.txt", open = "wt")
    sink(con)
    cat("\nForecast Evaluation\n")
    report(roll, "VaR")
    report(roll, "fpm")
    # Extractor methods: head/tail of the density and VaR data frames, in the
    # same order as before (density first, then VaR).
    for(w in c("density", "VaR")){
        print(head(as.data.frame(roll, which = w), 25))
        print(tail(as.data.frame(roll, which = w), 25))
    }
    # Coefficients of the first and last (20th) refit.
    print(coef(roll)[[1]])
    print(coef(roll)[[20]])
    print(head(fpm(roll, summary = FALSE)))
    sink(type = "message")
    sink()
    close(con)
    toc = Sys.time() - tic
    cat("Elapsed:", toc, "\n")
    return(toc)
}
# Simulation cross-checks. Verifies that rugarch's arfimapath/arfimasim agree
# with (a) the equivalent pure-ARMA path, (b) R's arima.sim, (c) the fracdiff
# package, and (d) a degenerate GARCH(0,0) path, for ARMA(2,2)/(2,1)/(2,0)/
# (0,2) models with Student-t innovations, plus an arfimasim run with external
# regressors and bootstrapped residuals. Results go to test1g-1.txt through
# test1g-6.txt; returns the elapsed time.
#
# @param cluster optional parallel cluster (unused; uniform signature).
# @return elapsed time (difftime).
rugarch.test1g = function(cluster=NULL){
    # simulation
    tic = Sys.time()
    require(fracdiff)
    # ARFIMA(2,d=0,1) vs the equivalent plain ARMA(2,1): same seed must give
    # the same path.
    spec1 = arfimaspec( mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(mu = 0.02, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7, arfima = 0,
        shape = 5, sigma = 0.0123))
    spec2 = arfimaspec( mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = FALSE),
        distribution.model = "std", fixed.pars = list(mu = 0.02, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7,
        shape = 5, sigma = 0.0123))
    # BUGFIX: removed a pair of dead arfimapath() calls whose results were
    # overwritten by the two calls below before ever being used.
    sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, n.start=1)
    sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, n.start=1)
    zz <- file("test1g-1.txt", open="wt")
    sink(zz)
    cat("\nARFIMA and ARMA simulation tests:\n")
    print(tail(fitted(sim1)), digits = 5)
    print(tail(fitted(sim2)), digits = 5)
    sink(type="message")
    sink()
    close(zz)
    # Now the rugarch simulation of ARFIMA/ARMA with arima.sim of R
    # Note that arima.sim simulates the residuals (i.e no mean):
    # ARMA(2,2)
    set.seed(33)
    inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
    spec1 = arfimaspec( mean.model = list(armaOrder = c(2,2), include.mean = FALSE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, arfima = 0, ma1 = -0.7,
        ma2 = 0.3, shape = 5, sigma = 0.0123))
    spec2 = arfimaspec( mean.model = list(armaOrder = c(2,2), include.mean = FALSE, arfima = FALSE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, ma1 = -0.7,
        ma2 = 0.3, shape = 5,sigma = 0.0123))
    # Notice the warning...it would be an error had we not added 2 extra zeros to the custom distribution
    # equal to the MA order since n.start >= MA order in arfima model
    sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        custom.dist = list(name = "sample",
        distfit = matrix(c(0,0,inn), ncol = 1), type = "z"))
    sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        custom.dist = list(name = "sample",
        distfit = matrix(inn, ncol = 1), type = "z"))
    # Test with a GARCH specification as well (with alpha=beta=0)
    specx = ugarchspec( mean.model = list(armaOrder = c(2,2), include.mean = FALSE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, ma1 = -0.7,
        ma2 = 0.3, arfima=0, shape = 5, omega = 0.0123^2, alpha1 = 0, beta1=0))
    simx = ugarchpath(specx, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        presigma = c(0,0), custom.dist = list(name = "sample",
        distfit = matrix(c(0,0,inn), ncol = 1), type = "z"))
    # Note that we pass the non-standardized innovations to arima.sim (i.e. multiply by sigma)
    sim3 = arima.sim(model = list(ar = c(0.6, 0.21), ma = c(-0.7, 0.3)), n = 1000,
        n.start = 4, start.innov = c(0,0,0,0), innov = inn*0.0123)
    # set fracdiff setting to n.start=0 and allow.0.nstart=TRUE
    # (fracdiff uses the opposite sign convention for the MA coefficients)
    sim4 = fracdiff.sim(n=1000, ar = c(0.6, 0.21), ma = c(0.7, -0.3), d = 0,
        innov = c(0,0,inn*0.0123), n.start = 0, backComp = TRUE, allow.0.nstart = TRUE,
        mu = 0)
    tst1 = cbind(head(fitted(sim1)), head(fitted(sim2)), head(sim3), head(sim4$series), head(fitted(simx)))
    tst2 = cbind(tail(fitted(sim1)), tail(fitted(sim2)), tail(sim3), tail(sim4$series), tail(fitted(simx)))
    colnames(tst1) = colnames(tst2) = c("ARFIMA(d = 0)", "ARMA", "arima.sim", "fracdiff", "GARCH(0,0)")
    zz <- file("test1g-2.txt", open="wt")
    sink(zz)
    cat("\nARFIMA, ARMA arima.sim simulation tests:\n")
    print(tst1, digits = 6)
    print(tst2, digits = 6)
    sink(type="message")
    sink()
    close(zz)
    # ARMA(2,1)
    set.seed(33)
    inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
    spec1 = arfimaspec( mean.model = list(armaOrder = c(2,1), include.mean = FALSE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.01, arfima = 0, ma1 = -0.7,
        shape = 5, sigma = 0.0123))
    spec2 = arfimaspec( mean.model = list(armaOrder = c(2,1), include.mean = FALSE, arfima = FALSE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.01, ma1 = -0.7,
        shape = 5,sigma = 0.0123))
    sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        custom.dist = list(name = "sample",
        distfit = matrix(c(0,inn), ncol = 1), type = "z"))
    sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        custom.dist = list(name = "sample",
        distfit = matrix(inn, ncol = 1), type = "z"))
    # Test with a GARCH specification as well (with alpha=beta=0)
    specx = ugarchspec( mean.model = list(armaOrder = c(2,1), include.mean = FALSE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.01, ma1 = -0.7,
        arfima=0, shape = 5, omega = 0.0123^2, alpha1 = 0, beta1=0))
    simx = ugarchpath(specx, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        presigma = c(0,0), custom.dist = list(name = "sample",
        distfit = matrix(c(0,inn), ncol = 1), type = "z"))
    # Note that we pass the non-standardized innovations to arima.sim (i.e. multiply by sigma)
    sim3 = arima.sim(model = list(ar = c(0.6, 0.01), ma = c(-0.7)), n = 1000,
        n.start = 3, start.innov = c(0,0,0), innov = inn*0.0123)
    tst1 = cbind(head(fitted(sim1)), head(fitted(sim2)), head(sim3), head(fitted(simx)))
    tst2 = cbind(tail(fitted(sim1)), tail(fitted(sim2)), tail(sim3), tail(fitted(simx)))
    colnames(tst1) = colnames(tst2) = c("ARFIMA(d = 0)", "ARMA", "arima.sim", "GARCH(0,0)")
    zz <- file("test1g-3.txt", open="wt")
    sink(zz)
    cat("\nARFIMA, ARMA arima.sim simulation tests:\n")
    print(tst1, digits = 6)
    print(tst2, digits = 6)
    sink(type="message")
    sink()
    close(zz)
    # Pure AR
    set.seed(33)
    inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
    # BUGFIX: dropped an extraneous ma1 entry from fixed.pars — the model is
    # AR(2) (armaOrder c(2,0)), so there is no MA parameter (cf. specx below,
    # which correctly omits it).
    spec1 = arfimaspec( mean.model = list(armaOrder = c(2,0), include.mean = FALSE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21, arfima = 0,
        shape = 5, sigma = 0.0123))
    spec2 = arfimaspec( mean.model = list(armaOrder = c(2,0), include.mean = FALSE, arfima = FALSE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21,
        shape = 5,sigma = 0.0123))
    sim1 = arfimapath(spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        custom.dist = list(name = "sample",
        distfit = matrix(inn, ncol = 1), type = "z"))
    sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        custom.dist = list(name = "sample",
        distfit = matrix(inn, ncol = 1), type = "z"))
    specx = ugarchspec( mean.model = list(armaOrder = c(2,0), include.mean = FALSE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(ar1 = 0.6, ar2 = 0.21,
        arfima=0, shape = 5, omega = 0.0123^2, alpha1 = 0, beta1=0))
    simx = ugarchpath(specx, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        presigma = c(0,0), custom.dist = list(name = "sample",
        distfit = matrix(c(inn), ncol = 1), type = "z"))
    # Note that we pass the non-standardized innovations to arima.sim (i.e. multiply by sigma)
    sim3 = arima.sim(model = list(ar = c(0.6, 0.21), ma = NULL), n = 1000,
        n.start = 2, start.innov = c(0,0), innov = inn*0.0123)
    tst1 = cbind(head(fitted(sim1)), head(fitted(sim2)), head(sim3), head(fitted(simx)))
    tst2 = cbind(tail(fitted(sim1)), tail(fitted(sim2)), tail(sim3), tail(fitted(simx)))
    colnames(tst1) = colnames(tst2) = c("ARFIMA(d = 0)", "ARMA", "arima.sim", "GARCH(0,0)")
    zz <- file("test1g-4.txt", open="wt")
    sink(zz)
    cat("\nARFIMA, ARMA arima.sim simulation tests:\n")
    print(tst1, digits = 6)
    print(tst2, digits = 6)
    sink(type="message")
    sink()
    close(zz)
    # Pure MA
    set.seed(33)
    inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
    spec1 = arfimaspec( mean.model = list(armaOrder = c(0,2), include.mean = FALSE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(ma1 = 0.6, ma2 = -0.21, arfima = 0,
        shape = 5, sigma = 0.0123))
    spec2 = arfimaspec( mean.model = list(armaOrder = c(0,2), include.mean = FALSE, arfima = FALSE),
        distribution.model = "std", fixed.pars = list(ma1 = 0.6, ma2 = -0.21,
        shape = 5,sigma = 0.0123))
    sim1 = arfimapath(spec = spec1, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        custom.dist = list(name = "sample",
        distfit = matrix(c(0,0,inn), ncol = 1), type = "z"))
    sim2 = arfimapath(spec2, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        custom.dist = list(name = "sample",
        distfit = matrix(inn, ncol = 1), type = "z"))
    specx = ugarchspec( mean.model = list(armaOrder = c(0,2), include.mean = FALSE, arfima = TRUE),
        distribution.model = "std", fixed.pars = list(ma1 = 0.6, ma2 = -0.21,
        arfima=0, shape = 5, omega = 0.0123^2, alpha1 = 0, beta1=0))
    simx = ugarchpath(specx, n.sim = 1000, m.sim = 1, rseed = 100, preresiduals = c(0,0), prereturns = c(0,0),
        presigma = c(0,0), custom.dist = list(name = "sample",
        distfit = matrix(c(0,0,inn), ncol = 1), type = "z"))
    # Note that we pass the non-standardized innovations to arima.sim (i.e. multiply by sigma)
    # (Re-seeding regenerates the identical innovation vector as above.)
    set.seed(33)
    inn = rdist("std", 1000, mu = 0, sigma = 1, lambda = 0, skew = 0, shape = 5)
    sim3 = arima.sim(model = list(ar = NULL, ma = c(0.6, -0.21)), n = 1000,
        n.start = 2, start.innov = c(0,0), innov = inn*0.0123)
    tst1 = cbind(head(fitted(sim1)), head(fitted(sim2)), head(sim3), head(fitted(simx)))
    tst2 = cbind(tail(fitted(sim1)), tail(fitted(sim2)), tail(sim3), tail(fitted(simx)))
    colnames(tst1) = colnames(tst2) = c("ARFIMA(d = 0)", "ARMA", "arima.sim", "GARCH(0,0)")
    zz <- file("test1g-5.txt", open="wt")
    sink(zz)
    cat("\nARFIMA, ARMA arima.sim simulation tests:\n")
    print(tst1, digits = 6)
    print(tst2, digits = 6)
    sink(type="message")
    sink()
    close(zz)
    # arfimasim + exogenous regressors + custom innovations:
    # simulate 10 steps ahead with bootstrapped fit residuals and compare the
    # simulated mean against the model forecast and the realized values.
    data(dji30ret)
    Dat = dji30ret[,1, drop = FALSE]
    # renamed from "T" to avoid shadowing the TRUE shorthand
    nobs = dim(Dat)[1]
    Bench = as.matrix(cbind(apply(dji30ret[,2:10], 1, "mean"), apply(dji30ret[,11:20], 1, "mean")))
    spec = arfimaspec( mean.model = list(armaOrder = c(1,1), include.mean = TRUE, arfima = FALSE,
        external.regressors = Bench), distribution.model = "std")
    fit = arfimafit(spec = spec, data = Dat, solver = "solnp", out.sample = 500)
    # lag1 Benchmark
    BenchF = Bench[(nobs-500):(nobs-500+9), , drop = FALSE]
    exsim = vector(mode = "list", length = 10000)
    for(i in 1:10000) exsim[[i]] = as.matrix(BenchF)
    # simulated residuals (bootstrap with replacement from the fit residuals)
    res = residuals(fit)
    ressim = matrix(NA, ncol = 10000, nrow = 10)
    set.seed(10000)
    for(i in 1:10000) ressim[,i] = sample(res, 10, replace = TRUE)
    sim = arfimasim(fit, n.sim = 10, m.sim = 10000, startMethod="sample",
        custom.dist = list(name = "sample", distfit = ressim, type = "res"), mexsimdata = exsim)
    forc = fitted(arfimaforecast(fit, n.ahead = 10, external.forecasts = list(mregfor = BenchF)))
    simx = fitted(sim)
    actual10 = Dat[(nobs-500+1):(nobs-500+10), 1, drop = FALSE]
    simm = apply(simx, 1 ,"mean")
    simsd = apply(simx, 1 ,"sd")
    zz <- file("test1g-6.txt", open="wt")
    sink(zz)
    print(round(cbind(actual10, forc, simm, simsd),5), digits = 4)
    sink(type="message")
    sink()
    close(zz)
    toc = Sys.time()-tic
    cat("Elapsed:", toc, "\n")
    return(toc)
}
# ARFIMA benchmark tests
# Benchmarks rugarch's ARFIMA estimation against the fracdiff package and a
# commercial implementation: three fixed-parameter models -- (2,d,1), (2,d,0),
# (0,d,2) -- plus two 50-path simulation/refit studies (paths generated by
# rugarch's arfimapath and by fracdiff.sim respectively). Results are written
# to test1h-1.txt and test1h-2.txt. Returns the elapsed time.
# `cluster`: optional parallel cluster used to refit the 50 paths concurrently.
rugarch.test1h = function(cluster=NULL){
tic = Sys.time()
# ARFIMA(2,d,1)
require(fracdiff)
truecoef1 = list(mu = 0.005, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7, arfima = 0.3, sigma = 0.0123)
# Simulate one long path under the true parameters ...
spec1 = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm",
fixed.pars = truecoef1)
sim1 = arfimapath(spec1, n.sim = 5000, n.start = 100, m.sim = 1, rseed = 101)
data1 = fitted(sim1)
#write.csv(data1[,1], file = "D:/temp1.csv")
# ... re-estimate it with rugarch (all parameters free) ...
spec1 = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
fit1 = arfimafit(spec1, data = data1)
# ... and with fracdiff on the demeaned series for comparison.
fit1.fd = fracdiff(as.numeric(data1[,1])-coef(fit1)["mu"], nar = 2, nma = 1)
# Commercial Implementation Program Fit (NLS-with imposed stationarity):
commcheck1 = c(0.00488381, 0.537045, 0.0319251, -0.721266, 0.348604, 0.0122415)
# fracdiff orders coefficients (d, ar..., ma...); the MA term is negated to
# match rugarch's sign convention, and mu is NA since the series was demeaned.
fdcheck1 = c(NA, coef(fit1.fd)[2:3], -coef(fit1.fd)[4], coef(fit1.fd)[1], fit1.fd$sigma)
chk1 = cbind(coef(fit1), commcheck1, fdcheck1, unlist(truecoef1))
colnames(chk1) = c("rugarch", "commercial", "fracdiff", "true")
# Log-likelihoods from the three implementations (commercial value hard-coded).
chk1lik = c(likelihood(fit1), 14920.4279, fit1.fd$log.likelihood)
# ARFIMA(2,d,0)
truecoef2 = list(mu = 0.005, ar1 = 0.6, ar2 = 0.01, arfima = 0.1, sigma = 0.0123)
spec2 = arfimaspec(
mean.model = list(armaOrder = c(2,0), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm",
fixed.pars = truecoef2)
sim2 = arfimapath(spec2, n.sim = 5000, n.start = 100, m.sim = 1, rseed = 102)
data2 = fitted(sim2)
#write.csv(data2[,1], file = "D:/temp2.csv")
spec2 = arfimaspec(
mean.model = list(armaOrder = c(2,0), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
fit2 = arfimafit(spec2, data = data2)
fit2.fd = fracdiff(as.numeric(data2[,1])-coef(fit2)["mu"], nar = 2, nma = 0)
# No MA terms here, so no sign flip is needed.
fdcheck2 = c(NA, coef(fit2.fd)[2:3], coef(fit2.fd)[1], fit2.fd$sigma)
commcheck2 = c( 0.00585040, 0.692693, 0.000108778,0.00466664,0.0122636)
chk2 = cbind(coef(fit2), commcheck2, fdcheck2, unlist(truecoef2))
colnames(chk2) = c("rugarch", "commercial", "fracdiff", "true")
chk2lik = c(likelihood(fit2), 14954.5702, fit2.fd$log.likelihood)
# ARFIMA(0,d,2)
truecoef3 = list(mu = 0.005, ma1 = 0.3, ma2 = 0.2, arfima = 0.1, sigma = 0.0123)
spec3 = arfimaspec(
mean.model = list(armaOrder = c(0,2), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm",
fixed.pars = truecoef3)
sim3 = arfimapath(spec3, n.sim = 5000, n.start = 100, m.sim = 1, rseed = 103)
data3 = fitted(sim3)
#write.csv(data3[,1], file = "D:/temp3.csv")
spec3 = arfimaspec(
mean.model = list(armaOrder = c(0,2), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
fit3 = arfimafit(spec3, data = data3, solver="hybrid")
fit3.fd = fracdiff(as.numeric(data3[,1])-coef(fit3)["mu"], nar = 0, nma = 2)
# Both MA coefficients flipped to rugarch's sign convention.
fdcheck3 = c(NA, -coef(fit3.fd)[2:3], coef(fit3.fd)[1], fit3.fd$sigma)
commcheck3 = c( 0.00580941, 0.320205, 0.206786, 0.0546052, 0.0120114)
chk3 = cbind(coef(fit3), commcheck3, fdcheck3, unlist(truecoef3))
colnames(chk3) = c("rugarch", "commercial", "fracdiff", "true")
chk3lik = c(likelihood(fit3), 15015.2957, fit3.fd$log.likelihood)
# ARFIMA(2,d,1) simulation (using rugarch path)
# 50 independent paths, refit by both rugarch and fracdiff; coefficient
# medians/means/sds across replications are compared against the truth.
truecoef = list(mu = 0.005, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7, arfima = 0.45, sigma = 0.0123)
spec = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm", fixed.pars = truecoef)
sim = arfimapath(spec, n.sim = 5000, n.start = 100, m.sim = 50, rseed = 1:50)
Data = fitted(sim)
spec = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
coefx = matrix(NA, ncol = 6, nrow = 50)
coefy = matrix(NA, ncol = 6, nrow = 50)
if(!is.null(cluster)){
parallel::clusterEvalQ(cluster, require(rugarch))
parallel::clusterEvalQ(cluster, require(fracdiff))
parallel::clusterExport(cluster, c("Data", "spec"), envir = environment())
sol = parallel::parLapply(cluster, as.list(1:50), fun = function(i){
fit = arfimafit(spec, data = Data[,i], solver="hybrid")
if(fit@fit$convergence == 0) coefx = coef(fit) else coefx = rep(NA, 6)
# Demean with the rugarch mu estimate when available; otherwise fall back
# to the sample mean (scale(..., scale=F) centers only).
if(fit@fit$convergence == 0){
fit = fracdiff(as.numeric(Data[,i]) - coef(fit)["mu"], nar = 2, nma = 1)
} else{
fit = fracdiff(scale(as.numeric(Data[,i]), scale=F), nar = 2, nma = 1)
}
coefy = c(NA, coef(fit)[2:3], -coef(fit)[4], coef(fit)[1], fit$sigma)
return(list(coefx = coefx, coefy = coefy))
})
# Reassemble the per-path coefficient vectors into 50 x 6 matrices.
coefx = t(sapply(sol, FUN = function(x) x$coefx))
coefy = t(sapply(sol, FUN = function(x) x$coefy))
} else{
for(i in 1:50){
fit = arfimafit(spec, data = Data[,i], solver="hybrid")
if(fit@fit$convergence == 0) coefx[i,] = coef(fit)
fit = fracdiff(scale(as.numeric(Data[,i]), scale=F), nar = 2, nma = 1)
coefy[i,] = c(NA, coef(fit)[2:3], -coef(fit)[4], coef(fit)[1], fit$sigma)
}
}
# Dump the three benchmark tables and the simulation summary to file.
zz <- file("test1h-1.txt", open="wt")
sink(zz)
cat("\nARFIMA(2,d,1)\n")
print(chk1)
print(chk1lik)
cat("\nARFIMA(2,d,0)\n")
print(chk2)
print(chk2lik)
cat("\nARFIMA(0,d,2)\n")
print(chk3)
print(chk3lik)
cat("\nARFIMA(2,d,1) mini-simulation/fit\n")
# small sample/simulation also use median:
cat("\nMedian (rugarch, fracdiff)\n")
print( data.frame(rugarch=round(apply(coefx, 2, "median"),5), fracdiff = round(apply(coefy, 2, "median"),5), true=unlist(truecoef) ) )
cat("\nMean (rugarch, fracdiff)\n")
print( data.frame(rugarch=round(apply(coefx, 2, "mean"),5), fracdiff = round(apply(coefy, 2, "mean"),5), true=unlist(truecoef) ) )
print( data.frame(rugarch.sd =round(apply(coefx, 2, "sd"),5), fracdiff.sd = round(apply(coefy, 2, "sd"),5) ) )
sink(type="message")
sink()
close(zz)
# ARFIMA(2,d,1) simulation (using fracdiff path)
# Same study, but paths generated by fracdiff.sim -- note ma = c(0.7) here
# versus ma1 = -0.7 in truecoef, matching the sign flips applied above.
truecoef = list(mu = 0.005, ar1 = 0.6, ar2 = 0.01, ma1 = -0.7, arfima = 0.45, sigma = 0.0123)
Data = matrix(NA, ncol = 50, nrow = 5000)
for(i in 1:50){
set.seed(i)
sim = fracdiff.sim(n=5000, ar = c(0.6, 0.01), ma = c(0.7), d = 0.45,
rand.gen = rnorm, n.start = 100, backComp = TRUE, sd = 0.0123, mu = 0.005)
Data[,i] = sim$series
}
spec = arfimaspec(
mean.model = list(armaOrder = c(2,1), include.mean = TRUE, arfima = TRUE),
distribution.model = "norm")
coefx = matrix(NA, ncol = 6, nrow = 50)
coefy = matrix(NA, ncol = 6, nrow = 50)
if(!is.null(cluster)){
parallel::clusterEvalQ(cluster, require(rugarch))
parallel::clusterEvalQ(cluster, require(fracdiff))
parallel::clusterExport(cluster, c("Data", "spec"), envir = environment())
sol = parallel::parLapply(cluster, as.list(1:50), fun = function(i){
fit = arfimafit(spec, data = Data[,i], solver="hybrid")
if(fit@fit$convergence == 0) coefx = coef(fit) else coefx = rep(NA, 6)
if(fit@fit$convergence == 0){
fit = fracdiff(as.numeric(Data[,i]) - coef(fit)["mu"], nar = 2, nma = 1)
} else{
fit = fracdiff(scale(as.numeric(Data[,i]), scale=F), nar = 2, nma = 1)
}
coefy = c(NA, coef(fit)[2:3], -coef(fit)[4], coef(fit)[1], fit$sigma)
return(list(coefx = coefx, coefy = coefy))
})
coefx = t(sapply(sol, FUN = function(x) x$coefx))
coefy = t(sapply(sol, FUN = function(x) x$coefy))
} else{
for(i in 1:50){
fit = arfimafit(spec, data = Data[,i], solver="hybrid")
if(fit@fit$convergence == 0) coefx[i,] = coef(fit)
fit = fracdiff(scale(as.numeric(Data[,i]), scale=F), nar = 2, nma = 1)
coefy[i,] = c(NA, coef(fit)[2:3], -coef(fit)[4], coef(fit)[1], fit$sigma)
}
}
zz <- file("test1h-2.txt", open="wt")
sink(zz)
cat("\nARFIMA(2,d,1) mini-simulation/fit2 (simulation from fracdiff.sim)\n")
# small sample/simulation also use median:
cat("\nMedian (rugarch, fracdiff)\n")
print( data.frame(rugarch=round(apply(coefx, 2, "median"),5), fracdiff = round(apply(coefy, 2, "median"),5), true=unlist(truecoef) ) )
cat("\nMean (rugarch, fracdiff)\n")
print( data.frame(rugarch=round(apply(coefx, 2, "mean"),5), fracdiff = round(apply(coefy, 2, "mean"),5), true=unlist(truecoef) ) )
print( data.frame(rugarch.sd =round(apply(coefx, 2, "sd"),5), fracdiff.sd = round(apply(coefy, 2, "sd"),5) ) )
sink(type="message")
sink()
close(zz)
toc = Sys.time()-tic
cat("Elapsed:", toc, "\n")
return(toc)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base.functions.R
\name{km.cal.tab}
\alias{km.cal.tab}
\title{IPD Calculation}
\usage{
km.cal.tab(
t.points,
s.points,
t.risk.T,
n.risk.T,
lower.T,
upper.T,
t.event = "NA",
gr.number = "group"
)
}
\arguments{
\item{t.points}{vector of time-to-event points; they represent the x-axis values marked from the KM plot using DigitizeIt, and must be greater than 0.}
\item{s.points}{vector of survival rate points; they represent the y-axis values marked from the KM plot using DigitizeIt, and range from 0 to 1.}
\item{t.risk.T}{vector of time points in the number at risk table from the original KM plot, which starts from zero.}
\item{n.risk.T}{vector of number at risk at each time point in the number at risk table from the original KM plot, which has the same length as t.risk.T.}
\item{lower.T}{numeric vector; number of data points at start between time intervals in the number at risk table}
\item{upper.T}{numeric vector; number of data points at end between time intervals in the number at risk table}
\item{t.event}{number of events}
\item{gr.number}{name for the group}
\item{n.t.T}{number of clicks in the group}
}
\value{
data frame with time to event, censoring, and group name information
}
\description{
This function calculates all the individual patient data (IPD) based on the input data
}
\keyword{internal}
| /man/km.cal.tab.Rd | no_license | vandy10s/extractKM | R | false | true | 1,393 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base.functions.R
\name{km.cal.tab}
\alias{km.cal.tab}
\title{IPD Calculation}
\usage{
km.cal.tab(
t.points,
s.points,
t.risk.T,
n.risk.T,
lower.T,
upper.T,
t.event = "NA",
gr.number = "group"
)
}
\arguments{
\item{t.points}{vector of time-to-event points; they represent the x-axis values marked from the KM plot using DigitizeIt, and must be greater than 0.}
\item{s.points}{vector of survival rate points; they represent the y-axis values marked from the KM plot using DigitizeIt, and range from 0 to 1.}
\item{t.risk.T}{vector of time points in the number at risk table from the original KM plot, which starts from zero.}
\item{n.risk.T}{vector of number at risk at each time point in the number at risk table from the original KM plot, which has the same length as t.risk.T.}
\item{lower.T}{numeric vector; number of data points at start between time intervals in the number at risk table}
\item{upper.T}{numeric vector; number of data points at end between time intervals in the number at risk table}
\item{t.event}{number of events}
\item{gr.number}{name for the group}
\item{n.t.T}{number of clicks in the group}
}
\value{
data frame with time to event, censoring, and group name information
}
\description{
This function calculates all the individual patient data (IPD) based on the input data
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeVennDiagram.R
\name{makeVennDiagram}
\alias{makeVennDiagram}
\title{Make Venn Diagram from a list of peaks}
\usage{
makeVennDiagram(
Peaks,
NameOfPeaks,
maxgap = -1L,
minoverlap = 0L,
totalTest,
by = c("region", "feature", "base"),
ignore.strand = TRUE,
connectedPeaks = c("min", "merge", "keepAll", "keepFirstListConsistent"),
method = c("hyperG", "permutation"),
TxDb,
plot = TRUE,
...
)
}
\arguments{
\item{Peaks}{A list of peaks in \link[GenomicRanges:GRanges-class]{GRanges}
format: See example below.}
\item{NameOfPeaks}{Character vector to specify the name of Peaks,
e.g., c("TF1", "TF2"). This will be used as label in the Venn Diagram.}
\item{maxgap, minoverlap}{Used in the internal call to
\code{findOverlaps()} to detect overlaps.
See \code{?\link[IRanges:findOverlaps-methods]{findOverlaps}}
in the \pkg{IRanges} package for a description of these arguments.}
\item{totalTest}{Numeric value to specify the total number of tests
performed to obtain the list of peaks. It should be much larger than
the number of peaks in the largest peak set.}
\item{by}{"region", "feature" or "base", default = "region".
"feature" means using feature field in the GRanges for calculating overlap,
"region" means using chromosome range for calculating overlap,
and "base" means calculating overlap in nucleotide level.}
\item{ignore.strand}{Logical: when set to TRUE, the strand information is
ignored in the overlap calculations.}
\item{connectedPeaks}{If multiple peaks involved in overlapping in
several groups, set it to "merge" will count it as only 1,
while set it to "min" will count it as the minimal involved peaks in
any connected peak group. "keepAll" will show all the original counts
for each list while the final counts will be same as "min".
"keepFirstListConsistent" will keep the counts consistent with first list.}
\item{method}{method to be used for p value calculation.
hyperG means hypergeometric test and permutation means \link{peakPermTest}.}
\item{TxDb}{An object of \link[GenomicFeatures:TxDb-class]{TxDb}.}
\item{plot}{logical. If TRUE (default), a venn diagram is plotted.}
\item{\dots}{Additional arguments to be passed to
\link[VennDiagram:venn.diagram]{venn.diagram}.}
}
\value{
A p.value is calculated by
hypergeometric test or permutation test to determine whether the overlaps of
peaks or features are significant.
}
\description{
Make Venn Diagram from two or more peak ranges,
Also calculate p-value to determine whether those peaks
overlap significantly.
}
\details{
For customized graph options,
please see venn.diagram in VennDiagram package.
}
\examples{
if (interactive()){
peaks1 <- GRanges(seqnames=c("1", "2", "3"),
IRanges(start=c(967654, 2010897, 2496704),
end=c(967754, 2010997, 2496804),
names=c("Site1", "Site2", "Site3")),
strand="+",
feature=c("a","b","f"))
peaks2 = GRanges(seqnames=c("1", "2", "3", "1", "2"),
IRanges(start = c(967659, 2010898,2496700,
3075866,3123260),
end = c(967869, 2011108, 2496920,
3076166, 3123470),
names = c("t1", "t2", "t3", "t4", "t5")),
strand = c("+", "+", "-", "-", "+"),
feature=c("a","b","c","d","a"))
makeVennDiagram(list(peaks1, peaks2), NameOfPeaks=c("TF1", "TF2"),
totalTest=100,scaled=FALSE, euler.d=FALSE,
fill=c("#009E73", "#F0E442"), # circle fill color
col=c("#D55E00", "#0072B2"), #circle border color
cat.col=c("#D55E00", "#0072B2"))
makeVennDiagram(list(peaks1, peaks2), NameOfPeaks=c("TF1", "TF2"),
totalTest=100,
fill=c("#009E73", "#F0E442"), # circle fill color
col=c("#D55E00", "#0072B2"), #circle border color
cat.col=c("#D55E00", "#0072B2"))
###### 4-way diagram using annotated feature instead of chromosome ranges
makeVennDiagram(list(peaks1, peaks2, peaks1, peaks2),
NameOfPeaks=c("TF1", "TF2","TF3", "TF4"),
totalTest=100, by="feature",
main = "Venn Diagram for 4 peak lists",
fill=c(1,2,3,4))
}
}
\seealso{
\link{findOverlapsOfPeaks},
\link[VennDiagram:venn.diagram]{venn.diagram}, \link{peakPermTest}
}
\author{
Lihua Julie Zhu, Jianhong Ou
}
\keyword{graph}
| /man/makeVennDiagram.Rd | no_license | jianhong/ChIPpeakAnno | R | false | true | 4,579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeVennDiagram.R
\name{makeVennDiagram}
\alias{makeVennDiagram}
\title{Make Venn Diagram from a list of peaks}
\usage{
makeVennDiagram(
Peaks,
NameOfPeaks,
maxgap = -1L,
minoverlap = 0L,
totalTest,
by = c("region", "feature", "base"),
ignore.strand = TRUE,
connectedPeaks = c("min", "merge", "keepAll", "keepFirstListConsistent"),
method = c("hyperG", "permutation"),
TxDb,
plot = TRUE,
...
)
}
\arguments{
\item{Peaks}{A list of peaks in \link[GenomicRanges:GRanges-class]{GRanges}
format: See example below.}
\item{NameOfPeaks}{Character vector to specify the name of Peaks,
e.g., c("TF1", "TF2"). This will be used as label in the Venn Diagram.}
\item{maxgap, minoverlap}{Used in the internal call to
\code{findOverlaps()} to detect overlaps.
See \code{?\link[IRanges:findOverlaps-methods]{findOverlaps}}
in the \pkg{IRanges} package for a description of these arguments.}
\item{totalTest}{Numeric value to specify the total number of tests
performed to obtain the list of peaks. It should be much larger than
the number of peaks in the largest peak set.}
\item{by}{"region", "feature" or "base", default = "region".
"feature" means using feature field in the GRanges for calculating overlap,
"region" means using chromosome range for calculating overlap,
and "base" means calculating overlap in nucleotide level.}
\item{ignore.strand}{Logical: when set to TRUE, the strand information is
ignored in the overlap calculations.}
\item{connectedPeaks}{If multiple peaks involved in overlapping in
several groups, set it to "merge" will count it as only 1,
while set it to "min" will count it as the minimal involved peaks in
any connected peak group. "keepAll" will show all the original counts
for each list while the final counts will be same as "min".
"keepFirstListConsistent" will keep the counts consistent with first list.}
\item{method}{method to be used for p value calculation.
hyperG means hypergeometric test and permutation means \link{peakPermTest}.}
\item{TxDb}{An object of \link[GenomicFeatures:TxDb-class]{TxDb}.}
\item{plot}{logical. If TRUE (default), a venn diagram is plotted.}
\item{\dots}{Additional arguments to be passed to
\link[VennDiagram:venn.diagram]{venn.diagram}.}
}
\value{
A p.value is calculated by
hypergeometric test or permutation test to determine whether the overlaps of
peaks or features are significant.
}
\description{
Make Venn Diagram from two or more peak ranges,
Also calculate p-value to determine whether those peaks
overlap significantly.
}
\details{
For customized graph options,
please see venn.diagram in VennDiagram package.
}
\examples{
if (interactive()){
peaks1 <- GRanges(seqnames=c("1", "2", "3"),
IRanges(start=c(967654, 2010897, 2496704),
end=c(967754, 2010997, 2496804),
names=c("Site1", "Site2", "Site3")),
strand="+",
feature=c("a","b","f"))
peaks2 = GRanges(seqnames=c("1", "2", "3", "1", "2"),
IRanges(start = c(967659, 2010898,2496700,
3075866,3123260),
end = c(967869, 2011108, 2496920,
3076166, 3123470),
names = c("t1", "t2", "t3", "t4", "t5")),
strand = c("+", "+", "-", "-", "+"),
feature=c("a","b","c","d","a"))
makeVennDiagram(list(peaks1, peaks2), NameOfPeaks=c("TF1", "TF2"),
totalTest=100,scaled=FALSE, euler.d=FALSE,
fill=c("#009E73", "#F0E442"), # circle fill color
col=c("#D55E00", "#0072B2"), #circle border color
cat.col=c("#D55E00", "#0072B2"))
makeVennDiagram(list(peaks1, peaks2), NameOfPeaks=c("TF1", "TF2"),
totalTest=100,
fill=c("#009E73", "#F0E442"), # circle fill color
col=c("#D55E00", "#0072B2"), #circle border color
cat.col=c("#D55E00", "#0072B2"))
###### 4-way diagram using annotated feature instead of chromosome ranges
makeVennDiagram(list(peaks1, peaks2, peaks1, peaks2),
NameOfPeaks=c("TF1", "TF2","TF3", "TF4"),
totalTest=100, by="feature",
main = "Venn Diagram for 4 peak lists",
fill=c(1,2,3,4))
}
}
\seealso{
\link{findOverlapsOfPeaks},
\link[VennDiagram:venn.diagram]{venn.diagram}, \link{peakPermTest}
}
\author{
Lihua Julie Zhu, Jianhong Ou
}
\keyword{graph}
|
#' Package-level environment used to share fit results (paramVsQ2, u,
#' wavefunction profiles, ...) between the functions in this file.
#' @export
heraEnv <- new.env()
# The intercepts currently being obtained; used as the default `js`
# in loadHERA's two-term ansatz F2 ~ p0 x^(1-j1) + p1 x^(1-j2).
#' @export
jn <- c(1.4, 1.096, 0.55)
# Corresponding exponents e = 1 - j.
#' @export
e0 <- 1 - jn
# Placeholder for the distinct Q2 bins; populated once the data is loaded.
#' @export
Q2s <- NULL
#' Load the HERA F2 structure-function data shipped with the package.
#'
#' Reads the combined HERA nc e+p table, keeps the small-x points inside the
#' requested (x, F2, Q2) window and, for every Q2 bin with more than two
#' points, fits log(F2) ~ log(x) by weighted least squares to extract the
#' effective intercept e(Q2) in F2 ~ f(Q2) * x^(-e(Q2)).
#'
#' @param f2 Unused; retained for backward compatibility with existing callers.
#' @param maxX Keep only data points with x < maxX.
#' @param maxF2 Keep only data points with F2 < maxF2 (drops a few outliers).
#' @param maxQ2,minQ2 Inclusive Q2 window in GeV^2.
#' @return A list with the filtered F2, Q2, x and err vectors, the effective
#'   intercept table `eff` (Q2, ep, epErr, minX, maxX), the per-bin fit
#'   weights (`weights`) and x values (`xs`), and the Q2 bins (`Q2s`).
#' @export
loadData.F2 <- function(f2, maxX = 0.01, maxF2 = 5, maxQ2 = 1110, minQ2 = 0.1) {
  flog.debug(paste('Loading HERA data with maxX', maxX, ' and ', minQ2, ' <= Q2 <=', maxQ2))
  # Read the HERA nc e+p data bundled with the package.
  nceppPath <- system.file('extdata', 'd09-158.nce+p.txt', package = 'HQCDP')
  flog.debug(paste('[HERA] Loading DIS HERA data from ', nceppPath))
  ncepp <- read.table(nceppPath, header = TRUE)
  # Drop the high-x region together with a few points with anomalous F2.
  data <- ncepp[ncepp$x < maxX & ncepp$F2 < maxF2 & ncepp$Q2 <= maxQ2 & ncepp$Q2 >= minQ2, ]
  flog.debug(paste('[HERA] Q2 range [', min(data$Q2), ',', max(data$Q2),
                   '], number of data points', length(data$Q2)))
  f2x <- data[, c('F2', 'Q2', 'x', 's_r', 'tot')]
  # All distinct Q2 bins surviving the cuts.
  Q2s <- unique(data[, c("Q2")])
  Q2L <- length(Q2s)
  # Preallocate the per-bin containers (growing lists in a loop copies them).
  W <- vector("list", Q2L)
  X <- vector("list", Q2L)
  eff <- data.frame(Q2 = numeric(Q2L), ep = numeric(Q2L), epErr = numeric(Q2L),
                    minX = numeric(Q2L), maxX = numeric(Q2L))
  # seq_len() is safe when Q2L == 0, unlike 1:Q2L which would yield c(1, 0).
  for (i in seq_len(Q2L)) {
    Q2 <- Q2s[i]
    # Extract the (x, F2) points and their errors for this Q2 bin.
    f2xFit <- data[data$Q2 == Q2, ][, c("x", "F2")]
    s_r <- data[data$Q2 == Q2, ][, c("s_r")]
    tot <- data[data$Q2 == Q2, ][, c("tot")]
    err <- s_r * tot / 100   # 'tot' is the percentage error on s_r
    w <- 1 / (err^2)         # inverse-variance weights for the fit
    # Bins with fewer than 3 points cannot support a two-parameter fit.
    if (length(f2xFit$x) > 2) {
      # Power-law fit: log(F2) = log(f) - ep * log(x).
      fit <- lm(log(F2) ~ log(x), data = f2xFit, weights = w)
      s <- summary(fit)$coefficients
      eff$Q2[i] <- Q2
      eff$ep[i] <- -s['log(x)', 'Estimate']
      # Quote 3 sigma as the uncertainty on the effective intercept.
      eff$epErr[i] <- 3 * s['log(x)', 'Std. Error']
      eff$minX[i] <- min(f2xFit$x)
      eff$maxX[i] <- max(f2xFit$x)
    }
    W[[i]] <- w
    X[[i]] <- f2xFit$x
  }
  # Rows still at Q2 == 0 belong to bins that were too small to fit.
  eff <- eff[eff$Q2 != 0, ]
  list(F2 = f2x$F2, Q2 = f2x$Q2, x = f2x$x, err = f2x$s_r * f2x$tot / 100,
       eff = eff, weights = W, xs = X, Q2s = Q2s)
}
#' Add a column whose value cycles through `col`, one entry per Q2 bin.
#'
#' Every distinct value of df$Q2 (in order of first appearance) is assigned
#' the next entry of `col`, recycled as needed; rows sharing a Q2 share the
#' assigned value. Receives a data frame with the structure of HERA data and
#' returns a data frame.
#'
#' @param df Data frame containing (at least) a `Q2` column.
#' @param colName Name of the column to add.
#' @param col Values to cycle through (default: three plot colors).
#' @return `df` with the extra column appended.
#' @export
addAlternatingColToDataByQ2 <- function(df, colName = 'color', col = c('blue', 'red', 'green')) {
  # One entry of `col` per distinct Q2 value, recycling `col` as needed.
  Q2s <- unique(df$Q2)
  cl <- rep_len(col, length(Q2s))
  # match() maps every row's Q2 back to its bin index -- O(n) instead of the
  # previous per-row matrix scan, and without numeric -> character coercion.
  df[[colName]] <- cl[match(df$Q2, Q2s)]
  df
}
#' Plot the kinematic coverage (1/x vs Q2) of the HERA points: points passing
#' the x < maxX cut are drawn as filled dots, those above it as open dots.
#' The thick horizontal line marks 1/x = 100 (x = 0.01, the default cut).
#'
#' NOTE(review): unlike loadData.F2, this reads "d09-158.nce+p.txt" from the
#' current working directory instead of via system.file() -- confirm intended.
#' @export
plotHERARangeXvsQ <- function(maxX = 0.01, maxF2 = 5, maxQ2 = 1110, minQ2 = 0.1) {
ncepp <- read.table("d09-158.nce+p.txt", header = TRUE)
# Points retained by the small-x cut vs. those excluded by it.
dataAbove <- ncepp[ncepp$x < maxX & ncepp$F2 < maxF2 & ncepp$Q2 <= maxQ2 & ncepp$Q2 >= minQ2,]
dataBelow <- ncepp[ncepp$x > maxX & ncepp$F2 < maxF2 & ncepp$Q2 <= maxQ2 & ncepp$Q2 >= minQ2,]
cat('data below x <', maxX, length(dataAbove$x), ', total points ', length(ncepp$x),'\n')
# log-log scatter of 1/x against Q2 for the retained points (filled dots).
plot(dataAbove$Q2, 1/dataAbove$x, log = 'xy', type = 'p', pch = 16, cex = 0.5,
xlab = expression(Q^2), ylab = expression(1/x), ylim = c(1, 1e6))
abline(h = c(1, 1e2, 1e4, 1e6), v = c(0.5, 5, 50, 500), col = "lightgray", lty = 'dashed' )
# Emphasize 1/x = 100, the default small-x boundary.
abline(h = c(1e2), col = "black", lwd = 2 )
lines(dataBelow$Q2, 1/dataBelow$x, type = 'p', cex = 0.5)
}
#' Fit the HERA F2 data with a two-term Regge ansatz, bin by bin in Q2.
#'
#' For each Q2 bin (the first is skipped -- see note below) fits
#'   F2(x) ~ p0(Q2) x^(1 - js[1]) + p1(Q2) x^(1 - js[2])
#' by weighted nonlinear least squares (nlsLM, from minpack.lm), accumulates
#' the residual sum of squares, stores the results and derived wavefunction
#' profiles in `heraEnv`, and optionally plots the data and fits.
#'
#' @param useIHQCD If TRUE, spline the warp factor from the IHQCD background
#'   globals; otherwise use the AdS form A(z) = -log z.
#' @param js Intercepts to use; defaults to the file-level `jn`.
#' @param plotF2 If TRUE, draw the data points and the per-bin fit curves.
#' @return list(z, paramVsQ2, data, rss).
#' @export
loadHERA <- function(useIHQCD = TRUE, js = NULL, plotF2 = TRUE) {
A <- function(z) -log(z)
# NOTE(review): `A` was just rebound to the local function above, so
# splinefun(z, A) receives the *function object* as the y values, not the
# IHQCD A(z) samples -- this looks like a latent bug; confirm the intent.
if(useIHQCD)
A <- splinefun(z, A)
if(is.null(js))
js = jn
# read the HERA nce+p data (from the working directory, cf. loadData.F2
# which reads the same file via system.file)
ncepp <- read.table("d09-158.nce+p.txt", header = TRUE)
# remove all the high x together with some points with "weird" F2
data <- ncepp[ncepp$x < 0.01 & ncepp$F2 < 5 & ncepp$Q2 > 0.10 & ncepp$Q2 < 2200,]
# this list contains all the different Q2 entries
Q2s <- unique(data[, c("Q2")])
f2x <- data[,c("x","F2")]
if(plotF2) {
# initialize the plot
plot(log(f2x$x, 10), f2x$F2, type="n",
main=expression("F"[2]),
xlab = expression("log"[10]*"x"), ylab = expression("F"[2]),
xlim = c(-6,-2), ylim = c(0,1.5))
}
# first let's create a bunch of colors to differentiate the graphs
cl <- rainbow(length(Q2s))
# Accumulator for the per-bin fit results (one row per successful fit).
paramVsQ2 <- data.frame( "Q2" = numeric(0),
"p0" = numeric(0),
"p1" = numeric(0),
# "p2" = numeric(0),
"z" = numeric(0),
stringsAsFactors=FALSE)
rss <- 0
# NOTE(review): the loop starts at i = 2, silently skipping the first Q2 bin.
for(i in 2:(length(Q2s)))
{
# now we need to extract the columns that we are interested for a given value of Q2
f2x <- data[data$Q2 == Q2s[i],][,c("x","F2")]
s_r <- data[data$Q2 == Q2s[i],][,c("s_r")]
tot <- data[data$Q2 == Q2s[i],][,c("tot")]
err <- s_r * tot / 100
w <- 1 / (err^2)
# skip those data which are too small
if(length(f2x$F2) < 3)
next
# now let's try to fit it (nlsLM is minpack.lm's Levenberg-Marquardt nls)
tryCatch({
fit <- nlsLM( F2 ~ p0 * x^(1 - js[1]) + p1 * x^(1 - js[2]),# + p2 * x^(1 - js[3]),
data = f2x,
weights = w,
start = list(p0 = 1, p1 = 1))#, p2 = 1))
# sum the residuals to have a control of the quality of the fit
rss <- rss + sum(residuals(fit)^2)
# get the parameters for the given value of Q2
# to find z we need to invert Q = exp(A(z))
qvsz <- function(z) { return(exp(A(z)) - sqrt(Q2s[i])) }
zSol = uniroot(qvsz, c(0, 7), tol = 1e-9)
# print(paste(zSol$root, " in AdS should be ", 1/sqrt(Q2s[i])))
# NOTE(review): union() de-duplicates, so if any of Q2, the fitted
# parameters and the root coincide this row will be too short for the
# data-frame assignment below -- c() would be the safe combiner.
row = union(union(Q2s[i],fit$m$getAllPars()), zSol$root)
paramVsQ2[nrow(paramVsQ2) + 1, ] <- row
if(plotF2) {
# Draw the fit on the plot by getting the prediction from the fit at 200 x-coordinates across the range of xdata
fitPlot = data.frame(x = seq(min(f2x$x), max(f2x$x), len = 200))
lines(log(fitPlot$x, 10), predict(fit, newdata = fitPlot), col = cl[i])
# plot the dots
lines(log(f2x$x, 10), f2x$F2, type = "p", col= cl[i])
}
},
error = function(e){
print(paste("Unable to fit for Q2=", Q2s[i], " data ", f2x))
print(paste("-> ERROR :",conditionMessage(e), "\n"))
})
}
# now make these available through the file environment
assign("paramVsQ2", paramVsQ2, envir = heraEnv)
assign("js", js, envir = heraEnv)
assign("A", A, envir = heraEnv)
assign("rss", rss, envir = heraEnv)
cat('rss for ', js, '=', rss, '\n')
assign("data", data, envir = heraEnv)
z <- paramVsQ2$z # beware this is not the z of IHQCD, this is the z for each Q2, so is a different list.
# NOTE(review): As/lambda are globals sampled on the IHQCD z grid, while the
# local z is the per-Q2 root list (see the comment above) -- their lengths
# may disagree; confirm before relying on phi0z/phi1z.
Asfun <- splinefun(z, As)
lambdafun <- splinefun(z, lambda)
ff <- lapply(js, function(J) z^(-2 * J) * exp((-J + 0.5) * Asfun(z)) * lambdafun(z))
# reference: F2 ~ Q2^J P13 exp((-J + 0.5) * As) exp(phi)
# for phi = cte ~ z^(-2J) z^(J - 0.5) ~ z^(-J - 0.5)
# P13 ~ delta(z - 1/Q)
assign("phi0z", paramVsQ2$p0 / ff[[1]], envir = heraEnv)
assign("phi1z", paramVsQ2$p1 / ff[[2]], envir = heraEnv)
#assign("phi2z", paramVsQ2$p2 / ff[[3]], envir = heraEnv)
#computeU(A)
return(list( z = paramVsQ2$z, paramVsQ2 = paramVsQ2, data = data, rss = rss))
}
#' Grid-search the two leading intercepts (j0, j1) against the HERA F2 data.
#'
#' Refits the data via loadHERA() on an nX x nY grid of (j0, j1) values,
#' draws a contour map of the residual sum of squares and returns (and marks
#' on the plot) the best grid point found.
#'
#' @param nX Number of j0 grid points in [0.9, 1.2].
#' @param nY Number of j1 grid points in [1.21, 1.6].
#' @return A list with components `j0`, `j1` and `rss` at the grid minimum.
#' @export
showBestJs <- function(nX = 10, nY = 10) {
  j0s <- seq(0.9, 1.2, len = nX)
  j1s <- seq(1.21, 1.6, len = nY)
  minRss <- list(j0 = 0, j1 = 0, rss = 1e10)
  rss <- matrix(nrow = nX, ncol = nY)
  # seq_along() is safe for empty grids (1:length(x) would iterate c(1, 0)).
  for (i in seq_along(j0s)) {
    for (j in seq_along(j1s)) {
      hera <- loadHERA(js = c(j0s[i], j1s[j]), plotF2 = FALSE)
      rss[i, j] <- hera$rss
      # Track the running minimum and its grid location.
      if (hera$rss < minRss$rss) {
        minRss$rss <- hera$rss
        minRss$j0 <- j0s[i]
        minRss$j1 <- j1s[j]
      }
    }
  }
  plotData <- list(x = j0s, y = j1s, z = rss)
  contour(plotData, levels = c(0.03, 0.04, 0.05, 0.06, 0.07, 0.1, 0.2, 0.3, 1, 3),
          xlab = expression('j'[0]),
          ylab = expression('j'[1]),
          main = 'Residues squared sum')
  cat('Minimum rss =', minRss$rss, ' found for j0 =', minRss$j0, ' j1 =', minRss$j1, '\n')
  points(x = minRss$j0, y = minRss$j1)
  minRss
}
#' Map the fitted profiles from z to the coordinate
#' u(z) = -Integral_{10}^{z} e^{A(t)} dt and spline them as functions of u.
#' The splines (phi1u, phi2u) and the u values are stored in `heraEnv`.
#'
#' NOTE(review): this function references `j0` and `paramVsQ2$p2`, but
#' neither exists any more in this file (the intercepts live in `jn` /
#' heraEnv key "js", and the p2 column is commented out in loadHERA) --
#' it looks stale and will fail as-is; confirm before use.
#'
#' @param A Warp factor A(z); defaults to the AdS form -log(z).
#' @export
computeU <- function(A = function(z) {return(-log(z / 1))}) {
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
z = paramVsQ2$z
# u is minus the integral of e^A, measured from the reference point z = 10.
integrand <- function(z) { return(-exp(A(z))) }
u <- lapply(paramVsQ2$z, function(z) { return (integrate(integrand, 10, z)$value) })
# Weight each profile by exp(-A(z)(0.5 + j)/2) before splining in u
# (presumably the measure factor -- confirm against the derivation).
phi1u = splinefun(u, exp(-0.5 * A(z) * (0.5 + j0[1])) * paramVsQ2$p1)
phi2u = splinefun(u, exp(-0.5 * A(z) * (0.5 + j0[2])) * paramVsQ2$p2)
# now make these available through the file environment
assign("phi1u", phi1u, envir = heraEnv)
assign("phi2u", phi2u, envir = heraEnv)
assign("u", u, envir = heraEnv)
}
#' Reconstruct the potential V(u) from each splined wavefunction via
#' V = phi''(u)/phi(u) - j, plotting it together with a rescaled copy of the
#' wavefunction (blue line) for visual comparison.
#'
#' NOTE(review): requires computeU() to have populated heraEnv (phi1u,
#' phi2u, u) and references the undefined `j0` vector -- same staleness
#' concern as computeU; confirm before use.
#' @export
reconstructVu <- function() {
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
z = paramVsQ2$z
u = get("u", envir = heraEnv)
A = get("A", envir = heraEnv)
phi1 = get("phi1u", envir = heraEnv)
phi2 = get("phi2u", envir = heraEnv)
# first reconstruction: V = phi1''/phi1 - j
Vu1 = lapply(u, function(u) {
V = (phi1(u, deriv = 2) / phi1(u)) - j0[1]
return(V)
})
plot.new()
plot(u, Vu1, type="p", main="Reconstructing with first wavefunction (in blue)", xlab = "u", ylab = "V(u)", ylim = c(-500, 500))
lines(u, Vu1, type="o")
# Rescaled wavefunction overlaid for reference (factor 3000 is cosmetic).
lines(u, 3000 * phi1(u), type="l", col = "blue")
# second reconstruction: V = phi2''/phi2 - j
Vu2 = lapply(u, function(u) {
V = (phi2(u, deriv = 2) / phi2(u)) - j0[2]
return(V)
})
plot.new()
plot(u, Vu2, type="p", main="Reconstructing with second wavefunction (in blue)", xlab = "u", ylab = "V(u)")
lines(u, Vu2, type="o")
lines(u, 1000 * phi2(u), type="l", col = "blue")
}
#' Scatter of the coordinate u against z, with the AdS curve u = -log(z/10)
#' overlaid in blue. Reads `paramVsQ2` and `u` from `heraEnv`.
#' @export
plotUvsZ <- function() {
  pars <- get("paramVsQ2", envir = heraEnv)
  uVals <- get("u", envir = heraEnv)
  zVals <- pars$z
  plot.new()
  plot(zVals, uVals, type = "p", main = expression(paste("u vs. z")),
       xlab = "z", ylab = "u")
  # AdS reference curve.
  lines(zVals, -log(zVals / 10), type = "l", col = "blue")
  legend("topright", expression(paste("Line represents AdS")),
         col = c("black", "blue"))
}
#' Scatter of the fitted z values against Q, with the AdS relation z = 1/Q
#' overlaid in blue. Reads `paramVsQ2` from `heraEnv`.
#' @export
plotZvsQ <- function() {
  pars <- get("paramVsQ2", envir = heraEnv)
  qVals <- sqrt(pars$Q2)
  plot.new()
  plot(qVals, pars$z, type = "p", main = expression(paste("z vs. Q")),
       xlab = "Q", ylab = "z")
  # AdS prediction: z = 1/Q.
  lines(qVals, 1 / qVals, type = "l", col = "blue")
  legend("topright", expression(paste("Line represents AdS")),
         col = c("black", "blue"))
}
# Plot of the fitted coefficients as a function of Q2.
#' Plot the fitted coefficients p0 (black) and p1 (blue) against Q2 on a
#' logarithmic x axis. Reads `paramVsQ2` from `heraEnv`.
#' @export
plotPvsQ2 <- function() {
  pars <- get("paramVsQ2", envir = heraEnv)
  q2 <- pars$Q2
  # Fixed lower bound; upper bound 10% above the largest coefficient.
  yRange <- c(-0.5, 1.1 * max(pars$p0, pars$p1))
  plot.new()
  plot(q2, pars$p0, type = "o",
       xlab = expression(paste(Q^2, (GeV^2))), log = 'x',
       ylab = expression(paste("f"[0], ", f"[1])),
       xlim = c(1e-1, max(q2)),
       ylim = yRange)
  lines(q2, pars$p1, type = "o", col = "blue")
  # Light reference grid.
  abline(h = seq(yRange[1], yRange[2], len = 5), v = seq(0.2, 250, len = 5),
         col = "lightgray", lty = 3)
}
# Show how the fitted coefficients look as a function of z.
#' Plot the fitted coefficients p0 (red) and p1 (blue) against z, with a
#' secondary top axis labelled by the corresponding Q2 values.
#' Reads `paramVsQ2` from `heraEnv`.
#' @export
plotPvsZ <- function() {
  pars <- get("paramVsQ2", envir = heraEnv)
  zVals <- pars$z
  plot.new()
  # Empty canvas; the two series are layered on top of it.
  plot(zVals, type = "n",
       xlab = "z", log = 'x',
       ylab = expression(paste("f"[1], ", f"[2])),
       xlim = c(7e-2, 2.1),
       ylim = c(-0.5, 0.6))
  lines(zVals, pars$p0, type = "o", col = "red")
  lines(zVals, pars$p1, type = "o", col = "blue")
  # Top axis: the Q2 bin that produced each z.
  axis(3, at = zVals, labels = pars$Q2, col.axis = "blue", cex.axis = 0.7, tck = -0.01)
  mtext(expression(paste(Q^2, (GeV^2))), side = 3, at = c(2.2), col = "blue", line = 1.5)
}
# The fitted coefficients are related non-trivially to the wavefunctions;
# this plots the inferred wavefunction profiles psi_0, psi_1 against z.
#' Plot the wavefunction profiles phi0z/phi1z (stored in heraEnv by loadHERA)
#' against z, rescaled so their maximum equals `maxValue` (pass a value <= 0
#' to plot them unscaled).
#'
#' NOTE(review): alpha() below is not base R (presumably scales::alpha) --
#' confirm the package is attached wherever this is called.
#' @export
plotPhivsZ <- function(maxValue = 1) {
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
z = paramVsQ2$z
phi0z = get("phi0z", envir = heraEnv)
phi1z = get("phi1z", envir = heraEnv)
# phi2z = get("phi2z", envir = heraEnv)
# Normalize each profile so that its maximum equals maxValue.
if(maxValue > 0) {
phi0z = (maxValue / max(phi0z)) * phi0z
phi1z = (maxValue / max(phi1z)) * phi1z
# phi2z = (maxValue / phi2z[2]) * phi2z
}
rangeY = 1#max(max(phi0z, phi1z, phi2z))
plot.new()
plot(z, type = "n",
xlab = "z", #log = 'x',
ylab = expression(paste(psi[0],", ", psi[1])),
xlim = c(0.05, 1.1 * max(z)),
ylim = c(-0.2 * rangeY, 1.1 * rangeY))
phis = list(phi0z, phi1z)
cols = c('red', 'blue')
# Each profile: solid points plus a translucent connecting line.
mapply(function(phi, color) {
lines(z, phi, type="p", col=color)
lines(z, phi, type="l", col=alpha(color, 0.3))
}, phis, cols)
# Top axis: the Q2 bin that produced each z.
axis(3, at = z, labels = paramVsQ2$Q2, cex.axis=0.7, tck=-0.01)
mtext(expression(paste(Q^2,(GeV^2))), side=3, at=c(1.7), line=1.5)
abline(h = 0, col = "gray60")
abline(h = seq(-0.2, 1, by = 0.2), v = seq(0.5, 2.5, by = 0.5), col = "lightgray", lty = 3)
# lines(z, phi2z /20, type="o", col="green")
}
# Same profiles, but plotted in the u variable.
#' Plot the weighted wavefunction profiles against u.
#'
#' NOTE(review): fetches "j0" from heraEnv, but loadHERA stores the
#' intercepts under the key "js" and never assigns "j0"; it also uses
#' paramVsQ2$p2, which loadHERA no longer produces (the p2 term is commented
#' out). This function looks stale and will fail as-is; confirm before use.
#' @export
plotPhivsU <- function() {
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
u = get("u", envir = heraEnv)
A = get("A", envir = heraEnv)
z = paramVsQ2$z
j0 = get("j0", envir = heraEnv)
# Apply the exp(-A(z)(0.5 + j)/2) weight used elsewhere (cf. computeU).
phi1 = exp(-0.5 * A(z) * (0.5 + j0[1])) * paramVsQ2$p1
phi2 = exp(-0.5 * A(z) * (0.5 + j0[2])) * paramVsQ2$p2
plot.new()
plot(z, type = "n",
main = expression(paste(phi[0]," and ", phi[1]," vs u")),
xlab = "u",
ylab = expression(paste(phi[0],", ", phi[1])),
xlim = c(0, 7),
ylim = c(0, 0.3))
lines(u, phi1, type="o", col="red")
lines(u, phi2, type="o", col="blue")
#axis(3, at = z, labels = paramVsQ2$Q2, col.axis="blue", cex.axis=0.7, tck=-0.01)
}
| /R/DIS_data.R | permissive | rcarcasses/HQCD-P | R | false | false | 14,095 | r |
#' @export
heraEnv <- new.env()
# the following are the intercepts currently being obtained
#' @export
jn <- c(1.4, 1.096, 0.55)
#' @export
e0 <- 1 - jn
#' @export
Q2s <- NULL
#' Load and filter the bundled HERA F2 data set.
#'
#' Reads the d09-158 neutral-current e+p table shipped with the package,
#' keeps points with x < maxX, F2 < maxF2 and minQ2 <= Q2 <= maxQ2 and,
#' per Q^2 bin with more than two points, fits log(F2) ~ log(x) by weighted
#' least squares to estimate an effective intercept with a 3-sigma error.
#' NOTE(review): the first argument `f2` is never used in the body, and
#' `flog.debug` comes from futile.logger (assumed attached).
#' @export
loadData.F2 <- function(f2, maxX = 0.01, maxF2 = 5, maxQ2 = 1110, minQ2 = 0.1) {
flog.debug(paste('Loading HERA data with maxX', maxX, ' and ', minQ2, ' <= Q2 <=', maxQ2))
# read the HERA nce+p data
nceppPath <- system.file('extdata', 'd09-158.nce+p.txt', package = 'HQCDP')
flog.debug(paste('[HERA] Loading DIS HERA data from ', nceppPath))
ncepp <- read.table(nceppPath, header = TRUE)
# remove all the high x together with some points with "weird" F2
data <- ncepp[ncepp$x < maxX & ncepp$F2 < maxF2 & ncepp$Q2 <= maxQ2 & ncepp$Q2 >= minQ2,]
#data <- ncepp[ncepp$x < maxX & ncepp$F2 < maxF2 & ncepp$Q2 < maxQ2 & ncepp$Q2 > 7,]
flog.debug(paste('[HERA] Q2 range [', min(data$Q2),',', max(data$Q2), '], number of data points', length(data$Q2)))
f2x <- data[,c('F2', 'Q2', 'x', 's_r', 'tot')]
# this list contains all the different Q2 entries
Q2s <- unique(data[, c("Q2")])
# let's also compute here what is the effective intercept by fitting the data using f(Q)x^e(Q)
Q2L <- length(Q2s)
W <- list()
X <- list()
eff <- data.frame(Q2 = numeric(Q2L), ep = numeric(Q2L), epErr = numeric(Q2L), minX = numeric(Q2L), maxX = numeric(Q2L))
for(i in 1:Q2L) {
Q2 <- Q2s[i]
# now we need to extract the columns that we are interested for a given value of Q2
f2xFit <- data[data$Q2 == Q2,][,c("x","F2")]
s_r <- data[data$Q2 == Q2,][,c("s_r")]
tot <- data[data$Q2 == Q2,][,c("tot")]
err <- s_r * tot / 100
# inverse-variance weights for the per-bin fit
w <- 1 / (err^2)
# skip those data which are too small
if(length(f2xFit$x) > 2) {
# now let's try to fit it
fit <- lm( log(F2) ~ log(x),# p0 * x^(-ep),
data = f2xFit,
weights = w)#,
#start = list(p0 = 1, ep = 1))
s <- summary(fit)$coefficients
eff$Q2[i] <- Q2
# effective intercept: F2 ~ x^(-ep), so ep = -slope of log(F2) vs log(x)
eff$ep[i] <- -s['log(x)', 'Estimate']
# let's use 3 sigma as the error uncertainty
eff$epErr[i] <- 3 * s['log(x)', 'Std. Error']
#cat('e = ', eff$ep[i], 'de = ', eff$epErr[i], '\n')
eff$minX[i] <- min(f2xFit$x)
eff$maxX[i] <- max(f2xFit$x)
}
W[[i]] <- w
X[[i]] <- f2xFit$x
}
# drop rows that kept their zero initialisation (bins skipped above)
eff <- eff[eff$Q2 !=0,]
list(F2 = f2x$F2, Q2 = f2x$Q2, x = f2x$x, err = f2x$s_r * f2x$tot / 100, eff = eff, weights = W, xs = X, Q2s = Q2s)
}
# receives a data frame with the structure of HERA data
# returns a data frame
#' @export
#' Add a column that cycles through a fixed palette per distinct Q2 value.
#'
#' Rows sharing the same Q2 receive the same value; distinct Q2 values cycle
#' through `col` (recycled as needed) in order of first appearance.
#'
#' @param df data.frame with (at least) a `Q2` column (HERA-style data).
#' @param colName name of the column to append.
#' @param col character palette to cycle through.
#' @return `df` with the extra column added.
#' @export
addAlternatingColToDataByQ2 <- function(df, colName = 'color', col = c('blue', 'red', 'green')) {
  # a color represents a fixed value of Q2
  Q2s <- unique(df$Q2)
  cl <- rep_len(col, length(Q2s))
  # match() maps every row's Q2 onto its position in Q2s; indexing the
  # recycled palette with that gives one color per row.  (The previous
  # version built a character matrix and used recycled logical indexing,
  # which coerced Q2 to character and silently produced NA on mismatches.)
  df[[colName]] <- cl[match(df$Q2, Q2s)]
  df
}
#' @export
plotHERARangeXvsQ <- function(maxX = 0.01, maxF2 = 5, maxQ2 = 1110, minQ2 = 0.1) {
# Show the kinematic coverage of the HERA points in the (Q^2, 1/x) plane:
# filled dots for points kept by the x < maxX cut, open dots for the rest.
# NOTE(review): unlike loadData.F2, this reads the data file from the
# working directory rather than via system.file().
ncepp <- read.table("d09-158.nce+p.txt", header = TRUE)
# "Above"/"Below" refer to 1/x: small x means large 1/x.
dataAbove <- ncepp[ncepp$x < maxX & ncepp$F2 < maxF2 & ncepp$Q2 <= maxQ2 & ncepp$Q2 >= minQ2,]
dataBelow <- ncepp[ncepp$x > maxX & ncepp$F2 < maxF2 & ncepp$Q2 <= maxQ2 & ncepp$Q2 >= minQ2,]
cat('data below x <', maxX, length(dataAbove$x), ', total points ', length(ncepp$x),'\n')
plot(dataAbove$Q2, 1/dataAbove$x, log = 'xy', type = 'p', pch = 16, cex = 0.5,
xlab = expression(Q^2), ylab = expression(1/x), ylim = c(1, 1e6))
abline(h = c(1, 1e2, 1e4, 1e6), v = c(0.5, 5, 50, 500), col = "lightgray", lty = 'dashed' )
# emphasise the 1/x = 100 line (the default x = 0.01 cut)
abline(h = c(1e2), col = "black", lwd = 2 )
lines(dataBelow$Q2, 1/dataBelow$x, type = 'p', cex = 0.5)
}
#' @export
loadHERA <- function(useIHQCD = TRUE, js = NULL, plotF2 = TRUE) {
# Fit the filtered HERA F2 data with a two-pomeron ansatz
#   F2 = p0 * x^(1 - js[1]) + p1 * x^(1 - js[2]),
# one weighted nls fit per Q^2 bin, and publish the results into heraEnv.
# NOTE(review): relies on globals z, As, lambda (IHQCD background,
# presumably) and on nlsLM from minpack.lm -- none are defined in this
# file; confirm they are in scope when this runs.
A <- function(z) -log(z)
if(useIHQCD)
# NOTE(review): splinefun(z, A) passes the function A as the y-values;
# this looks like it should interpolate a background profile instead --
# verify against the IHQCD setup.
A <- splinefun(z, A)
if(is.null(js))
js = jn
# read the HERA nce+p data
ncepp <- read.table("d09-158.nce+p.txt", header = TRUE)
# remove all the high x together with some points with "weird" F2
data <- ncepp[ncepp$x < 0.01 & ncepp$F2 < 5 & ncepp$Q2 > 0.10 & ncepp$Q2 < 2200,]
# this list contains all the different Q2 entries
Q2s <- unique(data[, c("Q2")])
f2x <- data[,c("x","F2")]
if(plotF2) {
# initialize the plot
plot(log(f2x$x, 10), f2x$F2, type="n",
main=expression("F"[2]),
xlab = expression("log"[10]*"x"), ylab = expression("F"[2]),
xlim = c(-6,-2), ylim = c(0,1.5))
}
# first let's create a bunch of colors to differentiate the graphs
cl <- rainbow(length(Q2s))
paramVsQ2 <- data.frame( "Q2" = numeric(0),
"p0" = numeric(0),
"p1" = numeric(0),
# "p2" = numeric(0),
"z" = numeric(0),
stringsAsFactors=FALSE)
rss <- 0
# NOTE(review): loop starts at i = 2, silently skipping the first Q2 bin.
for(i in 2:(length(Q2s)))
{
# now we need to extract the columns that we are interested for a given value of Q2
f2x <- data[data$Q2 == Q2s[i],][,c("x","F2")]
s_r <- data[data$Q2 == Q2s[i],][,c("s_r")]
tot <- data[data$Q2 == Q2s[i],][,c("tot")]
err <- s_r * tot / 100
w <- 1 / (err^2)
# skip those data which are too small
if(length(f2x$F2) < 3)
next
# now let's try to fit it
tryCatch({
fit <- nlsLM( F2 ~ p0 * x^(1 - js[1]) + p1 * x^(1 - js[2]),# + p2 * x^(1 - js[3]),
data = f2x,
weights = w,
start = list(p0 = 1, p1 = 1))#, p2 = 1))
# sum the residuals to have a control of the quality of the fit
rss <- rss + sum(residuals(fit)^2)
# get the parameters for the given value of Q2
# to find z we need to invert Q = exp(A(z))
qvsz <- function(z) { return(exp(A(z)) - sqrt(Q2s[i])) }
zSol = uniroot(qvsz, c(0, 7), tol = 1e-9)
# print(paste(zSol$root, " in AdS should be ", 1/sqrt(Q2s[i])))
# NOTE(review): union() both flattens and de-duplicates; if any two of
# Q2, p0, p1, z happen to coincide, the row silently loses an entry.
row = union(union(Q2s[i],fit$m$getAllPars()), zSol$root)
paramVsQ2[nrow(paramVsQ2) + 1, ] <- row
if(plotF2) {
# Draw the fit on the plot by getting the prediction from the fit at 200 x-coordinates across the range of xdata
fitPlot = data.frame(x = seq(min(f2x$x), max(f2x$x), len = 200))
lines(log(fitPlot$x, 10), predict(fit, newdata = fitPlot), col = cl[i])
# plot the dots
lines(log(f2x$x, 10), f2x$F2, type = "p", col= cl[i])
}
},
error = function(e){
print(paste("Unable to fit for Q2=", Q2s[i], " data ", f2x))
print(paste("-> ERROR :",conditionMessage(e), "\n"))
})
}
# now make these available through the file environment
assign("paramVsQ2", paramVsQ2, envir = heraEnv)
assign("js", js, envir = heraEnv)
assign("A", A, envir = heraEnv)
assign("rss", rss, envir = heraEnv)
cat('rss for ', js, '=', rss, '\n')
assign("data", data, envir = heraEnv)
z <- paramVsQ2$z # beware this is not the z of IHQCD, this is the z for each Q2, so is a different list.
Asfun <- splinefun(z, As)
lambdafun <- splinefun(z, lambda)
# per-mode normalisation used to turn fitted amplitudes into psi(z) profiles
ff <- lapply(js, function(J) z^(-2 * J) * exp((-J + 0.5) * Asfun(z)) * lambdafun(z))
# reference: F2 ~ Q2^J P13 exp((-J + 0.5) * As) exp(phi)
# for phi = cte ~ z^(-2J) z^(J - 0.5) ~ z^(-J - 0.5)
# P13 ~ delta(z - 1/Q)
assign("phi0z", paramVsQ2$p0 / ff[[1]], envir = heraEnv)
assign("phi1z", paramVsQ2$p1 / ff[[2]], envir = heraEnv)
#assign("phi2z", paramVsQ2$p2 / ff[[3]], envir = heraEnv)
#computeU(A)
return(list( z = paramVsQ2$z, paramVsQ2 = paramVsQ2, data = data, rss = rss))
}
#' @export
showBestJs <- function(nX = 10, nY = 10) {
  # Grid-scan the two leading intercepts (j0, j1), refitting the F2 data at
  # every grid point, draw a contour map of the residual sum of squares and
  # return the best point found.
  grid_j0 <- seq(0.9, 1.2, len = nX)
  grid_j1 <- seq(1.21, 1.6, len = nY)
  best <- list(j0 = 0, j1 = 0, rss = 1e10)
  rss <- matrix(nrow = nX, ncol = nY)
  for (a in seq_along(grid_j0)) {
    for (b in seq_along(grid_j1)) {
      fit <- loadHERA(js = c(grid_j0[a], grid_j1[b]), plotF2 = FALSE)
      rss[a, b] <- fit$rss
      if (fit$rss < best$rss) {
        best <- list(j0 = grid_j0[a], j1 = grid_j1[b], rss = fit$rss)
      }
    }
  }
  contour(list(x = grid_j0, y = grid_j1, z = rss),
          levels = c(0.03, 0.04, 0.05, 0.06, 0.07, 0.1, 0.2, 0.3, 1, 3),
          xlab = expression('j'[0]),
          ylab = expression('j'[1]),
          main = 'Residues squared sum')
  cat('Minimun rss =', best$rss, ' found for j0 =', best$j0, ' j1 =', best$j1, '\n')
  points(x = best$j0, y = best$j1)
  best
}
#' @export
computeU <- function(A = function(z) {return(-log(z / 1))}) {
# Build the coordinate u(z) = integral_10^z -exp(A(t)) dt for every fitted z
# and spline the warp-weighted amplitudes p1/p2 as functions of u.
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
z = paramVsQ2$z
integrand <- function(z) { return(-exp(A(z))) }
# NOTE(review): lapply() yields a list; splinefun() below expects numeric
# x-values, so this relies on implicit coercion -- consider unlist().
u <- lapply(paramVsQ2$z, function(z) { return (integrate(integrand, 10, z)$value) })
# NOTE(review): j0 is resolved from the calling/global environment, not from
# heraEnv -- this file defines jn, not j0; confirm j0 exists at call time.
phi1u = splinefun(u, exp(-0.5 * A(z) * (0.5 + j0[1])) * paramVsQ2$p1)
phi2u = splinefun(u, exp(-0.5 * A(z) * (0.5 + j0[2])) * paramVsQ2$p2)
# now make these available through the file environment
assign("phi1u", phi1u, envir = heraEnv)
assign("phi2u", phi2u, envir = heraEnv)
assign("u", u, envir = heraEnv)
}
#' @export
reconstructVu <- function() {
# Reconstruct the Schrodinger-like potential V(u) from each splined mode via
# V = psi''(u)/psi(u) - j0[n], plotting it with the (scaled) wavefunction
# overlaid in blue.
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
z = paramVsQ2$z
u = get("u", envir = heraEnv)
# NOTE(review): A and z are fetched/derived here but never used below.
A = get("A", envir = heraEnv)
phi1 = get("phi1u", envir = heraEnv)
phi2 = get("phi2u", envir = heraEnv)
# first reconstruction
# NOTE(review): j0 comes from the global environment (this file defines jn,
# not j0) -- confirm it is set elsewhere.
Vu1 = lapply(u, function(u) {
V = (phi1(u, deriv = 2) / phi1(u)) - j0[1]
return(V)
})
plot.new()
plot(u, Vu1, type="p", main="Reconstructing with first wavefunction (in blue)", xlab = "u", ylab = "V(u)", ylim = c(-500, 500))
lines(u, Vu1, type="o")
lines(u, 3000 * phi1(u), type="l", col = "blue")
# second reconstruction
Vu2 = lapply(u, function(u) {
V = (phi2(u, deriv = 2) / phi2(u)) - j0[2]
return(V)
})
plot.new()
plot(u, Vu2, type="p", main="Reconstructing with second wavefunction (in blue)", xlab = "u", ylab = "V(u)")
lines(u, Vu2, type="o")
lines(u, 1000 * phi2(u), type="l", col = "blue")
}
#' @export
plotUvsZ <- function() {
  # Scatter of the holographic coordinate u against z, with the pure-AdS
  # relation u = -log(z/10) overlaid in blue for comparison.
  fit_state <- get("paramVsQ2", envir = heraEnv)
  zs <- fit_state$z
  us <- get("u", envir = heraEnv)
  plot.new()
  plot(zs, us, type = "p", main = expression(paste("u vs. z")), xlab = "z", ylab = "u")
  lines(zs, -log(zs / 10), type = "l", col = "blue")
  legend("topright", expression(paste("Line represents AdS")), col = c("black", "blue"))
}
#' @export
plotZvsQ <- function() {
  # Fitted z of each Q^2 bin plotted against Q, with the AdS expectation
  # z = 1/Q drawn in blue.
  fit_state <- get("paramVsQ2", envir = heraEnv)
  q <- sqrt(fit_state$Q2)
  plot.new()
  plot(q, fit_state$z, type = "p", main = expression(paste("z vs. Q")), xlab = "Q", ylab = "z")
  lines(q, 1 / q, type = "l", col = "blue")
  legend("topright", expression(paste("Line represents AdS")), col = c("black", "blue"))
}
# plot of the parameters as a function of Q2
#' @export
plotPvsQ2 <- function() {
# Fitted amplitudes p0 (black) and p1 (blue) versus Q^2 on a log x-axis.
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
Q2 = paramVsQ2$Q2
# yLim <- c(0.9 * min(paramVsQ2$p0, paramVsQ2$p1), 1.1 * max(paramVsQ2$p0, paramVsQ2$p1))
yLim <- c(-0.5, 1.1 * max(paramVsQ2$p0, paramVsQ2$p1))
plot.new()
plot(Q2, paramVsQ2$p0, type = "o",
xlab = expression(paste(Q^2,(GeV^2))), log = 'x',
ylab = expression(paste("f"[0],", f"[1])),
xlim = c(1e-1, max(Q2)),
ylim = yLim)
#lines(Q2, paramVsQ2$p0, type="o", col="red")
lines(Q2, paramVsQ2$p1, type="o", col="blue")
#lines(z, paramVsQ2$p2 / 5, type="o", col="green")
#abline(h = 0, col = "gray60")
abline(h = seq(yLim[1], yLim[2], len = 5), v = seq(0.2, 250, len = 5), col = "lightgray", lty = 3)
}
# let's try to show how the functions should look like in z
#' @export
plotPvsZ <- function() {
# Same fitted amplitudes, plotted against the fitted z; a top axis maps each
# z back to its Q^2 bin.
# NOTE(review): the y-label reads f1/f2 while the curves drawn are p0/p1.
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
z = paramVsQ2$z
plot.new()
plot(z, type = "n",
xlab = "z", log = 'x',
ylab = expression(paste("f"[1],", f"[2])),
xlim = c(7e-2, 2.1),
ylim = c(-0.5, 0.6))
lines(z, paramVsQ2$p0, type="o", col="red")
lines(z, paramVsQ2$p1, type="o", col="blue")
#lines(z, paramVsQ2$p2 / 5, type="o", col="green")
axis(3, at = z, labels = paramVsQ2$Q2, col.axis="blue", cex.axis=0.7, tck=-0.01)
mtext(expression(paste(Q^2,(GeV^2))), side=3, at=c(2.2), col="blue", line=1.5)
}
# now the p1 and p2 functions are related non trivially to the wavefunctions,
# let's try to guess how the wavefunctions should actually look like
#' @export
plotPhivsZ <- function(maxValue = 1) {
# Plot the normalised mode profiles psi_0 (red) and psi_1 (blue) against z,
# optionally rescaled so each curve peaks at maxValue.
# NOTE(review): alpha() below comes from the scales/ggplot2 package --
# assumed attached.
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
z = paramVsQ2$z
phi0z = get("phi0z", envir = heraEnv)
phi1z = get("phi1z", envir = heraEnv)
# phi2z = get("phi2z", envir = heraEnv)
if(maxValue > 0) {
phi0z = (maxValue / max(phi0z)) * phi0z
phi1z = (maxValue / max(phi1z)) * phi1z
# phi2z = (maxValue / phi2z[2]) * phi2z
}
rangeY = 1#max(max(phi0z, phi1z, phi2z))
plot.new()
plot(z, type = "n",
xlab = "z", #log = 'x',
ylab = expression(paste(psi[0],", ", psi[1])),
xlim = c(0.05, 1.1 * max(z)),
ylim = c(-0.2 * rangeY, 1.1 * rangeY))
phis = list(phi0z, phi1z)
cols = c('red', 'blue')
# one scatter plus one translucent connecting line per mode
mapply(function(phi, color) {
lines(z, phi, type="p", col=color)
lines(z, phi, type="l", col=alpha(color, 0.3))
}, phis, cols)
axis(3, at = z, labels = paramVsQ2$Q2, cex.axis=0.7, tck=-0.01)
mtext(expression(paste(Q^2,(GeV^2))), side=3, at=c(1.7), line=1.5)
abline(h = 0, col = "gray60")
abline(h = seq(-0.2, 1, by = 0.2), v = seq(0.5, 2.5, by = 0.5), col = "lightgray", lty = 3)
# lines(z, phi2z /20, type="o", col="green")
}
# What about the last in the u variable?
#' @export
plotPhivsU <- function() {
# Plot the first two mode profiles phi_0, phi_1 against the holographic
# coordinate u (filled in by computeU()), rescaling the fitted amplitudes
# p1/p2 by the warp-factor weight exp(-A(z) * (0.5 + j) / 2).
paramVsQ2 = get("paramVsQ2", envir = heraEnv)
u = get("u", envir = heraEnv)
A = get("A", envir = heraEnv)
z = paramVsQ2$z
# NOTE(review): "j0" is read from heraEnv, but nothing in this file assigns
# it there (loadHERA() stores "js") -- confirm it is set elsewhere.
j0 = get("j0", envir = heraEnv)
phi1 = exp(-0.5 * A(z) * (0.5 + j0[1])) * paramVsQ2$p1
phi2 = exp(-0.5 * A(z) * (0.5 + j0[2])) * paramVsQ2$p2
plot.new()
# empty frame first (type = "n"); curves are added with lines() below
plot(z, type = "n",
main = expression(paste(phi[0]," and ", phi[1]," vs u")),
xlab = "u",
ylab = expression(paste(phi[0],", ", phi[1])),
xlim = c(0, 7),
ylim = c(0, 0.3))
lines(u, phi1, type="o", col="red")
lines(u, phi2, type="o", col="blue")
#axis(3, at = z, labels = paramVsQ2$Q2, col.axis="blue", cex.axis=0.7, tck=-0.01)
}
|
data <- read.table("household_power_consumption.txt", sep=";", header = TRUE, na.strings = "?", stringsAsFactors=FALSE)
gapData <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
png("plot1.png", width = 480, height = 480)
hist(gapData$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off() | /assignment_1/plot1.R | no_license | juandes/coursera-exploratory-data-analysis | R | false | false | 367 | r | data <- read.table("household_power_consumption.txt", sep=";", header = TRUE, na.strings = "?", stringsAsFactors=FALSE)
# Restrict to the two analysis days (Feb 1-2, 2007), then render the
# global-active-power histogram to a 480x480 PNG.
gapData <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
png("plot1.png", width = 480, height = 480)
hist(gapData$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.R
\name{vcf_histogram}
\alias{vcf_histogram}
\title{Plotting Histogram of Variant consequences}
\usage{
vcf_histogram(vcf, ...)
}
\arguments{
\item{vcf}{an object of class VcfFile}
\item{...}{additional parameters for the plotting}
}
\value{
A \code{\link{ggplot2}} plot object
}
\description{
Plots a histogram of variant consequences.
}
\examples{
\dontrun{
vcf_histogram(vcf)
}
}
| /man/vcf_histogram.Rd | no_license | lescai/vcfplot | R | false | true | 471 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.R
\name{vcf_histogram}
\alias{vcf_histogram}
\title{Plotting Histogram of Variant consequences}
\usage{
vcf_histogram(vcf, ...)
}
\arguments{
\item{vcf}{an object of class VcfFile}
\item{...}{additional parameters for the plotting}
}
\value{
A \code{\link{ggplot2}} plot object
}
\description{
Plotting Histogram of Variant consequences
}
\examples{
\dontrun{
vcf_histogram(vcf)
}
}
|
# Quadtree-split a square matrix into 4^(res-1) blocks at each resolution
# level 1..max.res, tracking per block: the sub-matrix, a pruned flag
# ("FALSE" initially), the element count (alpha) and the mean value (theta).
# Assumes nrow(input) == ncol(input) with the side length divisible by
# 2^(max.res - 1) -- TODO confirm callers guarantee this.
split_mat <- function(input, max.res) {
pruned.status <- vector("list", max.res)
pruned.status[[1]] <- "FALSE"
if (max.res >= 2) {
for (res in 2:max.res) {
pruned.status[[res]] <- rep(list("FALSE"), times = 4 ^ (res - 1))
}
}
pruned.mat <- vector("list", max.res)
pruned.mat[[1]] <- list(input)
if (max.res >= 2) {
for (res in 2:max.res) {
for (elem in 1:length(pruned.mat[[res - 1]])) {
# NOTE(review): the side length is taken from block [[1]] rather than
# [[elem]]; fine while all blocks at a level share dimensions, but
# fragile if that invariant ever changes.
no.row <- nrow(pruned.mat[[res - 1]][[1]])
# quadrants in order: top-left, top-right, bottom-left, bottom-right
pruned.mat[[res]][[1 + (elem - 1) * 4]] <- pruned.mat[[res - 1]][[elem]][1:(no.row/2), 1:(no.row/2)]
pruned.mat[[res]][[2 + (elem - 1) * 4]] <- pruned.mat[[res - 1]][[elem]][1:(no.row/2), (1 + (no.row/2)):no.row]
pruned.mat[[res]][[3 + (elem - 1) * 4]] <- pruned.mat[[res - 1]][[elem]][(1 + (no.row/2)):no.row, 1:(no.row/2)]
pruned.mat[[res]][[4 + (elem - 1) * 4]] <- pruned.mat[[res - 1]][[elem]][(1 + (no.row/2)):no.row, (1 + (no.row/2)):no.row]
}
}
}
pruned.alpha <- vector("list", max.res)
pruned.theta <- vector("list", max.res)
# NOTE(review): this loop covers res in 1:max.res yet is gated by
# max.res >= 2, so for max.res == 1 alpha/theta stay empty even though
# matrices/status are populated -- confirm that is intended.
if (max.res >= 2) {
for (res in 1:max.res) {
for (elem in 1:length(pruned.mat[[res]])) {
pruned.alpha[[res]][[elem]] <- length(pruned.mat[[res]][[elem]])
pruned.theta[[res]][[elem]] <- sum(pruned.mat[[res]][[elem]])
pruned.theta[[res]][[elem]] <- pruned.theta[[res]][[elem]] / pruned.alpha[[res]][[elem]]
}
}
}
# structure() with no extra attributes is effectively just list() here
output <- structure(list(matrices = pruned.mat, status = pruned.status,
alpha = pruned.alpha, theta = pruned.theta))
return(output)
} | /functions/split_mat.R | no_license | significantstats/flow_cytometry | R | false | false | 1,609 | r | split_mat <- function(input, max.res) {
pruned.status <- vector("list", max.res)
pruned.status[[1]] <- "FALSE"
if (max.res >= 2) {
for (res in 2:max.res) {
pruned.status[[res]] <- rep(list("FALSE"), times = 4 ^ (res - 1))
}
}
pruned.mat <- vector("list", max.res)
pruned.mat[[1]] <- list(input)
if (max.res >= 2) {
for (res in 2:max.res) {
for (elem in 1:length(pruned.mat[[res - 1]])) {
no.row <- nrow(pruned.mat[[res - 1]][[1]])
pruned.mat[[res]][[1 + (elem - 1) * 4]] <- pruned.mat[[res - 1]][[elem]][1:(no.row/2), 1:(no.row/2)]
pruned.mat[[res]][[2 + (elem - 1) * 4]] <- pruned.mat[[res - 1]][[elem]][1:(no.row/2), (1 + (no.row/2)):no.row]
pruned.mat[[res]][[3 + (elem - 1) * 4]] <- pruned.mat[[res - 1]][[elem]][(1 + (no.row/2)):no.row, 1:(no.row/2)]
pruned.mat[[res]][[4 + (elem - 1) * 4]] <- pruned.mat[[res - 1]][[elem]][(1 + (no.row/2)):no.row, (1 + (no.row/2)):no.row]
}
}
}
pruned.alpha <- vector("list", max.res)
pruned.theta <- vector("list", max.res)
if (max.res >= 2) {
for (res in 1:max.res) {
for (elem in 1:length(pruned.mat[[res]])) {
pruned.alpha[[res]][[elem]] <- length(pruned.mat[[res]][[elem]])
pruned.theta[[res]][[elem]] <- sum(pruned.mat[[res]][[elem]])
pruned.theta[[res]][[elem]] <- pruned.theta[[res]][[elem]] / pruned.alpha[[res]][[elem]]
}
}
}
output <- structure(list(matrices = pruned.mat, status = pruned.status,
alpha = pruned.alpha, theta = pruned.theta))
return(output)
} |
# Simple linear regression worked example (Ex 3.74, p. 153):
# LOS (length of stay) regressed on FACTORS.
if (FALSE)
{"
Perform simple linear regression for Ex3.74 page 153
x=number of FACTORS
y=length of stay
"}
# (the if (FALSE) block above is a never-executed string used as a header)
library(faraway) #this command brings in a library of regression functions
#BE SURE TO CHANGE THE DIRECTORIES BELOW TO YOUR DIRECTORIES
#append=FALSE indicates to build a new file
#split=TRUE indicates to send output to file and to the console window
#write output to a file, append or overwrite, split to file and console window
#sink("C:/Users/jmard/Desktop/RegressionMethodsSpring2020/Lecture 02 28JAN2020/FACTORS_out.txt",append=FALSE,split=TRUE)
#read in the data which is in a csv file
#change the directory below to your directory
ex374 <- read.csv(file="C:/Users/jmard/Desktop/RegressionMethodsSpring2020/Lecture 02 28Jan2020/FACTORS.csv",header = TRUE)
head(ex374,10L)
ex374
summary(ex374)
# fit LOS ~ FACTORS by ordinary least squares
mod <- lm(LOS~ FACTORS, ex374)
# opens an on-screen device (Windows-only call)
windows(7,7)
#save graph in pdf
pdf(file="C:/Users/jmard/Desktop/RegressionMethodsSpring2020/Lecture 02 28JAN2020/Ex3_74R_out.pdf")
plot(LOS~ FACTORS,ex374) #keep in mind - R is case sensitive SAS is not
abline(mod)
plot(LOS~ FACTORS,ex374)
abline(mod)
#another approach
library(ggplot2)
# when run interactively this autoprints to the active device (the pdf above)
ggplot(ex374,aes(x=FACTORS,y=LOS)) + geom_point(color="red",size=2) + geom_smooth(method=lm, color="blue")
#the plot shows a 95% confidence interval about the regression line - method lm asks for the least squares line
summary(mod)
anova(mod)
names(mod) #the names function is used to get or set the names of an object
names(summary(mod))
info_mod <- mod #save information in mod
summary_mod <- summary(mod) #save summary information in summary_mod
typeof(info_mod) #indicates the type of vector info_mod is
typeof(summary_mod) #indicates the type of vector summary_mod is
info_mod$coefficients
summary_mod$r.squared
# for simple linear regression R^2 equals the squared correlation
cor(ex374$FACTORS,ex374$LOS)
R2=(cor(ex374$FACTORS,ex374$LOS))^2
R2
confint(mod) #95% confidence interval on regression coefficients
new.dat <- data.frame(FACTORS=231) #creates an observation where FACTORS=231
new.dat
predict(mod, newdata = new.dat, interval = 'confidence')
predict(mod, newdata = new.dat, interval = 'prediction')
##----------------------------------------------------------------##
dev.off() #closes the pdf device
| /regression/MidtermExamFilesLecture2/Ex3_74.R | no_license | himesh257/classwork-research | R | false | false | 2,206 | r | if (FALSE)
{"
Perform simple linear regression for Ex3.74 page 153
x=number of FACTORS
y=length of stay
"}
library(faraway) #this command brings in a library of regression functions
#BE SURE TO CHANGE THE DIRECTORIES BELOW TO YOUR DIRECTORIES
#append=FALSE indicates to build a new file
#split=TRUE indicates to send output to file and to the console window
#write output to a file, append or overwrite, split to file and console window
#sink("C:/Users/jmard/Desktop/RegressionMethodsSpring2020/Lecture 02 28JAN2020/FACTORS_out.txt",append=FALSE,split=TRUE)
#read in the data which is in a csv file
#change the directory below to your directory
ex374 <- read.csv(file="C:/Users/jmard/Desktop/RegressionMethodsSpring2020/Lecture 02 28Jan2020/FACTORS.csv",header = TRUE)
head(ex374,10L)
ex374
summary(ex374)
mod <- lm(LOS~ FACTORS, ex374)
windows(7,7)
#save graph in pdf
pdf(file="C:/Users/jmard/Desktop/RegressionMethodsSpring2020/Lecture 02 28JAN2020/Ex3_74R_out.pdf")
plot(LOS~ FACTORS,ex374) #keep in mind - R is case sensitive SAS is not
abline(mod)
plot(LOS~ FACTORS,ex374)
abline(mod)
#another approach
library(ggplot2)
ggplot(ex374,aes(x=FACTORS,y=LOS)) + geom_point(color="red",size=2) + geom_smooth(method=lm, color="blue")
#the plot shows a 95% confidence interval about the regression line - method lm asks for the least squares line
summary(mod)
anova(mod)
names(mod) #the names function is used to get or set the names of an object
names(summary(mod))
info_mod <- mod #save information in mod
summary_mod <- summary(mod) #save summary information in summary_mod
typeof(info_mod) #indicates the type of vector info_mod is
typeof(summary_mod) #indicates the type of vector summary_mod is
info_mod$coefficients
summary_mod$r.squared
cor(ex374$FACTORS,ex374$LOS)
R2=(cor(ex374$FACTORS,ex374$LOS))^2
R2
confint(mod) #95% confidence interval on regression coefficients
new.dat <- data.frame(FACTORS=231) #creates an observation where FACTORS=231
new.dat
predict(mod, newdata = new.dat, interval = 'confidence')
predict(mod, newdata = new.dat, interval = 'prediction')
##----------------------------------------------------------------##
dev.off() #closes pdf file)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Summary.R
\name{.transformVLD}
\alias{.transformVLD}
\title{Transform an aspect with data type}
\usage{
.transformVLD(aspect)
}
\arguments{
\item{aspect}{an aspect (data.frame)}
}
\value{
the aspect (data.frame)
}
\description{
Transforms an aspect with \code{value}, \code{dataType} and \code{isList} columns to force a custom format
in summary generation.
}
\note{
Internal function only for convenience
}
\examples{
df = data.frame(bla=c("a","b","c"),
value=list("a",2,TRUE),
dataType=c("string","integer","boolean"),
isList=c(FALSE,FALSE,FALSE))
df = RCX:::.transformVLD(df)
summary(df)
}
\keyword{internal}
| /man/dot-transformVLD.Rd | permissive | frankkramer-lab/RCX | R | false | true | 739 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Summary.R
\name{.transformVLD}
\alias{.transformVLD}
\title{Transform an aspect with data type}
\usage{
.transformVLD(aspect)
}
\arguments{
\item{aspect}{an aspect (data.frame)}
}
\value{
the aspect (data.frame)
}
\description{
Transforms an aspect with \code{value}, \code{dataType} and \code{isList} columns to force a custom format
in summary generation.
}
\note{
Internal function only for convenience
}
\examples{
df = data.frame(bla=c("a","b","c"),
value=list("a",2,TRUE),
dataType=c("string","integer","boolean"),
isList=c(FALSE,FALSE,FALSE))
df = RCX:::.transformVLD(df)
summary(df)
}
\keyword{internal}
|
# Driver script: load the game helper functions, then parse one game file
# from data/ into a moves list and the full sequence of board states.
source("R/get_moves.R") #use get_moves(__gametext__)
source("R/white_move.R") #used within get_boards()
source("R/black_move.R") #used within get_boards()
source("R/get_boards.R") #use get_boards(__moveslist__)
# NOTE(review): despite the name, this selects the 10th file in data/, not
# the first -- confirm the index is intentional.
firstfile <- list.files("data/")[10]
test <- readLines(paste0("data/",firstfile))
mymoves <- get_moves(test)
full_game <-get_boards(mymoves)
| /R/read_png.R | no_license | pmckenz1/590_final | R | false | false | 355 | r | source("R/get_moves.R") #use get_moves(__gametext__)
source("R/white_move.R") #used within get_boards()
source("R/black_move.R") #used within get_boards()
source("R/get_boards.R") #use get_boards(__moveslist__)
firstfile <- list.files("data/")[10]
test <- readLines(paste0("data/",firstfile))
mymoves <- get_moves(test)
full_game <-get_boards(mymoves)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colors.R
\name{royalbluered}
\alias{royalbluered}
\alias{royalredblue}
\alias{royalbluegrayred}
\alias{royalredgrayblue}
\alias{turyeb}
\alias{redgreen}
\alias{greenred}
\alias{bluered}
\alias{redblue}
\alias{redblackblue}
\alias{cyanblackyellow}
\alias{yellowblackcyan}
\alias{blueblackred}
\alias{blackredyellow}
\alias{blackgoldred}
\alias{whiteblueblackheat}
\alias{heat}
\alias{magentayellow}
\alias{yellowmagenta}
\alias{blackyellow}
\alias{yellowblack}
\alias{whiteblue}
\alias{whitered}
\alias{blackred}
\alias{blackgreen}
\alias{whiteblack}
\alias{blackwhite}
\title{Two and three-color panels}
\usage{
royalbluered(n)
royalredblue(n)
royalbluegrayred(n)
royalredgrayblue(n)
blackyellow(n)
yellowblack(n)
whiteblue(n)
whitered(n)
blackred(n)
blackgreen(n)
whiteblack(n)
blackwhite(n)
turyeb(n)
redgreen(n)
greenred(n)
bluered(n)
redblue(n)
blueblackred(n)
cyanblackyellow(n)
yellowblackcyan(n)
redblackblue(n)
blackredyellow(n)
blackgoldred(n)
magentayellow(n)
yellowmagenta(n)
whiteblueblackheat(n)
heat(n)
}
\arguments{
\item{n}{Number of colors needed}
}
\value{
Character vector of length \code{n} coding colors
}
\description{
Two and three-color panels
}
\examples{
display.threecolor.panels()
}
\seealso{
\code{\link{blackyellow}} for two-color systems
}
| /man/royalbluered.Rd | no_license | bedapub/ribiosPlot | R | false | true | 1,377 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colors.R
\name{royalbluered}
\alias{royalbluered}
\alias{royalredblue}
\alias{royalbluegrayred}
\alias{royalredgrayblue}
\alias{turyeb}
\alias{redgreen}
\alias{greenred}
\alias{bluered}
\alias{redblue}
\alias{redblackblue}
\alias{cyanblackyellow}
\alias{yellowblackcyan}
\alias{blueblackred}
\alias{blackredyellow}
\alias{blackgoldred}
\alias{whiteblueblackheat}
\alias{heat}
\alias{magentayellow}
\alias{yellowmagenta}
\alias{blackyellow}
\alias{yellowblack}
\alias{whiteblue}
\alias{whitered}
\alias{blackred}
\alias{blackgreen}
\alias{whiteblack}
\alias{blackwhite}
\title{Two and three-color panels}
\usage{
royalbluered(n)
royalredblue(n)
royalbluegrayred(n)
royalredgrayblue(n)
blackyellow(n)
yellowblack(n)
whiteblue(n)
whitered(n)
blackred(n)
blackgreen(n)
whiteblack(n)
blackwhite(n)
turyeb(n)
redgreen(n)
greenred(n)
bluered(n)
redblue(n)
blueblackred(n)
cyanblackyellow(n)
yellowblackcyan(n)
redblackblue(n)
blackredyellow(n)
blackgoldred(n)
magentayellow(n)
yellowmagenta(n)
whiteblueblackheat(n)
heat(n)
}
\arguments{
\item{n}{Number of colors needed}
}
\value{
Character vector of length \code{n} coding colors
}
\description{
Two and three-color panels
}
\examples{
display.threecolor.panels()
}
\seealso{
\code{\link{blackyellow}} for two-color systems
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{read_rdf}
\alias{read_rdf}
\title{Read rdf file and create a data.table.}
\usage{
read_rdf(path_to_rdf, type = c("ntriples", "nquads"))
}
\arguments{
\item{path_to_rdf}{The path of the file.}
\item{type}{the rdf serialisation, either 'ntriples' or 'nquads'}
}
\description{
Read an RDF file and create a data.table.
}
| /man/read_rdf.Rd | no_license | rijpma/cower | R | false | true | 407 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{read_rdf}
\alias{read_rdf}
\title{Read rdf file and create a data.table.}
\usage{
read_rdf(path_to_rdf, type = c("ntriples", "nquads"))
}
\arguments{
\item{path_to_rdf}{The path of the file.}
\item{type}{the rdf serialisation, either 'ntriples' or 'nquads'}
}
\description{
Read an RDF file and create a data.table.
}
|
### Jinliang Yang
### plot the stat of the teo20 methylation data
library("data.table")
### check the results
library("farmeR")
# collect the bismark PE mapping reports and pull the mapping-efficiency rows
f1 <- list.files(path="largedata/wgbs_bismark", pattern="PE_report.txt$", full.names=TRUE)
res <- get_file2tab(files=f1, features="Mapping efficiency:\t", replace=F)
files <- list.files(path="largedata/wgbs_bismark/", pattern="pe.CX_report.txt", full.names=TRUE)
cvg <- dtp <- data.frame()
for(i in 1:length(files)){
# NOTE(review): the loop index is never used -- every iteration re-reads
# the hard-coded JRA2 report instead of files[i]; confirm this was intended.
res <- fread("largedata/wgbs_bismark/JRA2_pe.CX_report.txt")
# split the cytosine report by sequence context (column V6)
cg <- res[V6 == "CG"]
chg <- res[V6 == "CHG"]
chh <- res[V6 == "CHH"]
rm(list="res")
# total read count per site = methylated (V4) + unmethylated (V5)
cg$tot <- cg$V4 + cg$V5
chg$tot <- chg$V4 + chg$V5
chh$tot <- chh$V4 + chh$V5
# fraction of sites with zero coverage in each context
# NOTE(review): `tem` is overwritten every iteration and never appended to
# cvg/dtp, so only the last value survives.
tem <- data.frame(cg=cg[,sum(tot == 0)]/nrow(cg),
chg=chg[,sum(tot == 0)]/nrow(chg),
chh=chh[,sum(tot == 0)]/nrow(chh))
}
### CG, CHG and CHH
###180125000, 158277169, 624401016
res <- read.table("data/res.txt", header=FALSE)
names(res) <- c("sample", "cov_CG", "cov_CHG", "cov_CHH", "ratio_CG", "ratio_CHG", "ratio_CHH",
"tot_CG", "tot_CHG", "tot_CHH")
res$sample <- gsub(".*/|_methratio.*", "", res$sample)
res$tot_CG <- res$tot_CG/180125000
res$tot_CHG <- res$tot_CHG/158277169
res$tot_CHH <- res$tot_CHH/624401016
#library(tidyr)
library(reshape2)
resl <- melt(res, id.vars="sample")
resl$variable <- as.character(resl$variable)
resl$type <- gsub("_.*", "", resl$variable)
resl$context <- gsub(".*_", "", resl$variable)
p1 <- ggplot(subset(resl, type=="cov"), aes(x=context, y=value, fill=context)) +
geom_boxplot() +
theme_bw() +
theme(plot.title = element_text(color="red", size=20, face="bold.italic"),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=13),
axis.title = element_text(size=18, face="bold")) +
#scale_fill_manual(values=c("#008080", "#003366", "#40e0d0")) +
ggtitle("Sequencing Depth") + xlab("") + ylab("Depth per cytosine site") +
guides(fill=FALSE)
#guides(colour=FALSE, linetype=FALSE)
p2 <- ggplot(subset(resl, type=="ratio"), aes(x=context, y=value, fill=context)) +
geom_boxplot() +
theme_bw() +
theme(plot.title = element_text(color="red", size=20, face="bold.italic"),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=13),
axis.title = element_text(size=18, face="bold")) +
#scale_fill_manual(values=c("#008080", "#003366", "#40e0d0")) +
ggtitle("Methylation Ratio") + xlab("") + ylab("Mean C/CT Ratio") +
guides(fill=FALSE)
p3 <- ggplot(subset(resl, type=="tot"), aes(x=context, y=value, fill=context)) +
geom_boxplot() +
theme_bw() +
theme(plot.title = element_text(color="red", size=20, face="bold.italic"),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=13),
axis.title = element_text(size=18, face="bold")) +
#scale_fill_manual(values=c("#008080", "#003366", "#40e0d0")) +
ggtitle("Coverage of cytosine sites") + xlab("") + ylab("covered / possible C sites") +
guides(fill=FALSE)
#guides(colour=FALSE, linetype=FALSE)
source("~/Documents/Github/zmSNPtools/Rcodes/multiplot.R")
#multiplot(p1, p4, p2, p5, p3, p6, cols=3)
pdf("graphs/stat.pdf", width=16, height=5)
multiplot(p3, p1, p2, cols=3)
dev.off()
| /profiling/2.Teo20_WGBS/2.C.3_align_cov_old.R | no_license | yangjl/methylation | R | false | false | 3,377 | r | ### Jinliang Yang
### plot the stat of the teo20 methylation data
library("data.table")
### check the results
library("farmeR")
f1 <- list.files(path="largedata/wgbs_bismark", pattern="PE_report.txt$", full.names=TRUE)
res <- get_file2tab(files=f1, features="Mapping efficiency:\t", replace=F)
files <- list.files(path="largedata/wgbs_bismark/", pattern="pe.CX_report.txt", full.names=TRUE)
cvg <- dtp <- data.frame()
for(i in 1:length(files)){
res <- fread("largedata/wgbs_bismark/JRA2_pe.CX_report.txt")
cg <- res[V6 == "CG"]
chg <- res[V6 == "CHG"]
chh <- res[V6 == "CHH"]
rm(list="res")
cg$tot <- cg$V4 + cg$V5
chg$tot <- chg$V4 + chg$V5
chh$tot <- chh$V4 + chh$V5
tem <- data.frame(cg=cg[,sum(tot == 0)]/nrow(cg),
chg=chg[,sum(tot == 0)]/nrow(chg),
chh=chh[,sum(tot == 0)]/nrow(chh))
}
### CG, CHG and CHH
###180125000, 158277169, 624401016
# Per-sample summary table produced upstream: coverage (cov_*), methylation
# ratio (ratio_*) and covered-site counts (tot_*) for each context.
res <- read.table("data/res.txt", header=FALSE)
names(res) <- c("sample", "cov_CG", "cov_CHG", "cov_CHH", "ratio_CG", "ratio_CHG", "ratio_CHH",
"tot_CG", "tot_CHG", "tot_CHH")
# strip the directory prefix and "_methratio..." suffix to get the sample id
res$sample <- gsub(".*/|_methratio.*", "", res$sample)
# normalise covered-site counts by the constants listed in the comment above
# (presumably the genome-wide numbers of CG/CHG/CHH sites -- TODO confirm)
res$tot_CG <- res$tot_CG/180125000
res$tot_CHG <- res$tot_CHG/158277169
res$tot_CHH <- res$tot_CHH/624401016
#library(tidyr)
library(reshape2)
# long format: one row per (sample, statistic); then split names such as
# "cov_CG" into statistic type ("cov"/"ratio"/"tot") and context ("CG" etc.)
resl <- melt(res, id.vars="sample")
resl$variable <- as.character(resl$variable)
resl$type <- gsub("_.*", "", resl$variable)
resl$context <- gsub(".*_", "", resl$variable)
# p1: boxplot of sequencing depth per cytosine site, by context
p1 <- ggplot(subset(resl, type=="cov"), aes(x=context, y=value, fill=context)) +
geom_boxplot() +
theme_bw() +
theme(plot.title = element_text(color="red", size=20, face="bold.italic"),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=13),
axis.title = element_text(size=18, face="bold")) +
#scale_fill_manual(values=c("#008080", "#003366", "#40e0d0")) +
ggtitle("Sequencing Depth") + xlab("") + ylab("Depth per cytosine site") +
guides(fill=FALSE)
#guides(colour=FALSE, linetype=FALSE)
# p2: boxplot of the mean methylation (C/CT) ratio, by context
p2 <- ggplot(subset(resl, type=="ratio"), aes(x=context, y=value, fill=context)) +
geom_boxplot() +
theme_bw() +
theme(plot.title = element_text(color="red", size=20, face="bold.italic"),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=13),
axis.title = element_text(size=18, face="bold")) +
#scale_fill_manual(values=c("#008080", "#003366", "#40e0d0")) +
ggtitle("Methylation Ratio") + xlab("") + ylab("Mean C/CT Ratio") +
guides(fill=FALSE)
# p3: boxplot of the fraction of possible cytosine sites that were covered,
# by context (resl rows with type == "tot")
p3 <- ggplot(subset(resl, type=="tot"), aes(x=context, y=value, fill=context)) +
geom_boxplot() +
theme_bw() +
theme(plot.title = element_text(color="red", size=20, face="bold.italic"),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=13),
axis.title = element_text(size=18, face="bold")) +
#scale_fill_manual(values=c("#008080", "#003366", "#40e0d0")) +
ggtitle("Coverage of cytosine sites") + xlab("") + ylab("covered / possible C sites") +
guides(fill=FALSE)
#guides(colour=FALSE, linetype=FALSE)
# external helper that arranges multiple ggplots on one page
source("~/Documents/Github/zmSNPtools/Rcodes/multiplot.R")
#multiplot(p1, p4, p2, p5, p3, p6, cols=3)
# render the coverage, depth and ratio panels side by side into a PDF
pdf("graphs/stat.pdf", width=16, height=5)
multiplot(p3, p1, p2, cols=3)
dev.off()
|
# loadDataset.R defines 'data' (household power consumption with a Datetime
# column) used below -- see that script for the parsing details
source("loadDataset.R")
# line chart of global active power over time; x label suppressed to match
# the reference figure
plot(data$Datetime,
data$Global_active_power,
type = "l",
ylab = "Global Active Power (kilowatts)",
xlab = ""
)
#Save the plot as png file
dev.copy(png, "plot2.png", width = 480, height = 480, units = "px")
dev.off() | /plot2.R | no_license | antfranzoso/ExData_Plotting1 | R | false | false | 267 | r | source("loadDataset.R")
# line chart of global active power over time ('data' comes from the
# loadDataset.R script sourced just above); x label left empty on purpose
plot(data$Datetime,
data$Global_active_power,
type = "l",
ylab = "Global Active Power (kilowatts)",
xlab = ""
)
#Save the plot as png file
# copy the on-screen device to a 480x480 PNG, then close the PNG device
dev.copy(png, "plot2.png", width = 480, height = 480, units = "px")
dev.off()
library(ggplot2)
library(wordcloud)
library(wordcloud2)
library(tm)
library(RColorBrewer)
library(tidyverse)
# lemmatised clinical-notes dataset; TARGET marks readmission cases,
# DAYS_NEXT_UADM is days to the next unplanned admission
df <- read.csv('/mnt/3602F83B02F80223/Downloads/imperial/Sem 2 - Machine Learning/Project/Data/lemma_dfUadm.csv')
# distribution of days-to-readmission on a log x scale
ggplot(df, aes(x=DAYS_NEXT_UADM)) +
geom_histogram() +
scale_x_log10()
# zoom on the right tail with ~monthly bins
ggplot(df, aes(x=DAYS_NEXT_UADM)) +
geom_histogram(binwidth = 30) +
xlim(3365, 4500)
#### wordcloud viz ####
# split records by outcome label
cases <- df %>%
filter(TARGET == 'True')
controls <- df %>%
filter(TARGET == 'False')
# term-frequency table over ALL notes; drop terms absent from >=95% of docs
docs_full <- Corpus(VectorSource(df$TEXT_CONCAT))
dtm_full <- TermDocumentMatrix(docs_full)
sparse_full <- removeSparseTerms(dtm_full, 0.95)
matrix_full <- as.matrix(sparse_full)
words_full <- sort(rowSums(matrix_full),decreasing=TRUE)
df_full <- data.frame(word = names(words_full),freq=words_full)
# cases wordcloud: term-frequency table over the notes of readmitted
# patients, then a word cloud of the commonest terms.
docs <- Corpus(VectorSource(cases$TEXT_CONCAT))
dtm <- TermDocumentMatrix(docs)
matrix <- as.matrix(dtm)
words <- sort(rowSums(matrix), decreasing=TRUE)
df_cases <- data.frame(word = names(words), freq = words)
# drop terms with frequency < 5 for visualisation
df_cases_top <- df_cases %>%
  filter(freq >= 5)
# high-frequency subset used by the wordcloud2 plots further below
df_cases_top2 <- df_cases %>%
  filter(freq > 1000)
# 'words' and 'freq' must be parallel vectors from the SAME frame.  The
# original paired df_cases_top$word with frequencies from the unfiltered
# df_cases, mismatching every word with the wrong count.
wordcloud(words = df_cases_top$word,
          freq = df_cases_top$freq, min.freq = 3,
          max.words=2000,
          random.order=TRUE,
          rot.per=0.35,
          colors=brewer.pal(8, "Oranges")
)
# mask image used to shape the wordcloud2 rendering
cross_fig = '/mnt/3602F83B02F80223/Downloads/imperial/Sem 2 - Machine Learning/Project/Data/wordcloud_mask/medical-cross-symbol.png'
wordcloud2(data=df_cases, size=1, figPath = cross_fig, color='random-dark')
wordcloud2(data=df_cases_top2, size=1, color='orange', backgroundColor='#EEECE2')
#gridSize=c(500,300))
# demoFreq is the example dataset shipped with wordcloud2; these two calls
# look like rendering experiments
wordcloud2(demoFreq, figPath = cross_fig, size = 1.5, color = "skyblue", backgroundColor="black")
wordcloud2(demoFreq, size = 0.7, shape = 'star')
# NOTE(review): 'df' here is the full clinical data frame, not a word/freq
# table -- presumably leftover demo calls; verify before relying on them
wordcloud2(data=df, size=1.6, color='random-light', backgroundColor = 'black')
wordcloud2(data=df, size=1.6, color='random-dark')
# controls wordcloud (the original comment said "cases"; this block
# processes the controls subset)
docs_controls <- Corpus(VectorSource(controls$TEXT_CONCAT))
dtm_controls <- TermDocumentMatrix(docs_controls)
sparse_controls <- removeSparseTerms(dtm_controls, 0.95) # save memory
matrix_controls <- as.matrix(sparse_controls)
words_controls <- sort(rowSums(matrix_controls),decreasing=TRUE)
df_controls <- data.frame(word = names(words_controls),freq=words_controls)
# keep only high-frequency terms (> 1000; the old comment said "< 5")
df_controls_top <- df_controls %>%
filter(freq > 1000)
wordcloud2(data=df_controls_top, size=1, color='skyblue', backgroundColor='#EEECE2')
| /Scripts/data_processing/descriptive_statistics_vizualization.R | no_license | dcstang/readmissionBERT | R | false | false | 2,551 | r | library(ggplot2)
library(wordcloud)
library(wordcloud2)
library(tm)
library(RColorBrewer)
library(tidyverse)
df <- read.csv('/mnt/3602F83B02F80223/Downloads/imperial/Sem 2 - Machine Learning/Project/Data/lemma_dfUadm.csv')
ggplot(df, aes(x=DAYS_NEXT_UADM)) +
geom_histogram() +
scale_x_log10()
ggplot(df, aes(x=DAYS_NEXT_UADM)) +
geom_histogram(binwidth = 30) +
xlim(3365, 4500)
#### wordcloud viz ####
cases <- df %>%
filter(TARGET == 'True')
controls <- df %>%
filter(TARGET == 'False')
docs_full <- Corpus(VectorSource(df$TEXT_CONCAT))
dtm_full <- TermDocumentMatrix(docs_full)
sparse_full <- removeSparseTerms(dtm_full, 0.95)
matrix_full <- as.matrix(sparse_full)
words_full <- sort(rowSums(matrix_full),decreasing=TRUE)
df_full <- data.frame(word = names(words_full),freq=words_full)
# cases wordcloud
docs <- Corpus(VectorSource(cases$TEXT_CONCAT))
dtm <- TermDocumentMatrix(docs)
matrix <- as.matrix(dtm)
words <- sort(rowSums(matrix),decreasing=TRUE)
df_cases <- data.frame(word = names(words),freq=words)
# drop less than 5 freq for viz
df_cases_top <- df_cases %>%
filter(freq >= 5)
df_cases_top2 <- df_cases %>%
filter(freq > 1000)
# Fix: 'freq' must come from the same (filtered) frame as 'words'; the
# original paired df_cases_top$word with frequencies from the unfiltered
# df_cases, mispairing every word with the wrong count.
wordcloud(words = df_cases_top$word,
          freq = df_cases_top$freq, min.freq = 3,
          max.words=2000,
          random.order=TRUE,
          rot.per=0.35,
          colors=brewer.pal(8, "Oranges")
)
cross_fig = '/mnt/3602F83B02F80223/Downloads/imperial/Sem 2 - Machine Learning/Project/Data/wordcloud_mask/medical-cross-symbol.png'
wordcloud2(data=df_cases, size=1, figPath = cross_fig, color='random-dark')
wordcloud2(data=df_cases_top2, size=1, color='orange', backgroundColor='#EEECE2')
#gridSize=c(500,300))
wordcloud2(demoFreq, figPath = cross_fig, size = 1.5, color = "skyblue", backgroundColor="black")
wordcloud2(demoFreq, size = 0.7, shape = 'star')
wordcloud2(data=df, size=1.6, color='random-light', backgroundColor = 'black')
wordcloud2(data=df, size=1.6, color='random-dark')
# cases wordcloud
docs_controls <- Corpus(VectorSource(controls$TEXT_CONCAT))
dtm_controls <- TermDocumentMatrix(docs_controls)
sparse_controls <- removeSparseTerms(dtm_controls, 0.95) # save memory
matrix_controls <- as.matrix(sparse_controls)
words_controls <- sort(rowSums(matrix_controls),decreasing=TRUE)
df_controls <- data.frame(word = names(words_controls),freq=words_controls)
# drop less than 5 freq for viz
df_controls_top <- df_controls %>%
filter(freq > 1000)
wordcloud2(data=df_controls_top, size=1, color='skyblue', backgroundColor='#EEECE2')
|
## makeCacheMatrix / cacheSolve implement a matrix whose inverse is computed
## once and then served from a cache, avoiding repeated calls to solve().

## Create a matrix wrapper able to cache its inverse.
##
## Returns a list of four accessor closures sharing one environment:
##   setMatrix(y)     store a new matrix and invalidate the cached inverse
##   getMatrix()      return the stored matrix
##   setInverse(inv)  store a computed inverse in the cache
##   getInverse()     return the cached inverse, or NULL if none is cached
makeCacheMatrix <- function(x = matrix()) {
    cached_inv <- NULL
    setMatrix <- function(y) {
        # replacing the matrix invalidates any previously cached inverse
        x <<- y
        cached_inv <<- NULL
    }
    getMatrix <- function() x
    setInverse <- function(inverse) cached_inv <<- inverse
    getInverse <- function() cached_inv
    list(setMatrix = setMatrix,
         getMatrix = getMatrix,
         setInverse = setInverse,
         getInverse = getInverse)
}
## Return the inverse of a "cache matrix" object 'x' created by
## makeCacheMatrix.  The inverse is computed with solve() on the first call
## and served from the cache afterwards; '...' is forwarded to solve().
cacheSolve <- function(x, ...) {
    cached <- x$getInverse()
    if (is.null(cached)) {
        # cache miss: compute the inverse, store it, and return it
        inv <- solve(x$getMatrix(), ...)
        x$setInverse(inv)
        return(inv)
    }
    message("getting cached data")
    cached
}
| /cachematrix.R | no_license | LeifuChen/ProgrammingAssignment2 | R | false | false | 1,115 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: wrap a matrix in a list of closures so that its inverse
## can be cached; 'm' holds the cached inverse (NULL until set).
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
#function to set the value of the object and invalidate the cached inverse
setMatrix <- function(y) {
x <<- y
m <<- NULL
}
#function to extract the stored matrix
getMatrix <- function() x
#function to store the computed inverse for future use (the old comment
#wrongly said "mean" -- leftover from the makeVector example)
setInverse <- function(inverse) m <<- inverse
#function to extract the cached inverse (NULL if not yet computed)
getInverse <- function() m
list(setMatrix = setMatrix, getMatrix = getMatrix,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve: return the inverse of the cache-matrix object 'x' built by
## makeCacheMatrix, computing it with solve() only on a cache miss.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
#if the inverse is already cached, return it directly
if(!is.null(m)) {
message("getting cached data")
return(m)
}
#cache miss: compute the inverse, store it, and return it ('...' is
#forwarded to solve())
data <- x$getMatrix()
m <- solve(data, ...)
x$setInverse(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/info.R
\name{info}
\alias{info}
\title{Print package info}
\usage{
info()
}
\description{
Print package info
}
| /man/info.Rd | no_license | zeburek/r-info-pkg | R | false | true | 189 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/info.R
\name{info}
\alias{info}
\title{Print package info}
\usage{
info()
}
\description{
Print package info
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GermanCredit.R
\docType{data}
\name{GermanCredit}
\alias{GermanCredit}
\title{Statlog (German Credit Data) Data Set}
\format{A data frame with 1000 rows and 21 variables}
\usage{
data(GermanCredit)
}
\description{
This dataset classifies people described by a set of attributes as good or bad credit risks.
The variables are as follows:
}
\details{
\itemize{
\item Credit. Target variable
\item balance_credit_acc. Status of existing checking account
\item duration. Duration in month
\item moral. Credit history
\item verw. Purpose
\item hoehe. Credit amount
\item sparkont. Savings account/bonds
\item beszeit. Present employment since
\item rate. Installment rate in percentage of disposable income
\item famges. Personal status and sex
\item buerge. Other debtors / guarantors
\item wohnzeit. Present residence since
\item verm. Property
\item alter. Age in years
\item weitkred. Other installment plans
\item wohn. Housing
\item bishkred. Number of existing credits at this bank
\item beruf. Job
\item pers. Number of people being liable to provide maintenance for
\item telef. Telephone
\item gastarb. Foreign worker
}
}
\keyword{datasets}
| /man/GermanCredit.Rd | no_license | afmoebius1/featureCorMatrix | R | false | true | 1,266 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GermanCredit.R
\docType{data}
\name{GermanCredit}
\alias{GermanCredit}
\title{Statlog (German Credit Data) Data Set}
\format{A data frame with 1000 rows and 21 variables}
\usage{
data(GermanCredit)
}
\description{
This dataset classifies people described by a set of attributes as good or bad credit risks.
The variables are as follows:
}
\details{
\itemize{
\item Credit. Target variable
\item balance_credit_acc. Status of existing checking account
\item duration. Duration in month
\item moral. Credit history
\item verw. Purpose
\item hoehe. Credit amount
\item sparkont. Savings account/bonds
\item beszeit. Present employment since
\item rate. Installment rate in percentage of disposable income
\item famges. Personal status and sex
\item buerge. Other debtors / guarantors
\item wohnzeit. Present residence since
\item verm. Property
\item alter. Age in years
\item weitkred. Other installment plans
\item wohn. Housing
\item bishkred. Number of existing credits at this bank
\item beruf. Job
\item pers. Number of people being liable to provide maintenance for
\item telef. Telephone
\item gastarb. Foreign worker
}
}
\keyword{datasets}
|
Precip_mergeGetInfoALL <- function(){
listOpenFiles <- openFile_ttkcomboList()
if(WindowsOS()){
largeur0 <- 19
largeur1 <- 42
largeur2 <- 45
largeur3 <- 27
largeur4 <- 28
largeur5 <- 32
}else{
largeur0 <- 16
largeur1 <- 38
largeur2 <- 39
largeur3 <- 21
largeur4 <- 17
largeur5 <- 22
}
# xml.dlg <- file.path(.cdtDir$dirLocal, "languages", "cdtPrecip_MergingALL_dlgBox.xml")
# lang.dlg <- cdtLanguageParse(xml.dlg, .cdtData$Config$lang.iso)
####################################
tt <- tktoplevel()
tkgrab.set(tt)
tkfocus(tt)
frMRG0 <- tkframe(tt, relief = 'raised', borderwidth = 2, padx = 3, pady = 3)
frMRG1 <- tkframe(tt)
####################################
bwnote <- bwNoteBook(frMRG0)
conf.tab1 <- bwAddTab(bwnote, text = "Input")
conf.tab2 <- bwAddTab(bwnote, text = "Merging")
conf.tab3 <- bwAddTab(bwnote, text = "Bias Coeff")
conf.tab4 <- bwAddTab(bwnote, text = "LM Coeff")
conf.tab5 <- bwAddTab(bwnote, text = "Output")
bwRaiseTab(bwnote, conf.tab1)
tkgrid.columnconfigure(conf.tab1, 0, weight = 1)
tkgrid.columnconfigure(conf.tab2, 0, weight = 1)
tkgrid.columnconfigure(conf.tab3, 0, weight = 1)
tkgrid.columnconfigure(conf.tab4, 0, weight = 1)
tkgrid.columnconfigure(conf.tab5, 0, weight = 1)
####################################
frTab1 <- tkframe(conf.tab1)
####################################
frtimestep <- tkframe(frTab1, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
file.period <- tclVar()
CbperiodVAL <- .cdtEnv$tcl$lang$global[['combobox']][['1']][2:5]
periodVAL <- c('daily', 'pentad', 'dekadal', 'monthly')
tclvalue(file.period) <- CbperiodVAL[periodVAL %in% .cdtData$GalParams$period]
txtdek <- switch(.cdtData$GalParams$period, 'dekadal' = 'Dekad', 'pentad' = 'Pentad', 'Day')
day.txtVar <- tclVar(txtdek)
statedate <- if(.cdtData$GalParams$period == 'monthly') 'disabled' else 'normal'
cb.period <- ttkcombobox(frtimestep, values = CbperiodVAL, textvariable = file.period, width = largeur0)
bt.DateRange <- ttkbutton(frtimestep, text = "Set Date Range")
tkconfigure(bt.DateRange, command = function(){
.cdtData$GalParams[["Merging.Date"]] <- getInfoDateRange(.cdtEnv$tcl$main$win,
.cdtData$GalParams[["Merging.Date"]],
daypendek.lab = tclvalue(day.txtVar),
state.dek = statedate)
})
tkgrid(cb.period, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.DateRange, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.period, 'Select the time step of the data')
status.bar.display(cb.period, 'Select the time step of the data')
infobulle(bt.DateRange, 'Set the start and end date to merge RFE data')
status.bar.display(bt.DateRange, 'Set the start and end date to merge RFE data')
###########
tkbind(cb.period, "<<ComboboxSelected>>", function(){
tclvalue(day.txtVar) <- ifelse(str_trim(tclvalue(file.period)) == CbperiodVAL[3], 'Dekad',
ifelse(str_trim(tclvalue(file.period)) == CbperiodVAL[2], 'Pentad', 'Day'))
statedate <<- if(str_trim(tclvalue(file.period)) == CbperiodVAL[4]) 'disabled' else 'normal'
})
####################################
frInputData <- tkframe(frTab1, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
file.stnfl <- tclVar(.cdtData$GalParams$STN.file)
dir.RFE <- tclVar(.cdtData$GalParams$RFE$dir)
txt.stnfl <- tklabel(frInputData, text = 'Station data file', anchor = 'w', justify = 'left')
cb.stnfl <- ttkcombobox(frInputData, values = unlist(listOpenFiles), textvariable = file.stnfl, width = largeur1)
bt.stnfl <- tkbutton(frInputData, text = "...")
txt.RFE <- tklabel(frInputData, text = 'Directory containing RFE data', anchor = 'w', justify = 'left')
set.RFE <- ttkbutton(frInputData, text = .cdtEnv$tcl$lang$global[['button']][['5']])
en.RFE <- tkentry(frInputData, textvariable = dir.RFE, width = largeur2)
bt.RFE <- tkbutton(frInputData, text = "...")
######
tkconfigure(bt.stnfl, command = function(){
dat.opfiles <- getOpenFiles(tt)
if(!is.null(dat.opfiles)){
update.OpenFiles('ascii', dat.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- dat.opfiles[[1]]
tclvalue(file.stnfl) <- dat.opfiles[[1]]
lapply(list(cb.stnfl, cb.grddem, cb.blkshp), tkconfigure, values = unlist(listOpenFiles))
}
})
tkconfigure(set.RFE, command = function(){
.cdtData$GalParams[["RFE"]] <- getInfoNetcdfData(tt, .cdtData$GalParams[["RFE"]],
str_trim(tclvalue(dir.RFE)), str_trim(tclvalue(file.period)))
})
tkconfigure(bt.RFE, command = function(){
dirrfe <- tk_choose.dir(getwd(), "")
tclvalue(dir.RFE) <- if(!is.na(dirrfe)) dirrfe else ""
})
######
tkgrid(txt.stnfl, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(cb.stnfl, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.stnfl, row = 1, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(txt.RFE, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 3, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(set.RFE, row = 2, column = 3, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(en.RFE, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.RFE, row = 3, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
infobulle(cb.stnfl, 'Select the file from the list')
status.bar.display(cb.stnfl, 'Select the file containing the gauge data')
infobulle(bt.stnfl, 'Browse file if not listed')
status.bar.display(bt.stnfl, 'Browse file if not listed')
infobulle(en.RFE, 'Enter the full path to the directory containing the RFE data')
status.bar.display(en.RFE, 'Enter the full path to the directory containing the RFE data')
infobulle(bt.RFE, 'Or browse here')
status.bar.display(bt.RFE, 'Or browse here')
infobulle(set.RFE, 'Setting netcdf data options')
status.bar.display(set.RFE, 'Setting netcdf data options')
####################################
frDEM <- tkframe(frTab1, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
file.grddem <- tclVar(.cdtData$GalParams$DEM.file)
statedem <- if((!.cdtData$GalParams$BIAS$deja.calc &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(.cdtData$GalParams$Merging$mrg.method == "Spatio-Temporal LM" &
!.cdtData$GalParams$LMCOEF$deja.calc &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
.cdtData$GalParams$blank$blank == "2") 'normal' else 'disabled'
txt.grddem <- tklabel(frDEM, text = "Elevation data (NetCDF)", anchor = 'w', justify = 'left')
cb.grddem <- ttkcombobox(frDEM, values = unlist(listOpenFiles), textvariable = file.grddem, state = statedem, width = largeur1)
bt.grddem <- tkbutton(frDEM, text = "...", state = statedem)
tkconfigure(bt.grddem, command = function(){
nc.opfiles <- getOpenNetcdf(tt, initialdir = getwd())
if(!is.null(nc.opfiles)){
update.OpenFiles('netcdf', nc.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- nc.opfiles[[1]]
tclvalue(file.grddem) <- nc.opfiles[[1]]
lapply(list(cb.stnfl, cb.grddem, cb.blkshp), tkconfigure, values = unlist(listOpenFiles))
}
})
tkgrid(txt.grddem, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.grddem, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.grddem, row = 1, column = 1, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.grddem, 'Select the file in the list')
status.bar.display(cb.grddem, 'File containing the elevation data in netcdf')
infobulle(bt.grddem, 'Browse file if not listed')
status.bar.display(bt.grddem, 'Browse file if not listed')
####################################
tkgrid(frtimestep, row = 0, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frInputData, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frDEM, row = 2, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
frTab2 <- tkframe(conf.tab2)
####################################
frMrg <- tkframe(frTab2, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
cb.MrgMthd <- c("Regression Kriging", "Spatio-Temporal LM", "Simple Bias Adjustment")
mrg.method <- tclVar(str_trim(.cdtData$GalParams$Merging$mrg.method))
mrg.min.stn <- tclVar(.cdtData$GalParams$Merging$min.stn)
mrg.min.non.zero <- tclVar(.cdtData$GalParams$Merging$min.non.zero)
txt.mrg <- tklabel(frMrg, text = 'Merging method', anchor = 'w', justify = 'left')
cb.mrg <- ttkcombobox(frMrg, values = cb.MrgMthd, textvariable = mrg.method, width = largeur4)
bt.mrg.interp <- ttkbutton(frMrg, text = "Merging Interpolations Parameters")
txt.min.nbrs.stn <- tklabel(frMrg, text = 'Min.Nb.Stn', anchor = 'e', justify = 'right')
en.min.nbrs.stn <- tkentry(frMrg, width = 4, textvariable = mrg.min.stn, justify = 'right')
txt.min.non.zero <- tklabel(frMrg, text = 'Min.No.Zero', anchor = 'e', justify = 'right')
en.min.non.zero <- tkentry(frMrg, width = 4, textvariable = mrg.min.non.zero, justify = 'right')
tkconfigure(bt.mrg.interp, command = function(){
.cdtData$GalParams[["Merging"]] <- getInterpolationPars(tt, .cdtData$GalParams[["Merging"]], interpChoix = 0)
})
tkgrid(txt.mrg, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.mrg, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.mrg.interp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.min.nbrs.stn, row = 2, column = 0, sticky = 'e', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.min.nbrs.stn, row = 2, column = 2, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.min.non.zero, row = 2, column = 3, sticky = 'e', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.min.non.zero, row = 2, column = 5, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.mrg, 'Method to be used to perform merging')
status.bar.display(cb.mrg, 'Method to be used to perform merging')
infobulle(en.min.nbrs.stn, 'Minimum number of gauges with data to be used to do the merging')
status.bar.display(en.min.nbrs.stn, 'Minimum number of gauges with data to be used to do the merging')
infobulle(en.min.non.zero, 'Minimum number of non-zero gauge values to perform the merging')
status.bar.display(en.min.non.zero, 'Minimum number of non-zero gauge values to perform the merging')
###############
tkbind(cb.mrg, "<<ComboboxSelected>>", function(){
stateLMCoef1 <- if(tclvalue(mrg.method) == "Spatio-Temporal LM") 'normal' else 'disabled'
stateLMCoef2 <- if(tclvalue(mrg.method) == "Spatio-Temporal LM" & tclvalue(lmcoef.calc) == "0") 'normal' else 'disabled'
stateLMCoef3 <- if(tclvalue(mrg.method) == "Spatio-Temporal LM" & tclvalue(lmcoef.calc) == "1") 'normal' else 'disabled'
tkconfigure(chk.LMCoef, state = stateLMCoef1)
tkconfigure(bt.baseLM, state = stateLMCoef2)
tkconfigure(bt.LMCoef.interp, state = stateLMCoef2)
tkconfigure(en.LMCoef.dir, state = stateLMCoef3)
tkconfigure(bt.LMCoef.dir, state = stateLMCoef3)
statedem <- if((tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
####################################
frRnoR <- tkframe(frTab2, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
use.RnoR <- tclVar(.cdtData$GalParams$RnoR$use.RnoR)
maxdist.RnoR <- tclVar(.cdtData$GalParams$RnoR$maxdist.RnoR)
smooth.RnoR <- tclVar(.cdtData$GalParams$RnoR$smooth.RnoR)
stateRnoR <- if(.cdtData$GalParams$RnoR$use.RnoR) 'normal' else 'disabled'
########
txt.mrg.pars <- tklabel(frRnoR, text = 'Rain-no-Rain mask', anchor = 'w', justify = 'left')
chk.use.rnr <- tkcheckbutton(frRnoR, variable = use.RnoR, text = 'Apply Rain-no-Rain mask', anchor = 'w', justify = 'left')
txt.maxdist.rnr <- tklabel(frRnoR, text = 'maxdist.RnoR', anchor = 'e', justify = 'right')
en.maxdist.rnr <- tkentry(frRnoR, width = 4, textvariable = maxdist.RnoR, justify = 'right', state = stateRnoR)
chk.smooth.rnr <- tkcheckbutton(frRnoR, variable = smooth.RnoR, text = 'Smooth Rain-no-Rain mask', anchor = 'w', justify = 'left', state = stateRnoR)
tkgrid(txt.mrg.pars, row = 0, column = 0, sticky = '', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(chk.use.rnr, row = 1, column = 0, sticky = 'ew', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.maxdist.rnr, row = 2, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.maxdist.rnr, row = 2, column = 1, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(chk.smooth.rnr, row = 3, column = 0, sticky = 'ew', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(chk.use.rnr, 'Check this box to apply a mask over no rain area')
status.bar.display(chk.use.rnr, 'Check this box to apply a mask over no rain area')
infobulle(en.maxdist.rnr, 'Maximum distance (in decimal degrees) to be used to interpolate Rain-noRain mask')
status.bar.display(en.maxdist.rnr, 'Maximum distance (in decimal degrees) to be used to interpolate Rain-noRain mask')
infobulle(chk.smooth.rnr, 'Check this box to smooth the gradient between high value and no rain area')
status.bar.display(chk.smooth.rnr, 'Check this box to smooth the gradient between high value and no rain area')
tkbind(chk.use.rnr, "<Button-1>", function(){
stateRnoR <- if(tclvalue(use.RnoR) == '0') 'normal' else 'disabled'
tkconfigure(en.maxdist.rnr, state = stateRnoR)
tkconfigure(chk.smooth.rnr, state = stateRnoR)
})
####################################
tkgrid(frMrg, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frRnoR, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab2, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
frTab3 <- tkframe(conf.tab3)
####################################
frameBias <- tkframe(frTab3, relief = 'sunken', borderwidth = 2, padx = 5, pady = 5)
cb.biasMthd <- c("Quantile.Mapping", "Multiplicative.Bias.Var", "Multiplicative.Bias.Mon")
bias.method <- tclVar(str_trim(.cdtData$GalParams$BIAS$bias.method))
txt.bias <- tklabel(frameBias, text = 'Bias method', anchor = 'w', justify = 'left')
cb.bias <- ttkcombobox(frameBias, values = cb.biasMthd, textvariable = bias.method, width = largeur3)
tkgrid(txt.bias, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.bias, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.bias, 'Select the method to be used to calculate the Bias Factors or Parameters')
status.bar.display(cb.bias, 'Select the method to be used to calculate the Bias Factors or Parameters')
####################################
frameBiasSet <- tkframe(frTab3, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
bias.calc <- tclVar(.cdtData$GalParams$BIAS$deja.calc)
statebias1 <- if(.cdtData$GalParams$BIAS$deja.calc) 'disabled' else 'normal'
chk.bias <- tkcheckbutton(frameBiasSet, variable = bias.calc, text = "Bias factors are already calculated", anchor = 'w', justify = 'left', background = 'lightblue')
bt.baseBias <- ttkbutton(frameBiasSet, text = "Set Bias Base Period", state = statebias1)
bt.bias.interp <- ttkbutton(frameBiasSet, text = "Bias Interpolations Parameters", state = statebias1)
tkconfigure(bt.baseBias, command = function(){
.cdtData$GalParams[["BIAS"]] <- getInfoBasePeriod(tt, .cdtData$GalParams[["BIAS"]])
})
tkconfigure(bt.bias.interp, command = function(){
.cdtData$GalParams[["BIAS"]] <- getInterpolationPars(tt, .cdtData$GalParams[["BIAS"]], interpChoix = 1)
statedem <- if((tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
tkgrid(chk.bias, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.baseBias, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.bias.interp, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(chk.bias, 'Check this box if the bias factors or parameters are already calculated')
status.bar.display(chk.bias, 'Check this box if the bias factors or parameters are already calculated')
# Tooltip + status-bar help for the bias base-period button.
# (infobulle/status.bar.display are CDT helpers defined elsewhere — presumably
# tooltip and status-bar text; confirm in the CDT utility sources.)
infobulle(bt.baseBias, 'Set the base period to be used to compute bias factors')
status.bar.display(bt.baseBias, 'Set the base period to be used to compute bias factors')
###############
# Toggle handler for the "bias already calculated" checkbutton.
# NOTE: <Button-1> fires BEFORE the tcl variable is toggled, so the tests below
# are inverted relative to the post-click value: bias.calc == '1' here means the
# user is about to UNCHECK the box (bias will need computing), which enables the
# base-period / interpolation buttons; '0' means it is about to be checked,
# enabling the bias-directory entry instead.
tkbind(chk.bias, "<Button-1>", function(){
statebias1 <- if(tclvalue(bias.calc) == '1') 'normal' else 'disabled'
statebias2 <- if(tclvalue(bias.calc) == '0') 'normal' else 'disabled'
tkconfigure(bt.baseBias, state = statebias1)
tkconfigure(bt.bias.interp, state = statebias1)
tkconfigure(en.bias.dir, state = statebias2)
tkconfigure(bt.bias.dir, state = statebias2)
# The DEM selector is needed when any consumer requires elevation data:
# bias computed here with NN interpolation, Spatio-Temporal LM coefficients
# computed here with NN interpolation, or blanking by DEM.
# (bias.calc is compared against "1" — the pre-toggle value — see note above.)
statedem <- if((tclvalue(bias.calc) == "1" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
####################################
# Frame: directory containing pre-computed bias files.
# Enabled only when the bias factors are flagged as already calculated.
frameBiasDir <- tkframe(frTab3, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
bias.dir <- tclVar(.cdtData$GalParams$BIAS$dir.Bias)
statebias2 <- if(.cdtData$GalParams$BIAS$deja.calc) 'normal' else 'disabled'
txt.bias.dir <- tklabel(frameBiasDir, text = "Directory of bias files", anchor = 'w', justify = 'left')
en.bias.dir <- tkentry(frameBiasDir, textvariable = bias.dir, state = statebias2, width = largeur2)
bt.bias.dir <- tkbutton(frameBiasDir, text = "...", state = statebias2)
# "..." browse button: open a directory chooser; keep "" if the user cancels.
tkconfigure(bt.bias.dir, command = function(){
dirbias <- tk_choose.dir(getwd(), "")
tclvalue(bias.dir) <- if(!is.na(dirbias)) dirbias else ""
})
tkgrid(txt.bias.dir, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.bias.dir, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.bias.dir, row = 1, column = 5, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(en.bias.dir, 'Enter the full path to directory containing the bias files')
status.bar.display(en.bias.dir, 'Enter the full path to directory containing the bias files')
####################################
# Stack the three bias-related frames in the "Bias Coeff" tab.
tkgrid(frameBias, row = 0, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameBiasSet, row = 1, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameBiasDir, row = 2, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab3, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
# Tab 4: linear-model (LM) coefficients used by the "Spatio-Temporal LM"
# merging method. All widgets here are only active when that method is chosen.
frTab4 <- tkframe(conf.tab4)
####################################
frLMCoef <- tkframe(frTab4, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
lmcoef.calc <- tclVar(.cdtData$GalParams$LMCOEF$deja.calc)
# stateLMCoef1: checkbutton itself (needs Spatio-Temporal LM).
# stateLMCoef2: "compute" widgets (LM method AND coefficients not yet computed).
# stateLMCoef3: "already computed" widgets (LM method AND deja.calc).
stateLMCoef1 <- if(str_trim(.cdtData$GalParams$Merging$mrg.method) == "Spatio-Temporal LM") 'normal' else 'disabled'
stateLMCoef2 <- if(str_trim(.cdtData$GalParams$Merging$mrg.method) == "Spatio-Temporal LM" & !.cdtData$GalParams$LMCOEF$deja.calc) 'normal' else 'disabled'
stateLMCoef3 <- if(str_trim(.cdtData$GalParams$Merging$mrg.method) == "Spatio-Temporal LM" & .cdtData$GalParams$LMCOEF$deja.calc) 'normal' else 'disabled'
chk.LMCoef <- tkcheckbutton(frLMCoef, variable = lmcoef.calc, text = "LMCoef are already calculated", state = stateLMCoef1, anchor = 'w', justify = 'left', background = 'lightblue')
bt.baseLM <- ttkbutton(frLMCoef, text = "Set LMCoef Base Period", state = stateLMCoef2)
bt.LMCoef.interp <- ttkbutton(frLMCoef, text = "LMCoef Interpolations Parameters", state = stateLMCoef2)
# Open the base-period sub-dialog and store the result back into GalParams.
tkconfigure(bt.baseLM, command = function(){
.cdtData$GalParams[["LMCOEF"]] <- getInfoBasePeriod(tt, .cdtData$GalParams[["LMCOEF"]])
})
# Open the interpolation-parameters sub-dialog, then refresh the DEM selector
# state, since the chosen interpolation method ("NN") may now require a DEM.
tkconfigure(bt.LMCoef.interp, command = function(){
.cdtData$GalParams[["LMCOEF"]] <- getInterpolationPars(tt, .cdtData$GalParams[["LMCOEF"]], interpChoix = 1)
statedem <- if((tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
tkgrid(chk.LMCoef, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.baseLM, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.LMCoef.interp, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(chk.LMCoef, 'Check this box if the linear model coefficients are already calculated')
status.bar.display(chk.LMCoef, 'Check this box if the linear model coefficients are already calculated')
infobulle(bt.baseLM, 'Start and end year to be used to compute LM coefficients')
status.bar.display(bt.baseLM, 'Start and end year to be used to compute LM coefficients')
###############
# Toggle handler for "LMCoef already calculated".
# NOTE: as with chk.bias, <Button-1> fires before the variable flips, so
# lmcoef.calc == '1' here corresponds to the box being UNCHECKED by this click.
tkbind(chk.LMCoef, "<Button-1>", function(){
stateLMCoef2 <- if(tclvalue(lmcoef.calc) == '1' & tclvalue(mrg.method) == "Spatio-Temporal LM") 'normal' else 'disabled'
stateLMCoef3 <- if(tclvalue(lmcoef.calc) == '0' & tclvalue(mrg.method) == "Spatio-Temporal LM") 'normal' else 'disabled'
tkconfigure(bt.baseLM, state = stateLMCoef2)
tkconfigure(bt.LMCoef.interp, state = stateLMCoef2)
tkconfigure(en.LMCoef.dir, state = stateLMCoef3)
tkconfigure(bt.LMCoef.dir, state = stateLMCoef3)
# Refresh DEM selector: lmcoef.calc compared against "1" (pre-toggle value).
statedem <- if((tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "1" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
####################################
# Frame: directory containing pre-computed LM coefficient files
# (active only when the coefficients are flagged as already calculated).
frLMCoefdir <- tkframe(frTab4, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
LMCoef.dir <- tclVar(.cdtData$GalParams$LMCOEF$dir.LMCoef)
txt.LMCoef.dir <- tklabel(frLMCoefdir, text = "Directory of LMCoef files", anchor = 'w', justify = 'left')
en.LMCoef.dir <- tkentry(frLMCoefdir, textvariable = LMCoef.dir, state = stateLMCoef3, width = largeur2)
bt.LMCoef.dir <- tkbutton(frLMCoefdir, text = "...", state = stateLMCoef3)
# Browse button: directory chooser; keep "" on cancel.
tkconfigure(bt.LMCoef.dir, command = function(){
dirLM <- tk_choose.dir(getwd(), "")
tclvalue(LMCoef.dir) <- if(!is.na(dirLM)) dirLM else ""
})
tkgrid(txt.LMCoef.dir, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.LMCoef.dir, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.LMCoef.dir, row = 1, column = 5, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(en.LMCoef.dir, 'Enter the full path to directory containing the LM coefficients files')
status.bar.display(en.LMCoef.dir, 'Enter the full path to directory containing the LM coefficients files')
infobulle(bt.LMCoef.dir, 'or browse here')
status.bar.display(bt.LMCoef.dir, 'or browse here')
####################################
tkgrid(frLMCoef, row = 0, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frLMCoefdir, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab4, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
####################################
# Tab 5 (Output): destination directory, output filename format, and blanking.
frTab5 <- tkframe(conf.tab5)
####################################
frSave <- tkframe(frTab5, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
dir2save <- tclVar(.cdtData$GalParams$output$dir)
outmrgff <- tclVar(.cdtData$GalParams$output$format)
txt.dir2save <- tklabel(frSave, text = 'Directory to save result', anchor = 'w', justify = 'left')
en.dir2save <- tkentry(frSave, textvariable = dir2save, width = largeur2)
bt.dir2save <- tkbutton(frSave, text = "...")
txt.outmrgff <- tklabel(frSave, text = 'Merged data filename format', anchor = 'w', justify = 'left')
en.outmrgff <- tkentry(frSave, textvariable = outmrgff, width = largeur2)
#####
# Browse for the output directory; create it (recursively) if it does not
# exist, and fall back to the previously saved path when the user cancels.
tkconfigure(bt.dir2save, command = function(){
dir2savepth <- tk_choose.dir(.cdtData$GalParams$output$dir, "")
if(is.na(dir2savepth)) tclvalue(dir2save) <- .cdtData$GalParams$output$dir
else{
dir.create(dir2savepth, showWarnings = FALSE, recursive = TRUE)
tclvalue(dir2save) <- dir2savepth
}
})
#####
tkgrid(txt.dir2save, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(en.dir2save, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.dir2save, row = 1, column = 1, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.outmrgff, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.outmrgff, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(en.dir2save, 'Enter the full path to directory to save result')
status.bar.display(en.dir2save, 'Enter the full path to directory to save result')
infobulle(bt.dir2save, 'or browse here')
status.bar.display(bt.dir2save, 'or browse here')
infobulle(en.outmrgff, 'Format of the merged data files names in NetCDF, example: rr_mrg_1981011_ALL.nc')
status.bar.display(en.outmrgff, 'Format of the merged data files names in NetCDF, example: rr_mrg_1981011_ALL.nc')
############################################
# Frame: blanking option. Stored as '1'/'2'/'3' in GalParams, displayed as
# the human-readable labels in cb.blankVAL.
frblank <- tkframe(frTab5, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
blankGrd <- tclVar()
cb.blankVAL <- c("None", "Use DEM", "Use ESRI shapefile")
tclvalue(blankGrd) <- switch(str_trim(.cdtData$GalParams$blank$blank),
'1' = cb.blankVAL[1],
'2' = cb.blankVAL[2],
'3' = cb.blankVAL[3])
txt.blankGrd <- tklabel(frblank, text = 'Blank merged data', anchor = 'w', justify = 'left')
cb.blankGrd <- ttkcombobox(frblank, values = cb.blankVAL, textvariable = blankGrd, width = largeur5)
#####
tkgrid(txt.blankGrd, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.blankGrd, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.blankGrd, 'Blank grid outside the country boundaries or over ocean')
status.bar.display(cb.blankGrd, 'Blank grid outside the country boundaries or over ocean\ngiven by the DEM mask or the shapefile')
############################################
# When the blanking choice changes: enable the shapefile picker only for
# "Use ESRI shapefile", and re-evaluate whether the DEM selector is needed
# (blanking by DEM, or NN interpolation of bias / LM coefficients).
tkbind(cb.blankGrd, "<<ComboboxSelected>>", function(){
stateshp <- if(tclvalue(blankGrd) == 'Use ESRI shapefile') 'normal' else 'disabled'
tkconfigure(cb.blkshp, state = stateshp)
tkconfigure(bt.blkshp, state = stateshp)
statedem <- if(tclvalue(blankGrd) == "Use DEM" |
(tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN")) 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
############################################
############################################
# Frame: ESRI shapefile used for blanking. Active only when the stored
# blanking option is '3' ("Use ESRI shapefile").
frSHP <- tkframe(frTab5, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
file.blkshp <- tclVar(.cdtData$GalParams$blank$SHP.file)
stateshp <- if(str_trim(.cdtData$GalParams$blank$blank) == '3') 'normal' else 'disabled'
txt.blkshp <- tklabel(frSHP, text = "ESRI shapefiles for blanking", anchor = 'w', justify = 'left')
cb.blkshp <- ttkcombobox(frSHP, values = unlist(listOpenFiles), textvariable = file.blkshp, state = stateshp, width = largeur1)
bt.blkshp <- tkbutton(frSHP, text = "...", state = stateshp)
########
# Browse for a shapefile not already open: register it in the open-files
# list and refresh every combobox that shows that list.
tkconfigure(bt.blkshp, command = function(){
shp.opfiles <- getOpenShp(tt)
if(!is.null(shp.opfiles)){
update.OpenFiles('shp', shp.opfiles)
tclvalue(file.blkshp) <- shp.opfiles[[1]]
# <<- : append to listOpenFiles in the enclosing (dialog) environment.
listOpenFiles[[length(listOpenFiles) + 1]] <<- shp.opfiles[[1]]
lapply(list(cb.stnfl, cb.grddem, cb.blkshp), tkconfigure, values = unlist(listOpenFiles))
}
})
#####
tkgrid(txt.blkshp, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.blkshp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.blkshp, row = 1, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.blkshp, 'Select the file in the list')
status.bar.display(cb.blkshp, 'Select the file containing the ESRI shapefiles')
infobulle(bt.blkshp, 'Browse file if not listed')
status.bar.display(bt.blkshp, 'Browse file if not listed')
############################################
# Stack the three output-tab frames.
tkgrid(frSave, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frblank, row = 1, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frSHP, row = 2, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab5, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
####################################
# OK / Cancel buttons. OK validates every required input in sequence; the
# first failure pops a warning and re-enters tkwait.window(tt) so the dialog
# stays open. Only when all checks pass are the widget values harvested back
# into .cdtData$GalParams and the dialog destroyed.
bt.prm.OK <- ttkbutton(frMRG1, text = .cdtEnv$tcl$lang$global[['button']][['1']])
bt.prm.CA <- ttkbutton(frMRG1, text = .cdtEnv$tcl$lang$global[['button']][['2']])
tkconfigure(bt.prm.OK, command = function(){
# 1. A station data file must be selected.
if(str_trim(tclvalue(file.stnfl)) == ""){
tkmessageBox(message = "Select the file containing the station data", icon = "warning", type = "ok")
tkwait.window(tt)
# 2. The RFE directory must be set.
}else if(str_trim(tclvalue(dir.RFE)) %in% c("", "NA")){
tkmessageBox(message = "Browse or enter the directory containing the RFE files", icon = "warning", type = "ok")
tkwait.window(tt)
# 3. If bias factors are pre-computed, their directory must be given.
}else if(tclvalue(bias.calc) == '1' & str_trim(tclvalue(bias.dir)) %in% c("", "NA"))
{
tkmessageBox(message = "Enter the path to directory containing the Bias factors", icon = "warning", type = "ok")
tkwait.window(tt)
# 4. If LM merging with pre-computed coefficients, their directory must be given.
}else if(tclvalue(mrg.method) == "Spatio-Temporal LM" & tclvalue(lmcoef.calc) == '1' &
str_trim(tclvalue(LMCoef.dir)) %in% c("", "NA"))
{
tkmessageBox(message = "Enter the path to directory containing the lm coefficients", icon = "warning", type = "ok")
tkwait.window(tt)
# 5. A DEM file is required whenever bias or LM coefficients will be
# interpolated with NN, or blanking uses the DEM.
}else if(((tclvalue(bias.calc) == '0' & .cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" & tclvalue(lmcoef.calc) == '0' &
.cdtData$GalParams$LMCOEF$interp.method == "NN") | tclvalue(blankGrd) == "Use DEM") &
(str_trim(tclvalue(file.grddem)) == ""))
{
tkmessageBox(message = "You have to provide DEM data in NetCDF format", icon = "warning", type = "ok")
tkwait.window(tt)
# 6. A shapefile is required when blanking with a shapefile.
}else if(str_trim(tclvalue(file.blkshp)) == "" & str_trim(tclvalue(blankGrd)) == "Use ESRI shapefile"){
tkmessageBox(message = "You have to provide the shapefile", icon = "warning", type = "ok")
tkwait.window(tt)
# 7. An output directory must be set.
}else if(str_trim(tclvalue(dir2save)) %in% c("", "NA")){
tkmessageBox(message = "Browse or enter the path to directory to save results", icon = "warning", type = "ok")
tkwait.window(tt)
}else{
# All inputs valid: copy widget state back into the global parameter list.
.cdtData$GalParams$STN.file <- str_trim(tclvalue(file.stnfl))
.cdtData$GalParams$RFE$dir <- str_trim(tclvalue(dir.RFE))
.cdtData$GalParams$BIAS$bias.method <- str_trim(tclvalue(bias.method))
.cdtData$GalParams$BIAS$deja.calc <- switch(tclvalue(bias.calc), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$BIAS$dir.Bias <- str_trim(tclvalue(bias.dir))
.cdtData$GalParams$Merging$mrg.method <- str_trim(tclvalue(mrg.method))
.cdtData$GalParams$Merging$min.stn <- as.numeric(str_trim(tclvalue(mrg.min.stn)))
.cdtData$GalParams$Merging$min.non.zero <- as.numeric(str_trim(tclvalue(mrg.min.non.zero)))
# Map the displayed period label back to its internal keyword.
.cdtData$GalParams$period <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(file.period))]
.cdtData$GalParams$LMCOEF$deja.calc <- switch(tclvalue(lmcoef.calc), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$LMCOEF$dir.LMCoef <- str_trim(tclvalue(LMCoef.dir))
.cdtData$GalParams$RnoR$use.RnoR <- switch(tclvalue(use.RnoR), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$RnoR$maxdist.RnoR <- as.numeric(str_trim(tclvalue(maxdist.RnoR)))
.cdtData$GalParams$RnoR$smooth.RnoR <- switch(tclvalue(smooth.RnoR), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$DEM.file <- str_trim(tclvalue(file.grddem))
.cdtData$GalParams$output$dir <- str_trim(tclvalue(dir2save))
.cdtData$GalParams$output$format <- str_trim(tclvalue(outmrgff))
# Map the blanking label back to its internal code '1'/'2'/'3'.
.cdtData$GalParams$blank$blank <- switch(str_trim(tclvalue(blankGrd)),
"None" = '1', "Use DEM" = '2',
"Use ESRI shapefile" = '3')
.cdtData$GalParams$blank$SHP.file <- str_trim(tclvalue(file.blkshp))
tkgrab.release(tt)
tkdestroy(tt)
tkfocus(.cdtEnv$tcl$main$win)
}
})
# Cancel: close the dialog without saving anything.
tkconfigure(bt.prm.CA, command = function(){
tkgrab.release(tt)
tkdestroy(tt)
tkfocus(.cdtEnv$tcl$main$win)
})
tkgrid(bt.prm.CA, row = 0, column = 0, sticky = 'w', padx = 5, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.prm.OK, row = 0, column = 1, sticky = 'e', padx = 5, pady = 1, ipadx = 1, ipady = 1)
####################################
####################################
# Final assembly: place the notebook frame and the button row, then center
# the (modal) dialog on screen and block until it is destroyed.
tkgrid(frMRG0, row = 0, column = 0, sticky = 'nswe', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frMRG1, row = 1, column = 1, sticky = 'se', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tcl('update')
tkgrid(bwnote, sticky = 'nwes')
tkgrid.columnconfigure(bwnote, 0, weight = 1)
####################################
# Withdraw while measuring the requested size, then center on screen.
tkwm.withdraw(tt)
tcl('update')
tt.w <- as.integer(tkwinfo("reqwidth", tt))
tt.h <- as.integer(tkwinfo("reqheight", tt))
tt.x <- as.integer(.cdtData$tcl$data$width.scr*0.5 - tt.w*0.5)
tt.y <- as.integer(.cdtEnv$tcl$data$height.scr*0.5 - tt.h*0.5)
tkwm.geometry(tt, paste0('+', tt.x, '+', tt.y))
tkwm.transient(tt)
tkwm.title(tt, 'Merging data - Settings')
tkwm.deiconify(tt)
tkfocus(tt)
# If the window is destroyed by the window manager, release the grab and
# return focus to the main CDT window.
tkbind(tt, "<Destroy>", function(){
tkgrab.release(tt)
tkfocus(.cdtEnv$tcl$main$win)
})
# Block until the dialog closes; the OK handler has already written results
# into .cdtData$GalParams by then.
tkwait.window(tt)
invisible()
}
| /R/cdtPrecip_MergingALL_dlgBox.R | no_license | heureux1985/CDT | R | false | false | 36,968 | r |
# Build the "Merging data - Settings" dialog for merging station precipitation
# with RFE data. Reads defaults from .cdtData$GalParams and (in the OK handler
# further down) writes the user's choices back into it. Returns invisibly;
# the dialog is modal (tkgrab.set / tkwait.window).
Precip_mergeGetInfoALL <- function(){
listOpenFiles <- openFile_ttkcomboList()
# Widget widths differ between Windows and other platforms because default
# Tk fonts render at different sizes.
if(WindowsOS()){
largeur0 <- 19
largeur1 <- 42
largeur2 <- 45
largeur3 <- 27
largeur4 <- 28
largeur5 <- 32
}else{
largeur0 <- 16
largeur1 <- 38
largeur2 <- 39
largeur3 <- 21
largeur4 <- 17
largeur5 <- 22
}
# Language file hookup left disabled by the original author.
# xml.dlg <- file.path(.cdtDir$dirLocal, "languages", "cdtPrecip_MergingALL_dlgBox.xml")
# lang.dlg <- cdtLanguageParse(xml.dlg, .cdtData$Config$lang.iso)
####################################
# Modal toplevel with a raised parameter area (frMRG0) and a button row (frMRG1).
tt <- tktoplevel()
tkgrab.set(tt)
tkfocus(tt)
frMRG0 <- tkframe(tt, relief = 'raised', borderwidth = 2, padx = 3, pady = 3)
frMRG1 <- tkframe(tt)
####################################
# Notebook with five tabs: Input, Merging, Bias Coeff, LM Coeff, Output.
bwnote <- bwNoteBook(frMRG0)
conf.tab1 <- bwAddTab(bwnote, text = "Input")
conf.tab2 <- bwAddTab(bwnote, text = "Merging")
conf.tab3 <- bwAddTab(bwnote, text = "Bias Coeff")
conf.tab4 <- bwAddTab(bwnote, text = "LM Coeff")
conf.tab5 <- bwAddTab(bwnote, text = "Output")
bwRaiseTab(bwnote, conf.tab1)
tkgrid.columnconfigure(conf.tab1, 0, weight = 1)
tkgrid.columnconfigure(conf.tab2, 0, weight = 1)
tkgrid.columnconfigure(conf.tab3, 0, weight = 1)
tkgrid.columnconfigure(conf.tab4, 0, weight = 1)
tkgrid.columnconfigure(conf.tab5, 0, weight = 1)
####################################
# Tab 1 (Input): time step + date range, station file, RFE directory, DEM.
frTab1 <- tkframe(conf.tab1)
####################################
frtimestep <- tkframe(frTab1, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
file.period <- tclVar()
# Displayed labels come from the language table; periodVAL holds the
# matching internal keywords, aligned by position.
CbperiodVAL <- .cdtEnv$tcl$lang$global[['combobox']][['1']][2:5]
periodVAL <- c('daily', 'pentad', 'dekadal', 'monthly')
tclvalue(file.period) <- CbperiodVAL[periodVAL %in% .cdtData$GalParams$period]
# Label shown in the date-range sub-dialog ('Day' is the fallback default).
txtdek <- switch(.cdtData$GalParams$period, 'dekadal' = 'Dekad', 'pentad' = 'Pentad', 'Day')
day.txtVar <- tclVar(txtdek)
# Monthly data has no sub-month component, so the day/pentad/dekad field
# of the date-range dialog is disabled.
statedate <- if(.cdtData$GalParams$period == 'monthly') 'disabled' else 'normal'
cb.period <- ttkcombobox(frtimestep, values = CbperiodVAL, textvariable = file.period, width = largeur0)
bt.DateRange <- ttkbutton(frtimestep, text = "Set Date Range")
tkconfigure(bt.DateRange, command = function(){
.cdtData$GalParams[["Merging.Date"]] <- getInfoDateRange(.cdtEnv$tcl$main$win,
.cdtData$GalParams[["Merging.Date"]],
daypendek.lab = tclvalue(day.txtVar),
state.dek = statedate)
})
tkgrid(cb.period, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.DateRange, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.period, 'Select the time step of the data')
status.bar.display(cb.period, 'Select the time step of the data')
infobulle(bt.DateRange, 'Set the start and end date to merge RFE data')
status.bar.display(bt.DateRange, 'Set the start and end date to merge RFE data')
###########
# Keep the sub-period label and the date-field state in sync with the
# selected time step (statedate is updated in the enclosing scope via <<-).
tkbind(cb.period, "<<ComboboxSelected>>", function(){
tclvalue(day.txtVar) <- ifelse(str_trim(tclvalue(file.period)) == CbperiodVAL[3], 'Dekad',
ifelse(str_trim(tclvalue(file.period)) == CbperiodVAL[2], 'Pentad', 'Day'))
statedate <<- if(str_trim(tclvalue(file.period)) == CbperiodVAL[4]) 'disabled' else 'normal'
})
####################################
####################################
# Frame: station data file (combobox over open files) and RFE directory.
frInputData <- tkframe(frTab1, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
file.stnfl <- tclVar(.cdtData$GalParams$STN.file)
dir.RFE <- tclVar(.cdtData$GalParams$RFE$dir)
txt.stnfl <- tklabel(frInputData, text = 'Station data file', anchor = 'w', justify = 'left')
cb.stnfl <- ttkcombobox(frInputData, values = unlist(listOpenFiles), textvariable = file.stnfl, width = largeur1)
bt.stnfl <- tkbutton(frInputData, text = "...")
txt.RFE <- tklabel(frInputData, text = 'Directory containing RFE data', anchor = 'w', justify = 'left')
set.RFE <- ttkbutton(frInputData, text = .cdtEnv$tcl$lang$global[['button']][['5']])
en.RFE <- tkentry(frInputData, textvariable = dir.RFE, width = largeur2)
bt.RFE <- tkbutton(frInputData, text = "...")
######
# Browse for a station (ascii) file not already open: register it in the
# open-files list and refresh every combobox showing that list.
tkconfigure(bt.stnfl, command = function(){
dat.opfiles <- getOpenFiles(tt)
if(!is.null(dat.opfiles)){
update.OpenFiles('ascii', dat.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- dat.opfiles[[1]]
tclvalue(file.stnfl) <- dat.opfiles[[1]]
lapply(list(cb.stnfl, cb.grddem, cb.blkshp), tkconfigure, values = unlist(listOpenFiles))
}
})
# "Settings" button: open the NetCDF-options sub-dialog for the RFE files.
tkconfigure(set.RFE, command = function(){
.cdtData$GalParams[["RFE"]] <- getInfoNetcdfData(tt, .cdtData$GalParams[["RFE"]],
str_trim(tclvalue(dir.RFE)), str_trim(tclvalue(file.period)))
})
# Browse for the RFE directory; keep "" on cancel.
tkconfigure(bt.RFE, command = function(){
dirrfe <- tk_choose.dir(getwd(), "")
tclvalue(dir.RFE) <- if(!is.na(dirrfe)) dirrfe else ""
})
######
tkgrid(txt.stnfl, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(cb.stnfl, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.stnfl, row = 1, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(txt.RFE, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 3, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(set.RFE, row = 2, column = 3, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(en.RFE, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.RFE, row = 3, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
infobulle(cb.stnfl, 'Select the file from the list')
status.bar.display(cb.stnfl, 'Select the file containing the gauge data')
infobulle(bt.stnfl, 'Browse file if not listed')
status.bar.display(bt.stnfl, 'Browse file if not listed')
infobulle(en.RFE, 'Enter the full path to the directory containing the RFE data')
status.bar.display(en.RFE, 'Enter the full path to the directory containing the RFE data')
infobulle(bt.RFE, 'Or browse here')
status.bar.display(bt.RFE, 'Or browse here')
infobulle(set.RFE, 'Setting netcdf data options')
status.bar.display(set.RFE, 'Setting netcdf data options')
####################################
####################################
# Frame: elevation (DEM) NetCDF file. Enabled only when some consumer needs
# it: NN interpolation of bias factors, NN interpolation of LM coefficients
# under the Spatio-Temporal LM method, or blanking by DEM (blank == "2").
frDEM <- tkframe(frTab1, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
file.grddem <- tclVar(.cdtData$GalParams$DEM.file)
statedem <- if((!.cdtData$GalParams$BIAS$deja.calc &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(.cdtData$GalParams$Merging$mrg.method == "Spatio-Temporal LM" &
!.cdtData$GalParams$LMCOEF$deja.calc &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
.cdtData$GalParams$blank$blank == "2") 'normal' else 'disabled'
txt.grddem <- tklabel(frDEM, text = "Elevation data (NetCDF)", anchor = 'w', justify = 'left')
cb.grddem <- ttkcombobox(frDEM, values = unlist(listOpenFiles), textvariable = file.grddem, state = statedem, width = largeur1)
bt.grddem <- tkbutton(frDEM, text = "...", state = statedem)
# Browse for a NetCDF DEM not already open and register it in the list.
tkconfigure(bt.grddem, command = function(){
nc.opfiles <- getOpenNetcdf(tt, initialdir = getwd())
if(!is.null(nc.opfiles)){
update.OpenFiles('netcdf', nc.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- nc.opfiles[[1]]
tclvalue(file.grddem) <- nc.opfiles[[1]]
lapply(list(cb.stnfl, cb.grddem, cb.blkshp), tkconfigure, values = unlist(listOpenFiles))
}
})
tkgrid(txt.grddem, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.grddem, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.grddem, row = 1, column = 1, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.grddem, 'Select the file in the list')
status.bar.display(cb.grddem, 'File containing the elevation data in netcdf')
infobulle(bt.grddem, 'Browse file if not listed')
status.bar.display(bt.grddem, 'Browse file if not listed')
####################################
# Stack the three input-tab frames.
tkgrid(frtimestep, row = 0, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frInputData, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frDEM, row = 2, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
####################################
# Tab 2 (Merging): merging method, interpolation parameters and thresholds.
frTab2 <- tkframe(conf.tab2)
####################################
frMrg <- tkframe(frTab2, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
cb.MrgMthd <- c("Regression Kriging", "Spatio-Temporal LM", "Simple Bias Adjustment")
mrg.method <- tclVar(str_trim(.cdtData$GalParams$Merging$mrg.method))
mrg.min.stn <- tclVar(.cdtData$GalParams$Merging$min.stn)
mrg.min.non.zero <- tclVar(.cdtData$GalParams$Merging$min.non.zero)
txt.mrg <- tklabel(frMrg, text = 'Merging method', anchor = 'w', justify = 'left')
cb.mrg <- ttkcombobox(frMrg, values = cb.MrgMthd, textvariable = mrg.method, width = largeur4)
bt.mrg.interp <- ttkbutton(frMrg, text = "Merging Interpolations Parameters")
txt.min.nbrs.stn <- tklabel(frMrg, text = 'Min.Nb.Stn', anchor = 'e', justify = 'right')
en.min.nbrs.stn <- tkentry(frMrg, width = 4, textvariable = mrg.min.stn, justify = 'right')
txt.min.non.zero <- tklabel(frMrg, text = 'Min.No.Zero', anchor = 'e', justify = 'right')
en.min.non.zero <- tkentry(frMrg, width = 4, textvariable = mrg.min.non.zero, justify = 'right')
# Open the merging interpolation-parameters sub-dialog (interpChoix = 0).
tkconfigure(bt.mrg.interp, command = function(){
.cdtData$GalParams[["Merging"]] <- getInterpolationPars(tt, .cdtData$GalParams[["Merging"]], interpChoix = 0)
})
tkgrid(txt.mrg, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.mrg, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.mrg.interp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.min.nbrs.stn, row = 2, column = 0, sticky = 'e', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.min.nbrs.stn, row = 2, column = 2, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.min.non.zero, row = 2, column = 3, sticky = 'e', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.min.non.zero, row = 2, column = 5, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.mrg, 'Method to be used to perform merging')
status.bar.display(cb.mrg, 'Method to be used to perform merging')
infobulle(en.min.nbrs.stn, 'Minimum number of gauges with data to be used to do the merging')
status.bar.display(en.min.nbrs.stn, 'Minimum number of gauges with data to be used to do the merging')
infobulle(en.min.non.zero, 'Minimum number of non-zero gauge values to perform the merging')
status.bar.display(en.min.non.zero, 'Minimum number of non-zero gauge values to perform the merging')
###############
# When the merging method changes: the whole LM-coefficient tab is only
# meaningful for "Spatio-Temporal LM"; also re-evaluate the DEM selector.
tkbind(cb.mrg, "<<ComboboxSelected>>", function(){
stateLMCoef1 <- if(tclvalue(mrg.method) == "Spatio-Temporal LM") 'normal' else 'disabled'
stateLMCoef2 <- if(tclvalue(mrg.method) == "Spatio-Temporal LM" & tclvalue(lmcoef.calc) == "0") 'normal' else 'disabled'
stateLMCoef3 <- if(tclvalue(mrg.method) == "Spatio-Temporal LM" & tclvalue(lmcoef.calc) == "1") 'normal' else 'disabled'
tkconfigure(chk.LMCoef, state = stateLMCoef1)
tkconfigure(bt.baseLM, state = stateLMCoef2)
tkconfigure(bt.LMCoef.interp, state = stateLMCoef2)
tkconfigure(en.LMCoef.dir, state = stateLMCoef3)
tkconfigure(bt.LMCoef.dir, state = stateLMCoef3)
statedem <- if((tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
####################################
####################################
# Frame: Rain-no-Rain mask options (mask out no-rain areas after merging).
frRnoR <- tkframe(frTab2, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
use.RnoR <- tclVar(.cdtData$GalParams$RnoR$use.RnoR)
maxdist.RnoR <- tclVar(.cdtData$GalParams$RnoR$maxdist.RnoR)
smooth.RnoR <- tclVar(.cdtData$GalParams$RnoR$smooth.RnoR)
stateRnoR <- if(.cdtData$GalParams$RnoR$use.RnoR) 'normal' else 'disabled'
########
txt.mrg.pars <- tklabel(frRnoR, text = 'Rain-no-Rain mask', anchor = 'w', justify = 'left')
chk.use.rnr <- tkcheckbutton(frRnoR, variable = use.RnoR, text = 'Apply Rain-no-Rain mask', anchor = 'w', justify = 'left')
txt.maxdist.rnr <- tklabel(frRnoR, text = 'maxdist.RnoR', anchor = 'e', justify = 'right')
en.maxdist.rnr <- tkentry(frRnoR, width = 4, textvariable = maxdist.RnoR, justify = 'right', state = stateRnoR)
chk.smooth.rnr <- tkcheckbutton(frRnoR, variable = smooth.RnoR, text = 'Smooth Rain-no-Rain mask', anchor = 'w', justify = 'left', state = stateRnoR)
tkgrid(txt.mrg.pars, row = 0, column = 0, sticky = '', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(chk.use.rnr, row = 1, column = 0, sticky = 'ew', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.maxdist.rnr, row = 2, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.maxdist.rnr, row = 2, column = 1, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(chk.smooth.rnr, row = 3, column = 0, sticky = 'ew', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(chk.use.rnr, 'Check this box to apply a mask over no rain area')
status.bar.display(chk.use.rnr, 'Check this box to apply a mask over no rain area')
infobulle(en.maxdist.rnr, 'Maximum distance (in decimal degrees) to be used to interpolate Rain-noRain mask')
status.bar.display(en.maxdist.rnr, 'Maximum distance (in decimal degrees) to be used to interpolate Rain-noRain mask')
infobulle(chk.smooth.rnr, 'Check this box to smooth the gradient between high value and no rain area')
status.bar.display(chk.smooth.rnr, 'Check this box to smooth the gradient between high value and no rain area')
# Enable the mask sub-options only when the mask is used.
# NOTE: <Button-1> fires before the variable toggles, hence the '0' test
# (a click on an unchecked box is about to enable the option).
tkbind(chk.use.rnr, "<Button-1>", function(){
stateRnoR <- if(tclvalue(use.RnoR) == '0') 'normal' else 'disabled'
tkconfigure(en.maxdist.rnr, state = stateRnoR)
tkconfigure(chk.smooth.rnr, state = stateRnoR)
})
####################################
# Stack the two merging-tab frames.
tkgrid(frMrg, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frRnoR, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab2, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
####################################
# Tab 3 (Bias Coeff): bias-correction method and its computation settings.
frTab3 <- tkframe(conf.tab3)
####################################
frameBias <- tkframe(frTab3, relief = 'sunken', borderwidth = 2, padx = 5, pady = 5)
cb.biasMthd <- c("Quantile.Mapping", "Multiplicative.Bias.Var", "Multiplicative.Bias.Mon")
bias.method <- tclVar(str_trim(.cdtData$GalParams$BIAS$bias.method))
txt.bias <- tklabel(frameBias, text = 'Bias method', anchor = 'w', justify = 'left')
cb.bias <- ttkcombobox(frameBias, values = cb.biasMthd, textvariable = bias.method, width = largeur3)
tkgrid(txt.bias, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.bias, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.bias, 'Select the method to be used to calculate the Bias Factors or Parameters')
status.bar.display(cb.bias, 'Select the method to be used to calculate the Bias Factors or Parameters')
####################################
# Frame: compute-vs-reuse choice for the bias factors. When deja.calc is
# TRUE the compute widgets start disabled (the directory entry elsewhere
# is enabled instead).
frameBiasSet <- tkframe(frTab3, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
bias.calc <- tclVar(.cdtData$GalParams$BIAS$deja.calc)
statebias1 <- if(.cdtData$GalParams$BIAS$deja.calc) 'disabled' else 'normal'
chk.bias <- tkcheckbutton(frameBiasSet, variable = bias.calc, text = "Bias factors are already calculated", anchor = 'w', justify = 'left', background = 'lightblue')
bt.baseBias <- ttkbutton(frameBiasSet, text = "Set Bias Base Period", state = statebias1)
bt.bias.interp <- ttkbutton(frameBiasSet, text = "Bias Interpolations Parameters", state = statebias1)
# Open the base-period sub-dialog for the bias computation.
tkconfigure(bt.baseBias, command = function(){
.cdtData$GalParams[["BIAS"]] <- getInfoBasePeriod(tt, .cdtData$GalParams[["BIAS"]])
})
# Open the interpolation-parameters sub-dialog, then refresh the DEM
# selector since NN interpolation requires elevation data.
tkconfigure(bt.bias.interp, command = function(){
.cdtData$GalParams[["BIAS"]] <- getInterpolationPars(tt, .cdtData$GalParams[["BIAS"]], interpChoix = 1)
statedem <- if((tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
tkgrid(chk.bias, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.baseBias, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.bias.interp, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(chk.bias, 'Check this box if the bias factors or parameters are already calculated')
status.bar.display(chk.bias, 'Check this box if the bias factors or parameters are already calculated')
infobulle(bt.baseBias, 'Set the base period to be used to compute bias factors')
status.bar.display(bt.baseBias, 'Set the base period to be used to compute bias factors')
tkbind(chk.bias, "<Button-1>", function(){
statebias1 <- if(tclvalue(bias.calc) == '1') 'normal' else 'disabled'
statebias2 <- if(tclvalue(bias.calc) == '0') 'normal' else 'disabled'
tkconfigure(bt.baseBias, state = statebias1)
tkconfigure(bt.bias.interp, state = statebias1)
tkconfigure(en.bias.dir, state = statebias2)
tkconfigure(bt.bias.dir, state = statebias2)
statedem <- if((tclvalue(bias.calc) == "1" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
####################################
frameBiasDir <- tkframe(frTab3, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
bias.dir <- tclVar(.cdtData$GalParams$BIAS$dir.Bias)
statebias2 <- if(.cdtData$GalParams$BIAS$deja.calc) 'normal' else 'disabled'
txt.bias.dir <- tklabel(frameBiasDir, text = "Directory of bias files", anchor = 'w', justify = 'left')
en.bias.dir <- tkentry(frameBiasDir, textvariable = bias.dir, state = statebias2, width = largeur2)
bt.bias.dir <- tkbutton(frameBiasDir, text = "...", state = statebias2)
tkconfigure(bt.bias.dir, command = function(){
dirbias <- tk_choose.dir(getwd(), "")
tclvalue(bias.dir) <- if(!is.na(dirbias)) dirbias else ""
})
tkgrid(txt.bias.dir, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.bias.dir, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.bias.dir, row = 1, column = 5, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(en.bias.dir, 'Enter the full path to directory containing the bias files')
status.bar.display(en.bias.dir, 'Enter the full path to directory containing the bias files')
####################################
tkgrid(frameBias, row = 0, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameBiasSet, row = 1, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameBiasDir, row = 2, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab3, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
frTab4 <- tkframe(conf.tab4)
####################################
frLMCoef <- tkframe(frTab4, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
lmcoef.calc <- tclVar(.cdtData$GalParams$LMCOEF$deja.calc)
stateLMCoef1 <- if(str_trim(.cdtData$GalParams$Merging$mrg.method) == "Spatio-Temporal LM") 'normal' else 'disabled'
stateLMCoef2 <- if(str_trim(.cdtData$GalParams$Merging$mrg.method) == "Spatio-Temporal LM" & !.cdtData$GalParams$LMCOEF$deja.calc) 'normal' else 'disabled'
stateLMCoef3 <- if(str_trim(.cdtData$GalParams$Merging$mrg.method) == "Spatio-Temporal LM" & .cdtData$GalParams$LMCOEF$deja.calc) 'normal' else 'disabled'
chk.LMCoef <- tkcheckbutton(frLMCoef, variable = lmcoef.calc, text = "LMCoef are already calculated", state = stateLMCoef1, anchor = 'w', justify = 'left', background = 'lightblue')
bt.baseLM <- ttkbutton(frLMCoef, text = "Set LMCoef Base Period", state = stateLMCoef2)
bt.LMCoef.interp <- ttkbutton(frLMCoef, text = "LMCoef Interpolations Parameters", state = stateLMCoef2)
tkconfigure(bt.baseLM, command = function(){
.cdtData$GalParams[["LMCOEF"]] <- getInfoBasePeriod(tt, .cdtData$GalParams[["LMCOEF"]])
})
tkconfigure(bt.LMCoef.interp, command = function(){
.cdtData$GalParams[["LMCOEF"]] <- getInterpolationPars(tt, .cdtData$GalParams[["LMCOEF"]], interpChoix = 1)
statedem <- if((tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
tkgrid(chk.LMCoef, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.baseLM, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.LMCoef.interp, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(chk.LMCoef, 'Check this box if the linear model coefficients are already calculated')
status.bar.display(chk.LMCoef, 'Check this box if the linear model coefficients are already calculated')
infobulle(bt.baseLM, 'Start and end year to be used to compute LM coefficients')
status.bar.display(bt.baseLM, 'Start and end year to be used to compute LM coefficients')
###############
tkbind(chk.LMCoef, "<Button-1>", function(){
stateLMCoef2 <- if(tclvalue(lmcoef.calc) == '1' & tclvalue(mrg.method) == "Spatio-Temporal LM") 'normal' else 'disabled'
stateLMCoef3 <- if(tclvalue(lmcoef.calc) == '0' & tclvalue(mrg.method) == "Spatio-Temporal LM") 'normal' else 'disabled'
tkconfigure(bt.baseLM, state = stateLMCoef2)
tkconfigure(bt.LMCoef.interp, state = stateLMCoef2)
tkconfigure(en.LMCoef.dir, state = stateLMCoef3)
tkconfigure(bt.LMCoef.dir, state = stateLMCoef3)
statedem <- if((tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "1" &
.cdtData$GalParams$LMCOEF$interp.method == "NN") |
tclvalue(blankGrd) == "Use DEM") 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
####################################
frLMCoefdir <- tkframe(frTab4, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
LMCoef.dir <- tclVar(.cdtData$GalParams$LMCOEF$dir.LMCoef)
txt.LMCoef.dir <- tklabel(frLMCoefdir, text = "Directory of LMCoef files", anchor = 'w', justify = 'left')
en.LMCoef.dir <- tkentry(frLMCoefdir, textvariable = LMCoef.dir, state = stateLMCoef3, width = largeur2)
bt.LMCoef.dir <- tkbutton(frLMCoefdir, text = "...", state = stateLMCoef3)
tkconfigure(bt.LMCoef.dir, command = function(){
dirLM <- tk_choose.dir(getwd(), "")
tclvalue(LMCoef.dir) <- if(!is.na(dirLM)) dirLM else ""
})
tkgrid(txt.LMCoef.dir, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.LMCoef.dir, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.LMCoef.dir, row = 1, column = 5, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(en.LMCoef.dir, 'Enter the full path to directory containing the LM coefficients files')
status.bar.display(en.LMCoef.dir, 'Enter the full path to directory containing the LM coefficients files')
infobulle(bt.LMCoef.dir, 'or browse here')
status.bar.display(bt.LMCoef.dir, 'or browse here')
####################################
tkgrid(frLMCoef, row = 0, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frLMCoefdir, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab4, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
frTab5 <- tkframe(conf.tab5)
####################################
frSave <- tkframe(frTab5, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
dir2save <- tclVar(.cdtData$GalParams$output$dir)
outmrgff <- tclVar(.cdtData$GalParams$output$format)
txt.dir2save <- tklabel(frSave, text = 'Directory to save result', anchor = 'w', justify = 'left')
en.dir2save <- tkentry(frSave, textvariable = dir2save, width = largeur2)
bt.dir2save <- tkbutton(frSave, text = "...")
txt.outmrgff <- tklabel(frSave, text = 'Merged data filename format', anchor = 'w', justify = 'left')
en.outmrgff <- tkentry(frSave, textvariable = outmrgff, width = largeur2)
#####
tkconfigure(bt.dir2save, command = function(){
dir2savepth <- tk_choose.dir(.cdtData$GalParams$output$dir, "")
if(is.na(dir2savepth)) tclvalue(dir2save) <- .cdtData$GalParams$output$dir
else{
dir.create(dir2savepth, showWarnings = FALSE, recursive = TRUE)
tclvalue(dir2save) <- dir2savepth
}
})
#####
tkgrid(txt.dir2save, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(en.dir2save, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.dir2save, row = 1, column = 1, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.outmrgff, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.outmrgff, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(en.dir2save, 'Enter the full path to directory to save result')
status.bar.display(en.dir2save, 'Enter the full path to directory to save result')
infobulle(bt.dir2save, 'or browse here')
status.bar.display(bt.dir2save, 'or browse here')
infobulle(en.outmrgff, 'Format of the merged data files names in NetCDF, example: rr_mrg_1981011_ALL.nc')
status.bar.display(en.outmrgff, 'Format of the merged data files names in NetCDF, example: rr_mrg_1981011_ALL.nc')
############################################
frblank <- tkframe(frTab5, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
blankGrd <- tclVar()
cb.blankVAL <- c("None", "Use DEM", "Use ESRI shapefile")
tclvalue(blankGrd) <- switch(str_trim(.cdtData$GalParams$blank$blank),
'1' = cb.blankVAL[1],
'2' = cb.blankVAL[2],
'3' = cb.blankVAL[3])
txt.blankGrd <- tklabel(frblank, text = 'Blank merged data', anchor = 'w', justify = 'left')
cb.blankGrd <- ttkcombobox(frblank, values = cb.blankVAL, textvariable = blankGrd, width = largeur5)
#####
tkgrid(txt.blankGrd, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.blankGrd, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.blankGrd, 'Blank grid outside the country boundaries or over ocean')
status.bar.display(cb.blankGrd, 'Blank grid outside the country boundaries or over ocean\ngiven by the DEM mask or the shapefile')
############################################
tkbind(cb.blankGrd, "<<ComboboxSelected>>", function(){
stateshp <- if(tclvalue(blankGrd) == 'Use ESRI shapefile') 'normal' else 'disabled'
tkconfigure(cb.blkshp, state = stateshp)
tkconfigure(bt.blkshp, state = stateshp)
statedem <- if(tclvalue(blankGrd) == "Use DEM" |
(tclvalue(bias.calc) == "0" &
.cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" &
tclvalue(lmcoef.calc) == "0" &
.cdtData$GalParams$LMCOEF$interp.method == "NN")) 'normal' else 'disabled'
tkconfigure(cb.grddem, state = statedem)
tkconfigure(bt.grddem, state = statedem)
})
############################################
frSHP <- tkframe(frTab5, relief = 'sunken', borderwidth = 2, padx = 3, pady = 3)
file.blkshp <- tclVar(.cdtData$GalParams$blank$SHP.file)
stateshp <- if(str_trim(.cdtData$GalParams$blank$blank) == '3') 'normal' else 'disabled'
txt.blkshp <- tklabel(frSHP, text = "ESRI shapefiles for blanking", anchor = 'w', justify = 'left')
cb.blkshp <- ttkcombobox(frSHP, values = unlist(listOpenFiles), textvariable = file.blkshp, state = stateshp, width = largeur1)
bt.blkshp <- tkbutton(frSHP, text = "...", state = stateshp)
########
tkconfigure(bt.blkshp, command = function(){
shp.opfiles <- getOpenShp(tt)
if(!is.null(shp.opfiles)){
update.OpenFiles('shp', shp.opfiles)
tclvalue(file.blkshp) <- shp.opfiles[[1]]
listOpenFiles[[length(listOpenFiles) + 1]] <<- shp.opfiles[[1]]
lapply(list(cb.stnfl, cb.grddem, cb.blkshp), tkconfigure, values = unlist(listOpenFiles))
}
})
#####
tkgrid(txt.blkshp, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.blkshp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.blkshp, row = 1, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.blkshp, 'Select the file in the list')
status.bar.display(cb.blkshp, 'Select the file containing the ESRI shapefiles')
infobulle(bt.blkshp, 'Browse file if not listed')
status.bar.display(bt.blkshp, 'Browse file if not listed')
############################################
tkgrid(frSave, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frblank, row = 1, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frSHP, row = 2, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frTab5, padx = 0, pady = 1, ipadx = 1, ipady = 1)
####################################
bt.prm.OK <- ttkbutton(frMRG1, text = .cdtEnv$tcl$lang$global[['button']][['1']])
bt.prm.CA <- ttkbutton(frMRG1, text = .cdtEnv$tcl$lang$global[['button']][['2']])
tkconfigure(bt.prm.OK, command = function(){
if(str_trim(tclvalue(file.stnfl)) == ""){
tkmessageBox(message = "Select the file containing the station data", icon = "warning", type = "ok")
tkwait.window(tt)
}else if(str_trim(tclvalue(dir.RFE)) %in% c("", "NA")){
tkmessageBox(message = "Browse or enter the directory containing the RFE files", icon = "warning", type = "ok")
tkwait.window(tt)
}else if(tclvalue(bias.calc) == '1' & str_trim(tclvalue(bias.dir)) %in% c("", "NA"))
{
tkmessageBox(message = "Enter the path to directory containing the Bias factors", icon = "warning", type = "ok")
tkwait.window(tt)
}else if(tclvalue(mrg.method) == "Spatio-Temporal LM" & tclvalue(lmcoef.calc) == '1' &
str_trim(tclvalue(LMCoef.dir)) %in% c("", "NA"))
{
tkmessageBox(message = "Enter the path to directory containing the lm coefficients", icon = "warning", type = "ok")
tkwait.window(tt)
}else if(((tclvalue(bias.calc) == '0' & .cdtData$GalParams$BIAS$interp.method == "NN") |
(tclvalue(mrg.method) == "Spatio-Temporal LM" & tclvalue(lmcoef.calc) == '0' &
.cdtData$GalParams$LMCOEF$interp.method == "NN") | tclvalue(blankGrd) == "Use DEM") &
(str_trim(tclvalue(file.grddem)) == ""))
{
tkmessageBox(message = "You have to provide DEM data in NetCDF format", icon = "warning", type = "ok")
tkwait.window(tt)
}else if(str_trim(tclvalue(file.blkshp)) == "" & str_trim(tclvalue(blankGrd)) == "Use ESRI shapefile"){
tkmessageBox(message = "You have to provide the shapefile", icon = "warning", type = "ok")
tkwait.window(tt)
}else if(str_trim(tclvalue(dir2save)) %in% c("", "NA")){
tkmessageBox(message = "Browse or enter the path to directory to save results", icon = "warning", type = "ok")
tkwait.window(tt)
}else{
.cdtData$GalParams$STN.file <- str_trim(tclvalue(file.stnfl))
.cdtData$GalParams$RFE$dir <- str_trim(tclvalue(dir.RFE))
.cdtData$GalParams$BIAS$bias.method <- str_trim(tclvalue(bias.method))
.cdtData$GalParams$BIAS$deja.calc <- switch(tclvalue(bias.calc), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$BIAS$dir.Bias <- str_trim(tclvalue(bias.dir))
.cdtData$GalParams$Merging$mrg.method <- str_trim(tclvalue(mrg.method))
.cdtData$GalParams$Merging$min.stn <- as.numeric(str_trim(tclvalue(mrg.min.stn)))
.cdtData$GalParams$Merging$min.non.zero <- as.numeric(str_trim(tclvalue(mrg.min.non.zero)))
.cdtData$GalParams$period <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(file.period))]
.cdtData$GalParams$LMCOEF$deja.calc <- switch(tclvalue(lmcoef.calc), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$LMCOEF$dir.LMCoef <- str_trim(tclvalue(LMCoef.dir))
.cdtData$GalParams$RnoR$use.RnoR <- switch(tclvalue(use.RnoR), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$RnoR$maxdist.RnoR <- as.numeric(str_trim(tclvalue(maxdist.RnoR)))
.cdtData$GalParams$RnoR$smooth.RnoR <- switch(tclvalue(smooth.RnoR), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$DEM.file <- str_trim(tclvalue(file.grddem))
.cdtData$GalParams$output$dir <- str_trim(tclvalue(dir2save))
.cdtData$GalParams$output$format <- str_trim(tclvalue(outmrgff))
.cdtData$GalParams$blank$blank <- switch(str_trim(tclvalue(blankGrd)),
"None" = '1', "Use DEM" = '2',
"Use ESRI shapefile" = '3')
.cdtData$GalParams$blank$SHP.file <- str_trim(tclvalue(file.blkshp))
tkgrab.release(tt)
tkdestroy(tt)
tkfocus(.cdtEnv$tcl$main$win)
}
})
tkconfigure(bt.prm.CA, command = function(){
tkgrab.release(tt)
tkdestroy(tt)
tkfocus(.cdtEnv$tcl$main$win)
})
tkgrid(bt.prm.CA, row = 0, column = 0, sticky = 'w', padx = 5, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.prm.OK, row = 0, column = 1, sticky = 'e', padx = 5, pady = 1, ipadx = 1, ipady = 1)
####################################
tkgrid(frMRG0, row = 0, column = 0, sticky = 'nswe', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frMRG1, row = 1, column = 1, sticky = 'se', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tcl('update')
tkgrid(bwnote, sticky = 'nwes')
tkgrid.columnconfigure(bwnote, 0, weight = 1)
####################################
tkwm.withdraw(tt)
tcl('update')
tt.w <- as.integer(tkwinfo("reqwidth", tt))
tt.h <- as.integer(tkwinfo("reqheight", tt))
tt.x <- as.integer(.cdtEnv$tcl$data$width.scr*0.5 - tt.w*0.5)
tt.y <- as.integer(.cdtEnv$tcl$data$height.scr*0.5 - tt.h*0.5)
tkwm.geometry(tt, paste0('+', tt.x, '+', tt.y))
tkwm.transient(tt)
tkwm.title(tt, 'Merging data - Settings')
tkwm.deiconify(tt)
tkfocus(tt)
tkbind(tt, "<Destroy>", function(){
tkgrab.release(tt)
tkfocus(.cdtEnv$tcl$main$win)
})
tkwait.window(tt)
invisible()
}
|
#' @title
#' Download MODIS snow cover data (version 6) from the National Snow and Ice Data Center.
#'
#' @description
#' \code{download_data} is the main function to download a scene given the correct tile, date and satellite.
#'
#' \code{get_tile} is a helper function that actually downloads a tile. Supplied with a correct \code{ftp} address and \code{tile} the function downloads the MODIS tile, and transforms the coordinate reference system to latlong (EPSG:4326).
#'
#' @details
#' When downloading the data, the correct tile has to be specified. At the moment there is no automated way to find the tile. This means that the user has to consult the \href{http://landweb.nascom.nasa.gov/developers/is_tiles/is_bound_10deg.txt}{MODIS land grid} to find the correct tile. Alternatively the \href{http://landweb.nascom.nasa.gov/cgi-bin/developer/tilemap.cgi}{MODIS tile calculator} may be used.
#'
#' @param ftp Address of the repository.
#' @param tile Name of the tile.
#' @param progress Indicates whether or not progress is displayed.
#' @param clean Indicates whether or not temporary files are deleted.
#' @param date Day for which snow data should be downloaded as \code{Date}, \code{POSIXct}, or \code{POSIXlt}.
#' @param sat Satellite mission used. Currently Terra (\code{"MYD10A1"}) and Aqua (\code{"MOD10A1"}) are supported.
#' @param h Horizontal tile number, see also details.
#' @param v Vertical tile number, see also details.
#' @param printFTP If \code{TRUE}, the FTP address where the data are downloaded is printed.
#' @param ... Further arguments passed to \code{get_tile()}.
#'
#' @return
#' The function returns an object of the class \code{RasterLayer} with the following cell values:
#' \itemize{
#' \item 0-100 NDSI snow cover
#' \item 200 missing data
#' \item 201 no decision
#' \item 211 night
#' \item 237 inland water
#' \item 239 ocean
#' \item 250 cloud
#' \item 254 detector saturated
#' \item 255 fill
#' }
#' but see also the documentation for the \emph{NDSI_SNOW_COVER} \href{http://nsidc.org/data/MOD10A1}{here}.
#'
#' @references
#' When using the MODIS snow cover data, please acknowledge the data appropriately by
#' \enumerate{
#' \item reading the \href{http://nsidc.org/about/use_copyright.html}{use and copyright}
#' \item citing the original data: \emph{Hall, D. K. and G. A. Riggs. 2016. MODIS/[Terra/Aqua] Snow Cover Daily L3 Global 500m Grid, Version 6. [Indicate subset used]. Boulder, Colorado USA. NASA National Snow and Ice Data Center Distributed Active Archive Center. doi: http://dx.doi.org/10.5067/MODIS/MOD10A1.006. [Date Accessed].}
#' }
#' @export
#' @rdname MODISSnow
#'
#' @examples
#' \dontrun{
#' # Download MODIS snow data for a central europe h = 18 and v = 5 for the 1 of January 2016
#' dat <- download_data(lubridate::ymd("2016-01-01"), h = 18, v = 5)
#' class(dat)
#' raster::plot(dat)
#' }
download_data <- function(date, sat = "MYD10A1", h = 10, v = 10, printFTP = FALSE, ...) {
  # Validate inputs early. `inherits()` handles objects carrying more than one
  # class (e.g. POSIXct is c("POSIXct", "POSIXt")); the previous
  # `!class(date) %in% ...` produced a length-2 condition there, which is an
  # error in modern R.
  if (!inherits(date, c("Date", "POSIXlt", "POSIXct"))) {
    stop("MODISSnow: date should be an object of class Date", call. = FALSE)
  }
  if (!sat %in% c("MYD10A1", "MOD10A1")) {
    stop("MODISSnow: unknown satellite requested", call. = FALSE)
  }

  # Daily folders on the NSIDC server are named YYYY.MM.DD; Aqua (MYD10A1)
  # and Terra (MOD10A1) products live under different top-level directories.
  folder_date <- base::format(date, "%Y.%m.%d")
  ftp <- if (sat == "MYD10A1") {
    paste0("ftp://n5eil01u.ecs.nsidc.org/SAN/MOSA/", sat, ".006/", folder_date, "/")
  } else {
    paste0("ftp://n5eil01u.ecs.nsidc.org/SAN/MOST/", sat, ".006/", folder_date, "/")
  }
  if (printFTP)
    print(ftp)

  # Use a fresh curl handle per request to avoid FTP server slowdown, see:
  # http://stackoverflow.com/questions/37713293/how-to-circumvent-ftp-server-slowdown
  curl <- RCurl::getCurlHandle()
  fls <- RCurl::getURL(ftp, curl = curl, dirlistonly = TRUE)
  rm(curl)
  base::gc()
  base::gc()

  # Keep only HDF granules and pick the one matching the requested tile.
  # Granule names look like "MYD10A1.A2016001.h18v05..."; escape the literal
  # dots in the pattern and zero-pad h/v so e.g. h = 5 matches "h05".
  fls <- unlist(strsplit(fls, "\\n"))
  fls <- fls[grepl("hdf$", fls)]
  tile <- fls[grepl(
    paste0(sat, "\\.A", lubridate::year(date), "[0-9]{3}\\.h",
           formatC(h, width = 2, flag = "0"), "v", formatC(v, width = 2, flag = "0")),
    fls)]
  if (length(tile) != 1) {
    stop("MODISSnow: requested tile not found", call. = FALSE)
  }

  get_tile(ftp, tile, ...)
}
#'
#' @rdname MODISSnow
#' @export
#'
get_tile <- function(ftp, tile, progress = FALSE, clean = TRUE) {
  # All intermediates live in the session temp dir:
  #   out_file: raw HDF granule as downloaded,
  #   new_file: GeoTIFF of the first subdataset (NDSI snow cover),
  #   dst_file: GeoTIFF reprojected to lat/long (EPSG:4326).
  out_file <- file.path(tempdir(), tile)
  new_file <- paste0(tools::file_path_sans_ext(out_file), ".tif")
  dst_file <- paste0(tools::file_path_sans_ext(new_file), "_epsg4326.tif")

  if (progress) cat("[", format(Sys.time(), "%H-%M-%S"), "]: Starting download\n")
  # NOTE(review): `ftp` as built by download_data() already ends in "/", so
  # this yields a double slash in the URL; FTP servers tolerate it.
  utils::download.file(paste(ftp, tile, sep = "/"), out_file)

  if (progress) cat("[", format(Sys.time(), "%H-%M-%S"), "]: Processing file\n")
  sds <- gdalUtils::get_subdatasets(out_file)
  gdalUtils::gdal_translate(sds[1], dst_dataset = new_file)
  # MODIS sinusoidal grid -> lat/long. The correct PROJ.4 token is "+no_defs";
  # the original "+no_def" was a typo that PROJ silently ignored.
  gdalUtils::gdalwarp(srcfile = new_file,
                      dstfile = dst_file,
                      s_srs = "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs",
                      t_srs = "EPSG:4326", overwrite = TRUE)

  res <- raster::raster(dst_file)
  res[] <- raster::getValues(res) # pull values into memory so temp files can go
  if (clean) {
    file.remove(c(out_file, new_file))
  }
  # Cell value semantics: http://nsidc.org/data/MOD10A1
  return(res)
}
| /R/download_data.R | no_license | cran/MODISSnow | R | false | false | 5,166 | r | #' @title
#' Download MODIS snow cover data (version 6) from the National Snow and Ice Data Center.
#'
#' @description
#' \code{download_data} is the main function to download a scene given the correct tile, date and satellite.
#'
#' \code{get_tile} is a helper function that actually downloads a tile. Supplied with a correct \code{ftp} address and \code{tile} the function downloads the MODIS tile, and transforms the coordinate reference system to latlong (EPSG:4326).
#'
#' @details
#' When downloading the data, the correct tile has to be specified. At the moment there is no automated way to find the tile. This means that the user has to consult the \href{http://landweb.nascom.nasa.gov/developers/is_tiles/is_bound_10deg.txt}{MODIS land grid} to find the correct tile. Alternatively the \href{http://landweb.nascom.nasa.gov/cgi-bin/developer/tilemap.cgi}{MODIS tile calculator} may be used.
#'
#' @param ftp Address of the repository.
#' @param tile Name of the tile.
#' @param progress Indicates whether or not progress is displayed.
#' @param clean Indicates whether or not temporary files are deleted.
#' @param date Day for which snow data should be downloaded as \code{Date}, \code{POSIXct}, or \code{POSIXlt}.
#' @param sat Satellite mission used. Currently Terra (\code{"MYD10A1"}) and Aqua (\code{"MOD10A1"}) are supported.
#' @param h Horizontal tile number, see also details.
#' @param v Vertical tile number, see also details.
#' @param printFTP If \code{TRUE}, the FTP address where the data are downloaded is printed.
#' @param ... Further arguments passed to \code{get_tile()}.
#'
#' @return
#' The function returns an object of the class \code{RasterLayer} with the following cell values:
#' \itemize{
#' \item 0-100 NDSI snow cover
#' \item 200 missing data
#' \item 201 no decision
#' \item 211 night
#' \item 237 inland water
#' \item 239 ocean
#' \item 250 cloud
#' \item 254 detector saturated
#' \item 255 fill
#' }
#' but see also the documentation for the \emph{NDSI_SNOW_COVER} \href{http://nsidc.org/data/MOD10A1}{here}.
#'
#' @references
#' When using the MODIS snow cover data, please acknowledge the data appropriately by
#' \enumerate{
#' \item reading the \href{http://nsidc.org/about/use_copyright.html}{use and copyright}
#' \item citing the original data: \emph{Hall, D. K. and G. A. Riggs. 2016. MODIS/[Terra/Aqua] Snow Cover Daily L3 Global 500m Grid, Version 6. [Indicate subset used]. Boulder, Colorado USA. NASA National Snow and Ice Data Center Distributed Active Archive Center. doi: http://dx.doi.org/10.5067/MODIS/MOD10A1.006. [Date Accessed].}
#' }
#' @export
#' @rdname MODISSnow
#'
#' @examples
#' \dontrun{
#' # Download MODIS snow data for a central europe h = 18 and v = 5 for the 1 of January 2016
#' dat <- download_data(lubridate::ymd("2016-01-01"), h = 18, v = 5)
#' class(dat)
#' raster::plot(dat)
#' }
download_data <- function(date, sat = "MYD10A1", h = 10, v = 10, printFTP = FALSE, ...) {
  # Input checks. `inherits()` is robust to multi-class objects (POSIXct is
  # c("POSIXct", "POSIXt")), whereas `!class(date) %in% ...` produced a
  # length-2 condition there, an error in modern R.
  if (!inherits(date, c("Date", "POSIXlt", "POSIXct"))) {
    stop("MODISSnow: date should be an object of class Date", call. = FALSE)
  }
  if (!sat %in% c("MYD10A1", "MOD10A1")) {
    stop("MODISSnow: unknown satellite requested", call. = FALSE)
  }

  # NSIDC daily folders are YYYY.MM.DD; Aqua (MYD) and Terra (MOD) products
  # live under MOSA and MOST respectively.
  folder_date <- base::format(date, "%Y.%m.%d")
  ftp <- if (sat == "MYD10A1") {
    paste0("ftp://n5eil01u.ecs.nsidc.org/SAN/MOSA/", sat, ".006/", folder_date, "/")
  } else {
    paste0("ftp://n5eil01u.ecs.nsidc.org/SAN/MOST/", sat, ".006/", folder_date, "/")
  }
  if (printFTP)
    print(ftp)

  # Fresh curl handle per request; see
  # http://stackoverflow.com/questions/37713293/how-to-circumvent-ftp-server-slowdown
  curl <- RCurl::getCurlHandle()
  fls <- RCurl::getURL(ftp, curl = curl, dirlistonly = TRUE)
  rm(curl)
  base::gc()
  base::gc()

  # Granules look like "MYD10A1.A2016001.h18v05..."; escape literal dots in
  # the pattern and zero-pad h/v so single-digit tiles match (h = 5 -> "h05").
  fls <- unlist(strsplit(fls, "\\n"))
  fls <- fls[grepl("hdf$", fls)]
  tile <- fls[grepl(
    paste0(sat, "\\.A", lubridate::year(date), "[0-9]{3}\\.h",
           formatC(h, width = 2, flag = "0"), "v", formatC(v, width = 2, flag = "0")),
    fls)]
  if (length(tile) != 1) {
    stop("MODISSnow: requested tile not found", call. = FALSE)
  }

  get_tile(ftp, tile, ...)
}
#'
#' @rdname MODISSnow
#' @export
#'
get_tile <- function(ftp, tile, progress = FALSE, clean = TRUE) {
  # Temp-dir intermediates: raw HDF granule, GeoTIFF of the first
  # subdataset (NDSI snow cover), and the EPSG:4326 reprojection.
  out_file <- file.path(tempdir(), tile)
  new_file <- paste0(tools::file_path_sans_ext(out_file), ".tif")
  dst_file <- paste0(tools::file_path_sans_ext(new_file), "_epsg4326.tif")

  if (progress) cat("[", format(Sys.time(), "%H-%M-%S"), "]: Starting download\n")
  # NOTE(review): `ftp` from download_data() ends in "/", so this URL carries
  # a double slash; FTP servers tolerate it.
  utils::download.file(paste(ftp, tile, sep = "/"), out_file)

  if (progress) cat("[", format(Sys.time(), "%H-%M-%S"), "]: Processing file\n")
  sds <- gdalUtils::get_subdatasets(out_file)
  gdalUtils::gdal_translate(sds[1], dst_dataset = new_file)
  # Sinusoidal -> lat/long; "+no_defs" is the correct PROJ.4 token (the
  # original "+no_def" was a typo PROJ ignored).
  gdalUtils::gdalwarp(srcfile = new_file,
                      dstfile = dst_file,
                      s_srs = "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs",
                      t_srs = "EPSG:4326", overwrite = TRUE)

  res <- raster::raster(dst_file)
  res[] <- raster::getValues(res) # load values into memory before cleanup
  if (clean) {
    file.remove(c(out_file, new_file))
  }
  # Cell value semantics: http://nsidc.org/data/MOD10A1
  return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{generate_uuid}
\alias{generate_uuid}
\title{Generate a Unique Id}
\usage{
generate_uuid(keep_dashes = TRUE)
}
\arguments{
\item{keep_dashes}{logical; an indicator of whether to keep the symbol "-" in
the generated Id}
}
\value{
character
}
\description{
This function generates universally unique ids with or without dashes
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\examples{
id_w_dashes <- generate_uuid()
id_wo_dashes <- generate_uuid(keep_dashes=FALSE)
}
\keyword{internal}
| /man/generate_uuid.Rd | no_license | eric88tchong/Rinstapkg | R | false | true | 611 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{generate_uuid}
\alias{generate_uuid}
\title{Generate a Unique Id}
\usage{
generate_uuid(keep_dashes = TRUE)
}
\arguments{
\item{keep_dashes}{logical; an indicator of whether to keep the symbol "-" in
the generated Id}
}
\value{
character
}
\description{
This function generates universally unique ids with or without dashes
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\examples{
id_w_dashes <- generate_uuid()
id_wo_dashes <- generate_uuid(keep_dashes=FALSE)
}
\keyword{internal}
|
# feature selection xgboost
library(tidyverse)
library(tidymodels)
library(data.table)
library(RcppRoll)
library(xgboost)
# data preparing ----
# Boruta-selected feature name vectors, one per feature family.
# (The original read features_boruta_type.RDS twice; the duplicate is removed.)
features_all <- read_rds("data/features/features_boruta_all.rds")
features_type <- read_rds("data/features/features_boruta_type.RDS")
features_after <- read_rds("data/features/features_boruta_after.RDS")
features_normal <- read_rds("data/features/features_boruta_normal.RDS")
features_scaled <- read_rds("data/features/features_boruta_scaled.RDS")
features_scale <- read_rds("data/features/features_boruta_scale.RDS")
folds <- read_csv("data/processed/folds.csv")
# NOTE(review): `sample` shadows base::sample() for the rest of the script.
sample <- read_csv("data/sample_submission.csv")
tr_te <- read_csv("data/features/features.csv")
# Training rows are those with a known target (TTF).
tr <- tr_te %>%
  drop_na(TTF)
# type ----
# Keep rows with acc_sd < 100 (drops extreme-variance segments).
index <- tr$acc_sd < 100
# Per-fold row indices in the list-of-integer-vectors shape xgb.cv expects.
validation_set <- folds[index, ] %>%
  select(id, fold_index) %>%
  mutate(flg = TRUE) %>%
  spread(fold_index, flg, fill = FALSE) %>%
  select(-id) %>%
  lapply(which)
# Binary target: quake imminent (TTF < 0.3) vs not.
label <- if_else(tr$TTF < 0.3, 1L, 0L)[index]
# `all_of()` makes the external character vector selection explicit
# (bare vectors in select() are deprecated/ambiguous in modern tidyselect).
dtrain <- tr %>%
  filter(index) %>%
  select(all_of(features_type)) %>%
  as.matrix() %>%
  xgb.DMatrix(label = label)
# xgboost hyper-parameters for the binary "type" classifier
# (logistic objective, evaluated on logloss, single-threaded).
params_type <- list(max_depth = 4,
min_child_weight = 2,
colsample_bytree = 0.7,
subsample = 0.9,
eta = .03,
booster = "gbtree",
objective = "binary:logistic",
eval_metric = "logloss",
nthread = 1)
# 10-fold CV with the predefined folds from `validation_set`; early stopping
# on held-out logloss selects the boosting round count.
set.seed(1234)
cv_type <- xgb.cv(params = params_type, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
# Refit on the full training matrix at the CV-selected iteration count.
fit_type <- xgb.train(params = params_type, dtrain, nrounds = cv_type$best_iteration)
# Gain-based feature importance; used below for correlation pruning.
impo <- xgb.importance(colnames(dtrain), fit_type)
hcorr_type <- tr[index,] %>%
select(features_type) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_type) %>%
select(-hcorr_type) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_type_list <- list()
score_type <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_type <- xgb.cv(params = params_type, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_type <- xgb.train(params = params_type, dtrain, nrounds = cv_type$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_type)
features_type_list[[i]] <- impo$Feature
score_type[[i]] <- cv_type$evaluation_log[cv_type$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain))
}
# Collapse the per-step CV logs into one table.
score_type <- do.call(rbind, score_type)
# Best elimination step by CV logloss.
# (fix: the original piped `do.call(rbind, score_type)` a second time on the
# already-rbound table, and left a dangling invalid subscript
# `features_type_list[[]]`; both removed)
score_type %>%
  filter(test_logloss_mean == min(test_logloss_mean))
# CV logloss (+/- 1 sd) across backward-elimination steps.
score_type %>%
  mutate(index = 1:n()) %>%
  ggplot(aes(index, test_logloss_mean)) +
  geom_ribbon(aes(ymin = test_logloss_mean - test_logloss_std,
                  ymax = test_logloss_mean + test_logloss_std),
              alpha = .3) +
  geom_line()
score_type %>%
  mutate(index = 1:n()) %>%
  arrange(test_logloss_mean) %>%
  head()
# Persist the feature set of the chosen elimination step.
features_type_list[[49]] %>%
  write_rds("data/features/features_xgb_type.rds")
# all----
index <- tr$acc_sd < 100
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label = tr$TTF[index]
dtrain <- tr %>%
filter(index) %>%
select(features_all) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
# Custom xgboost objective: gradient and Hessian of the "fair" loss,
# a smooth, Huber-like approximation of absolute error.
fair <- function(preds, dtrain) {
  resid <- getinfo(dtrain, 'label') - preds  # residual on the label scale
  fair_c <- .9                               # transition-scale constant of the fair loss
  denom <- abs(resid) + fair_c
  list(grad = -fair_c * resid / denom,       # d(loss)/d(preds)
       hess = fair_c * fair_c / denom^2)     # strictly positive second derivative
}
params_all <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = fair,
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_all <- xgb.cv(params = params_all, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_all <- xgb.train(params = params_all, dtrain, nrounds = cv_all$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_all)
hcorr_all <- tr[index,] %>%
select(features_all) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_all) %>%
select(-hcorr_all) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_all_list <- list()
score_all <- list()
(ncol(dtrain)-1)
for(i in 48:63){
cv_all <- xgb.cv(params = params_all, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_all <- xgb.train(params = params_all, dtrain, nrounds = cv_all$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_all)
features_all_list[[i]] <- impo$Feature
score_all[[i]] <- cv_all$evaluation_log[cv_all$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_all <- do.call(rbind, score_all)
score_all %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_all %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_all_list[[50]] %>%
write_rds("data/features/features_xgb_all.rds")
# after----
index <- (tr$acc_sd < 100 & tr$TTF < 0.3)
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label = tr$TTF[index]
dtrain <- tr %>%
filter(index) %>%
select(features_after) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
# Custom xgboost objective: gradient/Hessian of the "fair" loss
# (a smooth, Huber-like approximation of absolute error).
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds # residual on the label scale
c = .9 # fair-loss constant; controls the transition scale (shadows base::c locally)
den = abs(d) + c
grad = -c*d / den # d(loss)/d(preds)
hess = c*c / den ^ 2 # second derivative; always positive
return(list(grad = grad, hess = hess))
}
params_after <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = fair,
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_after <- xgb.cv(params = params_after, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_after <- xgb.train(params = params_after, dtrain, nrounds = cv_after$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_after)
hcorr_after <- tr[index,] %>%
select(features_after) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_after) %>%
select(-hcorr_after) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_after_list <- list()
score_after <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_after <- xgb.cv(params = params_after, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_after <- xgb.train(params = params_after, dtrain, nrounds = cv_after$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_after)
features_after_list[[i]] <- impo$Feature
score_after[[i]] <- cv_after$evaluation_log[cv_after$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_after <- do.call(rbind, score_after)
score_after %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_after %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_after_list[[21]] %>%
write_rds("data/features/features_xgb_after.rds")
# normal----
index <- (tr$acc_sd < 100 & tr$TTF > 0.3)
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label = tr$TTF[index]
dtrain <- tr %>%
filter(index) %>%
select(features_normal) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds
c = .9
den = abs(d) + c
grad = -c*d / den
hess = c*c / den ^ 2
return(list(grad = grad, hess = hess))
}
params_normal <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = fair,
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_normal <- xgb.cv(params = params_normal, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_normal <- xgb.train(params = params_normal, dtrain, nrounds = cv_normal$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_normal)
hcorr_normal <- tr[index,] %>%
select(features_normal) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_normal) %>%
select(-hcorr_normal) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_normal_list <- list()
score_normal <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_normal <- xgb.cv(params = params_normal, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_normal <- xgb.train(params = params_normal, dtrain, nrounds = cv_normal$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_normal)
features_normal_list[[i]] <- impo$Feature
score_normal[[i]] <- cv_normal$evaluation_log[cv_normal$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_normal <- do.call(rbind, score_normal)
score_normal %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_normal %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_normal_list[[48]] %>%
write_rds("data/features/features_xgb_normal.rds")
# scaled----
index <- (tr$acc_sd < 100)
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label <- tr %>%
mutate(wave_index = folds$wave_index) %>%
group_by(wave_index) %>%
mutate(scaled = TTF / max(TTF)) %>%
ungroup() %>%
filter(index) %>%
.$scaled
dtrain <- tr %>%
filter(index) %>%
select(features_scaled) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds
c = .9
den = abs(d) + c
grad = -c*d / den
hess = c*c / den ^ 2
return(list(grad = grad, hess = hess))
}
params_scaled <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = fair,
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_scaled <- xgb.cv(params = params_scaled, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_scaled <- xgb.train(params = params_scaled, dtrain, nrounds = cv_scaled$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_scaled)
hcorr_scaled <- tr[index,] %>%
select(features_scaled) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_scaled) %>%
select(-hcorr_scaled) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_scaled_list <- list()
score_scaled <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_scaled <- xgb.cv(params = params_scaled, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_scaled <- xgb.train(params = params_scaled, dtrain, nrounds = cv_scaled$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_scaled)
features_scaled_list[[i]] <- impo$Feature
score_scaled[[i]] <- cv_scaled$evaluation_log[cv_scaled$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_scaled <- do.call(rbind, score_scaled)
score_scaled %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_scaled %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_scaled_list[[46]] %>%
write_rds("data/features/features_xgb_scaled.rds")
# scale----
index <- (tr$acc_sd < 100)
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label <- tr %>%
mutate(wave_index = folds$wave_index) %>%
group_by(wave_index) %>%
mutate(scale = max(TTF)) %>%
ungroup() %>%
filter(index) %>%
.$scale
dtrain <- tr %>%
filter(index) %>%
select(features_scale) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds
c = .9
den = abs(d) + c
grad = -c*d / den
hess = c*c / den ^ 2
return(list(grad = grad, hess = hess))
}
# xgboost hyper-parameters for predicting the per-quake scale (max TTF),
# plain squared-error regression evaluated on MAE.
params_scale <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1, # NOTE(review): `silent` is deprecated in xgboost >= 1.0 (use `verbosity`) -- confirm version
booster = "gbtree",
objective = "reg:linear", # NOTE(review): deprecated alias of "reg:squarederror" in xgboost >= 1.0
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_scale <- xgb.cv(params = params_scale, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_scale <- xgb.train(params = params_scale, dtrain, nrounds = cv_scale$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_scale)
hcorr_scale <- tr[index,] %>%
select(features_scale) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_scale) %>%
select(-hcorr_scale) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_scale_list <- list()
score_scale <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_scale <- xgb.cv(params = params_scale, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_scale <- xgb.train(params = params_scale, dtrain, nrounds = cv_scale$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_scale)
features_scale_list[[i]] <- impo$Feature
score_scale[[i]] <- cv_scale$evaluation_log[cv_scale$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_scale <- do.call(rbind, score_scale)
score_scale %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_scale %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_scale_list[[46]] %>%
write_rds("data/features/features_xgb_scale.rds")
| /src/train/feature_selection_xgb.R | no_license | kur0cky/LANL | R | false | false | 20,606 | r | # feature selection xgboost
library(tidyverse)
library(tidymodels)
library(data.table)
library(RcppRoll)
library(xgboost)
# data preparing----
features_all <- read_rds("data/features/features_boruta_all.rds")
features_type <- read_rds("data/features/features_boruta_type.RDS")
features_type <- read_rds("data/features/features_boruta_type.RDS")
features_after <- read_rds("data/features/features_boruta_after.RDS")
features_normal <- read_rds("data/features/features_boruta_normal.RDS")
features_scaled <- read_rds("data/features/features_boruta_scaled.RDS")
features_scale <- read_rds("data/features/features_boruta_scale.RDS")
folds <- read_csv("data/processed/folds.csv")
sample <- read_csv("data/sample_submission.csv")
tr_te <- read_csv("data/features/features.csv")
tr <- tr_te %>%
drop_na(TTF)
# type----
index <- tr$acc_sd < 100
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label = if_else(tr$TTF < 0.3, 1L, 0L)[index]
dtrain <- tr %>%
filter(index) %>%
select(features_type) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
params_type <- list(max_depth = 4,
min_child_weight = 2,
colsample_bytree = 0.7,
subsample = 0.9,
eta = .03,
booster = "gbtree",
objective = "binary:logistic",
eval_metric = "logloss",
nthread = 1)
set.seed(1234)
cv_type <- xgb.cv(params = params_type, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_type <- xgb.train(params = params_type, dtrain, nrounds = cv_type$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_type)
hcorr_type <- tr[index,] %>%
select(features_type) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_type) %>%
select(-hcorr_type) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_type_list <- list()
score_type <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_type <- xgb.cv(params = params_type, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_type <- xgb.train(params = params_type, dtrain, nrounds = cv_type$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_type)
features_type_list[[i]] <- impo$Feature
score_type[[i]] <- cv_type$evaluation_log[cv_type$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain))
}
score_type <- do.call(rbind, score_type)
do.call(rbind, score_type) %>%
filter(test_logloss_mean == min(test_logloss_mean))
features_type_list[[]]
score_type %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_logloss_mean))+
geom_ribbon(aes(ymin = test_logloss_mean - test_logloss_std,
ymax = test_logloss_mean + test_logloss_std),
alpha = .3)+
geom_line()
score_type %>%
mutate(index = 1:n()) %>%
arrange(test_logloss_mean) %>%
head()
features_type_list[[49]] %>%
write_rds("data/features/features_xgb_type.rds")
# all----
index <- tr$acc_sd < 100
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label = tr$TTF[index]
dtrain <- tr %>%
filter(index) %>%
select(features_all) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds
c = .9
den = abs(d) + c
grad = -c*d / den
hess = c*c / den ^ 2
return(list(grad = grad, hess = hess))
}
params_all <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = fair,
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_all <- xgb.cv(params = params_all, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_all <- xgb.train(params = params_all, dtrain, nrounds = cv_all$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_all)
hcorr_all <- tr[index,] %>%
select(features_all) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_all) %>%
select(-hcorr_all) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_all_list <- list()
score_all <- list()
(ncol(dtrain)-1)
for(i in 48:63){
cv_all <- xgb.cv(params = params_all, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_all <- xgb.train(params = params_all, dtrain, nrounds = cv_all$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_all)
features_all_list[[i]] <- impo$Feature
score_all[[i]] <- cv_all$evaluation_log[cv_all$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_all <- do.call(rbind, score_all)
score_all %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_all %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_all_list[[50]] %>%
write_rds("data/features/features_xgb_all.rds")
# after----
index <- (tr$acc_sd < 100 & tr$TTF < 0.3)
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label = tr$TTF[index]
dtrain <- tr %>%
filter(index) %>%
select(features_after) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds
c = .9
den = abs(d) + c
grad = -c*d / den
hess = c*c / den ^ 2
return(list(grad = grad, hess = hess))
}
params_after <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = fair,
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_after <- xgb.cv(params = params_after, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_after <- xgb.train(params = params_after, dtrain, nrounds = cv_after$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_after)
hcorr_after <- tr[index,] %>%
select(features_after) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_after) %>%
select(-hcorr_after) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_after_list <- list()
score_after <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_after <- xgb.cv(params = params_after, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_after <- xgb.train(params = params_after, dtrain, nrounds = cv_after$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_after)
features_after_list[[i]] <- impo$Feature
score_after[[i]] <- cv_after$evaluation_log[cv_after$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_after <- do.call(rbind, score_after)
score_after %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_after %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_after_list[[21]] %>%
write_rds("data/features/features_xgb_after.rds")
# normal----
index <- (tr$acc_sd < 100 & tr$TTF > 0.3)
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label = tr$TTF[index]
dtrain <- tr %>%
filter(index) %>%
select(features_normal) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds
c = .9
den = abs(d) + c
grad = -c*d / den
hess = c*c / den ^ 2
return(list(grad = grad, hess = hess))
}
params_normal <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = fair,
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_normal <- xgb.cv(params = params_normal, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_normal <- xgb.train(params = params_normal, dtrain, nrounds = cv_normal$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_normal)
hcorr_normal <- tr[index,] %>%
select(features_normal) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_normal) %>%
select(-hcorr_normal) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_normal_list <- list()
score_normal <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_normal <- xgb.cv(params = params_normal, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_normal <- xgb.train(params = params_normal, dtrain, nrounds = cv_normal$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_normal)
features_normal_list[[i]] <- impo$Feature
score_normal[[i]] <- cv_normal$evaluation_log[cv_normal$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_normal <- do.call(rbind, score_normal)
score_normal %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_normal %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_normal_list[[48]] %>%
write_rds("data/features/features_xgb_normal.rds")
# scaled----
index <- (tr$acc_sd < 100)
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label <- tr %>%
mutate(wave_index = folds$wave_index) %>%
group_by(wave_index) %>%
mutate(scaled = TTF / max(TTF)) %>%
ungroup() %>%
filter(index) %>%
.$scaled
dtrain <- tr %>%
filter(index) %>%
select(features_scaled) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds
c = .9
den = abs(d) + c
grad = -c*d / den
hess = c*c / den ^ 2
return(list(grad = grad, hess = hess))
}
params_scaled <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = fair,
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_scaled <- xgb.cv(params = params_scaled, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_scaled <- xgb.train(params = params_scaled, dtrain, nrounds = cv_scaled$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_scaled)
hcorr_scaled <- tr[index,] %>%
select(features_scaled) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_scaled) %>%
select(-hcorr_scaled) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_scaled_list <- list()
score_scaled <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_scaled <- xgb.cv(params = params_scaled, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_scaled <- xgb.train(params = params_scaled, dtrain, nrounds = cv_scaled$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_scaled)
features_scaled_list[[i]] <- impo$Feature
score_scaled[[i]] <- cv_scaled$evaluation_log[cv_scaled$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_scaled <- do.call(rbind, score_scaled)
score_scaled %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_scaled %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_scaled_list[[46]] %>%
write_rds("data/features/features_xgb_scaled.rds")
# scale----
index <- (tr$acc_sd < 100)
validation_set <- folds[index,] %>%
select(id, fold_index) %>%
mutate(flg = T) %>%
spread(fold_index, flg, fill=F) %>%
select(-id) %>%
lapply(which)
label <- tr %>%
mutate(wave_index = folds$wave_index) %>%
group_by(wave_index) %>%
mutate(scale = max(TTF)) %>%
ungroup() %>%
filter(index) %>%
.$scale
dtrain <- tr %>%
filter(index) %>%
select(features_scale) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
fair <- function(preds, dtrain) {
d <- getinfo(dtrain, 'label') - preds
c = .9
den = abs(d) + c
grad = -c*d / den
hess = c*c / den ^ 2
return(list(grad = grad, hess = hess))
}
params_scale <- list(max_depth = 5,
min_child_weight = 2,
colsample_bytree = 0.9,
subsample = 0.9,
eta = .03,
silent = 1,
booster = "gbtree",
objective = "reg:linear",
eval_metric = "mae",
nthread = 1)
set.seed(1234)
cv_scale <- xgb.cv(params = params_scale, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_scale <- xgb.train(params = params_scale, dtrain, nrounds = cv_scale$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_scale)
hcorr_scale <- tr[index,] %>%
select(features_scale) %>%
cor %>%
as.data.frame() %>%
rownames_to_column("feature1") %>%
as_tibble() %>%
gather(feature2, corr, -feature1) %>%
filter(feature1 != feature2) %>%
arrange(desc(abs(corr))) %>%
filter(corr > .95) %>%
left_join(impo, by = c("feature1" = "Feature")) %>%
left_join(impo, by = c("feature2" = "Feature")) %>%
mutate(feature = if_else(Gain.x > Gain.y, feature2, feature1)) %>%
distinct(feature) %>%
drop_na() %>%
.$feature
dtrain <- tr %>%
filter(index) %>%
select(features_scale) %>%
select(-hcorr_scale) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
features_scale_list <- list()
score_scale <- list()
for(i in 1:(ncol(dtrain)-1)){
cv_scale <- xgb.cv(params = params_scale, dtrain, nrounds = 10000, nfold = 10,
early_stopping_rounds = 50,
verbose = 1,
folds = validation_set,
print_every_n = 10,
prediction = TRUE)
fit_scale <- xgb.train(params = params_scale, dtrain, nrounds = cv_scale$best_iteration)
impo <- xgb.importance(colnames(dtrain), fit_scale)
features_scale_list[[i]] <- impo$Feature
score_scale[[i]] <- cv_scale$evaluation_log[cv_scale$best_iteration,]
dtrain <- tr %>%
filter(index) %>%
select(impo$Feature[1:(nrow(impo)-1)]) %>%
as.matrix() %>%
xgb.DMatrix(label = label)
print(dim(dtrain));gc()
}
score_scale <- do.call(rbind, score_scale)
score_scale %>%
mutate(index = 1:n()) %>%
ggplot(aes(index, test_mae_mean))+
geom_ribbon(aes(ymin = test_mae_mean - test_mae_std,
ymax = test_mae_mean + test_mae_std),
alpha = .3)+
geom_line()
score_scale %>%
mutate(index = 1:n()) %>%
arrange(test_mae_mean) %>%
head()
features_scale_list[[46]] %>%
write_rds("data/features/features_xgb_scale.rds")
|
`modeDist` <-
function(x, num = TRUE) {
  ## Most frequent value of x; ties are broken uniformly at random.
  ## If num is TRUE the result is coerced to numeric, otherwise the
  ## value is returned as a character string (as produced by table()).
  counts <- table(x)
  modes <- names(counts)[counts == max(counts)]
  result <- if (length(modes) > 1) sample(modes, 1) else modes
  if (num) {
    result <- as.numeric(result)
  }
  result
}
| /R/modeDist.R | no_license | cran/scrime | R | false | false | 174 | r | `modeDist` <-
function(x,num=TRUE){
tab<-table(x)
out<-names(tab)[tab==max(tab)]
if(length(out)>1)
out<-sample(out,1)
if(num)
out<-as.numeric(out)
out
}
|
## This code is part of the megaptera package
## © C. Heibl 2016 (last update 2017-10-18)
#' @title Plot Large Phylogenies
#' @description Create a PDF file of a large phylogeny.
#' @param phy An object of class \code{\link{phylo}}.
#' @param file A vector of mode \code{"character"} giving a filename (and path)
#' for the PDF file.
#' @param view Logical, if \code{TRUE}, the PDF will be opened in the default
#' PDF viewer.
#' @param save Logical, if \code{TRUE}, the PDF is kept at \code{file}; if
#' \code{FALSE}, the file is deleted after (optionally) being viewed.
#' @return None, \code{slicePhylo} is called for its side effect of generating a
#' PDF file.
#' @importFrom ape nodelabels
#' @importFrom graphics plot
#' @export
# Render `phy` to a PDF whose paper size (DIN A0-A3) scales with the number
# of tips, colouring tip labels according to keywords in the tip labels.
pdfPhyloA0 <- function(phy, file = "bigtree.pdf",
view = FALSE, save = TRUE){
## graphical parameters optimized for A0
cex = .4
# choose DIN paper size from tree size (more tips -> larger page)
if ( Ntip(phy) > 750 ){
height <- 46.81; width <- 33.11 # DIN A0
}
if ( Ntip(phy) <= 750 & Ntip(phy) > 380 ){
height <- 33.11; width <- 23.41 # DIN A1
}
if ( Ntip(phy) <= 380 & Ntip(phy) > 190 ){
height <- 23.41; width <- 16.56 # DIN A2
}
if ( Ntip(phy) <= 190 ){
height <- 16.56; width <- 11.70 # DIN A3
}
## tip colors
# blue = "monotypic", grey = "incl.", orange/red = "p.p._-_" followed by a
# lowercase letter vs. a digit; all other tips black
tcol <- rep("black", Ntip(phy))
tcol[grep("monotypic", phy$tip.label)] <- "blue"
tcol[grep("incl[.]", phy$tip.label)] <- "grey35"
tcol[grep("p[.]p[.]_-_[[:lower:]]", phy$tip.label)] <- "orange"
tcol[grep("p[.]p[.]_-_[[:digit:]]", phy$tip.label)] <- "red"
pdf(file, height = height, width = width)
plot(phy, no.margin = TRUE,
edge.width = .25,
tip.color = tcol,
cex = cex,
type = "phylo",
use.edge.length = FALSE)
# node labels printed in red, slightly offset from each node
nodelabels(phy$node.label, cex = cex, adj = c(1.1, -.3), frame = "n", col = "red")
#tiplabels(cex = cex, adj = c(-.25, .5), frame = "n", col = "red")
dev.off()
# NOTE(review): `open` is macOS-specific; on other platforms this call fails
if ( view ) system(paste("open", file))
# NOTE(review): with view = TRUE and save = FALSE the file may be unlinked
# before the external viewer has finished opening it
if ( !save ) unlink(file)
}
## © C. Heibl 2016 (last update 2017-10-18)
#' @title Plot Large Phylogenies
#' @description Create a PDF file of a large phylogeny, choosing a DIN paper
#' size (A0 down to A3) according to the number of tips.
#' @param phy An object of class \code{\link{phylo}}.
#' @param file A vector of mode \code{"character"} giving a filename (and path)
#' for the PDF file.
#' @param view Logical, if \code{TRUE}, the PDF is opened via the system
#' \code{open} command (macOS-specific); independent of \code{save}.
#' @param save Logical, if \code{TRUE}, the PDF is saved to \code{file};
#' if \code{FALSE}, the file is deleted again after (optional) viewing.
#' @return None, \code{pdfPhyloA0} is called for its side effect of generating a
#' PDF file.
#' @importFrom ape nodelabels Ntip
#' @importFrom grDevices dev.off pdf
#' @importFrom graphics plot
#' @export
pdfPhyloA0 <- function(phy, file = "bigtree.pdf",
view = FALSE, save = TRUE){
## Fixed label size; page dimensions (in inches) scale with tree size instead.
cex = .4
## Pick DIN paper size by number of tips: A0 (> 750) down to A3 (<= 190).
if ( Ntip(phy) > 750 ){
height <- 46.81; width <- 33.11 # DIN A0
}
if ( Ntip(phy) <= 750 & Ntip(phy) > 380 ){
height <- 33.11; width <- 23.41 # DIN A1
}
if ( Ntip(phy) <= 380 & Ntip(phy) > 190 ){
height <- 23.41; width <- 16.56 # DIN A2
}
if ( Ntip(phy) <= 190 ){
height <- 16.56; width <- 11.70 # DIN A3
}
## Tip colors keyed on tip-label patterns (default black); presumably these
## tags mark taxonomic status ("monotypic", "incl.", "p.p.") — TODO confirm.
tcol <- rep("black", Ntip(phy))
tcol[grep("monotypic", phy$tip.label)] <- "blue"
tcol[grep("incl[.]", phy$tip.label)] <- "grey35"
tcol[grep("p[.]p[.]_-_[[:lower:]]", phy$tip.label)] <- "orange"
tcol[grep("p[.]p[.]_-_[[:digit:]]", phy$tip.label)] <- "red"
pdf(file, height = height, width = width)
## Cladogram (use.edge.length = FALSE) with thin edges and colored tips.
plot(phy, no.margin = TRUE,
edge.width = .25,
tip.color = tcol,
cex = cex,
type = "phylo",
use.edge.length = FALSE)
## Node labels in red, shifted up-right of each node, no frame.
nodelabels(phy$node.label, cex = cex, adj = c(1.1, -.3), frame = "n", col = "red")
#tiplabels(cex = cex, adj = c(-.25, .5), frame = "n", col = "red")
dev.off()
# NOTE(review): "open" is macOS-specific; fails silently on other platforms.
if ( view ) system(paste("open", file))
# NOTE(review): with view = TRUE and save = FALSE the file may be unlinked
# before the viewer has loaded it — confirm intended behavior.
if ( !save ) unlink(file)
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.