id stringlengths 40 40 | repo_name stringlengths 5 110 | path stringlengths 2 233 | content stringlengths 0 1.03M ⌀ | size int32 0 60M ⌀ | license stringclasses 15 values |
|---|---|---|---|---|---|
5a27816aaa6be982268a66d5a6a920350567e792 | printedheart/h2o-3 | h2o-test-integ/tests/hdfs-bigdata/runit_NOPASS_airlines_billion_rows.R | #----------------------------------------------------------------------
# Purpose: This test exercises HDFS operations from R.
#----------------------------------------------------------------------
# Resolve this script's own directory so the relative source() below works
# when launched through the runit harness.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit-hadoop.R')
# get_args(), heading() and PASS_BANNER() come from the sourced helpers.
ipPort <- get_args(commandArgs(trailingOnly = TRUE))
myIP <- ipPort[[1]]
myPort <- ipPort[[2]]
# HDFS name-node host is injected by the test driver via the environment.
hdfs_name_node <- Sys.getenv(c("NAME_NODE"))
print(hdfs_name_node)
library(RCurl)
library(testthat)
library(h2o)
heading("BEGIN TEST")
# Attach to an already-running H2O cluster; do not start a local one.
h2o.init(ip=myIP, port=myPort, startH2O = FALSE)
h2o.removeAll()
hdfs_data_file = "/datasets/airlinesbillion.csv"
#----------------------------------------------------------------------
# Single file cases.
#----------------------------------------------------------------------
heading("Testing single file importHDFS")
url <- sprintf("hdfs://%s%s", hdfs_name_node, hdfs_data_file)
data.hex <- h2o.importFile(url)
# data1.hex is an alias for the same H2O frame, not a copy.
data1.hex <- data.hex
n <- nrow(data.hex)
print(n)
# Sanity check: the billion-row airlines file has a known row count.
if (n != 1166952590) {
stop("nrows is wrong")
}
#Constructing validation and train sets by sampling (20/80)
#creating a column as tall as airlines(nrow(air))
s <- h2o.runif(data.hex) # Useful when number of rows too large for R to handle
data.train <- data.hex[s <= 0.8,]
data.valid <- data.hex[s > 0.8,]
## Response = Distance
myY = "C19"
myX = setdiff(names(data.hex), myY)
## Build GLM Model and compare AUC with h2o1
data_irlsm.glm <- h2o.glm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, family = "gaussian", solver = "IRLSM")
data_lbfgs.glm <- h2o.glm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, family = "gaussian", solver = "L_BFGS")
## Chose which col as response
## Response = IsDepDelayed
# NOTE(review): myX below is rebuilt against the new response but still
# contains C19, and the GBMs reuse the split built for the GLMs above —
# presumably intentional; confirm.
myY = "C31"
myX = setdiff(names(data1.hex), myY)
data1.gbm <- h2o.gbm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, ntrees = 10, max_depth = 5, distribution = "AUTO")
data2.gbm <- h2o.gbm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, ntrees = 10, max_depth = 5, distribution = "bernoulli")
data3.gbm <- h2o.gbm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, ntrees = 10, max_depth = 5, distribution = "multinomial")
#Random Forest
#data1.rf = h2o.randomForest(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, ntrees = 10, max_depth = 5)
#data1.rf
#Deep Learning
#data1.dl <- h2o.deeplearning(x=myX, y=myY, training_frame=data.train, validation_frame=data.valid, replicate_training_data=FALSE, epochs=.1, hidden=c(5,5))
#data1.dl
PASS_BANNER()
| 2,645 | apache-2.0 |
5a27816aaa6be982268a66d5a6a920350567e792 | madmax983/h2o-3 | h2o-test-integ/tests/hdfs-bigdata/runit_NOPASS_airlines_billion_rows.R | #----------------------------------------------------------------------
# Purpose: This test exercises HDFS operations from R.
#----------------------------------------------------------------------
# Resolve this script's own directory so the relative source() below works
# when launched through the runit harness.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit-hadoop.R')
# get_args(), heading() and PASS_BANNER() come from the sourced helpers.
ipPort <- get_args(commandArgs(trailingOnly = TRUE))
myIP <- ipPort[[1]]
myPort <- ipPort[[2]]
# HDFS name-node host is injected by the test driver via the environment.
hdfs_name_node <- Sys.getenv(c("NAME_NODE"))
print(hdfs_name_node)
library(RCurl)
library(testthat)
library(h2o)
heading("BEGIN TEST")
# Attach to an already-running H2O cluster; do not start a local one.
h2o.init(ip=myIP, port=myPort, startH2O = FALSE)
h2o.removeAll()
hdfs_data_file = "/datasets/airlinesbillion.csv"
#----------------------------------------------------------------------
# Single file cases.
#----------------------------------------------------------------------
heading("Testing single file importHDFS")
url <- sprintf("hdfs://%s%s", hdfs_name_node, hdfs_data_file)
data.hex <- h2o.importFile(url)
# data1.hex is an alias for the same H2O frame, not a copy.
data1.hex <- data.hex
n <- nrow(data.hex)
print(n)
# Sanity check: the billion-row airlines file has a known row count.
if (n != 1166952590) {
stop("nrows is wrong")
}
#Constructing validation and train sets by sampling (20/80)
#creating a column as tall as airlines(nrow(air))
s <- h2o.runif(data.hex) # Useful when number of rows too large for R to handle
data.train <- data.hex[s <= 0.8,]
data.valid <- data.hex[s > 0.8,]
## Response = Distance
myY = "C19"
myX = setdiff(names(data.hex), myY)
## Build GLM Model and compare AUC with h2o1
data_irlsm.glm <- h2o.glm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, family = "gaussian", solver = "IRLSM")
data_lbfgs.glm <- h2o.glm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, family = "gaussian", solver = "L_BFGS")
## Chose which col as response
## Response = IsDepDelayed
# NOTE(review): myX below is rebuilt against the new response but still
# contains C19, and the GBMs reuse the split built for the GLMs above —
# presumably intentional; confirm.
myY = "C31"
myX = setdiff(names(data1.hex), myY)
data1.gbm <- h2o.gbm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, ntrees = 10, max_depth = 5, distribution = "AUTO")
data2.gbm <- h2o.gbm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, ntrees = 10, max_depth = 5, distribution = "bernoulli")
data3.gbm <- h2o.gbm(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, ntrees = 10, max_depth = 5, distribution = "multinomial")
#Random Forest
#data1.rf = h2o.randomForest(x = myX, y = myY, training_frame = data.train, validation_frame=data.valid, ntrees = 10, max_depth = 5)
#data1.rf
#Deep Learning
#data1.dl <- h2o.deeplearning(x=myX, y=myY, training_frame=data.train, validation_frame=data.valid, replicate_training_data=FALSE, epochs=.1, hidden=c(5,5))
#data1.dl
PASS_BANNER()
| 2,645 | apache-2.0 |
c9bd589ae83bc3f61cd2861e8287f1d8a9dcbf0c | mtennekes/tmap | examples/tm_text.R | current.mode <- tmap_mode("plot")
# Example code for tm_text (Rd \examples chunk, not a standalone script;
# current.mode was captured above via tmap_mode("plot")).
data(World, metro)
# Label countries, scaling label size by polygon area.
tm_shape(World) +
tm_text("name", size="AREA")
# Size labels by population estimate; colour them by continent.
tm_shape(World) +
tm_text("name", size="pop_est", col="continent", palette="Dark2",
title.size = "Population", title.col="Continent") +
tm_legend(outside = TRUE)
tmap_mode("view")
# \dontrun{} is Rd markup: the interactive-view examples below are not run
# by R CMD check (they need the tmaptools package and a viewer).
\dontrun{
require(tmaptools)
metro_aus <- crop_shape(metro, bb("Australia"))
tm_shape(metro_aus) +
tm_dots() +
tm_text("name", just = "top")
# alternative
tm_shape(metro_aus) +
tm_markers(text = "name")
}
# restore current mode
tmap_mode(current.mode)
| 552 | gpl-3.0 |
59aa93f2415a5a0f4c6d89650568bf25e5b1a91b | philchalmers/faoutlier | inst/vignette-source/R/objects.R | library(faoutlier)
# Fit a 3-factor CFA to the Holzinger data and precompute the faoutlier
# diagnostics for both the clean and the outlier-contaminated datasets,
# caching everything in objects.Rdata for the vignette to load.
nfact <- 3
CFAmodel <- specifyModel("CFAmodel.txt")
mod1 <- sem(CFAmodel, data = holzinger)
so <- summary(mod1)
# Robust Mahalanobis distances.
MD1 <- robustMD(holzinger)
MD2 <- robustMD(holzinger.outlier)
# Observed residuals.
OR1 <- obs.resid(holzinger, nfact)
OR2 <- obs.resid(holzinger.outlier, nfact)
# Case-deletion goodness-of-fit statistics.
GOF1 <- GOF(holzinger, nfact)
GOF2 <- GOF(holzinger.outlier, nfact)
# Generalized Cook's distances: by factor count and by explicit sem model.
gCD1 <- gCD(holzinger, nfact)
gCD2 <- gCD(holzinger.outlier, nfact)
gCD3 <- gCD(holzinger, CFAmodel)
gCD4 <- gCD(holzinger.outlier, CFAmodel)
# Forward-search influence diagnostics (the slow step).
FS1 <- forward.search(holzinger, nfact)
FS2 <- forward.search(holzinger.outlier, nfact)
save.image('objects.Rdata')
| 613 | gpl-2.0 |
25e2c60467ffef107ec4e5ceb273174458724eb2 | andrewdefries/andrewdefries.github.io | FDA_Pesticide_Glossary/sodium_azide.R | library("knitr")
library("rgl")
# Earlier two-step pipeline (knit + markdownToHTML, optional pandoc PDF)
# kept for reference:
#knit("sodium_azide.Rmd")
#markdownToHTML('sodium_azide.md', 'sodium_azide.html', options=c("use_xhml"))
#system("pandoc -s sodium_azide.html -o sodium_azide.pdf")
# knit2html() performs the knit and markdown-to-HTML steps in one call.
knit2html('sodium_azide.Rmd')
| 228 | mit |
c30cff98ff5071bdf5dce138743a7d83a6794562 | spennihana/h2o-3 | h2o-r/tests/testdir_misc/runit_groupby.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
##
# Run through groupby methods with different column types
##
test <- function(conn) {
  # Compare h2o.group_by() aggregations (nrow, sum, mean, var, sd) on the
  # prostate dataset against base-R reference values computed per RACE group.
  # `conn` is supplied by the runit harness; it is not used directly.
  Log.info("Upload prostate dataset into H2O...")
  df.hex <- h2o.uploadFile(locate("smalldata/prostate/prostate.csv"))
  # Fixed log message: it said "airlines" but the file read is prostate.csv.
  Log.info("Import prostate dataset into R...")
  df.R <- read.csv(locate("smalldata/prostate/prostate.csv"))
  # Reference: VOL values split by the three RACE levels (0, 1, 2).
  races <- lapply(0:2, function(x) df.R[df.R$RACE == x, "VOL" ])
  Log.info("Test method = nrow...")
  gp_nrow <- h2o.group_by(data = df.hex, by = "RACE", nrow("VOL"))
  gp_nrow <- as.data.frame(gp_nrow)[,2]
  r_nrow <- sapply(races, length)
  checkEqualsNumeric(gp_nrow, r_nrow)
  Log.info("Test method = sum...")
  gp_sum <- h2o.group_by(data = df.hex, by = "RACE", sum("VOL"))
  gp_sum <- as.data.frame(gp_sum)[,2]
  r_sum <- sapply(races, sum)
  checkEqualsNumeric(r_sum, gp_sum)
  Log.info("Test method = mean ...")
  gp_mean <- h2o.group_by(data = df.hex, by = "RACE", mean("VOL"))
  gp_mean <- as.data.frame(gp_mean)[,2]
  r_mean <- sapply(races, mean)
  checkEqualsNumeric(r_mean, gp_mean)
  # median is unimplemented in h2o.group_by at the moment; see
  # https://0xdata.atlassian.net/browse/PUBDEV-2319
  Log.info("Test method = var ...")
  gp_var <- h2o.group_by(data = df.hex, by = "RACE", var("VOL"))
  gp_var <- as.data.frame(gp_var)[,2]
  r_var <- sapply(races, var)
  checkEqualsNumeric(r_var, gp_var)
  Log.info("Test method = sd ...")
  gp_sd <- h2o.group_by(data = df.hex, by = "RACE", sd("VOL"))
  gp_sd <- as.data.frame(gp_sd)[,2]
  r_sd <- sapply(races, sd)
  checkEqualsNumeric(r_sd, gp_sd)
}
doTest("Testing different methods for groupby:", test)
| 1,965 | apache-2.0 |
65dd8e2fa6bfb9a398640774a8729c57b2bcac26 | hidelab/Amberkar_Scripts | msmm_reseq_Braak_STG_DCN_analysis.R | library(cocor)
library(org.Hs.eg.db)
library(parallel)
library(data.table)
ncore = detectCores()
blocksize=100000
ProcessElement <- function(ic){
  # Map the linear upper-triangle index `ic` to a gene pair (A, B), compute
  # the Spearman correlation of that pair in the control and treatment
  # expression matrices, and test whether the two correlations differ
  # (cocor, Fisher 1925 z-test).
  #
  # Reads from the enclosing environment: c_exprs_rank, t_exprs_rank,
  # n.c, n.t, gene.names, pb, blocksize.
  # Returns a one-row data.frame: gene names, per-group r/p/n, the cocor
  # p-value (NA when either correlation is NA) and |r.t - r.c|.
  A = ceiling((sqrt(8*(ic+1)-7)+1)/2)
  B = ic-choose(floor(1/2+sqrt(2*ic)),2)
  c_A = c_exprs_rank[A,]
  c_B = c_exprs_rank[B,]
  t_A = t_exprs_rank[A,]
  t_B = t_exprs_rank[B,]
  # get correlation between the summaries for the unique genes
  tmp = data.frame(Gene.A=gene.names[A],Gene.B=gene.names[B])
  c_cortest <- cor.test(unname(unlist(c_A)), unname(unlist(c_B)), method="spearman")
  t_cortest <- cor.test(unname(unlist(t_A)), unname(unlist(t_B)), method="spearman")
  # Fix: the original recomputed the correlations with cor.test()'s default
  # method (Pearson), so r.c/r.t were Pearson estimates paired with the
  # Spearman p-values above, and cocor then compared Pearson r's against
  # Spearman tests. Reuse the Spearman rho estimates consistently.
  rc <- unname(c_cortest$estimate)
  rt <- unname(t_cortest$estimate)
  tmp$r.c <- rc
  tmp$p.c <- c_cortest$p.value
  tmp$n.c <- n.c
  tmp$r.t <- rt
  tmp$p.t <- t_cortest$p.value
  tmp$n.t <- n.t
  tmp$p.cocor <- NA
  tmp$abs.corr.change <- abs(rt-rc)
  if ( (!is.na(tmp$r.c)) && (!is.na(tmp$r.t)) )
  {
    diffcor <- cocor.indep.groups(tmp$r.c, tmp$r.t, tmp$n.c, tmp$n.t)
    tmp$p.cocor <- diffcor@fisher1925$p.value
  }
  setTxtProgressBar(pb,ic %% blocksize)
  return(tmp)
}
# Load the MSBB residual-expression matrix, drop resequenced sample columns
# and index rows by Ensembl gene id.
msmm_data=fread("./MSMM/MSSM_FP_STG_PHG_IFG_netResidualExpression.tsv",sep="\t",header=T,data.table=F,showProgress=T)
msmm_data2=msmm_data[,-grep(pattern="resequenced",x=colnames(msmm_data))]
rownames(msmm_data2)=msmm_data$ensembl_gene_id
#Merge clinical and pathological covariates
msmm_rnaseq_covariates=read.csv("./MSMM/MSBB_RNAseq_covariates.csv",header = T,as.is = T)
msmm_rnaseq_clinical_covariates=read.csv("./MSMM/MSBB_clinical.csv",header = T,as.is = T)
msmm_rnaseq_covariates.merged=merge(x = msmm_rnaseq_clinical_covariates,y=msmm_rnaseq_covariates,by=c("individualIdentifier","individualIdentifier"))
# Drop unmapped/resequenced files; keep Braak scores 1-6 only (exclude 0, 9).
msmm_rnaseq_covariates.merged2=msmm_rnaseq_covariates.merged[grep(pattern = "unmapped|resequenced",x = msmm_rnaseq_covariates.merged$fileName,invert = T),]
msmm_rnaseq_covariates.merged_allBraak=msmm_rnaseq_covariates.merged2[grep(pattern="0|9",x=msmm_rnaseq_covariates.merged2$bbscore,invert=T),]
# Build a region -> Braak-stage -> sample-id lookup for four Brodmann areas.
msmm_rnaseq_sampleList.Braak=vector(mode = "list",length = 4)
names(msmm_rnaseq_sampleList.Braak)=c("FP","IFG","STG","PHG")
msmm_rnaseq_sampleList.Braak$FP=msmm_rnaseq_sampleList.Braak$IFG=msmm_rnaseq_sampleList.Braak$STG=msmm_rnaseq_sampleList.Braak$PHG=vector(mode = "list",length = 6)
names(msmm_rnaseq_sampleList.Braak$FP)=names(msmm_rnaseq_sampleList.Braak$IFG)=names(msmm_rnaseq_sampleList.Braak$STG)=names(msmm_rnaseq_sampleList.Braak$PHG)=paste("B",c(1:6),sep = "")
braak_scores=c(1:6)
brain_regions=c("BM10","BM44","BM22","BM36")
for(r in 1:length(brain_regions)){
for(b in 1:length(braak_scores)){
msmm_rnaseq_sampleList.Braak[[r]][[b]]=msmm_rnaseq_covariates.merged_allBraak$sampleIdentifier[which((msmm_rnaseq_covariates.merged_allBraak$BrodmannArea==brain_regions[r])&(msmm_rnaseq_covariates.merged_allBraak$bbscore==braak_scores[b]))]
}
}
# `c` indexes the Braak-stage pair to contrast (row of braak_scores_combn
# below); note it shadows base c() as a variable name.
c=1
# NOTE(review): samples are drawn from the FP region list here, yet the
# output filenames below use names(...)[r] with `r` left over from the loop
# above (value 4 = "PHG") — region labelling looks inconsistent; confirm.
exprs_rank=lapply(msmm_rnaseq_sampleList.Braak$FP,function(x)msmm_data2[,which(colnames(msmm_data2)%in%x)])
# Keep genes expressed (>0) in at least a third of samples in every stage.
common_genes=Reduce(intersect,lapply(lapply(exprs_rank,function(x)x[which((rowSums(x>0)>=ncol(x)/3)==T),]),rownames))
exprs_rank2=lapply(lapply(exprs_rank,function(x)x[which((rowSums(x>0)>=ncol(x)/3)==T),]),function(y)y[rownames(y)%in%common_genes,])
# NOTE(review): `t` is not defined as an index in this script (it resolves
# to the base transpose function), so exprs_rank2[[t]] errors at runtime;
# presumably an integer stage index (e.g. 1) was intended — confirm.
number_of_combinations=choose(nrow(exprs_rank2[[t]]),2)
# Adjacent Braak-stage pairs: (1,2), (2,3), ..., (5,6).
braak_scores_combn=matrix(NA,nrow=5,ncol=2)
for (i in 1:5){
braak_scores_combn[i,]=c(i,i+1)
}
# "control" = earlier stage, "treatment" = later stage of the chosen pair.
c_exprs_rank=exprs_rank2[braak_scores_combn[c,1]][[1]]
t_exprs_rank=exprs_rank2[braak_scores_combn[c,2]][[1]]
n.c<-ncol(c_exprs_rank)
n.t<-ncol(t_exprs_rank)
gene.names<-rownames(exprs_rank2$B1)
# NOTE(review): setwd() inside a script mutates global state and is not
# restored on error; all relative output paths below depend on it.
dir.create(paste("Braak",braak_scores_combn[c,1],"_","Braak",braak_scores_combn[c,2],sep = ""),showWarnings = T,mode = "0777")
setwd(paste("Braak",braak_scores_combn[c,1],"_","Braak",braak_scores_combn[c,2],"/",sep = ""))
i<-0
# blocksize is rebound here (50) after the earlier 100000; ProcessElement's
# progress bar uses whichever value is current when it runs.
blocksize=50
start<-i*blocksize+1
end<-min((i+1)*blocksize, number_of_combinations)
#str(Reduce(intersect,lapply(lapply(exprs_rank,function(x)x[which((rowSums(x>0)>=ncol(x)/3)==T),]),rownames)))
# Process gene pairs in blocks of `blocksize`; capped at the first 1000
# linear indices.
while(start < 1000){
input<-start:end
pb = txtProgressBar(min=0,max=length(input),style=3,initial=0)
cat("\n")
res = mclapply(input,ProcessElement,mc.cores=ncore)
close(pb)
#save results
#rbindlist is faster than rbind.fill
result <- rbindlist(res)
result <- as.data.frame(result)
result <- data.frame(result,stringsAsFactors = F)
#Write results, use fwrite instead of write.table for faster processing
fwrite(result, file=paste("mssm_reseq_",names(msmm_rnaseq_sampleList.Braak)[r],"_",paste("Braak",braak_scores_combn[c,],sep = "_",collapse = "_"),"_cocor_tmp", i, ".txt",sep = ""), sep="\t",col.names = T,row.names = F,buffMB = 10,nThread = 4,quote = F)
i<-i+1
start<-i*blocksize+1
end<-min((i+1)*blocksize, number_of_combinations)
}
#Remove headers from tmp files and combine in a single one
system(paste("find . -name '*.txt'|grep 'tmp'|xargs -n 1 tail -n +2",paste(">MSMM_ReSeq_",names(msmm_rnaseq_sampleList.Braak)[r],"_",paste("Braak",braak_scores_combn[c,],sep = "_",collapse = "_"),"_allResults_DiffCorr.txt",sep = "")))
allResults_FDR=fread(input = list.files(pattern = "*allResults_DiffCorr.txt"),sep = "\t",header = F,showProgress = T,data.table = F,strip.white = T,stringsAsFactors = F)
#Assign single header for the combined file
colnames(allResults_FDR)=colnames(result)
#Compute FDR values for all p-values
allResults_FDR$FDR.cocor=p.adjust(p = as.numeric(allResults_FDR$p.cocor),method = "fdr")
allResults_FDR$FDR.c=p.adjust(p = as.numeric(allResults_FDR$p.c),method = "fdr")
allResults_FDR$FDR.t=p.adjust(p = as.numeric(allResults_FDR$p.t),method = "fdr")
#Generate DCNs adn write to R objects
# NOTE(review): graph.data.frame() and E() are igraph functions, but
# library(igraph) is never loaded in this script — confirm it is attached
# upstream or add the library call.
control_DCN=disease_DCN=graph.data.frame(d = allResults_FDR[which(allResults_FDR$FDR.cocor<=0.1),c(1:2)],directed = F)
E(control_DCN)$weight=allResults_FDR[which(allResults_FDR$FDR.cocor<=0.1),3]+1
E(disease_DCN)$weight=allResults_FDR[which(allResults_FDR$FDR.cocor<=0.1),6]+1
saveRDS(control_DCN,file = paste("mssm_reseq_",names(msmm_rnaseq_sampleList.Braak)[r],"_",paste("Braak",braak_scores_combn[c,],sep = "_",collapse = "_"),"_Control.RDS",sep = ""))
saveRDS(disease_DCN,file = paste("mssm_reseq_",names(msmm_rnaseq_sampleList.Braak)[r],"_",paste("Braak",braak_scores_combn[c,],sep = "_",collapse = "_"),"_Disease.RDS",sep = "")) | 6,372 | gpl-3.0 |
c30cff98ff5071bdf5dce138743a7d83a6794562 | mathemage/h2o-3 | h2o-r/tests/testdir_misc/runit_groupby.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
##
# Run through groupby methods with different column types
##
test <- function(conn) {
  # Compare h2o.group_by() aggregations (nrow, sum, mean, var, sd) on the
  # prostate dataset against base-R reference values computed per RACE group.
  # `conn` is supplied by the runit harness; it is not used directly.
  Log.info("Upload prostate dataset into H2O...")
  df.hex <- h2o.uploadFile(locate("smalldata/prostate/prostate.csv"))
  # Fixed log message: it said "airlines" but the file read is prostate.csv.
  Log.info("Import prostate dataset into R...")
  df.R <- read.csv(locate("smalldata/prostate/prostate.csv"))
  # Reference: VOL values split by the three RACE levels (0, 1, 2).
  races <- lapply(0:2, function(x) df.R[df.R$RACE == x, "VOL" ])
  Log.info("Test method = nrow...")
  gp_nrow <- h2o.group_by(data = df.hex, by = "RACE", nrow("VOL"))
  gp_nrow <- as.data.frame(gp_nrow)[,2]
  r_nrow <- sapply(races, length)
  checkEqualsNumeric(gp_nrow, r_nrow)
  Log.info("Test method = sum...")
  gp_sum <- h2o.group_by(data = df.hex, by = "RACE", sum("VOL"))
  gp_sum <- as.data.frame(gp_sum)[,2]
  r_sum <- sapply(races, sum)
  checkEqualsNumeric(r_sum, gp_sum)
  Log.info("Test method = mean ...")
  gp_mean <- h2o.group_by(data = df.hex, by = "RACE", mean("VOL"))
  gp_mean <- as.data.frame(gp_mean)[,2]
  r_mean <- sapply(races, mean)
  checkEqualsNumeric(r_mean, gp_mean)
  # median is unimplemented in h2o.group_by at the moment; see
  # https://0xdata.atlassian.net/browse/PUBDEV-2319
  Log.info("Test method = var ...")
  gp_var <- h2o.group_by(data = df.hex, by = "RACE", var("VOL"))
  gp_var <- as.data.frame(gp_var)[,2]
  r_var <- sapply(races, var)
  checkEqualsNumeric(r_var, gp_var)
  Log.info("Test method = sd ...")
  gp_sd <- h2o.group_by(data = df.hex, by = "RACE", sd("VOL"))
  gp_sd <- as.data.frame(gp_sd)[,2]
  r_sd <- sapply(races, sd)
  checkEqualsNumeric(r_sd, gp_sd)
}
doTest("Testing different methods for groupby:", test)
| 1,965 | apache-2.0 |
c30cff98ff5071bdf5dce138743a7d83a6794562 | nilbody/h2o-3 | h2o-r/tests/testdir_misc/runit_groupby.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
##
# Run through groupby methods with different column types
##
test <- function(conn) {
  # Compare h2o.group_by() aggregations (nrow, sum, mean, var, sd) on the
  # prostate dataset against base-R reference values computed per RACE group.
  # `conn` is supplied by the runit harness; it is not used directly.
  Log.info("Upload prostate dataset into H2O...")
  df.hex <- h2o.uploadFile(locate("smalldata/prostate/prostate.csv"))
  # Fixed log message: it said "airlines" but the file read is prostate.csv.
  Log.info("Import prostate dataset into R...")
  df.R <- read.csv(locate("smalldata/prostate/prostate.csv"))
  # Reference: VOL values split by the three RACE levels (0, 1, 2).
  races <- lapply(0:2, function(x) df.R[df.R$RACE == x, "VOL" ])
  Log.info("Test method = nrow...")
  gp_nrow <- h2o.group_by(data = df.hex, by = "RACE", nrow("VOL"))
  gp_nrow <- as.data.frame(gp_nrow)[,2]
  r_nrow <- sapply(races, length)
  checkEqualsNumeric(gp_nrow, r_nrow)
  Log.info("Test method = sum...")
  gp_sum <- h2o.group_by(data = df.hex, by = "RACE", sum("VOL"))
  gp_sum <- as.data.frame(gp_sum)[,2]
  r_sum <- sapply(races, sum)
  checkEqualsNumeric(r_sum, gp_sum)
  Log.info("Test method = mean ...")
  gp_mean <- h2o.group_by(data = df.hex, by = "RACE", mean("VOL"))
  gp_mean <- as.data.frame(gp_mean)[,2]
  r_mean <- sapply(races, mean)
  checkEqualsNumeric(r_mean, gp_mean)
  # median is unimplemented in h2o.group_by at the moment; see
  # https://0xdata.atlassian.net/browse/PUBDEV-2319
  Log.info("Test method = var ...")
  gp_var <- h2o.group_by(data = df.hex, by = "RACE", var("VOL"))
  gp_var <- as.data.frame(gp_var)[,2]
  r_var <- sapply(races, var)
  checkEqualsNumeric(r_var, gp_var)
  Log.info("Test method = sd ...")
  gp_sd <- h2o.group_by(data = df.hex, by = "RACE", sd("VOL"))
  gp_sd <- as.data.frame(gp_sd)[,2]
  r_sd <- sapply(races, sd)
  checkEqualsNumeric(r_sd, gp_sd)
}
doTest("Testing different methods for groupby:", test)
| 1,965 | apache-2.0 |
c30cff98ff5071bdf5dce138743a7d83a6794562 | michalkurka/h2o-3 | h2o-r/tests/testdir_misc/runit_groupby.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
##
# Run through groupby methods with different column types
##
test <- function(conn) {
  # Compare h2o.group_by() aggregations (nrow, sum, mean, var, sd) on the
  # prostate dataset against base-R reference values computed per RACE group.
  # `conn` is supplied by the runit harness; it is not used directly.
  Log.info("Upload prostate dataset into H2O...")
  df.hex <- h2o.uploadFile(locate("smalldata/prostate/prostate.csv"))
  # Fixed log message: it said "airlines" but the file read is prostate.csv.
  Log.info("Import prostate dataset into R...")
  df.R <- read.csv(locate("smalldata/prostate/prostate.csv"))
  # Reference: VOL values split by the three RACE levels (0, 1, 2).
  races <- lapply(0:2, function(x) df.R[df.R$RACE == x, "VOL" ])
  Log.info("Test method = nrow...")
  gp_nrow <- h2o.group_by(data = df.hex, by = "RACE", nrow("VOL"))
  gp_nrow <- as.data.frame(gp_nrow)[,2]
  r_nrow <- sapply(races, length)
  checkEqualsNumeric(gp_nrow, r_nrow)
  Log.info("Test method = sum...")
  gp_sum <- h2o.group_by(data = df.hex, by = "RACE", sum("VOL"))
  gp_sum <- as.data.frame(gp_sum)[,2]
  r_sum <- sapply(races, sum)
  checkEqualsNumeric(r_sum, gp_sum)
  Log.info("Test method = mean ...")
  gp_mean <- h2o.group_by(data = df.hex, by = "RACE", mean("VOL"))
  gp_mean <- as.data.frame(gp_mean)[,2]
  r_mean <- sapply(races, mean)
  checkEqualsNumeric(r_mean, gp_mean)
  # median is unimplemented in h2o.group_by at the moment; see
  # https://0xdata.atlassian.net/browse/PUBDEV-2319
  Log.info("Test method = var ...")
  gp_var <- h2o.group_by(data = df.hex, by = "RACE", var("VOL"))
  gp_var <- as.data.frame(gp_var)[,2]
  r_var <- sapply(races, var)
  checkEqualsNumeric(r_var, gp_var)
  Log.info("Test method = sd ...")
  gp_sd <- h2o.group_by(data = df.hex, by = "RACE", sd("VOL"))
  gp_sd <- as.data.frame(gp_sd)[,2]
  r_sd <- sapply(races, sd)
  checkEqualsNumeric(r_sd, gp_sd)
}
doTest("Testing different methods for groupby:", test)
| 1,965 | apache-2.0 |
c30cff98ff5071bdf5dce138743a7d83a6794562 | YzPaul3/h2o-3 | h2o-r/tests/testdir_misc/runit_groupby.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
##
# Run through groupby methods with different column types
##
test <- function(conn) {
  # Compare h2o.group_by() aggregations (nrow, sum, mean, var, sd) on the
  # prostate dataset against base-R reference values computed per RACE group.
  # `conn` is supplied by the runit harness; it is not used directly.
  Log.info("Upload prostate dataset into H2O...")
  df.hex <- h2o.uploadFile(locate("smalldata/prostate/prostate.csv"))
  # Fixed log message: it said "airlines" but the file read is prostate.csv.
  Log.info("Import prostate dataset into R...")
  df.R <- read.csv(locate("smalldata/prostate/prostate.csv"))
  # Reference: VOL values split by the three RACE levels (0, 1, 2).
  races <- lapply(0:2, function(x) df.R[df.R$RACE == x, "VOL" ])
  Log.info("Test method = nrow...")
  gp_nrow <- h2o.group_by(data = df.hex, by = "RACE", nrow("VOL"))
  gp_nrow <- as.data.frame(gp_nrow)[,2]
  r_nrow <- sapply(races, length)
  checkEqualsNumeric(gp_nrow, r_nrow)
  Log.info("Test method = sum...")
  gp_sum <- h2o.group_by(data = df.hex, by = "RACE", sum("VOL"))
  gp_sum <- as.data.frame(gp_sum)[,2]
  r_sum <- sapply(races, sum)
  checkEqualsNumeric(r_sum, gp_sum)
  Log.info("Test method = mean ...")
  gp_mean <- h2o.group_by(data = df.hex, by = "RACE", mean("VOL"))
  gp_mean <- as.data.frame(gp_mean)[,2]
  r_mean <- sapply(races, mean)
  checkEqualsNumeric(r_mean, gp_mean)
  # median is unimplemented in h2o.group_by at the moment; see
  # https://0xdata.atlassian.net/browse/PUBDEV-2319
  Log.info("Test method = var ...")
  gp_var <- h2o.group_by(data = df.hex, by = "RACE", var("VOL"))
  gp_var <- as.data.frame(gp_var)[,2]
  r_var <- sapply(races, var)
  checkEqualsNumeric(r_var, gp_var)
  Log.info("Test method = sd ...")
  gp_sd <- h2o.group_by(data = df.hex, by = "RACE", sd("VOL"))
  gp_sd <- as.data.frame(gp_sd)[,2]
  r_sd <- sapply(races, sd)
  checkEqualsNumeric(r_sd, gp_sd)
}
doTest("Testing different methods for groupby:", test)
| 1,965 | apache-2.0 |
c30cff98ff5071bdf5dce138743a7d83a6794562 | jangorecki/h2o-3 | h2o-r/tests/testdir_misc/runit_groupby.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
##
# Run through groupby methods with different column types
##
test <- function(conn) {
  # Compare h2o.group_by() aggregations (nrow, sum, mean, var, sd) on the
  # prostate dataset against base-R reference values computed per RACE group.
  # `conn` is supplied by the runit harness; it is not used directly.
  Log.info("Upload prostate dataset into H2O...")
  df.hex <- h2o.uploadFile(locate("smalldata/prostate/prostate.csv"))
  # Fixed log message: it said "airlines" but the file read is prostate.csv.
  Log.info("Import prostate dataset into R...")
  df.R <- read.csv(locate("smalldata/prostate/prostate.csv"))
  # Reference: VOL values split by the three RACE levels (0, 1, 2).
  races <- lapply(0:2, function(x) df.R[df.R$RACE == x, "VOL" ])
  Log.info("Test method = nrow...")
  gp_nrow <- h2o.group_by(data = df.hex, by = "RACE", nrow("VOL"))
  gp_nrow <- as.data.frame(gp_nrow)[,2]
  r_nrow <- sapply(races, length)
  checkEqualsNumeric(gp_nrow, r_nrow)
  Log.info("Test method = sum...")
  gp_sum <- h2o.group_by(data = df.hex, by = "RACE", sum("VOL"))
  gp_sum <- as.data.frame(gp_sum)[,2]
  r_sum <- sapply(races, sum)
  checkEqualsNumeric(r_sum, gp_sum)
  Log.info("Test method = mean ...")
  gp_mean <- h2o.group_by(data = df.hex, by = "RACE", mean("VOL"))
  gp_mean <- as.data.frame(gp_mean)[,2]
  r_mean <- sapply(races, mean)
  checkEqualsNumeric(r_mean, gp_mean)
  # median is unimplemented in h2o.group_by at the moment; see
  # https://0xdata.atlassian.net/browse/PUBDEV-2319
  Log.info("Test method = var ...")
  gp_var <- h2o.group_by(data = df.hex, by = "RACE", var("VOL"))
  gp_var <- as.data.frame(gp_var)[,2]
  r_var <- sapply(races, var)
  checkEqualsNumeric(r_var, gp_var)
  Log.info("Test method = sd ...")
  gp_sd <- h2o.group_by(data = df.hex, by = "RACE", sd("VOL"))
  gp_sd <- as.data.frame(gp_sd)[,2]
  r_sd <- sapply(races, sd)
  checkEqualsNumeric(r_sd, gp_sd)
}
doTest("Testing different methods for groupby:", test)
| 1,965 | apache-2.0 |
c30cff98ff5071bdf5dce138743a7d83a6794562 | h2oai/h2o-dev | h2o-r/tests/testdir_misc/runit_groupby.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
##
# Run through groupby methods with different column types
##
test <- function(conn) {
  # Compare h2o.group_by() aggregations (nrow, sum, mean, var, sd) on the
  # prostate dataset against base-R reference values computed per RACE group.
  # `conn` is supplied by the runit harness; it is not used directly.
  Log.info("Upload prostate dataset into H2O...")
  df.hex <- h2o.uploadFile(locate("smalldata/prostate/prostate.csv"))
  # Fixed log message: it said "airlines" but the file read is prostate.csv.
  Log.info("Import prostate dataset into R...")
  df.R <- read.csv(locate("smalldata/prostate/prostate.csv"))
  # Reference: VOL values split by the three RACE levels (0, 1, 2).
  races <- lapply(0:2, function(x) df.R[df.R$RACE == x, "VOL" ])
  Log.info("Test method = nrow...")
  gp_nrow <- h2o.group_by(data = df.hex, by = "RACE", nrow("VOL"))
  gp_nrow <- as.data.frame(gp_nrow)[,2]
  r_nrow <- sapply(races, length)
  checkEqualsNumeric(gp_nrow, r_nrow)
  Log.info("Test method = sum...")
  gp_sum <- h2o.group_by(data = df.hex, by = "RACE", sum("VOL"))
  gp_sum <- as.data.frame(gp_sum)[,2]
  r_sum <- sapply(races, sum)
  checkEqualsNumeric(r_sum, gp_sum)
  Log.info("Test method = mean ...")
  gp_mean <- h2o.group_by(data = df.hex, by = "RACE", mean("VOL"))
  gp_mean <- as.data.frame(gp_mean)[,2]
  r_mean <- sapply(races, mean)
  checkEqualsNumeric(r_mean, gp_mean)
  # median is unimplemented in h2o.group_by at the moment; see
  # https://0xdata.atlassian.net/browse/PUBDEV-2319
  Log.info("Test method = var ...")
  gp_var <- h2o.group_by(data = df.hex, by = "RACE", var("VOL"))
  gp_var <- as.data.frame(gp_var)[,2]
  r_var <- sapply(races, var)
  checkEqualsNumeric(r_var, gp_var)
  Log.info("Test method = sd ...")
  gp_sd <- h2o.group_by(data = df.hex, by = "RACE", sd("VOL"))
  gp_sd <- as.data.frame(gp_sd)[,2]
  r_sd <- sapply(races, sd)
  checkEqualsNumeric(r_sd, gp_sd)
}
doTest("Testing different methods for groupby:", test)
| 1,965 | apache-2.0 |
65dd8e2fa6bfb9a398640774a8729c57b2bcac26 | hidelab/Amberkar_Scripts | msmm_reseq_Braak_IFG_DCN_analysis.R | library(cocor)
library(org.Hs.eg.db)
library(parallel)
library(data.table)
ncore = detectCores()
blocksize=100000
ProcessElement <- function(ic){
  # Map the linear upper-triangle index `ic` to a gene pair (A, B), compute
  # the Spearman correlation of that pair in the control and treatment
  # expression matrices, and test whether the two correlations differ
  # (cocor, Fisher 1925 z-test).
  #
  # Reads from the enclosing environment: c_exprs_rank, t_exprs_rank,
  # n.c, n.t, gene.names, pb, blocksize.
  # Returns a one-row data.frame: gene names, per-group r/p/n, the cocor
  # p-value (NA when either correlation is NA) and |r.t - r.c|.
  A = ceiling((sqrt(8*(ic+1)-7)+1)/2)
  B = ic-choose(floor(1/2+sqrt(2*ic)),2)
  c_A = c_exprs_rank[A,]
  c_B = c_exprs_rank[B,]
  t_A = t_exprs_rank[A,]
  t_B = t_exprs_rank[B,]
  # get correlation between the summaries for the unique genes
  tmp = data.frame(Gene.A=gene.names[A],Gene.B=gene.names[B])
  c_cortest <- cor.test(unname(unlist(c_A)), unname(unlist(c_B)), method="spearman")
  t_cortest <- cor.test(unname(unlist(t_A)), unname(unlist(t_B)), method="spearman")
  # Fix: the original recomputed the correlations with cor.test()'s default
  # method (Pearson), so r.c/r.t were Pearson estimates paired with the
  # Spearman p-values above, and cocor then compared Pearson r's against
  # Spearman tests. Reuse the Spearman rho estimates consistently.
  rc <- unname(c_cortest$estimate)
  rt <- unname(t_cortest$estimate)
  tmp$r.c <- rc
  tmp$p.c <- c_cortest$p.value
  tmp$n.c <- n.c
  tmp$r.t <- rt
  tmp$p.t <- t_cortest$p.value
  tmp$n.t <- n.t
  tmp$p.cocor <- NA
  tmp$abs.corr.change <- abs(rt-rc)
  if ( (!is.na(tmp$r.c)) && (!is.na(tmp$r.t)) )
  {
    diffcor <- cocor.indep.groups(tmp$r.c, tmp$r.t, tmp$n.c, tmp$n.t)
    tmp$p.cocor <- diffcor@fisher1925$p.value
  }
  setTxtProgressBar(pb,ic %% blocksize)
  return(tmp)
}
# Load the MSBB residual-expression matrix, drop resequenced sample columns
# and index rows by Ensembl gene id.
msmm_data=fread("./MSMM/MSSM_FP_STG_PHG_IFG_netResidualExpression.tsv",sep="\t",header=T,data.table=F,showProgress=T)
msmm_data2=msmm_data[,-grep(pattern="resequenced",x=colnames(msmm_data))]
rownames(msmm_data2)=msmm_data$ensembl_gene_id
#Merge clinical and pathological covariates
msmm_rnaseq_covariates=read.csv("./MSMM/MSBB_RNAseq_covariates.csv",header = T,as.is = T)
msmm_rnaseq_clinical_covariates=read.csv("./MSMM/MSBB_clinical.csv",header = T,as.is = T)
msmm_rnaseq_covariates.merged=merge(x = msmm_rnaseq_clinical_covariates,y=msmm_rnaseq_covariates,by=c("individualIdentifier","individualIdentifier"))
# Drop unmapped/resequenced files; keep Braak scores 1-6 only (exclude 0, 9).
msmm_rnaseq_covariates.merged2=msmm_rnaseq_covariates.merged[grep(pattern = "unmapped|resequenced",x = msmm_rnaseq_covariates.merged$fileName,invert = T),]
msmm_rnaseq_covariates.merged_allBraak=msmm_rnaseq_covariates.merged2[grep(pattern="0|9",x=msmm_rnaseq_covariates.merged2$bbscore,invert=T),]
# Build a region -> Braak-stage -> sample-id lookup for four Brodmann areas.
msmm_rnaseq_sampleList.Braak=vector(mode = "list",length = 4)
names(msmm_rnaseq_sampleList.Braak)=c("FP","IFG","STG","PHG")
msmm_rnaseq_sampleList.Braak$FP=msmm_rnaseq_sampleList.Braak$IFG=msmm_rnaseq_sampleList.Braak$STG=msmm_rnaseq_sampleList.Braak$PHG=vector(mode = "list",length = 6)
names(msmm_rnaseq_sampleList.Braak$FP)=names(msmm_rnaseq_sampleList.Braak$IFG)=names(msmm_rnaseq_sampleList.Braak$STG)=names(msmm_rnaseq_sampleList.Braak$PHG)=paste("B",c(1:6),sep = "")
braak_scores=c(1:6)
brain_regions=c("BM10","BM44","BM22","BM36")
for(r in 1:length(brain_regions)){
for(b in 1:length(braak_scores)){
msmm_rnaseq_sampleList.Braak[[r]][[b]]=msmm_rnaseq_covariates.merged_allBraak$sampleIdentifier[which((msmm_rnaseq_covariates.merged_allBraak$BrodmannArea==brain_regions[r])&(msmm_rnaseq_covariates.merged_allBraak$bbscore==braak_scores[b]))]
}
}
# `c` indexes the Braak-stage pair to contrast (row of braak_scores_combn
# below); note it shadows base c() as a variable name.
c=1
# NOTE(review): samples are drawn from the FP region list here, yet the
# output filenames below use names(...)[r] with `r` left over from the loop
# above (value 4 = "PHG") — region labelling looks inconsistent; confirm
# which region this copy of the script is meant to analyse.
exprs_rank=lapply(msmm_rnaseq_sampleList.Braak$FP,function(x)msmm_data2[,which(colnames(msmm_data2)%in%x)])
# Keep genes expressed (>0) in at least a third of samples in every stage.
common_genes=Reduce(intersect,lapply(lapply(exprs_rank,function(x)x[which((rowSums(x>0)>=ncol(x)/3)==T),]),rownames))
exprs_rank2=lapply(lapply(exprs_rank,function(x)x[which((rowSums(x>0)>=ncol(x)/3)==T),]),function(y)y[rownames(y)%in%common_genes,])
# NOTE(review): `t` is not defined as an index in this script (it resolves
# to the base transpose function), so exprs_rank2[[t]] errors at runtime;
# presumably an integer stage index (e.g. 1) was intended — confirm.
number_of_combinations=choose(nrow(exprs_rank2[[t]]),2)
# Adjacent Braak-stage pairs: (1,2), (2,3), ..., (5,6).
braak_scores_combn=matrix(NA,nrow=5,ncol=2)
for (i in 1:5){
braak_scores_combn[i,]=c(i,i+1)
}
# "control" = earlier stage, "treatment" = later stage of the chosen pair.
c_exprs_rank=exprs_rank2[braak_scores_combn[c,1]][[1]]
t_exprs_rank=exprs_rank2[braak_scores_combn[c,2]][[1]]
n.c<-ncol(c_exprs_rank)
n.t<-ncol(t_exprs_rank)
gene.names<-rownames(exprs_rank2$B1)
# NOTE(review): setwd() inside a script mutates global state and is not
# restored on error; all relative output paths below depend on it.
dir.create(paste("Braak",braak_scores_combn[c,1],"_","Braak",braak_scores_combn[c,2],sep = ""),showWarnings = T,mode = "0777")
setwd(paste("Braak",braak_scores_combn[c,1],"_","Braak",braak_scores_combn[c,2],"/",sep = ""))
i<-0
# blocksize is rebound here (50) after the earlier 100000; ProcessElement's
# progress bar uses whichever value is current when it runs.
blocksize=50
start<-i*blocksize+1
end<-min((i+1)*blocksize, number_of_combinations)
#str(Reduce(intersect,lapply(lapply(exprs_rank,function(x)x[which((rowSums(x>0)>=ncol(x)/3)==T),]),rownames)))
# Process gene pairs in blocks of `blocksize`; capped at the first 1000
# linear indices.
while(start < 1000){
input<-start:end
pb = txtProgressBar(min=0,max=length(input),style=3,initial=0)
cat("\n")
res = mclapply(input,ProcessElement,mc.cores=ncore)
close(pb)
#save results
#rbindlist is faster than rbind.fill
result <- rbindlist(res)
result <- as.data.frame(result)
result <- data.frame(result,stringsAsFactors = F)
#Write results, use fwrite instead of write.table for faster processing
fwrite(result, file=paste("mssm_reseq_",names(msmm_rnaseq_sampleList.Braak)[r],"_",paste("Braak",braak_scores_combn[c,],sep = "_",collapse = "_"),"_cocor_tmp", i, ".txt",sep = ""), sep="\t",col.names = T,row.names = F,buffMB = 10,nThread = 4,quote = F)
i<-i+1
start<-i*blocksize+1
end<-min((i+1)*blocksize, number_of_combinations)
}
#Remove headers from tmp files and combine in a single one
system(paste("find . -name '*.txt'|grep 'tmp'|xargs -n 1 tail -n +2",paste(">MSMM_ReSeq_",names(msmm_rnaseq_sampleList.Braak)[r],"_",paste("Braak",braak_scores_combn[c,],sep = "_",collapse = "_"),"_allResults_DiffCorr.txt",sep = "")))
allResults_FDR=fread(input = list.files(pattern = "*allResults_DiffCorr.txt"),sep = "\t",header = F,showProgress = T,data.table = F,strip.white = T,stringsAsFactors = F)
#Assign single header for the combined file
colnames(allResults_FDR)=colnames(result)
#Compute FDR values for all p-values
allResults_FDR$FDR.cocor=p.adjust(p = as.numeric(allResults_FDR$p.cocor),method = "fdr")
allResults_FDR$FDR.c=p.adjust(p = as.numeric(allResults_FDR$p.c),method = "fdr")
allResults_FDR$FDR.t=p.adjust(p = as.numeric(allResults_FDR$p.t),method = "fdr")
#Generate DCNs adn write to R objects
# NOTE(review): graph.data.frame() and E() are igraph functions, but
# library(igraph) is never loaded in this script — confirm it is attached
# upstream or add the library call.
control_DCN=disease_DCN=graph.data.frame(d = allResults_FDR[which(allResults_FDR$FDR.cocor<=0.1),c(1:2)],directed = F)
E(control_DCN)$weight=allResults_FDR[which(allResults_FDR$FDR.cocor<=0.1),3]+1
E(disease_DCN)$weight=allResults_FDR[which(allResults_FDR$FDR.cocor<=0.1),6]+1
saveRDS(control_DCN,file = paste("mssm_reseq_",names(msmm_rnaseq_sampleList.Braak)[r],"_",paste("Braak",braak_scores_combn[c,],sep = "_",collapse = "_"),"_Control.RDS",sep = ""))
# Persist the disease differential co-expression network for this Braak pair.
# (Fused dataset-extraction metadata removed from the end of this line; it
# made the statement unparseable.)
saveRDS(disease_DCN,file = paste("mssm_reseq_",names(msmm_rnaseq_sampleList.Braak)[r],"_",paste("Braak",braak_scores_combn[c,],sep = "_",collapse = "_"),"_Disease.RDS",sep = ""))
ef0b999133472507136f603c5a69cb1b11f25edd | DFITC/fts | introTS/3-1.R | #3-1
# Exercise 3-1: ARIMA modelling of the monthly Canada/US exchange-rate data.
# Fixes: library() instead of require() (errors loudly if missing), `<-`
# instead of `=` for assignment, TRUE/FALSE instead of T/F, and fused
# dataset-extraction metadata removed from the final line.
library(fUnitRoots)
da <- read.table("m-CAUS-7611.txt", header = TRUE)
# Column 3 is the Canadian series; column 4 (used in part b) is the US series.
ca <- da[, 3]
# (a) Identify and fit an ARIMA model for the Canadian series -----------------
d <- 0
acf(ca)
acf(diff(ca))
acf(diff(diff(ca)))   # the second difference looks stationary
pacf(diff(diff(ca)))
p <- 6
q <- 3
m2 <- arima(ca, order = c(p, d, q))
m2
Box.test(m2$residuals, lag = 12, type = 'Ljung')
# Cross-check with forecast::auto.arima on the twice-differenced series
library(forecast)
ca <- diff(diff(ca))   # NOTE: ca is re-defined as its second difference here
auto.arima(ca)
auto_mode <- arima(ca, order = c(1, 0, 0))
tsdiag(auto_mode, gof = 36)
Box.test(auto_mode$residuals, lag = 12, type = 'Ljung')
# (b) Regression with ARMA errors on the twice-differenced US series ----------
us <- da[, 4]
us <- diff(diff(us))
m3 <- lm(ca ~ -1 + us)   # no-intercept linear fit
summary(m3)
d <- 0
# Choose p and q from the residual PACF/ACF of the linear fit
pacf(m3$residuals)
acf(m3$residuals)
q <- 2
p <- 6
m6 <- arima(ca, order = c(p, d, q), xreg = us, include.mean = FALSE)
m6
Box.test(m6$residuals, lag = 12, type = 'Ljung')
# (c) Backtest both models, one-step-ahead forecasts from observation 385 -----
source("backtest.R")
mm1 <- backtest(m2, ca, 385, 1)
mm2 <- backtest(m6, ca, 385, 1, xre = us)
ef0b999133472507136f603c5a69cb1b11f25edd | xiaoyem/fts | introTS/3-1.R | #3-1
# Exercise 3-1: ARIMA modelling of the monthly Canada/US exchange-rate data.
# Fixes: library() instead of require() (errors loudly if missing), `<-`
# instead of `=` for assignment, TRUE/FALSE instead of T/F, and fused
# dataset-extraction metadata removed from the final line.
library(fUnitRoots)
da <- read.table("m-CAUS-7611.txt", header = TRUE)
# Column 3 is the Canadian series; column 4 (used in part b) is the US series.
ca <- da[, 3]
# (a) Identify and fit an ARIMA model for the Canadian series -----------------
d <- 0
acf(ca)
acf(diff(ca))
acf(diff(diff(ca)))   # the second difference looks stationary
pacf(diff(diff(ca)))
p <- 6
q <- 3
m2 <- arima(ca, order = c(p, d, q))
m2
Box.test(m2$residuals, lag = 12, type = 'Ljung')
# Cross-check with forecast::auto.arima on the twice-differenced series
library(forecast)
ca <- diff(diff(ca))   # NOTE: ca is re-defined as its second difference here
auto.arima(ca)
auto_mode <- arima(ca, order = c(1, 0, 0))
tsdiag(auto_mode, gof = 36)
Box.test(auto_mode$residuals, lag = 12, type = 'Ljung')
# (b) Regression with ARMA errors on the twice-differenced US series ----------
us <- da[, 4]
us <- diff(diff(us))
m3 <- lm(ca ~ -1 + us)   # no-intercept linear fit
summary(m3)
d <- 0
# Choose p and q from the residual PACF/ACF of the linear fit
pacf(m3$residuals)
acf(m3$residuals)
q <- 2
p <- 6
m6 <- arima(ca, order = c(p, d, q), xreg = us, include.mean = FALSE)
m6
Box.test(m6$residuals, lag = 12, type = 'Ljung')
# (c) Backtest both models, one-step-ahead forecasts from observation 385 -----
source("backtest.R")
mm1 <- backtest(m2, ca, 385, 1)
mm2 <- backtest(m6, ca, 385, 1, xre = us)
9df21375e92665fac26586ab5fd83051b8052c5f | Bath-ML/parking | r/BANEScarparkinglite/R/process_records.R | #' Clean up raw records
#'
#' Uses functions from the \code{dplyr} package to clean up raw records obtained
#' from the Bath: Hacked datastore. The process is as follows:
#' \itemize{
#' \item Select columns containing useful information only
#' \item Remove any records with NA entries
#' \item Remove records for "test car park"
#' \item Convert Name and Status to factors
#' \item Remove records with negative occupancies
#' \item Calculate Proportion column (Occupancy/Capacity)
#' \item Remove records with Proportion greater than \code{max_prop}
#' \item Remove duplicate records (see \code{first_upload})
#' }
#'
#' @param x A data frame containing records to be cleaned up (e.g. the data
#' frame obtained by calling \code{\link{get_all_crude}}).
#' @param max_prop The point at which records are discarded due to overly-full
#' Occupancy values (default is 1.1, or 110\% full, to allow for circulating
#' cars).
#' @param first_upload If \code{TRUE}, ensures that when duplicate records are
#' removed, the copy which is kept is the first one uploaded after the record
#' was taken. This takes much longer to run, due to sorting.
#' @return A data frame of clean records, with 7 columns:
#' \describe{
#' \item{Name}{The name of the car park where the record was taken.}
#' \item{LastUpdate}{The time the record was taken (POSIXct date-time object).}
#' \item{DateUploaded}{The time the record was uploaded to the Bath: Hacked
#' database (POSIXct date-time object).}
#' \item{Occupancy}{The total number of cars in the car park.}
#' \item{Capacity}{The number of parking spaces in the car park.}
#' \item{Status}{Description of the change in occupancy since the previous
#' record from that car park.}
#' \item{Proportion}{Calculated as (Occupancy/Capacity).}
#' }
#' @examples
#' \dontshow{
#' load(system.file("tests", "testthat", "data", "raw.rda", package = "BANEScarparkinglite"))
#' refined <- refine(raw)
#' }
#' \donttest{
#' raw_data <- get_all_crude()
#' some_records <- raw_data[1:1000, ]
#'
#' dim(some_records)
#' ## 1000 16
#'
#' df <- refine(raw_data)
#' dim(df)
#' ## 813 7
#' }
#' @export
refine <- function(x, max_prop = 1.1, first_upload = FALSE) {
    # Keep only the informative columns, renamed to their canonical form.
    out <- dplyr::select(x, Name = name, LastUpdate = lastupdate,
                         DateUploaded = dateuploaded, Occupancy = occupancy,
                         Capacity = capacity, Status = status)
    # Drop incomplete rows and readings from the dummy "test car park".
    out <- stats::na.omit(out)
    out <- dplyr::filter(out, Name != "test car park")
    # Car-park names and statuses are categorical.
    out <- dplyr::mutate(out, Name = as.factor(Name), Status = as.factor(Status))
    # Negative occupancies are not meaningful readings.
    out <- dplyr::filter(out, Occupancy >= 0)
    # Fill level as a fraction of capacity; discard implausibly full records.
    out <- dplyr::mutate(out, Proportion = (Occupancy / Capacity))
    out <- dplyr::filter(out, Proportion < max_prop)
    # (Date columns arrive from the API as POSIXct already, so no parsing
    # step is needed here.)
    # Finally collapse duplicate (Name, LastUpdate) records.
    refine.deduplicate(out, first_upload = first_upload)
}
#' Remove duplicate records (internal method)
#'
#' (Internal method)
#'
#' @param x Data frame containing records.
#' @param first_upload If TRUE, ensures record with oldest DateUploaded is kept.
refine.deduplicate <- function(x, first_upload) {
    ## Collapse duplicate records within each (Name, LastUpdate) pair.
    grouped <- dplyr::group_by(x, Name, LastUpdate)
    if (first_upload == FALSE) {
        ## Default: duplicates differ only in DateUploaded, so keep any one.
        kept <- dplyr::slice(grouped, 1L)
    } else {
        ## Keep the earliest copy uploaded after the reading was taken.
        ## Much slower, since it has to sort within every group.
        kept <- dplyr::filter(grouped, LastUpdate < DateUploaded)
        kept <- dplyr::top_n(kept, -1, DateUploaded)
    }
    dplyr::ungroup(kept)
}
| 4,291 | mit |
c0f2fcd0f16f92f9c92e955269c1032f0e47cf6d | PirateGrunt/raw_las | scripts/06_5_MorningRecap.R | ## ------------------------------------------------------------------------
# Simulate a lognormal sample and build a named summary vector:
# min, max, mean, sd, then the 10%-90% deciles.
# NOTE(review): no set.seed() call, so results differ between runs.
num<-10000; ml<-5; sdl<-2
x1<-rlnorm(num,meanlog=ml,sdlog=sdl)
q1<-quantile(x1,probs=seq(from=.1,to=.9,by=.1))
mySummary<-c(min(x1),max(x1),mean(x1),sd(x1),q1)
# The decile entries already carry names from quantile(); name the rest.
names(mySummary)[1:4]<-c("Min","Max","Mean","SD")
mySummary
## ------------------------------------------------------------------------
# The log of a lognormal sample is normal; its mean/sd should be near ml/sdl.
y1<-log(x1)
(m1<-mean(y1))
(s1<-sd(y1))
## ------------------------------------------------------------------------
# Density-scale histogram of the logged data; should look bell-shaped.
hist(y1,freq=FALSE)
## ------------------------------------------------------------------------
# Normal quantile-quantile plot; approximate linearity supports normality.
qqnorm(y1)
## ------------------------------------------------------------------------
# Lahman baseball data; load-time warnings are suppressed deliberately.
suppressWarnings(library(Lahman)); data(Teams)
## ------------------------------------------------------------------------
# Wins and attendance for every team-season since 1980.
data1<-Teams[Teams$yearID>1979,c("yearID","teamID","W","attendance")]
head(data1)
## ------------------------------------------------------------------------
# Attendance against wins for Atlanta.
# NOTE(review): "2104" in the title looks like a typo for "2014" -- confirm.
atl<-data1[data1$teamID=="ATL",]
plot(x=atl$W,y=atl$attendance,pch=19,xlab="Wins",ylab="Attendance")
title(main="Atlanta \n 1980-2104")
## ------------------------------------------------------------------------
# National League team-seasons during the 1920s.
data1920<-Teams[Teams$yearID > 1919 & Teams$yearID<1930
                & Teams$lgID=="NL",c("yearID","teamID","W")]
head(data1920)
## ------------------------------------------------------------------------
# Total wins per NL team over that decade.
totalWins<-aggregate(W~teamID,data=data1920,FUN=sum)
dim(totalWins)
totalWins
## ------------------------------------------------------------------------
# Sepal length distribution by iris species.
data(iris)
boxplot(Sepal.Length~Species,data=iris,xlab="Species")
title(main="Sepal Length of Iris Species")
## ------------------------------------------------------------------------
# Petal dimensions coloured by species, legend keyed to the default palette
# (species factor codes 1:3 index palette() colours).
plot(x=iris$Petal.Length,y=iris$Petal.Width,pch=19,cex=.5,
     bty="l",xlab="Petal Length",ylab="Petal Width",
     col=as.numeric(iris$Species))
legend("topleft",fill=palette()[1:3],
       legend=levels(iris$Species),bty="n")
| 1,984 | gpl-3.0 |
9df21375e92665fac26586ab5fd83051b8052c5f | owenjonesuob/parking | r/BANEScarparkinglite/R/process_records.R | #' Clean up raw records
#'
#' Uses functions from the \code{dplyr} package to clean up raw records obtained
#' from the Bath: Hacked datastore. The process is as follows:
#' \itemize{
#' \item Select columns containing useful information only
#' \item Remove any records with NA entries
#' \item Remove records for "test car park"
#' \item Convert Name and Status to factors
#' \item Remove records with negative occupancies
#' \item Calculate Proportion column (Occupancy/Capacity)
#' \item Remove records with Proportion greater than \code{max_prop}
#' \item Remove duplicate records (see \code{first_upload})
#' }
#'
#' @param x A data frame containing records to be cleaned up (e.g. the data
#' frame obtained by calling \code{\link{get_all_crude}}).
#' @param max_prop The point at which records are discarded due to overly-full
#' Occupancy values (default is 1.1, or 110\% full, to allow for circulating
#' cars).
#' @param first_upload If \code{TRUE}, ensures that when duplicate records are
#' removed, the copy which is kept is the first one uploaded after the record
#' was taken. This takes much longer to run, due to sorting.
#' @return A data frame of clean records, with 7 columns:
#' \describe{
#' \item{Name}{The name of the car park where the record was taken.}
#' \item{LastUpdate}{The time the record was taken (POSIXct date-time object).}
#' \item{DateUploaded}{The time the record was uploaded to the Bath: Hacked
#' database (POSIXct date-time object).}
#' \item{Occupancy}{The total number of cars in the car park.}
#' \item{Capacity}{The number of parking spaces in the car park.}
#' \item{Status}{Description of the change in occupancy since the previous
#' record from that car park.}
#' \item{Proportion}{Calculated as (Occupancy/Capacity).}
#' }
#' @examples
#' \dontshow{
#' load(system.file("tests", "testthat", "data", "raw.rda", package = "BANEScarparkinglite"))
#' refined <- refine(raw)
#' }
#' \donttest{
#' raw_data <- get_all_crude()
#' some_records <- raw_data[1:1000, ]
#'
#' dim(some_records)
#' ## 1000 16
#'
#' df <- refine(raw_data)
#' dim(df)
#' ## 813 7
#' }
#' @export
refine <- function(x, max_prop = 1.1, first_upload = FALSE) {
    # Keep only the informative columns, renamed to their canonical form.
    out <- dplyr::select(x, Name = name, LastUpdate = lastupdate,
                         DateUploaded = dateuploaded, Occupancy = occupancy,
                         Capacity = capacity, Status = status)
    # Drop incomplete rows and readings from the dummy "test car park".
    out <- stats::na.omit(out)
    out <- dplyr::filter(out, Name != "test car park")
    # Car-park names and statuses are categorical.
    out <- dplyr::mutate(out, Name = as.factor(Name), Status = as.factor(Status))
    # Negative occupancies are not meaningful readings.
    out <- dplyr::filter(out, Occupancy >= 0)
    # Fill level as a fraction of capacity; discard implausibly full records.
    out <- dplyr::mutate(out, Proportion = (Occupancy / Capacity))
    out <- dplyr::filter(out, Proportion < max_prop)
    # (Date columns arrive from the API as POSIXct already, so no parsing
    # step is needed here.)
    # Finally collapse duplicate (Name, LastUpdate) records.
    refine.deduplicate(out, first_upload = first_upload)
}
#' Remove duplicate records (internal method)
#'
#' (Internal method)
#'
#' @param x Data frame containing records.
#' @param first_upload If TRUE, ensures record with oldest DateUploaded is kept.
refine.deduplicate <- function(x, first_upload) {
    ## Collapse duplicate records within each (Name, LastUpdate) pair.
    grouped <- dplyr::group_by(x, Name, LastUpdate)
    if (first_upload == FALSE) {
        ## Default: duplicates differ only in DateUploaded, so keep any one.
        kept <- dplyr::slice(grouped, 1L)
    } else {
        ## Keep the earliest copy uploaded after the reading was taken.
        ## Much slower, since it has to sort within every group.
        kept <- dplyr::filter(grouped, LastUpdate < DateUploaded)
        kept <- dplyr::top_n(kept, -1, DateUploaded)
    }
    dplyr::ungroup(kept)
}
| 4,291 | mit |
# Correlation between sulfate and nitrate for every monitor whose number of
# completely-observed cases exceeds `threshold`.
#
# directory: path to the folder holding the monitor CSV files.
# threshold: minimum complete-case count a monitor must have to be included.
# id:        monitor ids to consider (new optional argument; defaults to all
#            332 monitors, matching the previous hard-coded behaviour).
#
# Returns a plain numeric vector of correlations (possibly empty).
corr <- function(directory, threshold = 0, id = 1:332) {
  workDir <- getwd()
  # Restore the caller's working directory even if a read fails part-way
  # (the original only restored it on the success path).
  on.exit(setwd(workDir), add = TRUE)
  setwd(directory)
  cr <- numeric(0)
  for (i in id) {
    # complete() reports (in column 2 of its one-row result) how many
    # fully-observed rows monitor i has.
    rr <- complete("", i)
    if (rr[1, 2] > threshold) {
      # Only read the monitor's file once it passes the threshold.
      wdata <- read.csv(fileNbr(i))
      cr <- c(cr, cor(wdata[["nitrate"]], wdata[["sulfate"]],
                      use = "na.or.complete"))
    }
  }
  # Drop NA correlations and strip the attributes na.omit() attaches.
  as.numeric(na.omit(cr))
}
| 410 | gpl-2.0 |
8c2dd9aeb80dc6cdd31927616e4682f7e78dca79 | coolbutuseless/GRIM | data-raw/parse.R | library(readr)
library(stringr)
library(dplyr)
library(pryr)
GRIM <- read_csv("grim.csv") %>%
mutate(AGE_GROUP = as.factor(str_replace_all(AGE_GROUP , "\x96", "-")),
cause_of_death = as.factor(str_replace_all(cause_of_death, "\x96", "-")),
SEX = as.factor(SEX)
) %>% select(-Record_id)
save(GRIM, file="../data/GRIM.rda", compress="xz")
| 380 | mit |
c92123af14e542a1a63aa6fe35be4afce0bd7af0 | GeoscienceAustralia/ptha | misc/nearshore_testing_2020/swals/compare_runtimes_nonlinearOffshore_LinearPlusManningOffshore.R | source('/g/data/w85/tsunami/CODE/gadi/ptha/propagation/SWALS/plot.R')
# Total wallclock time per domain, summed over every *.log file matching
# `log_glob`. Relies on get_domain_wallclock_times_in_log() from the SWALS
# plot.R sourced above.
total_domain_wallclock <- function(log_glob){
    per_log_times <- lapply(Sys.glob(log_glob), get_domain_wallclock_times_in_log)
    all_times <- do.call(rbind, per_log_times)
    aggregate(all_times$times, list(all_times$index), sum)
}

# Get runtimes for linear-plus-manning model
all_wallclock_times_LM = total_domain_wallclock(
    'OUTPUTS/Tohoku2011_YamakaziEtAl2018-risetime_0-full-linear_with_manning-0.035-highres_NSW/RUN_20200610_073112227/*.log')
all_wallclock_times_LM
## Domains 1-4 are linear+Manning using a leapfrog solver
#    Group.1         x
# 1        1 20283.177
# 2        2 20163.383
# 3        3 20208.387
# 4        4 19902.936
# 5        5 31996.415
# 6        6 16082.635
# 7        7 16714.720
# 8        8  5745.297
# 9        9  9079.012
# 10      10  2215.998
# 11      11  3612.888
# 12      12  1512.726
# 13      13 36103.048

# Get runtimes for nonlinear model
all_wallclock_times_nl = total_domain_wallclock(
    'OUTPUTS/Tohoku2011_YamakaziEtAl2018-risetime_0-full-leapfrog_nonlinear-0.035-highres_NSW/RUN_20200615_123551400/*.log')
all_wallclock_times_nl
## Domains 1-4 are full nonlinear using a leapfrog solver
#    Group.1          x
# 1        1 134361.901
# 2        2 134557.985
# 3        3 133401.738
# 4        4 134219.769
# 5        5  31900.313
# 6        6  16046.189
# 7        7  16516.690
# 8        8   5825.144
# 9        9   8944.345
# 10      10   2200.755
# 11      11   3510.673
# 12      12   1510.135
# 13      13  36045.603

# Here we see the ratio of ~ 0.15 for the first 4 domains -- that means a speedup of 1/0.15 = 6.666 due to the reduced-physics solver.
all_wallclock_times_LM$x / all_wallclock_times_nl$x
# [1] 0.1509593 0.1498490 0.1514852 0.1482862 1.0030126 1.0022713 1.0119897
# [8] 0.9862928 1.0150561 1.0069262 1.0291156 1.0017156 1.0015937
| 2,052 | bsd-3-clause |
27483be8e2bf1be23770b7464f24cdbbce8e0354 | benizar/capbreu-builder | rscripts/csv_bind.R | #!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
# test if there is at least one argument: if not, return an error
if (length(args)==0) {
stop("At least one argument must be supplied (input file).", call.=FALSE)
} else if (length(args)==1) {
# default output file
args[2] = "out.txt"
}
bind.csvs <- function(input.csvs){
library(dplyr)
library(readr)
df <-
input.csvs[-length(input.csvs)] %>%
lapply(read_csv) %>%
bind_rows %>%
write.csv(file = input.csvs[length(input.csvs)], row.names = FALSE)
}
suppressWarnings(
suppressMessages(
bind.csvs(args)
)
)
| 617 | gpl-3.0 |
cd00aaf18ad36f042794171516cc0d1b110f15a3 | h2oai/h2o-3 | h2o-r/tests/testdir_munging/exec/runit_eq2_slotAssign.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
##
# Test: [<-
# Description: Select a dataset, select columns, change values in the column, re-assign col
# Variations: Single col, multi-col, factor col
# Author: Spencer
##
#setupRandomSeed(1689636624)
# Test that [<- assignment on an H2O frame replaces the selected rows of the
# selected column: picks one random column and a random subset of rows of
# the iris frame, overwrites those cells with rnorm() draws, and asserts
# that the slice is no longer identical to the original.
test.column.assignment <- function() {
  # Fixed seed so the sampled column/rows are reproducible across runs.
  set.seed(1841604082)
  hex <- as.h2o(iris)
  colsToSelect <- 1 #sample(ncol(hex), 1)
  col <- sample(ncol(hex), colsToSelect)
  numToReplace <- sample(nrow(hex),1)
  rowsToReplace <- sample(nrow(hex), numToReplace)
  # Debug output describing the replacement target.
  print("")
  print("")
  print("Rows to replace: ")
  print(rowsToReplace)
  print("Num to replace: ")
  print(numToReplace)
  print("")
  print("")
  # Snapshot of the targeted cells before assignment (pulled back into R).
  hexOriginal <- data.frame(col = as.data.frame(hex)[rowsToReplace,col])
  #Log.info(paste("Original Column: ", col, sep = ""))
  print(head(hexOriginal))
  # Replacement values, pushed back to the cluster as an H2O frame.
  replacement <- rnorm(numToReplace)
  Log.info("Replacing rows for column selected")
  replacement <- as.h2o(replacement)
  print("Wuz replacement one? ")
  print("")
  print(replacement)
  print("")
  print("")
  print(rowsToReplace)
  print(paste("Column selected: ", col))
  print(paste("Column - 1: ", col - 1))
  # The operation under test: slot assignment into the H2O frame.
  hex[rowsToReplace,col] <- replacement
  hexReplaced <- data.frame(col = as.data.frame(hex)[rowsToReplace,col])
  print(hexReplaced)
  # If every value is unchanged, the assignment silently failed.
  expect_false(all(hexReplaced==hexOriginal))
}
# Register and run the test through the h2o test harness.
doTest("EQ2 Tests: [<-", test.column.assignment)
| 1,458 | apache-2.0 |
829d1270423983ed58d06a65ecb4970dd5fe7d98 | kalibera/rexp | src/library/tools/R/build.R | # File src/library/tools/R/build.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2014 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#### R based engine for R CMD build
## R developers can use this to debug the function by running it
## directly as tools:::.build_packages(args), where the args should
## be what commandArgs(TRUE) would return, that is a character vector
## of (space-delimited) terms that would be passed to R CMD build.
writeDefaultNamespace <-
function(filename, desc = file.path(dirname(filename), "DESCRIPTION"))
{
    ## Write a fallback NAMESPACE file for a package that ships without one:
    ## export every name, and import each package listed in Depends/Imports.
    pkgInfo <- .split_description(.read_description(desc))
    ## base is always available and must not be import()ed.
    pkgs <- setdiff(unique(c(names(pkgInfo$Imports), names(pkgInfo$Depends))),
                    "base")
    header <- c("# Default NAMESPACE created by R",
                "# Remove the previous line if you edit this file",
                "",
                "# Export all names",
                "exportPattern(\".\")")
    ## The import() block is only emitted when there is something to import
    ## (a zero-length pkgs yields NULL, which c() drops silently).
    imports <- if (length(pkgs))
        c("",
          "# Import all packages listed as Imports or Depends",
          "import(",
          paste("  ", pkgs, collapse = ",\n"),
          ")")
    writeLines(c(header, imports), filename)
}
### formerly Perl R::Utils::get_exclude_patterns
## Return list of file patterns excluded by R CMD build and check.
## Kept here so that we ensure that the lists are in sync, but not exported.
## Has Unix-style '/' path separators hard-coded, but that is what dir() uses.
get_exclude_patterns <- function()
    ## Build-ignore control file, macOS Finder metadata, R session files
    c("^\\.Rbuildignore$",
      "(^|/)\\.DS_Store$",
      "^\\.(RData|Rhistory)$",
      ## Backup files and editor autosave/lock files (Emacs, vi)
      "~$", "\\.bak$", "\\.swp$",
      "(^|/)\\.#[^/]*$", "(^|/)#[^/]*#$",
      ## Outdated ...
      "^TITLE$", "^data/00Index$",
      "^inst/doc/00Index\\.dcf$",
      ## Autoconf
      "^config\\.(cache|log|status)$",
      "^autom4te\\.cache$",
      ## Windows dependency files
      "^src/.*\\.d$", "^src/Makedeps$",
      ## IRIX, of some vintage
      "^src/so_locations$",
      ## Sweave detritus: default plot output file
      "^inst/doc/Rplots\\.(ps|pdf)$"
      )
### based on Perl build script
.build_packages <- function(args = NULL)
{
## this requires on Windows sh make
WINDOWS <- .Platform$OS.type == "windows"
Sys.umask("022") # Perl version did not have this.
writeLinesNL <- function(text, file)
{
## a version that uses NL line endings everywhere
con <- file(file, "wb")
on.exit(close(con))
writeLines(text, con)
}
## This version of system_with_capture merges stdout and stderr
## Used to run R to install package and build vignettes.
system_with_capture <- function (command, args) {
outfile <- tempfile("xshell")
on.exit(unlink(outfile))
status <- system2(command, args, outfile, outfile)
list(status = status, stdout = readLines(outfile, warn = FALSE))
}
## Run silently
Ssystem <- function(command, args = character(), ...)
system2(command, args, stdout = NULL, stderr = NULL, ...)
do_exit <- function(status = 1L) q("no", status = status, runLast = FALSE)
env_path <- function(...) file.path(..., fsep = .Platform$path.sep)
## Used for BuildVignettes, BuildManual, BuildKeepEmpty,
## and (character not logical) BuildResaveData
parse_description_field <-
function(desc, field, default = TRUE, logical = TRUE)
{
tmp <- desc[field]
if (is.na(tmp)) default
else if(logical)
switch(tmp,
"yes"=, "Yes" =, "true" =, "True" =, "TRUE" = TRUE,
"no" =, "No" =, "false" =, "False" =, "FALSE" = FALSE,
default)
else tmp
}
Usage <- function() {
cat("Usage: R CMD build [options] pkgdirs",
"",
"Build R packages from package sources in the directories specified by",
sQuote("pkgdirs"),
"",
"Options:",
" -h, --help print short help message and exit",
" -v, --version print version info and exit",
"",
" --force force removal of INDEX file",
" --keep-empty-dirs do not remove empty dirs",
" --no-build-vignettes do not (re)build package vignettes",
" --no-manual do not build the PDF manual even if \\Sexprs are present",
" --resave-data= re-save data files as compactly as possible:",
' "no", "best", "gzip" (default)',
" --resave-data same as --resave-data=best",
" --no-resave-data same as --resave-data=no",
" --compact-vignettes= try to compact PDF files under inst/doc:",
' "no" (default), "qpdf", "gs", "gs+qpdf", "both"',
" --compact-vignettes same as --compact-vignettes=qpdf",
" --md5 add MD5 sums",
"",
"Report bugs at bugs.r-project.org .", sep = "\n")
}
add_build_stamp_to_description_file <- function(ldpath) {
db <- .read_description(ldpath)
## this is an optional function, so could fail
user <- Sys.info()["user"]
if(user == "unknown") user <- Sys.getenv("LOGNAME")
db["Packaged"] <-
sprintf("%s; %s",
format(Sys.time(), '', tz = 'UTC', usetz = TRUE),
user)
.write_description(db, ldpath)
}
## <FIXME>
## This should really be combined with
## add_build_stamp_to_description_file().
## Also, the build code reads DESCRIPTION files too often ...
add_expanded_R_fields_to_description_file <- function(ldpath) {
db <- .read_description(ldpath)
fields <- .expand_package_description_db_R_fields(db)
if(length(fields))
.write_description(c(db, fields), ldpath)
}
## </FIXME>
temp_install_pkg <- function(pkgdir, libdir) {
dir.create(libdir, mode = "0755", showWarnings = FALSE)
## assume vignettes only need one arch
if (WINDOWS) {
cmd <- file.path(R.home("bin"), "Rcmd.exe")
args <- c("INSTALL -l", shQuote(libdir),
"--no-multiarch", shQuote(pkgdir))
} else {
cmd <- file.path(R.home("bin"), "R")
args <- c("CMD", "INSTALL -l", shQuote(libdir),
"--no-multiarch", shQuote(pkgdir))
}
res <- system_with_capture(cmd, args)
if (res$status) {
printLog(Log, " -----------------------------------\n")
printLog0(Log, paste(c(res$stdout, ""), collapse = "\n"))
printLog(Log, " -----------------------------------\n")
unlink(libdir, recursive = TRUE)
printLog(Log, "ERROR: package installation failed\n")
do_exit(1)
}
TRUE
}
prepare_pkg <- function(pkgdir, desc, Log)
{
owd <- setwd(pkgdir); on.exit(setwd(owd))
pkgname <- basename(pkgdir)
checkingLog(Log, "DESCRIPTION meta-information")
res <- try(.check_package_description("DESCRIPTION"))
if (inherits(res, "try-error")) {
resultLog(Log, "ERROR")
messageLog(Log, "running '.check_package_description' failed")
} else {
if (any(sapply(res, length))) {
resultLog(Log, "ERROR")
print(res) # FIXME print to Log?
do_exit(1L)
} else resultLog(Log, "OK")
}
cleanup_pkg(pkgdir, Log)
libdir <- tempfile("Rinst")
ensure_installed <- function()
if (!pkgInstalled) {
messageLog(Log,
"installing the package to build vignettes")
pkgInstalled <<- temp_install_pkg(pkgdir, libdir)
}
pkgInstalled <- build_Rd_db(pkgdir, libdir, desc)
if (file.exists("INDEX")) update_Rd_index("INDEX", "man", Log)
doc_dir <- file.path("inst", "doc")
if ("makefile" %in% dir(doc_dir)) { # avoid case-insensitive match
messageLog(Log, "renaming 'inst/doc/makefile' to 'inst/doc/Makefile'")
file.rename(file.path(doc_dir, "makefile"),
file.path(doc_dir, "Makefile"))
}
if (vignettes &&
parse_description_field(desc, "BuildVignettes", TRUE)) {
## this is not a logical field
## if (nchar(parse_description_field(desc, "VignetteBuilder", "")))
## ensure_installed()
## PR#15775: check VignetteBuilder packages are installed
## This is a bit wasteful: we do not need them in this process
loadVignetteBuilder(pkgdir, TRUE)
## Look for vignette sources
vigns <- pkgVignettes(dir = '.', check = TRUE)
if (!is.null(vigns) && length(vigns$docs)) {
ensure_installed()
## Good to do this in a separate process: it might die
creatingLog(Log, "vignettes")
R_LIBS <- Sys.getenv("R_LIBS", NA_character_)
if (!is.na(R_LIBS)) {
on.exit(Sys.setenv(R_LIBS = R_LIBS), add = TRUE)
Sys.setenv(R_LIBS = env_path(libdir, R_LIBS))
} else {
on.exit(Sys.unsetenv("R_LIBS"), add = TRUE)
Sys.setenv(R_LIBS = libdir)
}
# Tangle all vignettes now.
cmd <- file.path(R.home("bin"), "Rscript")
args <- c("--vanilla",
"--default-packages=", # some vignettes assume methods
"-e", shQuote("tools::buildVignettes(dir = '.', tangle = TRUE)"))
## since so many people use 'R CMD' in Makefiles,
oPATH <- Sys.getenv("PATH")
Sys.setenv(PATH = paste(R.home("bin"), oPATH,
sep = .Platform$path.sep))
res <- system_with_capture(cmd, args)
Sys.setenv(PATH = oPATH)
if (res$status) {
resultLog(Log, "ERROR")
printLog0(Log, paste(c(res$stdout, ""), collapse = "\n"))
do_exit(1L)
} else {
# Rescan for weave and tangle output files
vigns <- pkgVignettes(dir = '.', output = TRUE, source = TRUE)
stopifnot(!is.null(vigns))
resultLog(Log, "OK")
}
## We may need to install them.
if (basename(vigns$dir) == "vignettes") {
## inst may not yet exist
dir.create(doc_dir, recursive = TRUE, showWarnings = FALSE)
file.copy(c(vigns$docs, vigns$outputs, unlist(vigns$sources)), doc_dir)
unlink(c(vigns$outputs, unlist(vigns$sources)))
extras_file <- file.path("vignettes", ".install_extras")
if (file.exists(extras_file)) {
extras <- readLines(extras_file, warn = FALSE)
if(length(extras)) {
allfiles <- dir("vignettes", all.files = TRUE,
full.names = TRUE, recursive = TRUE,
include.dirs = TRUE)
inst <- rep(FALSE, length(allfiles))
for (e in extras)
inst <- inst | grepl(e, allfiles, perl = TRUE,
ignore.case = TRUE)
file.copy(allfiles[inst], doc_dir, recursive = TRUE)
}
}
}
vignetteIndex <- .build_vignette_index(vigns)
if(NROW(vignetteIndex) > 0L) {
## remove any files with no R code (they will have header comments).
## if not correctly declared they might not be in the current encoding
sources <- vignetteIndex$R
for(i in seq_along(sources)) {
file <- file.path(doc_dir, sources[i])
if (!file_test("-f", file)) next
bfr <- readLines(file, warn = FALSE)
if(all(grepl("(^###|^[[:space:]]*$)", bfr, useBytes = TRUE))) {
unlink(file)
vignetteIndex$R[i] <- ""
}
}
}
## Save the list
dir.create("build", showWarnings = FALSE)
saveRDS(vignetteIndex,
file = file.path("build", "vignette.rds"))
}
} else {
fv <- file.path("build", "vignette.rds")
if(file.exists(fv)) {
checkingLog(Log, "vignette meta-information")
db <- readRDS(fv)
pdfs <- file.path("inst", "doc", db[nzchar(db$PDF), ]$PDF)
missing <- !file.exists(pdfs)
if(any(missing)) {
msg <- c("Output(s) listed in 'build/vignette.rds' but not in package:",
strwrap(sQuote(pdfs[missing]), indent = 2L, exdent = 2L),
"Run R CMD build without --no-build-vignettes to re-create")
errorLog(Log, paste(msg, collapse = "\n"))
do_exit(1L)
} else resultLog(Log, "OK")
}
}
if (compact_vignettes != "no" &&
length(pdfs <- dir(doc_dir, pattern = "[.]pdf", recursive = TRUE,
full.names = TRUE))) {
messageLog(Log, "compacting vignettes and other PDF files")
if(compact_vignettes %in% c("gs", "gs+qpdf", "both")) {
gs_cmd <- find_gs_cmd()
gs_quality <- "ebook"
} else {
gs_cmd <- ""
gs_quality <- "none"
}
qpdf <-
ifelse(compact_vignettes %in% c("qpdf", "gs+qpdf", "both"),
Sys.which(Sys.getenv("R_QPDF", "qpdf")), "")
res <- compactPDF(pdfs, qpdf = qpdf,
gs_cmd = gs_cmd, gs_quality = gs_quality)
res <- format(res, diff = 1e5)
if(length(res))
printLog0(Log, paste(" ", format(res), collapse = "\n"), "\n")
}
if (pkgInstalled) {
unlink(libdir, recursive = TRUE)
## And finally, clean up again.
cleanup_pkg(pkgdir, Log)
}
}
## Clean up a package source tree after building.
##
## Runs 'make clean' (with platform-appropriate makefiles) in the 'src'
## directory if present, removes compiled leftovers (*.o, shared libs,
## symbols.rds, .libs/_libs), and finally executes the package's own
## 'cleanup' / 'cleanup.win' script when available.
##
## pkgdir: absolute path of the package source directory.
## Log:    build log object passed to messageLog().
## Relies on WINDOWS and Ssystem() from the enclosing environment.
cleanup_pkg <- function(pkgdir, Log)
{
    owd <- setwd(pkgdir); on.exit(setwd(owd))
    pkgname <- basename(pkgdir)
    if (dir.exists("src")) {
        setwd("src")
        messageLog(Log, "cleaning src")
        if (WINDOWS) {
            have_make <- nzchar(Sys.which(Sys.getenv("MAKE", "make")))
            if (file.exists("Makefile.win")) {
                if (have_make)
                    Ssystem(Sys.getenv("MAKE", "make"), "-f Makefile.win clean")
                else warning("unable to run 'make clean' in 'src'",
                             domain = NA)
            } else {
                if (file.exists("Makevars.win")) {
                    if (have_make) {
                        ## clean.mk supplies a default 'clean' target in
                        ## case Makevars.win does not define one.
                        ## (A dead 'makefiles <- paste()' assignment that
                        ## was immediately overwritten has been removed.)
                        makefiles <- paste("-f",
                                           shQuote(file.path(R.home("share"), "make", "clean.mk")),
                                           "-f Makevars.win")
                        Ssystem(Sys.getenv("MAKE", "make"),
                                c(makefiles, "clean"))
                    } else warning("unable to run 'make clean' in 'src'",
                                   domain = NA)
                }
                ## Also cleanup possible Unix leftovers ...
                unlink(c(Sys.glob(c("*.o", "*.sl", "*.so", "*.dylib")),
                         paste0(pkgname, c(".a", ".dll", ".def")),
                         "symbols.rds"))
                if (dir.exists(".libs")) unlink(".libs", recursive = TRUE)
                if (dir.exists("_libs")) unlink("_libs", recursive = TRUE)
            }
        } else {
            makefiles <- paste("-f",
                               shQuote(file.path(R.home("etc"),
                                                 Sys.getenv("R_ARCH"),
                                                 "Makeconf")))
            if (file.exists("Makefile")) {
                makefiles <- paste(makefiles, "-f", "Makefile")
                Ssystem(Sys.getenv("MAKE", "make"), c(makefiles, "clean"))
            } else {
                if (file.exists("Makevars")) {
                    ## ensure we do have a 'clean' target.
                    makefiles <- paste(makefiles, "-f",
                                       shQuote(file.path(R.home("share"), "make", "clean.mk")),
                                       "-f Makevars")
                    Ssystem(Sys.getenv("MAKE", "make"),
                            c(makefiles, "clean"))
                }
                ## Also cleanup possible Windows leftovers ...
                unlink(c(Sys.glob(c("*.o", "*.sl", "*.so", "*.dylib")),
                         paste0(pkgname, c(".a", ".dll", ".def")),
                         "symbols.rds"))
                if (dir.exists(".libs")) unlink(".libs", recursive = TRUE)
                if (dir.exists("_libs")) unlink("_libs", recursive = TRUE)
            }
        }
    }
    ## NOTE(review): this restores the *caller's* working directory; the
    ## cleanup scripts below are then looked up relative to it — confirm
    ## callers invoke cleanup_pkg() from within (or at) pkgdir.
    setwd(owd)
    ## It is not clear that we want to do this: INSTALL should do so.
    ## Also, certain environment variables should be set according
    ## to 'Writing R Extensions', but were not in Perl version (nor
    ## was cleanup.win used).
    if (WINDOWS) {
        if (file.exists("cleanup.win")) {
            ## check we have sh.exe first
            if (nzchar(Sys.which("sh.exe"))) {
                Sys.setenv(R_PACKAGE_NAME = pkgname)
                Sys.setenv(R_PACKAGE_DIR = pkgdir)
                Sys.setenv(R_LIBRARY_DIR = dirname(pkgdir))
                messageLog(Log, "running 'cleanup.win'")
                Ssystem("sh", "./cleanup.win")
            }
        }
    } else if (file_test("-x", "cleanup")) {
        Sys.setenv(R_PACKAGE_NAME = pkgname)
        Sys.setenv(R_PACKAGE_DIR = pkgdir)
        Sys.setenv(R_LIBRARY_DIR = dirname(pkgdir))
        messageLog(Log, "running 'cleanup'")
        Ssystem("./cleanup")
    }
}
## Check whether the package's Rd index file ('oldindex') is up-to-date
## with respect to the current Rd files, creating it when absent and
## (with '--force', via the enclosing 'force' flag) removing it when
## stale.  Stops the build if the index cannot be computed.
update_Rd_index <- function(oldindex, Rd_files, Log)
{
    newindex <- tempfile()
    if (inherits(try(Rdindex(Rd_files, newindex)), "try-error")) {
        errorLog(Log, "computing Rd index failed")
        do_exit(1L)
    }
    checkingLog(Log, "whether ", sQuote(oldindex), " is up-to-date")
    if (!file.exists(oldindex)) {
        ## No index yet: install the freshly computed one.
        resultLog(Log, "NO")
        messageLog(Log, "creating new ", sQuote(oldindex))
        return(file.rename(newindex, oldindex))
    }
    ## warn = FALSE: tolerate a missing final newline (e.g. BaM).
    old_lines <- readLines(oldindex, warn = FALSE)
    new_lines <- readLines(newindex)
    if (identical(old_lines, new_lines)) {
        resultLog(Log, "OK")
        unlink(newindex)
    } else {
        resultLog(Log, "NO")
        if (force) {
            messageLog(Log, "removing ", sQuote(oldindex),
                       " as '--force' was given")
            unlink(oldindex)
        } else {
            messageLog(Log, "use '--force' to remove ",
                       "the existing ", sQuote(oldindex))
            unlink(newindex)
        }
    }
}
## Build the Rd database for the package and handle dynamic (\Sexpr)
## content: evaluates build-stage \Sexprs (saving a partial database in
## build/partial.rdb) and, when install/render-stage \Sexprs exist and a
## manual is wanted, temporarily installs the package and builds the PDF
## reference manual into build/.
## Returns TRUE if any Rd file contains \Sexpr markup, FALSE otherwise.
## Uses Log, manual, messageLog(), temp_install_pkg(), ..Rd2pdf() and
## parse_description_field() from the enclosing scope.
build_Rd_db <- function(pkgdir, libdir, desc) {
    db <- .build_Rd_db(pkgdir, stages = NULL,
                       os = c("unix", "windows"), step = 1)
    if (!length(db)) return(FALSE)
    # Strip the pkgdir off the names
    names(db) <- substring(names(db),
                           nchar(file.path(pkgdir, "man")) + 2L)
    ## vapply() rather than sapply(): guarantees a logical vector even
    ## for unexpected input, so which() can never be handed a list.
    containsSexprs <-
        which(vapply(db, function(Rd) getDynamicFlags(Rd)[["\\Sexpr"]],
                     logical(1L)))
    if (!length(containsSexprs)) return(FALSE)
    messageLog(Log, "installing the package to process help pages")
    dir.create(libdir, mode = "0755", showWarnings = FALSE)
    ## Put the temporary library first on the search path; restore on exit.
    savelib <- .libPaths()
    .libPaths(c(libdir, savelib))
    on.exit(.libPaths(savelib), add = TRUE)
    temp_install_pkg(pkgdir, libdir)
    containsBuildSexprs <-
        which(vapply(db, function(Rd) getDynamicFlags(Rd)[["build"]],
                     logical(1L)))
    if (length(containsBuildSexprs)) {
        ## Evaluate build-stage \Sexprs now and ship the partially
        ## processed pages with the tarball.
        for (i in containsBuildSexprs)
            db[[i]] <- prepare_Rd(db[[i]], stages = "build",
                                  stage2 = FALSE, stage3 = FALSE)
        messageLog(Log, "saving partial Rd database")
        partial <- db[containsBuildSexprs]
        dir.create("build", showWarnings = FALSE)
        saveRDS(partial, file.path("build", "partial.rdb"))
    }
    needRefman <- manual &&
        parse_description_field(desc, "BuildManual", TRUE) &&
        any(vapply(db,
                   function(Rd) any(getDynamicFlags(Rd)[c("install", "render")]),
                   logical(1L)))
    if (needRefman) {
        messageLog(Log, "building the PDF package manual")
        dir.create("build", showWarnings = FALSE)
        refman <- file.path(pkgdir, "build",
                            paste0(basename(pkgdir), ".pdf"))
        ..Rd2pdf(c("--force", "--no-preview",
                   paste0("--output=", refman),
                   pkgdir), quit = FALSE)
    }
    return(TRUE)
}
## also fixes up missing final NL
## Rewrite every file under <pkgname>/src whose name matches 'dirPattern'
## so that it uses LF line endings and has a final newline.
## 'Log' is accepted for interface consistency but is not used here.
fix_nonLF_in_files <- function(pkgname, dirPattern, Log)
{
    srcdir <- file.path(pkgname, "src")
    if (!dir.exists(srcdir)) return(invisible())
    targets <- dir(srcdir, pattern = dirPattern,
                   full.names = TRUE, recursive = TRUE)
    ## FIXME: rewriting "destroys" all timestamps
    for (path in targets)
        writeLinesNL(readLines(path, warn = FALSE), path)
    invisible()
}
## LF-normalize C/C++/Fortran sources and headers in <pkgname>/src.
fix_nonLF_in_source_files <- function(pkgname, Log) {
    fix_nonLF_in_files(pkgname, dirPattern = "\\.([cfh]|cc|cpp)$", Log = Log)
}
## LF-normalize make files (Makefile/Makevars and their .in/.win
## variants) in <pkgname>/src.
fix_nonLF_in_make_files <- function(pkgname, Log) {
    ## dir()/list.files() accepts a single length-one regexp; the old code
    ## passed a length-6 character vector, of which only the first element
    ## ("^Makefile$") was used — so the Makevars variants were silently
    ## skipped.  Collapse the names into one alternation instead.
    makefile_names <- c("Makefile", "Makefile.in", "Makefile.win",
                        "Makevars", "Makevars.in", "Makevars.win")
    fix_nonLF_in_files(pkgname,
                       paste0("^(", paste(makefile_names, collapse = "|"), ")$"),
                       Log)
}
## Recursively remove (or warn about) empty directories under 'd'.
## Children are processed first (depth-first), so a directory that only
## contains empty subdirectories becomes empty itself and is removed too.
## Relies on 'desc', 'keep_empty', 'Log' and parse_description_field()
## from the enclosing environment; a package can override the global
## keep-empty setting via the DESCRIPTION field 'BuildKeepEmpty'.
find_empty_dirs <- function(d)
{
    ## dir(recursive = TRUE) did not include directories, so
    ## we needed to do this recursively
    files <- dir(d, all.files = TRUE, full.names = TRUE)
    for (dd in files[dir.exists(files)]) {
        ## skip the "." and ".." pseudo-entries
        if (grepl("/\\.+$", dd)) next
        find_empty_dirs(dd)
    }
    ## allow per-package override
    keep_empty1 <- parse_description_field(desc, "BuildKeepEmpty",
                                           keep_empty)
    if (!keep_empty1) # might have removed a dir
        files <- dir(d, all.files = TRUE, full.names = TRUE)
    if (length(files) <= 2L) { # always has ., ..
        if (keep_empty1) {
            printLog(Log, "WARNING: directory ", sQuote(d), " is empty\n")
        } else {
            unlink(d, recursive = TRUE)
            printLog(Log, "Removed empty directory ", sQuote(d), "\n")
        }
    }
}
## Ensure the package's DESCRIPTION declares a dependency on R (>= ver),
## rewriting the Depends field when necessary (any existing R entries are
## replaced).  Called when the build produces artefacts — e.g. bzip2/xz
## compressed data — that older versions of R cannot read.
fixup_R_dep <- function(pkgname, ver = "2.10")
{
    desc <- .read_description(file.path(pkgname, "DESCRIPTION"))
    Rdeps <- .split_description(desc)$Rdepends2
    for(dep in Rdeps) {
        if(dep$op != '>=') next
        ## Already depends on a sufficiently new R: nothing to do.
        if(dep$version >= package_version(ver)) return()
    }
    ## Save the locale *before* changing it.  on.exit() evaluates its
    ## expression lazily at function exit, so the previous
    ##   on.exit(Sys.setlocale("LC_CTYPE", Sys.getlocale("LC_CTYPE")))
    ## queried the locale only after it had already been set to "C" and
    ## therefore never restored the caller's setting.
    loc <- Sys.getlocale("LC_CTYPE")
    on.exit(Sys.setlocale("LC_CTYPE", loc))
    Sys.setlocale("LC_CTYPE", "C")
    ## Render one parsed dependency back to "name (op version)" form.
    flatten <- function(x) {
        if(length(x) == 3L)
            paste0(x$name, " (", x$op, " ", x$version, ")")
        else x[[1L]]
    }
    deps <- desc["Depends"]
    desc["Depends"] <- if(!is.na(deps)) {
        deps <- .split_dependencies(deps)
        deps <- deps[names(deps) != "R"] # could be more than one
        paste(c(sprintf("R (>= %s)", ver), sapply(deps, flatten)),
              collapse = ", ")
    } else sprintf("R (>= %s)", ver)
    .write_description(desc, file.path(pkgname, "DESCRIPTION"))
    printLog(Log,
             " NB: this package now depends on R (>= ", ver, ")\n")
}
## Re-save R image files (data/*.rda, data/*.RData, R/sysdata.rda) with
## better compression.
## resave_data: "no"   — leave files alone;
##              "best" — let resaveRdaFiles() choose the best compression
##                       (possibly bzip2/xz, which bumps the R dependency);
##              other  — re-save only files that are ASCII, uncompressed
##                       or version < 2, using gzip.
## Relies on Log, messageLog() and fixup_R_dep() from the enclosing scope.
resave_data_rda <- function(pkgname, resave_data)
{
    if (resave_data == "no") return()
    ddir <- file.path(pkgname, "data")
    if(resave_data == "best") {
        files <- Sys.glob(c(file.path(ddir, "*.rda"),
                            file.path(ddir, "*.RData"),
                            file.path(pkgname, "R", "sysdata.rda")))
        messageLog(Log, "re-saving image files")
        resaveRdaFiles(files)
        rdas <- checkRdaFiles(files)
        ## bzip2/xz-compressed rda files require R >= 2.10 to load.
        if(any(rdas$compress %in% c("bzip2", "xz")))
            fixup_R_dep(pkgname, "2.10")
    } else {
        ## ddir need not exist if just R/sysdata.rda
        rdas <- checkRdaFiles(Sys.glob(c(file.path(ddir, "*.rda"),
                                         file.path(ddir, "*.RData"))))
        if(nrow(rdas)) {
            update <- with(rdas, ASCII | compress == "none" | version < 2)
            if(any(update)) {
                messageLog(Log, "re-saving image files")
                resaveRdaFiles(row.names(rdas)[update], "gzip")
            }
        }
        if(file.exists(f <- file.path(pkgname, "R", "sysdata.rda"))) {
            rdas <- checkRdaFiles(f)
            update <- with(rdas, ASCII | compress == "none" | version < 2)
            if(any(update)) {
                messageLog(Log, "re-saving sysdata.rda")
                resaveRdaFiles(f, "gzip")
            }
        }
    }
}
## Re-save/compress the non-rda files in <pkgname>/data.
## - *.R data scripts are sourced into a throwaway environment and the
##   resulting objects saved as an .rda with the same basename; the .R
##   file itself (but not auxiliary files it read) is removed.
## - Tabular files (.csv/.txt/.tab, case variants per the pattern below)
##   are compressed: always gzip when resave_data == "gzip"; otherwise
##   the smallest of gz/bz2/xz is kept, with gzip given a 10% size
##   advantage.  If bz2/xz wins for any file, the package gains an
##   R (>= 2.10) dependency via fixup_R_dep().
## Relies on Log, messageLog() and fixup_R_dep() from the enclosing scope.
resave_data_others <- function(pkgname, resave_data)
{
    if (resave_data == "no") return()
    ddir <- file.path(pkgname, "data")
    dataFiles <- grep("\\.(rda|RData)$",
                      list_files_with_type(ddir, "data"),
                      invert = TRUE, value = TRUE)
    if (!length(dataFiles)) return()
    Rs <- grep("\\.[Rr]$", dataFiles, value = TRUE)
    if (length(Rs)) { # these might use .txt etc
        messageLog(Log, "re-saving .R files as .rda")
        ## ensure utils is visible
        library("utils")
        lapply(Rs, function(x){
            envir <- new.env(hash = TRUE)
            sys.source(x, chdir = TRUE, envir = envir)
            save(list = ls(envir, all.names = TRUE),
                 file = sub("\\.[Rr]$", ".rda", x),
                 compress = TRUE, compression_level = 9,
                 envir = envir)
            unlink(x)
        })
        printLog(Log,
                 " NB: *.R converted to .rda: other files may need to be removed\n")
    }
    tabs <- grep("\\.(CSV|csv|TXT|tab|txt)$", dataFiles, value = TRUE)
    if (length(tabs)) {
        messageLog(Log, "re-saving tabular files")
        if (resave_data == "gzip") {
            lapply(tabs, function(nm) {
                ## DiceDesign/data/greenwood.table.txt is missing NL
                x <- readLines(nm, warn = FALSE)
                con <- gzfile(paste(nm, "gz", sep = "."), "wb")
                writeLines(x, con)
                close(con)
                unlink(nm)
            })
        } else {
            OK <- TRUE
            lapply(tabs, function(nm) {
                x <- readLines(nm, warn = FALSE)
                nm3 <- paste(nm, c("gz", "bz2", "xz"), sep = ".")
                con <- gzfile(nm3[1L], "wb", compression = 9L); writeLines(x, con); close(con)
                con <- bzfile(nm3[2L], "wb", compression = 9L); writeLines(x, con); close(con)
                con <- xzfile(nm3[3L], "wb", compression = 9L); writeLines(x, con); close(con)
                ## Weight gzip by 0.9 so it wins unless bz2/xz save >10%.
                sizes <- file.size(nm3) * c(0.9, 1, 1)
                ind <- which.min(sizes)
                if(ind > 1) OK <<- FALSE
                unlink(c(nm, nm3[-ind]))
            })
            if (!OK) fixup_R_dep(pkgname, "2.10")
        }
    }
}
## ---- Option defaults (overridden by the command-line flags below) ----
force <- FALSE
vignettes <- TRUE
manual <- TRUE # Install the manual if Rds contain \Sexprs
with_md5 <- FALSE
INSTALL_opts <- character()
pkgs <- character()
options(showErrorCalls = FALSE, warn = 1)
## Read in build environment file.
Renv <- Sys.getenv("R_BUILD_ENVIRON", unset = NA)
if(!is.na(Renv)) {
    ## Do not read any build environment file if R_BUILD_ENVIRON is
    ## set to empty or something non-existent.
    if(nzchar(Renv) && file.exists(Renv)) readRenviron(Renv)
} else {
    ## Read in ~/.R/build.Renviron[.rarch] (if existent).
    rarch <- .Platform$r_arch
    if (nzchar(rarch) &&
        file.exists(Renv <- paste("~/.R/build.Renviron", rarch, sep = ".")))
        readRenviron(Renv)
    else if (file.exists(Renv <- "~/.R/build.Renviron"))
        readRenviron(Renv)
}
## Configurable variables.
compact_vignettes <- Sys.getenv("_R_BUILD_COMPACT_VIGNETTES_", "no")
resave_data <- Sys.getenv("_R_BUILD_RESAVE_DATA_", "gzip")
keep_empty <-
    config_val_to_logical(Sys.getenv("_R_BUILD_KEEP_EMPTY_DIRS_", "FALSE"))
## 'args' may be supplied by the caller of the enclosing function; when
## NULL, recover them from the front-end's 'nextArg'-joined command line.
if (is.null(args)) {
    args <- commandArgs(TRUE)
    ## it seems that splits on spaces, so try harder.
    args <- paste(args, collapse = " ")
    args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
}
## ---- Parse command-line options one token at a time ----
while(length(args)) {
    a <- args[1L]
    if (a %in% c("-h", "--help")) {
        Usage()
        do_exit(0L)
    }
    else if (a %in% c("-v", "--version")) {
        cat("R add-on package builder: ",
            R.version[["major"]], ".", R.version[["minor"]],
            " (r", R.version[["svn rev"]], ")\n", sep = "")
        cat("",
            "Copyright (C) 1997-2013 The R Core Team.",
            "This is free software; see the GNU General Public License version 2",
            "or later for copying conditions. There is NO warranty.",
            sep = "\n")
        do_exit(0L)
    } else if (a == "--force") {
        force <- TRUE
    } else if (a == "--keep-empty-dirs") {
        keep_empty <- TRUE
    } else if (a == "--no-build-vignettes") {
        vignettes <- FALSE
    } else if (a == "--no-vignettes") { # pre-3.0.0 version
        stop("'--no-vignettes' is defunct:\n  use '--no-build-vignettes' instead",
             call. = FALSE, domain = NA)
    } else if (a == "--resave-data") {
        resave_data <- "best"
    } else if (a == "--no-resave-data") {
        resave_data <- "no"
    } else if (substr(a, 1, 14) == "--resave-data=") {
        resave_data <- substr(a, 15, 1000)
    } else if (a == "--no-manual") {
        manual <- FALSE
    } else if (substr(a, 1, 20) == "--compact-vignettes=") {
        compact_vignettes <- substr(a, 21, 1000)
    } else if (a == "--compact-vignettes") {
        compact_vignettes <- "qpdf"
    } else if (a == "--md5") {
        with_md5 <- TRUE
    } else if (substr(a, 1, 1) == "-") {
        message("Warning: unknown option ", sQuote(a))
    } else pkgs <- c(pkgs, a)
    args <- args[-1L]
}
## Fall back to "qpdf" for unrecognized --compact-vignettes values.
if(!compact_vignettes %in% c("no", "qpdf", "gs", "gs+qpdf", "both")) {
    warning(gettextf("invalid value for '--compact-vignettes', assuming %s",
                     "\"qpdf\""),
            domain = NA)
    compact_vignettes <-"qpdf"
}
Sys.unsetenv("R_DEFAULT_PACKAGES")
## Remember where we started: tarballs are written here.
startdir <- getwd()
if (is.null(startdir))
    stop("current working directory cannot be ascertained")
R_platform <- Sys.getenv("R_PLATFORM", "unknown-binary")
libdir <- tempfile("Rinst")
if (WINDOWS) {
    ## Some people have *assumed* that R_HOME uses / in Makefiles
    ## Spaces in paths might still cause trouble.
    rhome <- chartr("\\", "/", R.home())
    Sys.setenv(R_HOME = rhome)
}
## ---- Main loop: build a .tar.gz for each requested package ----
## Each iteration: validate DESCRIPTION, copy the sources into a
## temporary directory, prune/fix the copy, then tar it up in startdir.
for(pkg in pkgs) {
    Log <- newLog() # if not stdin; on.exit(closeLog(Log))
    ## remove any trailing /, for Windows' sake
    pkg <- sub("/$", "", pkg)
    ## 'Older versions used $pkg as absolute or relative to $startdir.
    ## This does not easily work if $pkg is a symbolic link.
    ## Hence, we now convert to absolute paths.'
    setwd(startdir)
    res <- tryCatch(setwd(pkg), error = function(e)e)
    if (inherits(res, "error")) {
        errorLog(Log, "cannot change to directory ", sQuote(pkg))
        do_exit(1L)
    }
    pkgdir <- getwd()
    pkgname <- basename(pkgdir)
    ## A readable DESCRIPTION file is required before anything else.
    checkingLog(Log, "for file ", sQuote(file.path(pkg, "DESCRIPTION")))
    f <- file.path(pkgdir, "DESCRIPTION")
    if (file.exists(f)) {
        desc <- try(.read_description(f))
        if (inherits(desc, "try-error") || !length(desc)) {
            resultLog(Log, "EXISTS but not correct format")
            do_exit(1L)
        }
        resultLog(Log, "OK")
    } else {
        resultLog(Log, "NO")
        do_exit(1L)
    }
    ## The internal name from DESCRIPTION may differ from the dir name.
    intname <- desc["Package"]
    ## make a copy, cd to parent of copy
    setwd(dirname(pkgdir))
    filename <- paste0(intname, "_", desc["Version"], ".tar")
    filepath <- file.path(startdir, filename)
    Tdir <- tempfile("Rbuild")
    dir.create(Tdir, mode = "0755")
    if (WINDOWS) {
        ## This preserves read-only for files, but not dates
        if (!file.copy(pkgname, Tdir, recursive = TRUE)) {
            errorLog(Log, "copying to build directory failed")
            do_exit(1L)
        }
    } else {
        ## This should preserve dates and permissions (subject to
        ## umask, if that is consulted which it seems it usually is not).
        ## Permissions are increased later.
        cp_sw <- if(Sys.info()[["sysname"]] == "Linux") ## << need GNU cp
            ## unfortunately, '-pr' does not dereference sym.links
            "-Lr --preserve=timestamps" else "-pr"
        if (system(paste("cp", cp_sw, shQuote(pkgname), shQuote(Tdir)))) {
            errorLog(Log, "copying to build directory failed")
            do_exit(1L)
        }
    }
    setwd(Tdir)
    ## Now correct the package name (PR#9266)
    if (pkgname != intname) {
        if (!file.rename(pkgname, intname)) {
            message(gettextf("Error: cannot rename directory to %s",
                             sQuote(intname)), domain = NA)
            do_exit(1L)
        }
        pkgname <- intname
    }
    ## prepare the copy
    messageLog(Log, "preparing ", sQuote(pkgname), ":")
    prepare_pkg(normalizePath(pkgname, "/"), desc, Log);
    owd <- setwd(pkgname)
    ## remove exclude files
    allfiles <- dir(".", all.files = TRUE, recursive = TRUE,
                    full.names = TRUE, include.dirs = TRUE)
    allfiles <- substring(allfiles, 3L) # drop './'
    bases <- basename(allfiles)
    exclude <- rep(FALSE, length(allfiles))
    ignore <- get_exclude_patterns()
    ## handle .Rbuildignore:
    ## 'These patterns should be Perl regexps, one per line,
    ## to be matched against the file names relative to
    ## the top-level source directory.'
    ignore_file <- file.path(pkgdir, ".Rbuildignore")
    if (file.exists(ignore_file))
        ignore <- c(ignore, readLines(ignore_file, warn = FALSE))
    for(e in ignore[nzchar(ignore)])
        exclude <- exclude | grepl(e, allfiles, perl = TRUE,
                                   ignore.case = TRUE)
    isdir <- dir.exists(allfiles)
    ## old (pre-2.10.0) dirnames
    exclude <- exclude | (isdir & (bases %in%
                                   c("check", "chm", .vc_dir_names)))
    exclude <- exclude | (isdir & grepl("([Oo]ld|\\.Rcheck)$", bases))
    ## FIXME: GNU make uses GNUmakefile (note capitalization)
    exclude <- exclude | bases %in% c("Read-and-delete-me", "GNUMakefile")
    ## Mac resource forks
    exclude <- exclude | grepl("^\\._", bases)
    exclude <- exclude | (isdir & grepl("^src.*/[.]deps$", allfiles))
    ## Windows DLL resource file
    exclude <- exclude | (allfiles == paste0("src/", pkgname, "_res.rc"))
    ## inst/doc/.Rinstignore is a mistake
    exclude <- exclude | grepl("inst/doc/[.](Rinstignore|build[.]timestamp)$", allfiles)
    exclude <- exclude | grepl("vignettes/[.]Rinstignore$", allfiles)
    ## leftovers
    exclude <- exclude | grepl("^.Rbuildindex[.]", allfiles)
    exclude <- exclude | (bases %in% .hidden_file_exclusions)
    unlink(allfiles[exclude], recursive = TRUE, force = TRUE)
    setwd(owd)
    ## Fix up man, R, demo inst/doc directories
    res <- .check_package_subdirs(pkgname, TRUE)
    if (any(sapply(res, length))) {
        messageLog(Log, "excluding invalid files")
        print(res)   # FIXME print to Log?
    }
    setwd(Tdir)
    ## Fix permissions for all files to be at least 644, and dirs 755
    ## Not restricted by umask.
    if (!WINDOWS) .Call(dirchmod, pkgname, group.writable=FALSE)
    ## Add build stamp to the DESCRIPTION file.
    add_build_stamp_to_description_file(file.path(pkgname,
                                                  "DESCRIPTION"))
    ## Add expanded R fields to the DESCRIPTION file.
    add_expanded_R_fields_to_description_file(file.path(pkgname,
                                                        "DESCRIPTION"))
    messageLog(Log,
               "checking for LF line-endings in source and make files")
    fix_nonLF_in_source_files(pkgname, Log)
    fix_nonLF_in_make_files(pkgname, Log)
    messageLog(Log, "checking for empty or unneeded directories");
    find_empty_dirs(pkgname)
    ## Directories that only exist in *installed* packages must not be
    ## shipped in a source tarball.
    for(dir in c("Meta", "R-ex", "chtml", "help", "html", "latex")) {
        d <- file.path(pkgname, dir)
        if (dir.exists(d)) {
            msg <- paste("WARNING: Removing directory",
                         sQuote(d),
                         "which should only occur",
                         "in an installed package")
            printLog(Log, paste(strwrap(msg, indent = 0L, exdent = 2L),
                                collapse = "\n"), "\n")
            unlink(d, recursive = TRUE)
        }
    }
    ## remove subarch build directories
    unlink(file.path(pkgname,
                     c("src-i386", "src-x64", "src-x86_64", "src-ppc")),
           recursive = TRUE)
    ## work on 'data' directory if present
    if(dir.exists(file.path(pkgname, "data")) ||
       file_test("-f", file.path(pkgname, "R", "sysdata.rda"))) {
        messageLog(Log, "looking to see if a 'data/datalist' file should be added")
        ## in some cases data() needs the package installed as
        ## there are links to the package's namespace
        tryCatch(add_datalist(pkgname),
                 error = function(e)
                     printLog(Log, " unable to create a 'datalist' file: may need the package to be installed\n"))
        ## allow per-package override
        resave_data1 <- parse_description_field(desc, "BuildResaveData",
                                                resave_data, FALSE)
        resave_data_others(pkgname, resave_data1)
        resave_data_rda(pkgname, resave_data1)
    }
    ## add NAMESPACE if the author didn't write one
    if(!file.exists(namespace <- file.path(pkgname, "NAMESPACE")) ) {
        messageLog(Log, "creating default NAMESPACE file")
        writeDefaultNamespace(namespace)
    }
    if(with_md5) {
        messageLog(Log, "adding MD5 file")
        .installMD5sums(pkgname)
    } else {
        ## remove any stale file
        unlink(file.path(pkgname, "MD5"))
    }
    ## Finalize
    filename <- paste0(pkgname, "_", desc["Version"], ".tar.gz")
    filepath <- file.path(startdir, filename)
    ## NB: tests/reg-packages.R relies on this exact format!
    messageLog(Log, "building ", sQuote(filename))
    res <- utils::tar(filepath, pkgname, compression = "gzip",
                      compression_level = 9L,
                      tar = Sys.getenv("R_BUILD_TAR"),
                      extra_flags = NULL) # use trapdoor
    if (res) {
        errorLog(Log, "packaging into .tar.gz failed")
        do_exit(1L)
    }
    message("") # blank line
    setwd(startdir)
    unlink(Tdir, recursive = TRUE)
    on.exit() # cancel closeLog
    closeLog(Log)
}
do_exit(0L)
}
| 41,797 | gpl-2.0 |
cd00aaf18ad36f042794171516cc0d1b110f15a3 | YzPaul3/h2o-3 | h2o-r/tests/testdir_munging/exec/runit_eq2_slotAssign.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
##
# Test: [<-
# Description: Select a dataset, select columns, change values in the column, re-assign col
# Variations: Single col, multi-col, factor col
# Author: Spencer
##
#setupRandomSeed(1689636624)
## Test: [<-
## Overwrite a random subset of rows in one column of an H2O frame with
## fresh N(0, 1) draws and verify the stored values actually changed.
## The RNG call sequence is identical to the original (seeded), and all
## printed strings are preserved byte-for-byte.
test.column.assignment <- function() {
  set.seed(1841604082)
  hex <- as.h2o(iris)
  n_cols <- 1 #sample(ncol(hex), 1)
  col_idx <- sample(ncol(hex), n_cols)
  n_replace <- sample(nrow(hex), 1)
  row_idx <- sample(nrow(hex), n_replace)
  print("")
  print("")
  print("Rows to replace: ")
  print(row_idx)
  print("Num to replace: ")
  print(n_replace)
  print("")
  print("")
  before_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  #Log.info(paste("Original Column: ", col_idx, sep = ""))
  print(head(before_df))
  new_vals <- rnorm(n_replace)
  Log.info("Replacing rows for column selected")
  new_vals <- as.h2o(new_vals)
  print("Wuz replacement one? ")
  print("")
  print(new_vals)
  print("")
  print("")
  print(row_idx)
  print(paste("Column selected: ", col_idx))
  print(paste("Column - 1: ", col_idx - 1))
  hex[row_idx, col_idx] <- new_vals
  after_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  print(after_df)
  expect_false(all(after_df == before_df))
}
doTest("EQ2 Tests: [<-", test.column.assignment)
| 1,458 | apache-2.0 |
cd00aaf18ad36f042794171516cc0d1b110f15a3 | jangorecki/h2o-3 | h2o-r/tests/testdir_munging/exec/runit_eq2_slotAssign.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
##
# Test: [<-
# Description: Select a dataset, select columns, change values in the column, re-assign col
# Variations: Single col, multi-col, factor col
# Author: Spencer
##
#setupRandomSeed(1689636624)
## Test: [<-
## Overwrite a random subset of rows in one column of an H2O frame with
## fresh N(0, 1) draws and verify the stored values actually changed.
## The RNG call sequence is identical to the original (seeded), and all
## printed strings are preserved byte-for-byte.
test.column.assignment <- function() {
  set.seed(1841604082)
  hex <- as.h2o(iris)
  n_cols <- 1 #sample(ncol(hex), 1)
  col_idx <- sample(ncol(hex), n_cols)
  n_replace <- sample(nrow(hex), 1)
  row_idx <- sample(nrow(hex), n_replace)
  print("")
  print("")
  print("Rows to replace: ")
  print(row_idx)
  print("Num to replace: ")
  print(n_replace)
  print("")
  print("")
  before_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  #Log.info(paste("Original Column: ", col_idx, sep = ""))
  print(head(before_df))
  new_vals <- rnorm(n_replace)
  Log.info("Replacing rows for column selected")
  new_vals <- as.h2o(new_vals)
  print("Wuz replacement one? ")
  print("")
  print(new_vals)
  print("")
  print("")
  print(row_idx)
  print(paste("Column selected: ", col_idx))
  print(paste("Column - 1: ", col_idx - 1))
  hex[row_idx, col_idx] <- new_vals
  after_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  print(after_df)
  expect_false(all(after_df == before_df))
}
doTest("EQ2 Tests: [<-", test.column.assignment)
| 1,458 | apache-2.0 |
cd00aaf18ad36f042794171516cc0d1b110f15a3 | h2oai/h2o-dev | h2o-r/tests/testdir_munging/exec/runit_eq2_slotAssign.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
##
# Test: [<-
# Description: Select a dataset, select columns, change values in the column, re-assign col
# Variations: Single col, multi-col, factor col
# Author: Spencer
##
#setupRandomSeed(1689636624)
## Test: [<-
## Overwrite a random subset of rows in one column of an H2O frame with
## fresh N(0, 1) draws and verify the stored values actually changed.
## The RNG call sequence is identical to the original (seeded), and all
## printed strings are preserved byte-for-byte.
test.column.assignment <- function() {
  set.seed(1841604082)
  hex <- as.h2o(iris)
  n_cols <- 1 #sample(ncol(hex), 1)
  col_idx <- sample(ncol(hex), n_cols)
  n_replace <- sample(nrow(hex), 1)
  row_idx <- sample(nrow(hex), n_replace)
  print("")
  print("")
  print("Rows to replace: ")
  print(row_idx)
  print("Num to replace: ")
  print(n_replace)
  print("")
  print("")
  before_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  #Log.info(paste("Original Column: ", col_idx, sep = ""))
  print(head(before_df))
  new_vals <- rnorm(n_replace)
  Log.info("Replacing rows for column selected")
  new_vals <- as.h2o(new_vals)
  print("Wuz replacement one? ")
  print("")
  print(new_vals)
  print("")
  print("")
  print(row_idx)
  print(paste("Column selected: ", col_idx))
  print(paste("Column - 1: ", col_idx - 1))
  hex[row_idx, col_idx] <- new_vals
  after_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  print(after_df)
  expect_false(all(after_df == before_df))
}
doTest("EQ2 Tests: [<-", test.column.assignment)
| 1,458 | apache-2.0 |
cd00aaf18ad36f042794171516cc0d1b110f15a3 | spennihana/h2o-3 | h2o-r/tests/testdir_munging/exec/runit_eq2_slotAssign.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
##
# Test: [<-
# Description: Select a dataset, select columns, change values in the column, re-assign col
# Variations: Single col, multi-col, factor col
# Author: Spencer
##
#setupRandomSeed(1689636624)
## Test: [<-
## Overwrite a random subset of rows in one column of an H2O frame with
## fresh N(0, 1) draws and verify the stored values actually changed.
## The RNG call sequence is identical to the original (seeded), and all
## printed strings are preserved byte-for-byte.
test.column.assignment <- function() {
  set.seed(1841604082)
  hex <- as.h2o(iris)
  n_cols <- 1 #sample(ncol(hex), 1)
  col_idx <- sample(ncol(hex), n_cols)
  n_replace <- sample(nrow(hex), 1)
  row_idx <- sample(nrow(hex), n_replace)
  print("")
  print("")
  print("Rows to replace: ")
  print(row_idx)
  print("Num to replace: ")
  print(n_replace)
  print("")
  print("")
  before_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  #Log.info(paste("Original Column: ", col_idx, sep = ""))
  print(head(before_df))
  new_vals <- rnorm(n_replace)
  Log.info("Replacing rows for column selected")
  new_vals <- as.h2o(new_vals)
  print("Wuz replacement one? ")
  print("")
  print(new_vals)
  print("")
  print("")
  print(row_idx)
  print(paste("Column selected: ", col_idx))
  print(paste("Column - 1: ", col_idx - 1))
  hex[row_idx, col_idx] <- new_vals
  after_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  print(after_df)
  expect_false(all(after_df == before_df))
}
doTest("EQ2 Tests: [<-", test.column.assignment)
| 1,458 | apache-2.0 |
cd00aaf18ad36f042794171516cc0d1b110f15a3 | mathemage/h2o-3 | h2o-r/tests/testdir_munging/exec/runit_eq2_slotAssign.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
##
# Test: [<-
# Description: Select a dataset, select columns, change values in the column, re-assign col
# Variations: Single col, multi-col, factor col
# Author: Spencer
##
#setupRandomSeed(1689636624)
## Test: [<-
## Overwrite a random subset of rows in one column of an H2O frame with
## fresh N(0, 1) draws and verify the stored values actually changed.
## The RNG call sequence is identical to the original (seeded), and all
## printed strings are preserved byte-for-byte.
test.column.assignment <- function() {
  set.seed(1841604082)
  hex <- as.h2o(iris)
  n_cols <- 1 #sample(ncol(hex), 1)
  col_idx <- sample(ncol(hex), n_cols)
  n_replace <- sample(nrow(hex), 1)
  row_idx <- sample(nrow(hex), n_replace)
  print("")
  print("")
  print("Rows to replace: ")
  print(row_idx)
  print("Num to replace: ")
  print(n_replace)
  print("")
  print("")
  before_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  #Log.info(paste("Original Column: ", col_idx, sep = ""))
  print(head(before_df))
  new_vals <- rnorm(n_replace)
  Log.info("Replacing rows for column selected")
  new_vals <- as.h2o(new_vals)
  print("Wuz replacement one? ")
  print("")
  print(new_vals)
  print("")
  print("")
  print(row_idx)
  print(paste("Column selected: ", col_idx))
  print(paste("Column - 1: ", col_idx - 1))
  hex[row_idx, col_idx] <- new_vals
  after_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  print(after_df)
  expect_false(all(after_df == before_df))
}
doTest("EQ2 Tests: [<-", test.column.assignment)
| 1,458 | apache-2.0 |
cd00aaf18ad36f042794171516cc0d1b110f15a3 | michalkurka/h2o-3 | h2o-r/tests/testdir_munging/exec/runit_eq2_slotAssign.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
##
# Test: [<-
# Description: Select a dataset, select columns, change values in the column, re-assign col
# Variations: Single col, multi-col, factor col
# Author: Spencer
##
#setupRandomSeed(1689636624)
## Test: [<-
## Overwrite a random subset of rows in one column of an H2O frame with
## fresh N(0, 1) draws and verify the stored values actually changed.
## The RNG call sequence is identical to the original (seeded), and all
## printed strings are preserved byte-for-byte.
test.column.assignment <- function() {
  set.seed(1841604082)
  hex <- as.h2o(iris)
  n_cols <- 1 #sample(ncol(hex), 1)
  col_idx <- sample(ncol(hex), n_cols)
  n_replace <- sample(nrow(hex), 1)
  row_idx <- sample(nrow(hex), n_replace)
  print("")
  print("")
  print("Rows to replace: ")
  print(row_idx)
  print("Num to replace: ")
  print(n_replace)
  print("")
  print("")
  before_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  #Log.info(paste("Original Column: ", col_idx, sep = ""))
  print(head(before_df))
  new_vals <- rnorm(n_replace)
  Log.info("Replacing rows for column selected")
  new_vals <- as.h2o(new_vals)
  print("Wuz replacement one? ")
  print("")
  print(new_vals)
  print("")
  print("")
  print(row_idx)
  print(paste("Column selected: ", col_idx))
  print(paste("Column - 1: ", col_idx - 1))
  hex[row_idx, col_idx] <- new_vals
  after_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  print(after_df)
  expect_false(all(after_df == before_df))
}
doTest("EQ2 Tests: [<-", test.column.assignment)
| 1,458 | apache-2.0 |
cd00aaf18ad36f042794171516cc0d1b110f15a3 | nilbody/h2o-3 | h2o-r/tests/testdir_munging/exec/runit_eq2_slotAssign.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
##
# Test: [<-
# Description: Select a dataset, select columns, change values in the column, re-assign col
# Variations: Single col, multi-col, factor col
# Author: Spencer
##
#setupRandomSeed(1689636624)
## Test: [<-
## Overwrite a random subset of rows in one column of an H2O frame with
## fresh N(0, 1) draws and verify the stored values actually changed.
## The RNG call sequence is identical to the original (seeded), and all
## printed strings are preserved byte-for-byte.
test.column.assignment <- function() {
  set.seed(1841604082)
  hex <- as.h2o(iris)
  n_cols <- 1 #sample(ncol(hex), 1)
  col_idx <- sample(ncol(hex), n_cols)
  n_replace <- sample(nrow(hex), 1)
  row_idx <- sample(nrow(hex), n_replace)
  print("")
  print("")
  print("Rows to replace: ")
  print(row_idx)
  print("Num to replace: ")
  print(n_replace)
  print("")
  print("")
  before_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  #Log.info(paste("Original Column: ", col_idx, sep = ""))
  print(head(before_df))
  new_vals <- rnorm(n_replace)
  Log.info("Replacing rows for column selected")
  new_vals <- as.h2o(new_vals)
  print("Wuz replacement one? ")
  print("")
  print(new_vals)
  print("")
  print("")
  print(row_idx)
  print(paste("Column selected: ", col_idx))
  print(paste("Column - 1: ", col_idx - 1))
  hex[row_idx, col_idx] <- new_vals
  after_df <- data.frame(col = as.data.frame(hex)[row_idx, col_idx])
  print(after_df)
  expect_false(all(after_df == before_df))
}
doTest("EQ2 Tests: [<-", test.column.assignment)
| 1,458 | apache-2.0 |
10615031e120d5eeb2b2a871d5507da49bfba3b2 | sfcheung/RNotes | Misc/medMC.R | # Test indirect effect in mediation using Monte Carlo method.
# Shu Fai Cheung
# Last modified: 2015-01-17
# Adapted from the R code in Preacher and Selig (2012).
# Preacher, K. J., & Selig, J. P. (2012). Advantages of
# Monte Carlo confidence intervals for indirect effects.
# Communication Methods and Measures, 6, 77-98.
medMC <- function(a, b, ase, bse, nrep=1000, conf=.95,
                  seed=NULL, printout=TRUE,
                  sd_iv=NULL, sd_dv=NULL,
                  mednames=NULL, ...) {
  # Monte Carlo test of indirect effects in mediation
  # (Preacher & Selig, 2012).
  #
  # Input:
  #   a:        Vector of sample regression coefficients from the
  #             independent variable to the mediator(s).
  #   b:        Vector of sample regression coefficients from the
  #             mediator(s) to the dependent variable.
  #   ase, bse: Standard errors of a and b (same lengths as a and b).
  #   nrep:     Number of Monte Carlo replications.
  #   conf:     Coverage of the percentile confidence interval.
  #   seed:     Optional random seed, for reproducibility.
  #   printout: If TRUE, print a summary table.
  #   sd_iv, sd_dv: SDs of the independent/dependent variables; when both
  #             are supplied, the completely standardized indirect
  #             effects are also computed (otherwise NA).
  #   mednames: Optional row labels for the mediators.
  #   ...:      Passed on to print().
  #
  # Value (invisible list): point estimates, percentile CIs, standardized
  #   effects, and the simulated draws.
  if (!requireNamespace("MASS", quietly = TRUE))
    stop("medMC() requires the 'MASS' package.", call. = FALSE)
  k <- length(ase)
  # Validate that the four coefficient vectors line up.
  stopifnot(length(a) == k, length(b) == k, length(bse) == k, nrep >= 1)
  if (!is.null(seed)) set.seed(seed)
  # Independent normal draws for the a- and b-paths (diagonal covariance).
  # matrix() guards the k == 1 / nrep == 1 cases, where mvrnorm can
  # return a plain vector instead of an nrep x k matrix.
  av_s <- if (k > 1) diag(ase^2) else matrix(ase^2, 1, 1)
  bv_s <- if (k > 1) diag(bse^2) else matrix(bse^2, 1, 1)
  a_mc <- matrix(MASS::mvrnorm(nrep, a, av_s), nrow = nrep)
  b_mc <- matrix(MASS::mvrnorm(nrep, b, bv_s), nrow = nrep)
  ab_mc <- a_mc * b_mc        # simulated specific indirect effects
  total_mc <- rowSums(ab_mc)  # simulated total indirect effect
  # Point estimates of the indirect effects.
  ab <- a * b
  total <- sum(ab)
  # Percentile confidence limits from the simulated distributions.
  conf_lo <- (1 - conf) / 2
  conf_hi <- 1 - conf_lo
  ab_ci <- t(apply(ab_mc, 2, quantile, probs = c(conf_lo, conf_hi)))
  colnames(ab_ci) <- c(paste0(conf * 100, "% Lower"),
                       paste0(conf * 100, "% Upper"))
  rownames(ab_ci) <- if (is.null(mednames))
    paste("Specific Indirect Effect", 1:k) else mednames
  total_ci <- matrix(quantile(total_mc, probs = c(conf_lo, conf_hi)), 1, 2)
  colnames(total_ci) <- colnames(ab_ci)
  rownames(total_ci) <- "Total Indirect Effect"
  # Completely standardized indirect effects, when both SDs are supplied.
  if (!is.null(sd_iv) && !is.null(sd_dv)) {
    ab_std <- ab * sd_iv / sd_dv
    total_std <- total * sd_iv / sd_dv
  } else {
    ab_std <- rep(NA, k)
    total_std <- NA
  }
  if (printout) {
    tmp <- cbind(c(ab, total), rbind(ab_ci, total_ci), c(ab_std, total_std))
    colnames(tmp) <- c("Estimate", colnames(ab_ci), "Completely Standardized")
    print(tmp, ...)
  }
  results <- list(specific_est = ab, total_est = total,
                  specific_ci = ab_ci, total_ci = total_ci,
                  ab_simulated = ab_mc,
                  # Misspelled name kept for backward compatibility with
                  # callers of the original version.
                  ab_simualted = ab_mc,
                  specific_std = ab_std, total_std = total_std,
                  total_simulated = total_mc)
  invisible(results)
}
# Examples:
# In real analysis, nrep should be at least 20000.
# medMC(a=c(.5, .7), b=c(.4, .6),
#       ase=c(.1, .2), bse=c(.2, .1), nrep=1000, printout=TRUE, digits=4)
# medMC(a=.5, b=.4, ase=.1, bse=.2, nrep=1000, print=TRUE, digits=4) | 3,535 | gpl-2.0 |
90d14f0d5320577c2d8ce8f82df8a79a5ed07d93 | andrewejaffe/winterR_2016 | Functions/lecture/Functions.R | ## ----return2, comment="",prompt=TRUE-------------------------------------
# Demonstrates an explicit return(): gives back the 2nd element of `x`.
return2 <- function(x) {
  return(x[2])
}
return2(c(1, 4, 5, 76))
## ----return2a, comment="",prompt=TRUE------------------------------------
# Same contract as return2, but relies on R returning the value of the
# last evaluated expression instead of an explicit return().
return2a <- function(x) {
  x[2]
}
return2a(c(1, 4, 5, 76))
## ----return2b, comment="",prompt=TRUE------------------------------------
# One-line variant: a single-expression body needs no braces.
return2b <- function(x) x[2]
return2b(c(1, 4, 5, 76))
## ----return2c, comment="",prompt=TRUE------------------------------------
# Generalised variant: the element position `n` is a second argument.
return2c <- function(x, n) x[n]
return2c(c(1, 4, 5, 76), 3)
## ----sqdif, comment="",prompt=TRUE---------------------------------------
# Squared difference of two numbers, with defaults x = 2, y = 3.
sqdif <- function(x = 2, y = 3) {
  d <- x - y
  d^2
}
sqdif()
sqdif(x = 10, y = 5)
sqdif(10, 5)
## ----top, comment="",prompt=TRUE-----------------------------------------
# Return the top-left n-by-n corner of a matrix or data frame.
# seq_len(n) rather than 1:n: for n == 0 it correctly yields an empty
# selection instead of the c(1, 0) index produced by 1:0.
top <- function(mat, n = 5) mat[seq_len(n), seq_len(n)]
my.mat <- matrix(1:1000, nrow = 100)   # 'nrow' spelled out (was partial-matched 'nr')
top(my.mat) #note that we are using the default value for n
## ----top2, comment="",prompt=TRUE----------------------------------------
# Download the Charm City Circulator ridership data (requires network
# access), split it by day of week, and show the corner of each piece.
circ = read.csv("http://www.aejaffe.com/winterR_2016/data/Charm_City_Circulator_Ridership.csv",
header=TRUE,as.is=TRUE)
dayList = split(circ, circ$day)
# Extra arguments after the function (n = 2) are forwarded by lapply().
lapply(dayList, top, n = 2)
## ----top3, comment="",prompt=TRUE----------------------------------------
# Same result using an anonymous function instead of top().
lapply(dayList, function(x) x[1:2,1:2])
## ----sapply1, comment="", prompt=TRUE------------------------------------
# sapply() simplifies the result to a matrix / vector where possible.
sapply(dayList, dim)
sapply(circ, class)
## ----sapply2, comment="", prompt=TRUE------------------------------------
myList = list(a=1:10, b=c(2,4,5), c = c("a","b","c"),
d = factor(c("boy","girl","girl")))
# lapply() keeps the list structure; sapply() on the result simplifies.
tmp = lapply(myList,function(x) x[1])
tmp
sapply(tmp, class)
## ----sapply3, comment="", prompt=TRUE------------------------------------
sapply(myList,function(x) x[1])
sapply(myList,function(x) as.character(x[1]))
| 1,799 | mit |
25079ecb5e8106514c7bf7f95a09c40103b5f2a2 | quantifish/TagGrowth | R/plot_linf.R | #' Plot L infinity
#' Plot L infinity
#'
#' Plot the normal prior densities for L-infinity (asymptotic length) for
#' females and males together with the model estimates, writing the
#' figure to \code{"<file_name>.png"}.
#'
#' @param report A fitted-model report whose \code{value} component is a
#'   named vector with two \code{"Linf"} entries (female first, then male).
#' @param file_name Base name of the output file, without the ".png"
#'   extension.
#' @return Invisibly \code{NULL}; called for its side effect of writing
#'   the PNG file.
#' @export
#'
plot_linf <- function(report, file_name = "LinfPrior")
{
  png(paste0(file_name, ".png"), width = 5, height = 5, units = "in", res = 300)
  # Guarantee the device is closed even if plotting fails part-way.
  on.exit(dev.off(), add = TRUE)
  par(mfrow = c(1, 1))
  x <- 75:275
  # Prior means and CV (= 0.102) are hard-coded from the assessment.
  priorF <- dnorm(x = x, mean = 180.20, sd = 0.102 * 180.20)
  priorM <- dnorm(x = x, mean = 169.07, sd = 0.102 * 169.07)
  plot(x, priorM, type = "l", col = "blue", lwd = 2,
       xlab = expression(L[infinity]), ylab = "Density", las = 1)
  lines(x, priorF, col = "pink", lwd = 2)
  # Estimates: first "Linf" entry is female, second is male.
  linf_est <- report$value[names(report$value) %in% "Linf"]
  LinfF <- linf_est[1]
  LinfM <- linf_est[2]
  abline(v = LinfF, lty = 2, lwd = 2, col = "pink")
  abline(v = LinfM, lty = 2, lwd = 2, col = "blue")
  legend("topleft",
         legend = c("Female prior", "Female estimate",
                    "Male prior", "Male estimate"),
         lwd = 2, lty = c(1, 2, 1, 2),
         col = c("pink", "pink", "blue", "blue"), bty = "n")
  invisible(NULL)
}
| 901 | mit |
90d14f0d5320577c2d8ce8f82df8a79a5ed07d93 | andrewejaffe/summerR_2016 | Functions/lecture/Functions.R | ## ----return2, comment="",prompt=TRUE-------------------------------------
# Lecture script: writing functions and applying them with lapply/sapply.
# (Extracted knitr chunks; the `## ----name...` lines are chunk markers.)
# Explicit return() of a vector's second element.
return2 = function(x) {
return(x[2])
}
return2(c(1,4,5,76))
## ----return2a, comment="",prompt=TRUE------------------------------------
# Same behaviour: R returns the last evaluated expression implicitly.
return2a = function(x) {
x[2]
}
return2a(c(1,4,5,76))
## ----return2b, comment="",prompt=TRUE------------------------------------
# One-line body: no braces needed for a single expression.
return2b = function(x) x[2]
return2b(c(1,4,5,76))
## ----return2c, comment="",prompt=TRUE------------------------------------
# Generalised: the element position n is an argument.
return2c = function(x,n) x[n]
return2c(c(1,4,5,76), 3)
## ----sqdif, comment="",prompt=TRUE---------------------------------------
# Squared difference with default arguments x = 2, y = 3.
sqdif <- function(x=2,y=3){
(x-y)^2
}
sqdif()
sqdif(x=10,y=5)
sqdif(10,5)
## ----top, comment="",prompt=TRUE-----------------------------------------
# Top-left n-by-n corner of a matrix ('nr' partial-matches 'nrow').
top = function(mat,n=5) mat[1:n,1:n]
my.mat = matrix(1:1000,nr=100)
top(my.mat) #note that we are using the default value for n
## ----top2, comment="",prompt=TRUE----------------------------------------
# Requires network access to download the ridership CSV.
circ = read.csv("http://www.aejaffe.com/winterR_2016/data/Charm_City_Circulator_Ridership.csv",
header=TRUE,as.is=TRUE)
dayList = split(circ, circ$day)
# Extra arguments (n = 2) are forwarded to top() by lapply().
lapply(dayList, top, n = 2)
## ----top3, comment="",prompt=TRUE----------------------------------------
lapply(dayList, function(x) x[1:2,1:2])
## ----sapply1, comment="", prompt=TRUE------------------------------------
# sapply() simplifies the result where possible.
sapply(dayList, dim)
sapply(circ, class)
## ----sapply2, comment="", prompt=TRUE------------------------------------
myList = list(a=1:10, b=c(2,4,5), c = c("a","b","c"),
d = factor(c("boy","girl","girl")))
tmp = lapply(myList,function(x) x[1])
tmp
sapply(tmp, class)
## ----sapply3, comment="", prompt=TRUE------------------------------------
sapply(myList,function(x) x[1])
sapply(myList,function(x) as.character(x[1]))
| 1,799 | mit |
cf63a63191ca96ba1dfbe5211bf7be17e9139c4b | vankesteren/jasp-desktop | JASP-Tests/R/tests/testthat/test-ldgaussianunivariate.R | context("Discover Distributions - Normal")
# Build the full option set for the "Discover Distributions - Normal"
# (LDgaussianunivariate) analysis: enable every output (plots, tables,
# fit statistics, MLE estimation) so all result components are exercised.
options <- jasptools::analysisOptions("LDgaussianunivariate")
options$.meta <- list(newVariableName = list(containsColumn = TRUE), variable = list(
containsColumn = TRUE))
options$andersonDarling <- TRUE
options$ciInterval <- TRUE
options$cramerVonMisses <- TRUE
options$ecdf <- TRUE
options$estCDF <- TRUE
options$estPDF <- TRUE
options$explanatoryText <- TRUE
options$highlightDensity <- TRUE
options$highlightProbability <- TRUE
options$histogram <- TRUE
options$kolmogorovSmirnov <- TRUE
options$methodMLE <- TRUE
options$moments <- TRUE
options$momentsUpTo <- 10
options$newVariableName <- ""
options$outputSE <- TRUE
options$parsSupportMoments <- TRUE
options$plotCDF <- TRUE
options$plotQF <- TRUE
options$ppplot <- TRUE
options$qqplot <- TRUE
options$shapiroWilk <- TRUE
options$summary <- TRUE
# Column of Distributions.csv holding 100 standard-normal draws.
options$variable <- "Normal100(mu=0,sigma=1)"
# Fixed seed so MLE fitting / resampling is reproducible across runs.
set.seed(1)
results <- jasptools::run("LDgaussianunivariate", "Distributions.csv", options)
# Snapshot tests: each block digs the plot/table object out of the
# `results` structure and compares it against a stored reference
# (expect_equal_plots / expect_equal_tables are jasptools helpers).
test_that("Empirical Cumulative Distribution plot matches", {
plotName <- results[["results"]][["dataContainer"]][["collection"]][["dataContainer_ecdf"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "empirical-cumulative-distribution", dir="LDgaussianunivariate")
})
test_that("Histogram plot matches", {
plotName <- results[["results"]][["dataContainer"]][["collection"]][["dataContainer_histogram"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "histogram", dir="LDgaussianunivariate")
})
test_that("Observed Moments table results match", {
table <- results[["results"]][["dataContainer"]][["collection"]][["dataContainer_moments"]][["data"]]
# Reference values: (raw moment, order, central moment) triples, orders 1..10.
expect_equal_tables(table,
list(0.130415109675896, 1, 0.130415109675896, 0.82808243717973, 2,
0.845090538011506, -0.047697887796715, 3, 0.278503611135073,
2.01019716268452, 4, 2.07010899463661, -0.292701501538923, 5,
1.02839194368045, 7.13443207941017, 6, 7.4197225223246, -1.68338958372109,
7, 4.88136372180672, 29.9772236055153, 8, 31.6228767537321,
-9.62585956073692, 9, 25.8569456873756, 137.395432416605, 10,
147.769982264367))
})
test_that("Descriptives table results match", {
table <- results[["results"]][["dataContainer"]][["collection"]][["dataContainer_summary"]][["data"]]
expect_equal_tables(table,
list(2.40161776050478, 0.130415109675896, 0.113909160788544, -2.2146998871775,
-0.494242549079377, 0.70433710614055, 100, 0.914574713318793,
0.836446906242152, "Normal100(mu=0,sigma=1)"))
})
test_that("Estimated Parameters table results match", {
table <- results[["results"]][["mleContainer"]][["collection"]][["mleContainer_estParametersTable"]][["data"]]
# "<unicode>" entries are the Greek parameter symbols (mu, sigma) as
# they round-trip through the snapshot machinery.
expect_equal_tables(table,
list(0.130422052279253, -0.0479317525657895, "<unicode>", 0.0909985113256541,
0.308775857124295, 0.828072906238923, 0.598550253333547, "<unicode><unicode>",
0.117105546181369, 1.0575955591443))
})
test_that("Empirical vs. Theoretical CDF plot matches", {
plotName <- results[["results"]][["mleContainer"]][["collection"]][["mleContainer_mleFitAssessment"]][["collection"]][["mleContainer_mleFitAssessment_estCDF"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "empirical-vs-theoretical-cdf", dir="LDgaussianunivariate")
})
test_that("Histogram vs. Theoretical PDF plot matches", {
plotName <- results[["results"]][["mleContainer"]][["collection"]][["mleContainer_mleFitAssessment"]][["collection"]][["mleContainer_mleFitAssessment_estPDF"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "histogram-vs-theoretical-pdf", dir="LDgaussianunivariate")
})
test_that("Fit Statistics table results match", {
table <- results[["results"]][["mleContainer"]][["collection"]][["mleContainer_mleFitAssessment"]][["collection"]][["mleContainer_mleFitAssessment_fitStatisticsTable"]][["data"]]
expect_equal_tables(table,
list(0.99278432447795, 0.0429230630813024, "Kolmogorov-Smirnov", 0.99519059575295,
0.0217999835633844, "Cram<unicode>r-von Mises", 0.999444838973091,
0.134303182925592, "Anderson-Darling", 0.994211531885932, 0.996119141722174,
"Shapiro-Wilk"))
})
test_that("P-P plot matches", {
plotName <- results[["results"]][["mleContainer"]][["collection"]][["mleContainer_mleFitAssessment"]][["collection"]][["mleContainer_mleFitAssessment_ppplot"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "p-p-plot", dir="LDgaussianunivariate")
})
test_that("Q-Q plot matches", {
plotName <- results[["results"]][["mleContainer"]][["collection"]][["mleContainer_mleFitAssessment"]][["collection"]][["mleContainer_mleFitAssessment_qqplot"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "q-q-plot", dir="LDgaussianunivariate")
})
test_that("Cumulative Probability Plot matches", {
plotName <- results[["results"]][["plotCDF"]][["collection"]][["plotCDF_cdfPlot"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "cumulative-probability-plot", dir="LDgaussianunivariate")
})
test_that("Density Plot matches", {
plotName <- results[["results"]][["plotPDF"]][["collection"]][["plotPDF_pdfPlot"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "density-plot", dir="LDgaussianunivariate")
})
test_that("Quantile Plot matches", {
plotName <- results[["results"]][["plotQF"]][["collection"]][["plotQF_qfPlot"]][["data"]]
testPlot <- results[["state"]][["figures"]][[plotName]][["obj"]]
expect_equal_plots(testPlot, "quantile-plot", dir="LDgaussianunivariate")
}) | 6,257 | agpl-3.0 |
3f81c50745c0a090adc1f45154855ee20a161ad4 | andy-thomason/r-source | tests/reg-tests-1c.R | ## Regression tests for R >= 3.0.0
# R regression-test script: each stanza reproduces a historical bug and
# asserts the fixed behaviour. Do not "clean up" the odd-looking code --
# the oddities are the point of the tests.
pdf("reg-tests-1c.pdf", encoding = "ISOLatin1.enc")
## mapply with classed objects with length method
## was not documented to work in 2.x.y
setClass("A", representation(aa = "integer"))
a <- new("A", aa = 101:106)
setMethod("length", "A", function(x) length(x@aa))
setMethod("[[", "A", function(x, i, j, ...) x@aa[[i]])
(z <- mapply(function(x, y) {x * y}, a, rep(1:3, 2)))
stopifnot(z == c(101, 204, 309, 104, 210, 318))
## reported as a bug (which it was not) by H. Pages in
## https://stat.ethz.ch/pipermail/r-devel/2012-November/065229.html
## recyling in split()
## https://stat.ethz.ch/pipermail/r-devel/2013-January/065700.html
x <- 1:6
y <- split(x, 1:2)
class(x) <- "ABC" ## class(x) <- "A" creates an invalid object
yy <- split(x, 1:2)
stopifnot(identical(y, yy))
## were different in R < 3.0.0
## dates with fractional seconds after 2038 (PR#15200)
## Extremely speculative!
z <- as.POSIXct(2^31+c(0.4, 0.8), origin=ISOdatetime(1970,1,1,0,0,0,tz="GMT"))
zz <- format(z)
stopifnot(zz[1] == zz[2])
## printed form rounded not truncated in R < 3.0.0
## origin coerced in tz and not GMT by as.POSIXct.numeric()
x <- as.POSIXct(1262304000, origin="1970-01-01", tz="EST")
y <- as.POSIXct(1262304000, origin=.POSIXct(0, "GMT"), tz="EST")
stopifnot(identical(x, y))
## Handling records with quotes in names
x <- c("a b' c",
       "'d e' f g",
       "h i 'j",
       "k l m'")
y <- data.frame(V1 = c("a", "d e", "h"), V2 = c("b'", "f", "i"), V3 = c("c", "g", "j\nk l m"))
f <- tempfile()
writeLines(x, f)
stopifnot(identical(count.fields(f), c(3L, 3L, NA_integer_, 3L)))
stopifnot(identical(read.table(f), y))
stopifnot(identical(scan(f, ""), as.character(t(as.matrix(y)))))
## docu always said 'length 1 is sorted':
stopifnot(!is.unsorted(NA))
## str(.) for large factors should be fast:
u <- as.character(runif(1e5))
t1 <- max(0.001, system.time(str(u))[[1]]) # get a baseline > 0
uf <- factor(u)
(t2 <- system.time(str(uf))[[1]]) / t1 # typically around 1--2
stopifnot(t2 / t1 < 30)
## was around 600--850 for R <= 3.0.1
## ftable(<array with unusual dimnames>)
(m <- matrix(1:12, 3,4, dimnames=list(ROWS=paste0("row",1:3), COLS=NULL)))
ftable(m)
## failed to format (and hence print) because of NULL 'COLS' dimnames
## regression test formerly in kmeans.Rd, but result differs by platform
## Artificial example [was "infinite loop" on x86_64; PR#15364]
rr <- c(rep(-0.4, 5), rep(-0.4- 1.11e-16, 14), -.5)
r. <- signif(rr, 12)
k3 <- kmeans(rr, 3, trace=2) ## Warning: Quick-Transfer.. steps exceed
try ( k. <- kmeans(r., 3) ) # after rounding, have only two distinct points
k. <- kmeans(r., 2) # fine
## PR#15376
stem(c(1, Inf))
## hung in 3.0.1
## PR#15377, very long variable names
x <- 1:10
y <- x + rnorm(10)
z <- y + rnorm(10)
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy <- y
fit <- lm(cbind(yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, z) ~ x)
## gave spurious error message in 3.0.1.
# (continuation of the regression-test script; one stanza per bug)
## PR#15341 singular complex matrix in rcond()
set.seed(11)
n <- 5
A <- matrix(runif(n*n),nrow=n)
B <- matrix(runif(n*n),nrow=n)
# Make the last row a linear combination, so B is numerically singular.
B[n,] <- (B[n-1,]+B[n-2,])/2
rcond(B)
B <- B + 0i
rcond(B)
## gave error message (OK) in R 3.0.1: now returns 0 as in real case.
## Misuse of formatC as in PR#15303
days <- as.Date(c("2012-02-02", "2012-03-03", "2012-05-05"))
(z <- formatC(days))
stopifnot(!is.object(z), is.null(oldClass(z)))
## used to copy over class in R < 3.0.2.
## PR15219
val <- sqrt(pi)
fun <- function(x) (-log(x))^(-1/2)
(res <- integrate(fun, 0, 1, rel.tol = 1e-4))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, 1, rel.tol = 1e-6))
stopifnot(abs(res$value - val) < res$abs.error)
res <- integrate(fun, 0, 1, rel.tol = 1e-8)
stopifnot(abs(res$value - val) < res$abs.error)
fun <- function(x) x^(-1/2)*exp(-x)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-4))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-6))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-8))
stopifnot(abs(res$value - val) < res$abs.error)
## sometimes exceeded reported error in 2.12.0 - 3.0.1
## Unary + should coerce
x <- c(TRUE, FALSE, NA, TRUE)
stopifnot(is.integer(+x))
## +x was logical in R <= 3.0.1
## Attritbutes of value of unary operators
# +x, -x were ts, !x was not in 3.0.2
x <- ts(c(a=TRUE, b=FALSE, c=NA, d=TRUE), frequency = 4, start = 2000)
x; +x; -x; !x
stopifnot(is.ts(!x), !is.ts(+x), !is.ts(-x))
# +x, -x were ts, !x was not in 3.0.2
x <- ts(c(a=1, b=2, c=0, d=4), frequency = 4, start = 2010)
x; +x; -x; !x
stopifnot(!is.ts(!x), is.ts(+x), is.ts(-x))
##
## regression test incorrectly in colorRamp.Rd
bb <- colorRampPalette(2)(4)
stopifnot(bb[1] == bb)
## special case, invalid in R <= 2.15.0:
## Setting NAMED on ... arguments
f <- function(...) { x <- (...); x[1] <- 7; (...) }
stopifnot(f(1+2) == 3)
## was 7 in 3.0.1
## copying attributes from only one arg of a binary operator.
A <- array(c(1), dim = c(1L,1L), dimnames = list("a", 1))
x <- c(a = 1)
B <- A/(pi*x)
stopifnot(is.null(names(B)))
## was wrong in R-devel in Aug 2013
## needed an un-NAMED rhs.
## lgamma(x) for very small negative x
X <- 3e-308; stopifnot(identical(lgamma(-X), lgamma(X)))
## lgamma(-X) was NaN in R <= 3.0.1
## PR#15413
z <- subset(data.frame(one = numeric()), select = one)
stopifnot(nrow(z) == 0L)
## created a row prior to 3.0.2
## https://stat.ethz.ch/pipermail/r-devel/2013-September/067524.html
dbeta(0.9, 9.9e307, 10)
dbeta(0.1, 9, 9.9e307)
dbeta(0.1, 9.9e307, 10)
## first two hung in R <= 3.0.2
## PR#15465
provideDimnames(matrix(nrow = 0, ncol = 1))
provideDimnames(table(character()))
as.data.frame(table(character()))
## all failed in 3.0.2
## PR#15004
n <- 10
s <- 3
l <- 10000
m <- 20
x <- data.frame(x1 = 1:n, x2 = 1:n)
by <- data.frame(V1 = factor(rep(1:3, n %/% s + 1)[1:n], levels = 1:s))
for(i in 1:m) {
by[[i + 1]] <- factor(rep(l, n), levels = 1:l)
}
agg <- aggregate.data.frame(x, by, mean)
stopifnot(nrow(unique(by)) == nrow(agg))
## rounding caused groups to be falsely merged
## PR#15454
set.seed(357)
z <- matrix(c(runif(50, -1, 1), runif(50, -1e-190, 1e-190)), nrow = 10)
contour(z)
## failed because rounding made crossing tests inconsistent
## Various cases where zero length vectors were not handled properly
## by functions in base and utils, including PR#15499
y <- as.data.frame(list())
format(y)
format(I(integer()))
gl(0, 2)
z <- list(numeric(0), 1)
stopifnot(identical(relist(unlist(z), z), z))
summary(y)
## all failed in 3.0.2
## PR#15518 Parser catching errors in particular circumstance:
(ee <- tryCatch(parse(text = "_"), error= function(e)e))
stopifnot(inherits(ee, "error"))
## unexpected characters caused the parser to segfault in 3.0.2
## nonsense value of nmax
unique(1:3, nmax = 1)
## infinite-looped in 3.0.2, now ignored.
## besselI() (and others), now using sinpi() etc:
stopifnot(all.equal(besselI(2.125,-5+1/1024),
0.02679209380095711, tol= 8e-16),
all.equal(lgamma(-12+1/1024), -13.053274367453049, tol=8e-16))
## rel.error was 1.5e-13 / 7.5e-14 in R <= 3.0.x
ss <- sinpi(2*(-10:10)-2^-12)
tt <- tanpi( (-10:10)-2^-12)
stopifnot(ss == ss[1], tt == tt[1], # as internal arithmetic must be exact here
all.equal(ss[1], -0.00076699031874270453, tol=8e-16),
all.equal(tt[1], -0.00076699054434309260, tol=8e-16))
## (checked via Rmpfr) The above failed during development
## PR#15535 c() "promoted" raw vectors to bad logical values
stopifnot( c(as.raw(11), TRUE) == TRUE )
## as.raw(11) became a logical value coded as 11,
## and did not test equal to TRUE.
## PR#15564
fit <- lm(rnorm(10) ~ I(1:10))
predict(fit, interval = "confidence", scale = 1)
## failed in <= 3.0.2 with object 'w' not found
## PR#15534 deparse() did not produce reparseable complex vectors
# Check that deparse() output parses back and evaluates to the original
# value; stop with a descriptive error otherwise.
assert.reparsable <- function(sexp)
{
  txt <- paste(deparse(sexp), collapse = " ")
  back <- tryCatch(eval(parse(text = txt)[[1]]), error = function(e) NULL)
  if (is.null(back)) {
    stop(sprintf("Deparsing produced invalid syntax: %s", txt))
  }
  if (!identical(back, sexp)) {
    stop(sprintf("Deparsing produced change: value is not %s", back))
  }
}
# Round-trip deparse/parse checks; the complex-number cases below are the
# ones that used to fail (see PR#15534).
assert.reparsable(1)
assert.reparsable("string")
assert.reparsable(2+3i)
assert.reparsable(1:10)
assert.reparsable(c(NA, 12, NA, 14))
assert.reparsable(as.complex(NA))
assert.reparsable(complex(real=Inf, i=4))
assert.reparsable(complex(real=Inf, i=Inf))
assert.reparsable(complex(real=Inf, i=-Inf))
assert.reparsable(complex(real=3, i=-Inf))
assert.reparsable(complex(real=3, i=NaN))
assert.reparsable(complex(r=NaN, i=0))
assert.reparsable(complex(real=NA, i=1))
assert.reparsable(complex(real=1, i=NA))
## last 7 all failed
## PR#15621 backticks could not be escaped
stopifnot(deparse(as.name("`"), backtick=TRUE) == "`\\``")
assign("`", TRUE)
`\``
tools::assertError(parse("```"))
##
## We document tanpi(0.5) etc to be NaN
stopifnot(is.nan(tanpi(c(0.5, 1.5, -0.5, -1.5))))
## That is not required for system implementations, and some give +/-Inf
## PR#15642 segfault when parsing overflowing reals
as.double("1e1000")
ll <- ml <- list(1,2); dim(ml) <- 2:1
ali <- all.equal(list( ), identity) # failed in R-devel for ~ 30 hours
al1 <- all.equal(list(1), identity) # failed in R < 3.1.0
stopifnot(length(ali) == 3, grepl("list", ali[1]),
grepl("length", ali[2], ignore.case=TRUE),
is.character(al1), length(al1) >= 2,
all.equal(ml, ml),
all.equal(ll, ml, check.attributes=FALSE))
## PR#15699 aggregate failed when there were no grouping variables
dat <- data.frame(Y = runif(10), X = sample(LETTERS[1:3], 10, TRUE))
aggregate(Y ~ 1, FUN = mean, data = dat)
## merge() with duplicated column names, similar to PR#15618
X <- data.frame(Date = c("1967-02-01", "1967-02-02", "1967-02-03"),
Settle.x = c(NA, NA, NA), Settle.y = c(NA, NA, NA),
Settle = c(35.4, 35.15, 34.95))
Y <- data.frame(Date = c("2013-12-10", "2013-12-11", "2013-12-12"),
Settle = c(16.44, 16.65, 16.77))
merge(X, Y, by = "Date", all = TRUE)
## failed in R < 3.1.0: now warns (correctly).
## PR#15679
# Build a deeply nested list: every level is a one-element list whose
# element (named `key`) holds the next level; the bottom is an empty list.
badstructure <- function(depth, key)
{
  inner <- if (depth == 1L) list() else list(badstructure(depth - 1, key))
  node <- list()
  node[[key]] <- inner
  node
}
badstructure(20, "children")
## overran, segfaulted for the original reporter.
## PR#15702 and PR#15703
d <- as.dendrogram(hclust(dist(sin(1:7))))
(dl <- d[[c(2,1,2)]]) # single-leaf dendrogram
stopifnot(inherits(dl, "dendrogram"), is.leaf(dl),
identical(order.dendrogram(dl), as.vector(dl)),
identical(d, as.dendrogram(d)))
## as.dendrogram() was hidden; order.*() failed for leaf
## using *named* method
hw <- hclust(dist(sqrt(1:5)), method=c(M = "ward"))
## failed for 2 days in R-devel/-alpha
## PR#15758
my_env <- new.env(); my_env$one <- 1L
save(one, file = tempfile(), envir = my_env)
## failed in R < 3.1.1.
## Conversion to numeric in boundary case
# Hexadecimal float just below 1; "no.loss" must agree with "allow.loss".
ch <- "0x1.ffa0000000001p-1"
rr <- type.convert(ch, numerals = "allow.loss")
rX <- type.convert(ch, numerals = "no.loss")
stopifnot(is.numeric(rr), identical(rr, rX),
all.equal(rr, 0.999267578125),
all.equal(type.convert(ch, numerals = "warn"),
type.convert("0x1.ffap-1",numerals = "warn"), tol = 5e-15))
## type.convert(ch) was not numeric in R 3.1.0
##
# Integer too wide for exact double representation: "no.loss" keeps it
# as factor/character while "allow.loss" converts with rounding.
ch <- "1234567890123456789"
rr <- type.convert(ch, numerals = "allow.loss")
rX <- type.convert(ch, numerals = "no.loss")
rx <- type.convert(ch, numerals = "no.loss", as.is = TRUE)
tools::assertWarning(r. <- type.convert(ch, numerals = "warn.loss"))
stopifnot(is.numeric(rr), identical(rr, r.), all.equal(rr, 1.234567890e18),
is.factor(rX), identical(rx, ch))
## PR#15764: integer overflow could happen without a warning or giving NA
tools::assertWarning(ii <- 1980000020L + 222000000L)
stopifnot(is.na(ii))
tools::assertWarning(ii <- (-1980000020L) + (-222000000L))
stopifnot(is.na(ii))
tools::assertWarning(ii <- (-1980000020L) - 222000000L)
stopifnot(is.na(ii))
tools::assertWarning(ii <- 1980000020L - (-222000000L))
stopifnot(is.na(ii))
## first two failed for some version of clang in R < 3.1.1
## PR#15735: formulae with exactly 32 variables
myFormula <- as.formula(paste(c("y ~ x0", paste0("x", 1:30)), collapse = "+"))
ans <- update(myFormula, . ~ . - w1)
stopifnot(identical(ans, myFormula))
updateArgument <-
as.formula(paste(c(". ~ . ", paste0("w", 1:30)), collapse = " - "))
ans2 <- update(myFormula, updateArgument)
stopifnot(identical(ans2, myFormula))
## PR#15753
0x110p-5L
stopifnot(.Last.value == 8.5)
## was 272 with a garbled message in R 3.0.0 - 3.1.0.
## numericDeriv failed to duplicate variables in
## the expression before modifying them. PR#15849
x <- 10; y <- 10
d1 <- numericDeriv(quote(x+y),c("x","y"))
x <- y <- 10
d2 <- numericDeriv(quote(x+y),c("x","y"))
stopifnot(identical(d1,d2))
## The second gave the wrong answer
## prettyNum(x, zero.print = .) failed when x had NAs
pp <- sapply(list(TRUE, FALSE, ".", " "), function(.)
prettyNum(c(0:1,NA), zero.print = . ))
stopifnot(identical(pp[1,], c("0", " ", ".", " ")),
pp[2:3,] == c("1","NA"))
## all 4 prettyNum() would error out
## checking all.equal() with externalptr
library(methods) # getClass()'s versionKey is an e.ptr
cA <- getClass("ANY")
stopifnot(all.equal(cA, cA),
is.character(all.equal(cA, getClass("S4"))))
# both all.equal() failed in R <= 3.1.1
## as.hexmode(x), as.octmode(x) when x is double
x <- c(NA, 1)
stopifnot(identical(x == x,
as.hexmode(x) == as.octmode(x)))
p <- c(1, pi)
tools::assertError(as.hexmode(p))
tools::assertError(as.octmode(p))
## where all "wrong" in R <= 3.1.1
## PR#15935
y <- 1:3
drop1(lm(y ~ 1))
drop1(glm(y ~ 1))
stats:::drop1.default(glm(y ~ 1))
## gave error in R < 3.1.2
## getAnywhere() wrongly dealing with namespace hidden list object
nm <- deparse(body(pbinom)[[2]])# == "C_pbinom" currently
gg <- getAnywhere(nm)
stopifnot(length(gg$objs) == 1)
## was 4 and printed "4 differing objects matching ‘C_pbinom’ ..." in R <= 3.1.1
## 0-length consistency of options(), PR#15979
stopifnot(identical(options(list()), options(NULL)))
## options(list()) failed in R <= 3.1.1
## merge.dendrogram(), PR#15648
# Build a random dendrogram over `n` leaves labelled <lab>1..<lab>n from
# a symmetrised random dissimilarity matrix.
mkDend <- function(n, lab, rGen = function(n) 1+round(16*abs(rnorm(n)))) {
    stopifnot(is.numeric(n), length(n) == 1, n >= 1, is.character(lab))
    m <- matrix(rGen(n * n), nrow = n, ncol = n)
    dimnames(m) <- list(paste0(lab, 1:n), paste0(lab, 1:n))
    # m + t(m) makes the matrix symmetric, i.e. a valid dissimilarity.
    as.dendrogram(hclust(as.dist(m + t(m))))
}
# merge.dendrogram() regression (PR#15648): merged order/labels must be
# consistent after converting back with as.hclust().
set.seed(7)
da <- mkDend(4, "A")
db <- mkDend(3, "B")
d.ab <- merge(da, db)
hcab <- as.hclust(d.ab)
stopifnot(hcab$order == c(2, 4, 1, 3, 7, 5, 6),
hcab$labels == c(paste0("A", 1:4), paste0("B", 1:3)))
## was wrong in R <= 3.1.1
## bw.SJ() and similar with NA,Inf values, PR#16024
try(bw.SJ (c(NA,2,3)))
try(bw.bcv(c(-Inf,2,3)))
try(bw.ucv(c(1,NaN,3,4)))
## seg.faulted in 3.0.0 <= R <= 3.1.1
## as.dendrogram() with wrong input
x <- rbind(c( -6, -9), c( 0, 13),
c(-15, 6), c(-14, 0), c(12,-10))
dx <- dist(x,"manhattan")
hx <- hclust(dx)
# Deliberately corrupt the merge matrix; as.dendrogram() must error.
hx$merge <- matrix(c(-3, 1, -2, 3,
-4, -5, 2, 3), 4,2)
tools::assertError(as.dendrogram(hx))
## 8 member dendrogram and memory explosion for larger examples in R <= 3.1.2
## abs with named args failed, PR#16047
abs(x=1i)
## Complained that the arg should be named z
## Big exponents overflowed, PR#15976
x <- 0E4933
y <- 0x0p100000
stopifnot(x == 0, y == 0)
##
## drop.terms() dropped some attributes, PR#16029
test <- model.frame(Employed ~ Year + poly(GNP,3) + Population, data=longley)
mterm <- terms(test)
mterm2 <- drop.terms(mterm, 3)
predvars <- attr(mterm2, "predvars")
dataClasses <- attr(mterm2, "dataClasses")
factors <- attr(mterm2, "factors")
stopifnot(is.language(predvars), length(predvars) == length(dataClasses)+1,
all(names(dataClasses) == rownames(factors)))
## Previously dropped predvars and dataClasses
## prompt() did not escape percent signs properly
fn <- function(fmt = "%s") {}
f <- tempfile(fileext = ".Rd")
prompt(fn, filename = f)
rd <- tools::parse_Rd(f)
## Gave syntax errors because the percent sign in Usage
## was taken as the start of a comment.
## power.t.test() failure for very large n (etc): PR#15792
(ptt <- power.t.test(delta = 1e-4, sd = .35, power = .8))
(ppt <- power.prop.test(p1 = .5, p2 = .501, sig.level=.001, power=0.90, tol=1e-8))
stopifnot(all.equal(ptt$n, 192297000, tol = 1e-5),
all.equal(ppt$n, 10451937, tol = 1e-7))
## call to uniroot() did not allow n > 1e7
## save(*, ascii=TRUE): PR#16137
x0 <- x <- c(1, NA, NaN)
save(x, file=(sf <- tempfile()), ascii = TRUE)
load(sf)
stopifnot(identical(x0, x))
## x had 'NA' instead of 'NaN'
## PR#16205
stopifnot(length(glob2rx(character())) == 0L)
## was "^$" in R < 3.1.3
### Bugs fixed in R 3.2.0
## Bugs reported by Radford Neal
x <- pairlist(list(1, 2))
x[[c(1, 2)]] <- NULL # wrongly gave an error, referring to misuse
# of the internal SET_VECTOR_ELT procedure
stopifnot(identical(x, pairlist(list(1))))
a <- pairlist(10, 20, 30, 40, 50, 60)
dim(a) <- c(2, 3)
dimnames(a) <- list(c("a", "b"), c("x", "y", "z"))
# print(a) # doesn't print names, not fixed
a[["a", "x"]] <- 0
stopifnot(a[["a", "x"]] == 0)
## First gave a spurious error, second caused a seg.fault
## Radford (R-devel, June 24, 2014); M.Maechler
m <- matrix(1:2, 1,2); v <- 1:3
stopifnot(identical(crossprod(2, v), t(2) %*% v),
identical(crossprod(m, v), t(m) %*% v),
identical(5 %*% v, 5 %*% t(v)),
identical(tcrossprod(m, 1:2), m %*% 1:2) )
## gave error "non-conformable arguments" in R <= 3.2.0
## list <--> environment
L0 <- list()
stopifnot(identical(L0, as.list(as.environment(L0))))
## as.env..() did not work, and as.list(..) gave non-NULL names in R 3.1.x
## all.equal() for environments and refClass()es
RR <- setRefClass("Ex", fields = list(nr = "numeric"))
m1 <- RR$new(); m2 <- RR$new(); m3 <- RR$new(nr = pi); m4 <- RR$new(nr=3.14159)
ee <- emptyenv(); e2 <- new.env()
stopifnot(all.equal(ee,ee), identical(ee,ee), !identical(ee,e2), all.equal(ee,e2),
identical(m3,m3), !identical(m1,m2),
all.equal(m1,m2), !isTRUE(all.equal(m1,m3)), !isTRUE(all.equal(m1,m4)),
all.equal(m3,m4, tol=1e-6), grepl("relative difference", all.equal(m3,m4)),
TRUE)
## did not work in R 3.1.x
e3 <- new.env()
e3$p <- "p"; e2$p <- "p"; ae.p <- all.equal(e2,e3)
e3$q <- "q"; ae.q <- all.equal(e2,e3)
e2$q <- "Q"; ae.Q <- all.equal(e2,e3)
stopifnot(ae.p, grepl("^Length", ae.q), grepl("string mismatch", ae.Q))
e2$q <- "q"; e2$r <- pi; e3$r <- 3.14159265
stopifnot(all.equal(e2, e3),
grepl("relative difference", all.equal(e2, e3, tol=1e-10)))
g <- globalenv() # so it now contains itself
l <- list(e = g)
stopifnot(all.equal(g, g),
all.equal(l, l))
## these ran into infinite recursion error.
## missing() did not propagate through '...', PR#15707
check <- function(x,y,z) c(missing(x), missing(y), missing(z))
check1 <- function(...) check(...)
check2 <- function(...) check1(...)
stopifnot(identical(check2(one, , three), c(FALSE, TRUE, FALSE)))
## missing() was unable to handle recursive promises
## envRefClass prototypes are a bit special -- broke all.equal() for baseenv()
rc <- getClass("refClass")
rp <- rc@prototype
str(rp) ## failed
rp ## show() failed ..
(ner <- new("envRefClass")) # show() failed
stopifnot(all.equal(rp,rp), all.equal(ner,ner))
be <- baseenv()
system.time(stopifnot(all.equal(be,be)))## <- takes a few sec's
stopifnot(
grepl("not identical.*character", print(all.equal(rp, ner))),
grepl("not identical.*character", print(all.equal(ner, rp))))
system.time(stopifnot(all.equal(globalenv(), globalenv())))
## Much of the above failed in R <= 3.2.0
## while did not protect its argument, which caused an error
## under gctorture, PR#15990
gctorture()
suppressWarnings(while(c(FALSE, TRUE)) 1)
gctorture(FALSE)
## gave an error because the test got released when the warning was generated.
## hist(x, breaks =) with too large bins, PR#15988
set.seed(5); x <- runif(99)
Hist <- function(x, b) hist(x, breaks = b, plot = FALSE)$counts
for(k in 1:5) {
b0 <- seq_len(k-1)/k
H.ok <- Hist(x, c(-10, b0, 10))
for(In in c(1000, 1e9, Inf))
stopifnot(identical(Hist(x, c(-In, b0, In)), H.ok),
identical(Hist(x, c( 0, b0, In)), H.ok))
}
## "wrong" results for k in {2,3,4} in R 3.1.x
## eigen(*, symmetric = <default>) with asymmetric dimnames, PR#16151
m <- matrix(c(83,41), 5, 4,
dimnames=list(paste0("R",1:5), paste0("C",1:4)))[-5,] + 3*diag(4)
stopifnot( all.equal(eigen(m, only.values=TRUE) $ values,
c(251, 87, 3, 3), tol=1e-14) )
## failed, using symmetric=FALSE and complex because of the asymmetric dimnames()
## match.call() re-matching '...'
test <- function(x, ...) test2(x, 2, ...)
test2 <- function(x, ...) match.call(test2, sys.call())
stopifnot(identical(test(1, 3), quote(test2(x=x, 2, 3))))
## wrongly gave test2(x=x, 2, 2, 3) in R <= 3.1.2
## callGeneric not forwarding dots in call (PR#16141)
setGeneric("foo", function(x, ...) standardGeneric("foo"))
setMethod("foo", "character",
function(x, capitalize = FALSE) if (capitalize) toupper(x) else x)
setMethod("foo", "factor",
function(x, capitalize = FALSE) { x <- as.character(x); callGeneric() })
toto1 <- function(x, ...) foo(x, ...)
stopifnot(identical(toto1(factor("a"), capitalize = TRUE), "A"))
## wrongly did not capitalize in R <= 3.1.2
## Accessing non existing objects must be an error
tools::assertError(base :: foobar)
tools::assertError(base :::foobar)
tools::assertError(stats:::foobar)
tools::assertError(stats:: foobar)
## lazy data only via '::', not ':::' :
stopifnot( nrow(datasets:: swiss) == 47)
tools::assertError(datasets:::swiss)
## The ::: versions gave NULL in certain development versions of R
stopifnot(identical(stats4::show -> s4s,
get("show", asNamespace("stats4") -> ns4)),
s4s@package == "methods",
is.null(ns4[["show"]]) # not directly in stats4 ns
)
## stats4::show was NULL for 4 hours in R-devel
## mode<- did too much evaluation (PR#16215)
x <- y <- quote(-2^2)
x <- as.list(x)
mode(y) <- "list"
stopifnot(identical(x, y))
## y ended up containing -4, not -2^2
## besselJ()/besselY() with too large order
besselJ(1, 2^64) ## NaN with a warning
besselY(1, c(2^(60:70), Inf))
## seg.faulted in R <= 3.1.2
## besselJ()/besselY() with nu = k + 1/2; k in {-1,-2,..}
besselJ(1, -1750.5) ## Inf, with only one warning...
stopifnot(is.finite(besselY(1, .5 - (1500 + 0:10))))
## last gave NaNs; both: more warnings in R <= 3.1.x
## BIC() for arima(), also with NA's
lho <- lh; lho[c(3,7,13,17)] <- NA
alh300 <- arima(lh, order = c(3,0,0))
alh311 <- arima(lh, order = c(3,1,1))
ao300 <- arima(lho, order = c(3,0,0))
ao301 <- arima(lho, order = c(3,0,1))
## AIC/BIC for *different* data rarely makes sense ... want warning:
tools::assertWarning(AA <- AIC(alh300,alh311, ao300,ao301))
tools::assertWarning(BB <- BIC(alh300,alh311, ao300,ao301))
fmLst <- list(alh300,alh311, ao300,ao301)
## nobs() did not "work" in R < 3.2.0:
stopifnot(sapply(fmLst, nobs) == c(48,47, 44,44))
lls <- lapply(fmLst, logLik)
str(lapply(lls, unclass))# -> 'df' and 'nobs'
## 'manual BIC' via generalized AIC:
stopifnot(all.equal(BB[,"BIC"],
sapply(fmLst, function(fm) AIC(fm, k = log(nobs(fm))))))
## BIC() was NA unnecessarily in R < 3.2.0; nobs() was not available eiher
## as.integer() close and beyond maximal integer
MI <- .Machine$integer.max
stopifnot(identical( MI, as.integer( MI + 0.99)),
identical(-MI, as.integer(-MI - 0.99)),
is.na(as.integer(as.character( 100*MI))),
is.na(as.integer(as.character(-100*MI))))
## The two cases with positive numbers failed in R <= 3.2.0
## Ensure that sort() works with a numeric vector "which is an object":
stopifnot(is.object(y <- freeny$y))
stopifnot(diff(sort(y)) > 0)
## order() and hence sort() failed here badly for a while around 2015-04-16
## NAs in data frame names:
dn <- list(c("r1", NA), c("V", NA))
d11 <- as.data.frame(matrix(c(1, 1, 1, 1), ncol = 2, dimnames = dn))
stopifnot(identical(names(d11), dn[[2]]),
identical(row.names(d11), dn[[1]]))
## as.data.frame() failed in R-devel for a couple of hours ..
## note that format(d11) does fail currently, and hence print(), too
## Ensure R -e .. works on Unix
if(.Platform$OS.type == "unix" &&
file.exists(Rc <- file.path(R.home("bin"), "R")) &&
file.access(Rc, mode = 1) == 0) { # 1: executable
cmd <- paste(Rc, "-q --vanilla -e 1:3")
ans <- system(cmd, intern=TRUE)
stopifnot(length(ans) >= 3,
identical(ans[1:2], c("> 1:3",
"[1] 1 2 3")))
}
## (failed for < 1 hr, in R-devel only)
## Parsing large exponents of floating point numbers, PR#16358
set.seed(12)
lrg <- sprintf("%.0f", round(exp(10*(2+abs(rnorm(2^10))))))
head(huge <- paste0("1e", lrg))
micro <- paste0("1e-", lrg)
stopifnot(as.numeric(huge) == Inf,
as.numeric(micro) == 0)
## Both failed in R <= 3.2.0
## vcov() failed on manova() results, PR#16380
tear <- c(6.5, 6.2, 5.8, 6.5, 6.5, 6.9, 7.2, 6.9, 6.1, 6.3, 6.7, 6.6, 7.2, 7.1, 6.8, 7.1, 7.0, 7.2, 7.5, 7.6)
gloss <- c(9.5, 9.9, 9.6, 9.6, 9.2, 9.1, 10.0, 9.9, 9.5, 9.4, 9.1, 9.3, 8.3, 8.4, 8.5, 9.2, 8.8, 9.7, 10.1, 9.2)
opacity <- c(4.4, 6.4, 3.0, 4.1, 0.8, 5.7, 2.0, 3.9, 1.9, 5.7, 2.8, 4.1, 3.8,1.6, 3.4, 8.4, 5.2, 6.9, 2.7, 1.9)
Y <- cbind(tear, gloss, opacity)
rate <- factor(gl(2,10), labels = c("Low", "High"))
fit <- manova(Y ~ rate)
vcov(fit)
## Gave error because coef.aov() turned matrix of coefficients into a vector
## Unary / Binary uses of logic operations, PR#16385
tools::assertError(`&`(FALSE))
tools::assertError(`|`(TRUE))
## Did not give errors in R <= 3.2.0
E <- tryCatch(`!`(), error = function(e)e)
stopifnot(grepl("0 arguments .*\\<1", conditionMessage(E)))
## Gave wrong error message in R <= 3.2.0
stopifnot(identical(!matrix(TRUE), matrix(FALSE)),
identical(!matrix(FALSE), matrix(TRUE)))
## was wrong for while in R 3.2.0 patched
## cummax(<integer>)
iNA <- NA_integer_
x <- c(iNA, 1L)
stopifnot(identical(cummin(x), c(iNA, iNA)),
identical(cummax(x), c(iNA, iNA)))
## an initial NA was not propagated in R <= 3.2.0
## summaryRprof failed for very short profile, PR#16395
profile <- tempfile()
writeLines(c(
'memory profiling: sample.interval=20000',
':145341:345360:13726384:0:"stdout"',
':208272:345360:19600000:0:"stdout"'), profile)
summaryRprof(filename = profile, memory = "both")
unlink(profile)
## failed when a matrix was downgraded to a vector
## option(OutDec = *) -- now gives a warning when not 1 character
op <- options(OutDec = ".", digits = 7, # <- default
warn = 2)# <- (unexpected) warnings become errors
stopifnot(identical("3.141593", fpi <- format(pi)))
options(OutDec = ",")
stopifnot(identical("3,141593", cpi <- format(pi)))
## warnings, but it "works" (for now):
tools::assertWarning(options(OutDec = ".1."))
stopifnot(identical("3.1.141593", format(pi)))
tools::assertWarning(options(OutDec = ""))
tools::assertWarning(stopifnot(identical("3141593", format(pi))))
options(op)# back to sanity
## No warnings in R versions <= 3.2.1
## format(*, decimal.mark=".") when OutDec != "." (PR#16411)
op <- options(OutDec = ",")
stopifnot(identical(fpi, format(pi, decimal.mark=".")))
## failed in R <= 3.2.1
## model.frame() removed ts attributes on original data (PR#16436)
orig <- class(EuStockMarkets)
mf <- model.frame(EuStockMarkets ~ 1, na.action=na.fail)
stopifnot(identical(orig, class(EuStockMarkets)))
## ts class lost in R <= 3.2.1
##
foo <- as.expression(1:3)
matrix(foo, 3, 3) # always worked
matrix(foo, 3, 3, byrow = TRUE)
## failed in R <= 3.1.2
## labels.dendrogram(), dendrapply(), etc -- see comment #15 of PR#15215 :
(D <- as.dendrogram(hclust(dist(cbind(setNames(c(0,1,4), LETTERS[1:3]))))))
stopifnot(
identical(labels(D), c("C", "A", "B")),
## has been used in "CRAN package space"
identical(suppressWarnings(dendrapply(D, labels)),
list("C", list("A", "B"), "C")))
## dendrapply(D, labels) failed in R-devel for a day or two
## poly() / polym() predict()ion
library(datasets)
alm <- lm(stack.loss ~ poly(Air.Flow, Water.Temp, degree=3), stackloss)
f20 <- fitted(alm)[1:20] # "correct" prediction values [1:20]
stopifnot(all.equal(unname(f20[1:4]), c(39.7703378, 39.7703378, 35.8251359, 21.5661761)),
all.equal(f20, predict(alm, stackloss) [1:20] , tolerance = 1e-14),
all.equal(f20, predict(alm, stackloss[1:20, ]), tolerance = 1e-14))
## the second prediction went off in R <= 3.2.1
## PR#16478
kkk <- c("a\tb", "3.14\tx")
z1 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c("numeric", "character"))
z2 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(b = "character", a = "numeric"))
stopifnot(identical(z1, z2))
z3 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(b = "character"))
stopifnot(identical(z1, z3))
z4 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(c = "integer", b = "character", a = "numeric"))
stopifnot(identical(z1, z4))
## z2 and z4 used positional matching (and failed) in R < 3.3.0.
## PR#16484
z <- regexpr("(.)", NA_character_, perl = TRUE)
stopifnot(is.na(attr(z, "capture.start")), is.na(attr(z, "capture.length")))
## Result was random integers in R <= 3.2.2.
## PR#14861
if(.Platform$OS.type == "unix") { # no 'ls /' on Windows
con <- pipe("ls /", open = "rt")
data <- readLines(con)
z <- close(con)
print(z)
stopifnot(identical(z, 0L))
}
## was NULL in R <= 3.2.2
## Sam Steingold: compiler::enableJIT(3) not working in ~/.Rprofile anymore
stopifnot(identical(topenv(baseenv()),
baseenv()))
## accidentally globalenv in R 3.2.[12] only
## widths of unknown Unicode characters
stopifnot(nchar("\u200b", "w") == 0)
## was -1 in R 3.2.2
| 30,965 | gpl-2.0 |
fd4c28d11d4a235cb98895257de11aab895eb477 | ron1818/PhD_code | misc/outlier_fn.R | # outlier detection and removal functions
#http://stats.stackexchange.com/questions/1142/simple-algorithm-for-online-outlier-detection-of-a-generic-time-series
# Detect outliers in a (possibly seasonal) series via the IQR rule applied to
# detrended residuals: STL residuals when frequency > 1, loess residuals
# against time otherwise.
#
# x          series (coerced with as.ts)
# iqr.factor residuals more than iqr.factor * IQR beyond the residual
#            quartiles are flagged (1.5 = Tukey's rule)
# plot       if TRUE, plot the series with outliers marked in red and return
#            the indices invisibly; otherwise return them visibly
#
# Returns the integer indices of the detected outliers.
my_res_iqr_outliers <- function(x, iqr.factor = 1.5, plot = TRUE)
{
  x <- as.ts(x)
  if (frequency(x) > 1) {
    # seasonal series: residual component of a robust STL decomposition.
    # NB: stl() applies na.action as a *function*; the previous string
    # 'na.exclude' would have failed here.
    resid <- stl(x, s.window = "periodic", robust = TRUE,
                 na.action = na.exclude)$time.series[, 3]
  } else {
    # non-seasonal: residuals from a loess trend fit against time
    tt <- seq_along(x)
    resid <- residuals(loess(x ~ tt, na.action = na.exclude))
  }
  resid.q <- quantile(resid, probs = c(0.25, 0.75), na.rm = TRUE)
  iqr <- diff(resid.q)
  limits <- resid.q + iqr.factor * iqr * c(-1, 1)
  # score > 0 <=> residual outside the limits; magnitude is the overshoot
  # measured in IQR units
  score <- abs(pmin((resid - limits[1]) / iqr, 0) +
               pmax((resid - limits[2]) / iqr, 0))
  outlier.idx <- which(score > 0)
  if (plot) {
    plot(x)
    x2 <- ts(rep(NA, length(x)))
    x2[outlier.idx] <- x[outlier.idx]
    tsp(x2) <- tsp(x)
    points(x2, pch = 19, col = "red")
    return(invisible(outlier.idx))
  }
  outlier.idx
}
# Detect outliers as points exceeding a rolling upper threshold of
# median + threshold * MAD computed over a trailing window of `window`
# observations (requires zoo::rollapply to be attached).
#
# x         series (coerced with as.ts)
# window    width of the trailing window
# threshold number of MADs above the rolling median that counts as an outlier
# plot      if TRUE (default, matching previous behaviour) plot the series
#           with outliers marked in red
#
# Returns the integer indices of the detected outliers.
my_window_mad_outliers <- function(x, window = 30, threshold = 5, plot = TRUE)
{
  x <- as.ts(x)
  # rolling upper threshold for one window of values
  ut <- function(x, threshold) {
    median(x, na.rm = TRUE) + threshold * mad(x, na.rm = TRUE)
  }
  z <- rollapply(x, window, ut, threshold = threshold, align = 'right')
  z <- c(rep(z[1], window - 1), z) # Use z[1] throughout the initial period
  outlier.idx <- which(x > z)
  if (plot) {
    plot(x, type = "l")
    x2 <- ts(rep(NA, length(x)))
    x2[outlier.idx] <- x[outlier.idx]
    tsp(x2) <- tsp(x)
    points(x2, pch = 19, col = 'red')
  }
  outlier.idx
}
### rearrange data to vector
# Replace the values at outlier.idx with a causal estimate and interpolate
# internal NAs (via zoo::na.approx). If x carries a time index (ts/xts) it is
# restored on the result.
#
# x           series; ts, xts or plain numeric
# outlier.idx indices to replace (e.g. from my_res_iqr_outliers); NULL = none
#
# Returns the cleaned series (xts when a timestamp was present, else numeric).
my_delete_outlier <- function(x, outlier.idx = NULL)
{
  x <- na.approx(x)
  timestamp <- if (is.xts(x) || is.ts(x)) time(x) else NULL
  x.nooutlier <- as.numeric(x)
  # causal SMA-style replacement: each outlier uses only preceding values
  for (i in outlier.idx) {
    if (i > 2)
      x.nooutlier[i] <- 0.5 * (x.nooutlier[i - 2] + x.nooutlier[i - 1])
    else if (i == 2)
      # BUGFIX: was data(x.nooutlier[i-1]), which calls the dataset loader
      # data() instead of copying the previous value
      x.nooutlier[i] <- x.nooutlier[i - 1]
    else # i == 1: no history available, fall back to 0
      x.nooutlier[i] <- 0
  }
  if (!is.null(timestamp))
    x.nooutlier <- xts(x.nooutlier, timestamp)
  x.nooutlier
}
| 2,090 | gpl-3.0 |
e6b88a9458ee80e01c652bd346efc46bc75c83eb | brightchen/h2o-3 | h2o-r/tests/testdir_autoGen/runit_complexFilterTest_allyears2k_headers_38.R | ##
# Author: Autogenerated on 2013-12-18 17:01:19
# gitHash: 2581a0dfa12a51892283830529a5126ea49f0cb9
# SEED: 2481425483200553751
##
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
# Autogenerated h2o munge test: exercises compound row filtering with '<=' on a
# single numeric column of the airlines dataset, combined with column
# subsetting (both an explicit column list and its complement).
# NOTE(review): Log.info/locate/testEnd/doTest come from the sourced
# h2o-runit.R harness, not from this file.
complexFilterTest_allyears2k_headers_38 <- function() {
Log.info("A munge-task R unit test on data <allyears2k_headers> testing the functional unit <['', '<=']> ")
Log.info("Uploading allyears2k_headers")
hex <- h2o.importFile(locate("smalldata/airlines/allyears2k_headers.zip"), "rallyears2k_headers.hex")
# row filter only: keep rows with Cancelled <= threshold
Log.info("Performing compound task ( ( hex[,c(\"Cancelled\")] <= 0.252596694009 )) on dataset <allyears2k_headers>")
filterHex <- hex[( ( hex[,c("Cancelled")] <= 0.252596694009 )) ,]
# row filter on Diverted plus explicit column subset
Log.info("Performing compound task ( ( hex[,c(\"Diverted\")] <= 0.952276526236 )) on dataset allyears2k_headers, and also subsetting columns.")
filterHex <- hex[( ( hex[,c("Diverted")] <= 0.952276526236 )) , c("ActualElapsedTime","DepDelay","Diverted","DayOfWeek","Distance","TaxiIn","TaxiOut","CRSElapsedTime","ArrTime","CarrierDelay","CRSArrTime","DayofMonth","Year")]
# same row filter, selecting the complement set of columns
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[( ( hex[,c("Diverted")] <= 0.952276526236 )) , c("ArrDelay","SecurityDelay","Month","DepTime","Origin","CancellationCode","FlightNum","LateAircraftDelay","WeatherDelay","Cancelled","TailNum","AirTime","IsArrDelayed","CRSDepTime","IsDepDelayed","Dest","UniqueCarrier","NASDelay")]
testEnd()
}
# register and run the test through the h2o harness
doTest("compoundFilterTest_ on data allyears2k_headers unit= ['', '<=']", complexFilterTest_allyears2k_headers_38)
| 1,639 | apache-2.0 |
e6b88a9458ee80e01c652bd346efc46bc75c83eb | printedheart/h2o-3 | h2o-r/tests/testdir_autoGen/runit_complexFilterTest_allyears2k_headers_38.R | ##
# Author: Autogenerated on 2013-12-18 17:01:19
# gitHash: 2581a0dfa12a51892283830529a5126ea49f0cb9
# SEED: 2481425483200553751
##
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
# Autogenerated h2o munge test: filters rows of the airlines dataset with '<='
# on one numeric column, then repeats the filter while subsetting columns
# (explicit list and its complement).
# NOTE(review): Log.info/locate/testEnd/doTest are supplied by the sourced
# h2o-runit.R harness.
complexFilterTest_allyears2k_headers_38 <- function() {
Log.info("A munge-task R unit test on data <allyears2k_headers> testing the functional unit <['', '<=']> ")
Log.info("Uploading allyears2k_headers")
hex <- h2o.importFile(locate("smalldata/airlines/allyears2k_headers.zip"), "rallyears2k_headers.hex")
# row filter only
Log.info("Performing compound task ( ( hex[,c(\"Cancelled\")] <= 0.252596694009 )) on dataset <allyears2k_headers>")
filterHex <- hex[( ( hex[,c("Cancelled")] <= 0.252596694009 )) ,]
# row filter plus explicit column subset
Log.info("Performing compound task ( ( hex[,c(\"Diverted\")] <= 0.952276526236 )) on dataset allyears2k_headers, and also subsetting columns.")
filterHex <- hex[( ( hex[,c("Diverted")] <= 0.952276526236 )) , c("ActualElapsedTime","DepDelay","Diverted","DayOfWeek","Distance","TaxiIn","TaxiOut","CRSElapsedTime","ArrTime","CarrierDelay","CRSArrTime","DayofMonth","Year")]
# same filter, complement column set
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[( ( hex[,c("Diverted")] <= 0.952276526236 )) , c("ArrDelay","SecurityDelay","Month","DepTime","Origin","CancellationCode","FlightNum","LateAircraftDelay","WeatherDelay","Cancelled","TailNum","AirTime","IsArrDelayed","CRSDepTime","IsDepDelayed","Dest","UniqueCarrier","NASDelay")]
testEnd()
}
# register and run through the h2o harness
doTest("compoundFilterTest_ on data allyears2k_headers unit= ['', '<=']", complexFilterTest_allyears2k_headers_38)
| 1,639 | apache-2.0 |
3f81c50745c0a090adc1f45154855ee20a161ad4 | nathan-russell/r-source | tests/reg-tests-1c.R | ## Regression tests for R >= 3.0.0
pdf("reg-tests-1c.pdf", encoding = "ISOLatin1.enc")
## mapply with classed objects with length method
## was not documented to work in 2.x.y
setClass("A", representation(aa = "integer"))
a <- new("A", aa = 101:106)
setMethod("length", "A", function(x) length(x@aa))
setMethod("[[", "A", function(x, i, j, ...) x@aa[[i]])
(z <- mapply(function(x, y) {x * y}, a, rep(1:3, 2)))
stopifnot(z == c(101, 204, 309, 104, 210, 318))
## reported as a bug (which it was not) by H. Pages in
## https://stat.ethz.ch/pipermail/r-devel/2012-November/065229.html
## recyling in split()
## https://stat.ethz.ch/pipermail/r-devel/2013-January/065700.html
x <- 1:6
y <- split(x, 1:2)
class(x) <- "ABC" ## class(x) <- "A" creates an invalid object
yy <- split(x, 1:2)
stopifnot(identical(y, yy))
## were different in R < 3.0.0
## dates with fractional seconds after 2038 (PR#15200)
## Extremely speculative!
z <- as.POSIXct(2^31+c(0.4, 0.8), origin=ISOdatetime(1970,1,1,0,0,0,tz="GMT"))
zz <- format(z)
stopifnot(zz[1] == zz[2])
## printed form rounded not truncated in R < 3.0.0
## origin coerced in tz and not GMT by as.POSIXct.numeric()
x <- as.POSIXct(1262304000, origin="1970-01-01", tz="EST")
y <- as.POSIXct(1262304000, origin=.POSIXct(0, "GMT"), tz="EST")
stopifnot(identical(x, y))
## Handling records with quotes in names
x <- c("a b' c",
"'d e' f g",
"h i 'j",
"k l m'")
y <- data.frame(V1 = c("a", "d e", "h"), V2 = c("b'", "f", "i"), V3 = c("c", "g", "j\nk l m"))
f <- tempfile()
writeLines(x, f)
stopifnot(identical(count.fields(f), c(3L, 3L, NA_integer_, 3L)))
stopifnot(identical(read.table(f), y))
stopifnot(identical(scan(f, ""), as.character(t(as.matrix(y)))))
## docu always said 'length 1 is sorted':
stopifnot(!is.unsorted(NA))
## str(.) for large factors should be fast:
u <- as.character(runif(1e5))
t1 <- max(0.001, system.time(str(u))[[1]]) # get a baseline > 0
uf <- factor(u)
(t2 <- system.time(str(uf))[[1]]) / t1 # typically around 1--2
stopifnot(t2 / t1 < 30)
## was around 600--850 for R <= 3.0.1
## ftable(<array with unusual dimnames>)
(m <- matrix(1:12, 3,4, dimnames=list(ROWS=paste0("row",1:3), COLS=NULL)))
ftable(m)
## failed to format (and hence print) because of NULL 'COLS' dimnames
## regression test formerly in kmeans.Rd, but result differs by platform
## Artificial example [was "infinite loop" on x86_64; PR#15364]
rr <- c(rep(-0.4, 5), rep(-0.4- 1.11e-16, 14), -.5)
r. <- signif(rr, 12)
k3 <- kmeans(rr, 3, trace=2) ## Warning: Quick-Transfer.. steps exceed
try ( k. <- kmeans(r., 3) ) # after rounding, have only two distinct points
k. <- kmeans(r., 2) # fine
## PR#15376
stem(c(1, Inf))
## hung in 3.0.1
## PR#15377, very long variable names
x <- 1:10
y <- x + rnorm(10)
z <- y + rnorm(10)
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy <- y
fit <- lm(cbind(yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, z) ~ x)
## gave spurious error message in 3.0.1.
## PR#15341 singular complex matrix in rcond()
set.seed(11)
n <- 5
A <- matrix(runif(n*n),nrow=n)
B <- matrix(runif(n*n),nrow=n)
B[n,] <- (B[n-1,]+B[n-2,])/2
rcond(B)
B <- B + 0i
rcond(B)
## gave error message (OK) in R 3.0.1: now returns 0 as in real case.
## Misuse of formatC as in PR#15303
days <- as.Date(c("2012-02-02", "2012-03-03", "2012-05-05"))
(z <- formatC(days))
stopifnot(!is.object(z), is.null(oldClass(z)))
## used to copy over class in R < 3.0.2.
## PR15219
val <- sqrt(pi)
fun <- function(x) (-log(x))^(-1/2)
(res <- integrate(fun, 0, 1, rel.tol = 1e-4))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, 1, rel.tol = 1e-6))
stopifnot(abs(res$value - val) < res$abs.error)
res <- integrate(fun, 0, 1, rel.tol = 1e-8)
stopifnot(abs(res$value - val) < res$abs.error)
fun <- function(x) x^(-1/2)*exp(-x)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-4))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-6))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-8))
stopifnot(abs(res$value - val) < res$abs.error)
## sometimes exceeded reported error in 2.12.0 - 3.0.1
## Unary + should coerce
x <- c(TRUE, FALSE, NA, TRUE)
stopifnot(is.integer(+x))
## +x was logical in R <= 3.0.1
## Attritbutes of value of unary operators
# +x, -x were ts, !x was not in 3.0.2
x <- ts(c(a=TRUE, b=FALSE, c=NA, d=TRUE), frequency = 4, start = 2000)
x; +x; -x; !x
stopifnot(is.ts(!x), !is.ts(+x), !is.ts(-x))
# +x, -x were ts, !x was not in 3.0.2
x <- ts(c(a=1, b=2, c=0, d=4), frequency = 4, start = 2010)
x; +x; -x; !x
stopifnot(!is.ts(!x), is.ts(+x), is.ts(-x))
##
## regression test incorrectly in colorRamp.Rd
bb <- colorRampPalette(2)(4)
stopifnot(bb[1] == bb)
## special case, invalid in R <= 2.15.0:
## Setting NAMED on ... arguments
f <- function(...) { x <- (...); x[1] <- 7; (...) }
stopifnot(f(1+2) == 3)
## was 7 in 3.0.1
## copying attributes from only one arg of a binary operator.
A <- array(c(1), dim = c(1L,1L), dimnames = list("a", 1))
x <- c(a = 1)
B <- A/(pi*x)
stopifnot(is.null(names(B)))
## was wrong in R-devel in Aug 2013
## needed an un-NAMED rhs.
## lgamma(x) for very small negative x
X <- 3e-308; stopifnot(identical(lgamma(-X), lgamma(X)))
## lgamma(-X) was NaN in R <= 3.0.1
## PR#15413
z <- subset(data.frame(one = numeric()), select = one)
stopifnot(nrow(z) == 0L)
## created a row prior to 3.0.2
## https://stat.ethz.ch/pipermail/r-devel/2013-September/067524.html
dbeta(0.9, 9.9e307, 10)
dbeta(0.1, 9, 9.9e307)
dbeta(0.1, 9.9e307, 10)
## first two hung in R <= 3.0.2
## PR#15465
provideDimnames(matrix(nrow = 0, ncol = 1))
provideDimnames(table(character()))
as.data.frame(table(character()))
## all failed in 3.0.2
## PR#15004
n <- 10
s <- 3
l <- 10000
m <- 20
x <- data.frame(x1 = 1:n, x2 = 1:n)
by <- data.frame(V1 = factor(rep(1:3, n %/% s + 1)[1:n], levels = 1:s))
for(i in 1:m) {
by[[i + 1]] <- factor(rep(l, n), levels = 1:l)
}
agg <- aggregate.data.frame(x, by, mean)
stopifnot(nrow(unique(by)) == nrow(agg))
## rounding caused groups to be falsely merged
## PR#15454
set.seed(357)
z <- matrix(c(runif(50, -1, 1), runif(50, -1e-190, 1e-190)), nrow = 10)
contour(z)
## failed because rounding made crossing tests inconsistent
## Various cases where zero length vectors were not handled properly
## by functions in base and utils, including PR#15499
y <- as.data.frame(list())
format(y)
format(I(integer()))
gl(0, 2)
z <- list(numeric(0), 1)
stopifnot(identical(relist(unlist(z), z), z))
summary(y)
## all failed in 3.0.2
## PR#15518 Parser catching errors in particular circumstance:
(ee <- tryCatch(parse(text = "_"), error= function(e)e))
stopifnot(inherits(ee, "error"))
## unexpected characters caused the parser to segfault in 3.0.2
## nonsense value of nmax
unique(1:3, nmax = 1)
## infinite-looped in 3.0.2, now ignored.
## besselI() (and others), now using sinpi() etc:
stopifnot(all.equal(besselI(2.125,-5+1/1024),
0.02679209380095711, tol= 8e-16),
all.equal(lgamma(-12+1/1024), -13.053274367453049, tol=8e-16))
## rel.error was 1.5e-13 / 7.5e-14 in R <= 3.0.x
ss <- sinpi(2*(-10:10)-2^-12)
tt <- tanpi( (-10:10)-2^-12)
stopifnot(ss == ss[1], tt == tt[1], # as internal arithmetic must be exact here
all.equal(ss[1], -0.00076699031874270453, tol=8e-16),
all.equal(tt[1], -0.00076699054434309260, tol=8e-16))
## (checked via Rmpfr) The above failed during development
## PR#15535 c() "promoted" raw vectors to bad logical values
stopifnot( c(as.raw(11), TRUE) == TRUE )
## as.raw(11) became a logical value coded as 11,
## and did not test equal to TRUE.
## PR#15564
fit <- lm(rnorm(10) ~ I(1:10))
predict(fit, interval = "confidence", scale = 1)
## failed in <= 3.0.2 with object 'w' not found
## PR#15534 deparse() did not produce reparseable complex vectors
## Verify that deparse() output parses and evaluates back to a value identical
## to the original; stop() with a descriptive message otherwise.
assert.reparsable <- function(sexp) {
  txt <- paste(deparse(sexp), collapse = " ")
  back <- tryCatch(eval(parse(text = txt)[[1]]), error = function(e) NULL)
  if (is.null(back))
    stop(sprintf("Deparsing produced invalid syntax: %s", txt))
  if (!identical(back, sexp))
    stop(sprintf("Deparsing produced change: value is not %s", back))
}
assert.reparsable(1)
assert.reparsable("string")
assert.reparsable(2+3i)
assert.reparsable(1:10)
assert.reparsable(c(NA, 12, NA, 14))
assert.reparsable(as.complex(NA))
assert.reparsable(complex(real=Inf, i=4))
assert.reparsable(complex(real=Inf, i=Inf))
assert.reparsable(complex(real=Inf, i=-Inf))
assert.reparsable(complex(real=3, i=-Inf))
assert.reparsable(complex(real=3, i=NaN))
assert.reparsable(complex(r=NaN, i=0))
assert.reparsable(complex(real=NA, i=1))
assert.reparsable(complex(real=1, i=NA))
## last 7 all failed
## PR#15621 backticks could not be escaped
stopifnot(deparse(as.name("`"), backtick=TRUE) == "`\\``")
assign("`", TRUE)
`\``
tools::assertError(parse("```"))
##
## We document tanpi(0.5) etc to be NaN
stopifnot(is.nan(tanpi(c(0.5, 1.5, -0.5, -1.5))))
## That is not required for system implementations, and some give +/-Inf
## PR#15642 segfault when parsing overflowing reals
as.double("1e1000")
ll <- ml <- list(1,2); dim(ml) <- 2:1
ali <- all.equal(list( ), identity) # failed in R-devel for ~ 30 hours
al1 <- all.equal(list(1), identity) # failed in R < 3.1.0
stopifnot(length(ali) == 3, grepl("list", ali[1]),
grepl("length", ali[2], ignore.case=TRUE),
is.character(al1), length(al1) >= 2,
all.equal(ml, ml),
all.equal(ll, ml, check.attributes=FALSE))
## PR#15699 aggregate failed when there were no grouping variables
dat <- data.frame(Y = runif(10), X = sample(LETTERS[1:3], 10, TRUE))
aggregate(Y ~ 1, FUN = mean, data = dat)
## merge() with duplicated column names, similar to PR#15618
X <- data.frame(Date = c("1967-02-01", "1967-02-02", "1967-02-03"),
Settle.x = c(NA, NA, NA), Settle.y = c(NA, NA, NA),
Settle = c(35.4, 35.15, 34.95))
Y <- data.frame(Date = c("2013-12-10", "2013-12-11", "2013-12-12"),
Settle = c(16.44, 16.65, 16.77))
merge(X, Y, by = "Date", all = TRUE)
## failed in R < 3.1.0: now warns (correctly).
## PR#15679
## Build a nested list of the given depth in which every level stores its
## single child (an empty list at the bottom) under the name `key`.
badstructure <- function(depth, key)
{
  child <- list()
  if (depth != 1L)
    child <- list(badstructure(depth - 1L, key))
  out <- list()
  out[[key]] <- child
  out
}
badstructure(20, "children")
## overran, segfaulted for the original reporter.
## PR#15702 and PR#15703
d <- as.dendrogram(hclust(dist(sin(1:7))))
(dl <- d[[c(2,1,2)]]) # single-leaf dendrogram
stopifnot(inherits(dl, "dendrogram"), is.leaf(dl),
identical(order.dendrogram(dl), as.vector(dl)),
identical(d, as.dendrogram(d)))
## as.dendrogram() was hidden; order.*() failed for leaf
## using *named* method
hw <- hclust(dist(sqrt(1:5)), method=c(M = "ward"))
## failed for 2 days in R-devel/-alpha
## PR#15758
my_env <- new.env(); my_env$one <- 1L
save(one, file = tempfile(), envir = my_env)
## failed in R < 3.1.1.
## Conversion to numeric in boundary case
ch <- "0x1.ffa0000000001p-1"
rr <- type.convert(ch, numerals = "allow.loss")
rX <- type.convert(ch, numerals = "no.loss")
stopifnot(is.numeric(rr), identical(rr, rX),
all.equal(rr, 0.999267578125),
all.equal(type.convert(ch, numerals = "warn"),
type.convert("0x1.ffap-1",numerals = "warn"), tol = 5e-15))
## type.convert(ch) was not numeric in R 3.1.0
##
ch <- "1234567890123456789"
rr <- type.convert(ch, numerals = "allow.loss")
rX <- type.convert(ch, numerals = "no.loss")
rx <- type.convert(ch, numerals = "no.loss", as.is = TRUE)
tools::assertWarning(r. <- type.convert(ch, numerals = "warn.loss"))
stopifnot(is.numeric(rr), identical(rr, r.), all.equal(rr, 1.234567890e18),
is.factor(rX), identical(rx, ch))
## PR#15764: integer overflow could happen without a warning or giving NA
tools::assertWarning(ii <- 1980000020L + 222000000L)
stopifnot(is.na(ii))
tools::assertWarning(ii <- (-1980000020L) + (-222000000L))
stopifnot(is.na(ii))
tools::assertWarning(ii <- (-1980000020L) - 222000000L)
stopifnot(is.na(ii))
tools::assertWarning(ii <- 1980000020L - (-222000000L))
stopifnot(is.na(ii))
## first two failed for some version of clang in R < 3.1.1
## PR#15735: formulae with exactly 32 variables
myFormula <- as.formula(paste(c("y ~ x0", paste0("x", 1:30)), collapse = "+"))
ans <- update(myFormula, . ~ . - w1)
stopifnot(identical(ans, myFormula))
updateArgument <-
as.formula(paste(c(". ~ . ", paste0("w", 1:30)), collapse = " - "))
ans2 <- update(myFormula, updateArgument)
stopifnot(identical(ans2, myFormula))
## PR#15753
0x110p-5L
stopifnot(.Last.value == 8.5)
## was 272 with a garbled message in R 3.0.0 - 3.1.0.
## numericDeriv failed to duplicate variables in
## the expression before modifying them. PR#15849
x <- 10; y <- 10
d1 <- numericDeriv(quote(x+y),c("x","y"))
x <- y <- 10
d2 <- numericDeriv(quote(x+y),c("x","y"))
stopifnot(identical(d1,d2))
## The second gave the wrong answer
## prettyNum(x, zero.print = .) failed when x had NAs
pp <- sapply(list(TRUE, FALSE, ".", " "), function(.)
prettyNum(c(0:1,NA), zero.print = . ))
stopifnot(identical(pp[1,], c("0", " ", ".", " ")),
pp[2:3,] == c("1","NA"))
## all 4 prettyNum() would error out
## checking all.equal() with externalptr
library(methods) # getClass()'s versionKey is an e.ptr
cA <- getClass("ANY")
stopifnot(all.equal(cA, cA),
is.character(all.equal(cA, getClass("S4"))))
# both all.equal() failed in R <= 3.1.1
## as.hexmode(x), as.octmode(x) when x is double
x <- c(NA, 1)
stopifnot(identical(x == x,
as.hexmode(x) == as.octmode(x)))
p <- c(1, pi)
tools::assertError(as.hexmode(p))
tools::assertError(as.octmode(p))
## where all "wrong" in R <= 3.1.1
## PR#15935
y <- 1:3
drop1(lm(y ~ 1))
drop1(glm(y ~ 1))
stats:::drop1.default(glm(y ~ 1))
## gave error in R < 3.1.2
## getAnywhere() wrongly dealing with namespace hidden list object
nm <- deparse(body(pbinom)[[2]])# == "C_pbinom" currently
gg <- getAnywhere(nm)
stopifnot(length(gg$objs) == 1)
## was 4 and printed "4 differing objects matching ‘C_pbinom’ ..." in R <= 3.1.1
## 0-length consistency of options(), PR#15979
stopifnot(identical(options(list()), options(NULL)))
## options(list()) failed in R <= 3.1.1
## merge.dendrogram(), PR#15648
# Build a dendrogram over n leaves labelled <lab>1..<lab>n from a random
# symmetric dissimilarity matrix drawn by rGen.
mkDend <- function(n, lab, rGen = function(n) 1+round(16*abs(rnorm(n)))) {
stopifnot(is.numeric(n), length(n) == 1, n >= 1, is.character(lab))
a <- matrix(rGen(n*n), n, n)
colnames(a) <- rownames(a) <- paste0(lab, 1:n)
# a + t(a) symmetrizes the random matrix so as.dist() is well defined
as.dendrogram(hclust(as.dist(a + t(a))))
}
set.seed(7)
da <- mkDend(4, "A")
db <- mkDend(3, "B")
d.ab <- merge(da, db)
hcab <- as.hclust(d.ab)
stopifnot(hcab$order == c(2, 4, 1, 3, 7, 5, 6),
hcab$labels == c(paste0("A", 1:4), paste0("B", 1:3)))
## was wrong in R <= 3.1.1
## bw.SJ() and similar with NA,Inf values, PR#16024
try(bw.SJ (c(NA,2,3)))
try(bw.bcv(c(-Inf,2,3)))
try(bw.ucv(c(1,NaN,3,4)))
## seg.faulted in 3.0.0 <= R <= 3.1.1
## as.dendrogram() with wrong input
x <- rbind(c( -6, -9), c( 0, 13),
c(-15, 6), c(-14, 0), c(12,-10))
dx <- dist(x,"manhattan")
hx <- hclust(dx)
hx$merge <- matrix(c(-3, 1, -2, 3,
-4, -5, 2, 3), 4,2)
tools::assertError(as.dendrogram(hx))
## 8 member dendrogram and memory explosion for larger examples in R <= 3.1.2
## abs with named args failed, PR#16047
abs(x=1i)
## Complained that the arg should be named z
## Big exponents overflowed, PR#15976
x <- 0E4933
y <- 0x0p100000
stopifnot(x == 0, y == 0)
##
## drop.terms() dropped some attributes, PR#16029
test <- model.frame(Employed ~ Year + poly(GNP,3) + Population, data=longley)
mterm <- terms(test)
mterm2 <- drop.terms(mterm, 3)
predvars <- attr(mterm2, "predvars")
dataClasses <- attr(mterm2, "dataClasses")
factors <- attr(mterm2, "factors")
stopifnot(is.language(predvars), length(predvars) == length(dataClasses)+1,
all(names(dataClasses) == rownames(factors)))
## Previously dropped predvars and dataClasses
## prompt() did not escape percent signs properly
fn <- function(fmt = "%s") {}
f <- tempfile(fileext = ".Rd")
prompt(fn, filename = f)
rd <- tools::parse_Rd(f)
## Gave syntax errors because the percent sign in Usage
## was taken as the start of a comment.
## power.t.test() failure for very large n (etc): PR#15792
(ptt <- power.t.test(delta = 1e-4, sd = .35, power = .8))
(ppt <- power.prop.test(p1 = .5, p2 = .501, sig.level=.001, power=0.90, tol=1e-8))
stopifnot(all.equal(ptt$n, 192297000, tol = 1e-5),
all.equal(ppt$n, 10451937, tol = 1e-7))
## call to uniroot() did not allow n > 1e7
## save(*, ascii=TRUE): PR#16137
x0 <- x <- c(1, NA, NaN)
save(x, file=(sf <- tempfile()), ascii = TRUE)
load(sf)
stopifnot(identical(x0, x))
## x had 'NA' instead of 'NaN'
## PR#16205
stopifnot(length(glob2rx(character())) == 0L)
## was "^$" in R < 3.1.3
### Bugs fixed in R 3.2.0
## Bugs reported by Radford Neal
x <- pairlist(list(1, 2))
x[[c(1, 2)]] <- NULL # wrongly gave an error, referring to misuse
# of the internal SET_VECTOR_ELT procedure
stopifnot(identical(x, pairlist(list(1))))
a <- pairlist(10, 20, 30, 40, 50, 60)
dim(a) <- c(2, 3)
dimnames(a) <- list(c("a", "b"), c("x", "y", "z"))
# print(a) # doesn't print names, not fixed
a[["a", "x"]] <- 0
stopifnot(a[["a", "x"]] == 0)
## First gave a spurious error, second caused a seg.fault
## Radford (R-devel, June 24, 2014); M.Maechler
m <- matrix(1:2, 1,2); v <- 1:3
stopifnot(identical(crossprod(2, v), t(2) %*% v),
identical(crossprod(m, v), t(m) %*% v),
identical(5 %*% v, 5 %*% t(v)),
identical(tcrossprod(m, 1:2), m %*% 1:2) )
## gave error "non-conformable arguments" in R <= 3.2.0
## list <--> environment
L0 <- list()
stopifnot(identical(L0, as.list(as.environment(L0))))
## as.env..() did not work, and as.list(..) gave non-NULL names in R 3.1.x
## all.equal() for environments and refClass()es
RR <- setRefClass("Ex", fields = list(nr = "numeric"))
m1 <- RR$new(); m2 <- RR$new(); m3 <- RR$new(nr = pi); m4 <- RR$new(nr=3.14159)
ee <- emptyenv(); e2 <- new.env()
stopifnot(all.equal(ee,ee), identical(ee,ee), !identical(ee,e2), all.equal(ee,e2),
identical(m3,m3), !identical(m1,m2),
all.equal(m1,m2), !isTRUE(all.equal(m1,m3)), !isTRUE(all.equal(m1,m4)),
all.equal(m3,m4, tol=1e-6), grepl("relative difference", all.equal(m3,m4)),
TRUE)
## did not work in R 3.1.x
e3 <- new.env()
e3$p <- "p"; e2$p <- "p"; ae.p <- all.equal(e2,e3)
e3$q <- "q"; ae.q <- all.equal(e2,e3)
e2$q <- "Q"; ae.Q <- all.equal(e2,e3)
stopifnot(ae.p, grepl("^Length", ae.q), grepl("string mismatch", ae.Q))
e2$q <- "q"; e2$r <- pi; e3$r <- 3.14159265
stopifnot(all.equal(e2, e3),
grepl("relative difference", all.equal(e2, e3, tol=1e-10)))
g <- globalenv() # so it now contains itself
l <- list(e = g)
stopifnot(all.equal(g, g),
all.equal(l, l))
## these ran into infinite recursion error.
## missing() did not propagate through '...', PR#15707
## check() reports which of its three formal arguments are missing;
## check1()/check2() forward their dots unchanged, so missingness must
## survive two levels of '...' forwarding (i.e. recursive promises).
check <- function(x,y,z) c(missing(x), missing(y), missing(z))
check1 <- function(...) check(...)
check2 <- function(...) check1(...)
## 'one' and 'three' are never evaluated (only missing() is applied), so
## they need not exist; the empty second argument must report TRUE.
stopifnot(identical(check2(one, , three), c(FALSE, TRUE, FALSE)))
## missing() was unable to handle recursive promises
## envRefClass prototypes are a bit special -- broke all.equal() for baseenv()
rc <- getClass("refClass")
rp <- rc@prototype
str(rp) ## failed
rp ## show() failed ..
(ner <- new("envRefClass")) # show() failed
stopifnot(all.equal(rp,rp), all.equal(ner,ner))
be <- baseenv()
system.time(stopifnot(all.equal(be,be)))## <- takes a few sec's
stopifnot(
grepl("not identical.*character", print(all.equal(rp, ner))),
grepl("not identical.*character", print(all.equal(ner, rp))))
system.time(stopifnot(all.equal(globalenv(), globalenv())))
## Much of the above failed in R <= 3.2.0
## while did not protect its argument, which caused an error
## under gctorture, PR#15990
gctorture()
suppressWarnings(while(c(FALSE, TRUE)) 1)
gctorture(FALSE)
## gave an error because the test got released when the warning was generated.
## hist(x, breaks =) with too large bins, PR#15988
set.seed(5); x <- runif(99)
## Counts-only histogram for an explicit break vector (no plotting).
Hist <- function(x, b) hist(x, breaks = b, plot = FALSE)$counts
## For k = 1..5 equally spaced interior break points, the bin counts must
## not depend on how extreme the two outer break points are (moderately
## large, huge finite, or infinite bounds all give the same counts).
for(k in 1:5) {
b0 <- seq_len(k-1)/k
H.ok <- Hist(x, c(-10, b0, 10))
for(In in c(1000, 1e9, Inf))
stopifnot(identical(Hist(x, c(-In, b0, In)), H.ok),
identical(Hist(x, c( 0, b0, In)), H.ok))
}
## "wrong" results for k in {2,3,4} in R 3.1.x
## eigen(*, symmetric = <default>) with asymmetric dimnames, PR#16151
m <- matrix(c(83,41), 5, 4,
dimnames=list(paste0("R",1:5), paste0("C",1:4)))[-5,] + 3*diag(4)
stopifnot( all.equal(eigen(m, only.values=TRUE) $ values,
c(251, 87, 3, 3), tol=1e-14) )
## failed, using symmetric=FALSE and complex because of the asymmetric dimnames()
## match.call() re-matching '...'
test <- function(x, ...) test2(x, 2, ...)
test2 <- function(x, ...) match.call(test2, sys.call())
stopifnot(identical(test(1, 3), quote(test2(x=x, 2, 3))))
## wrongly gave test2(x=x, 2, 2, 3) in R <= 3.1.2
## callGeneric not forwarding dots in call (PR#16141)
setGeneric("foo", function(x, ...) standardGeneric("foo"))
setMethod("foo", "character",
function(x, capitalize = FALSE) if (capitalize) toupper(x) else x)
setMethod("foo", "factor",
function(x, capitalize = FALSE) { x <- as.character(x); callGeneric() })
toto1 <- function(x, ...) foo(x, ...)
stopifnot(identical(toto1(factor("a"), capitalize = TRUE), "A"))
## wrongly did not capitalize in R <= 3.1.2
## Accessing non existing objects must be an error
tools::assertError(base :: foobar)
tools::assertError(base :::foobar)
tools::assertError(stats:::foobar)
tools::assertError(stats:: foobar)
## lazy data only via '::', not ':::' :
stopifnot( nrow(datasets:: swiss) == 47)
tools::assertError(datasets:::swiss)
## The ::: versions gave NULL in certain development versions of R
stopifnot(identical(stats4::show -> s4s,
get("show", asNamespace("stats4") -> ns4)),
s4s@package == "methods",
is.null(ns4[["show"]]) # not directly in stats4 ns
)
## stats4::show was NULL for 4 hours in R-devel
## mode<- did too much evaluation (PR#16215)
x <- y <- quote(-2^2)
x <- as.list(x)
mode(y) <- "list"
stopifnot(identical(x, y))
## y ended up containing -4, not -2^2
## besselJ()/besselY() with too large order
besselJ(1, 2^64) ## NaN with a warning
besselY(1, c(2^(60:70), Inf))
## seg.faulted in R <= 3.1.2
## besselJ()/besselY() with nu = k + 1/2; k in {-1,-2,..}
besselJ(1, -1750.5) ## Inf, with only one warning...
stopifnot(is.finite(besselY(1, .5 - (1500 + 0:10))))
## last gave NaNs; both: more warnings in R <= 3.1.x
## BIC() for arima(), also with NA's
lho <- lh; lho[c(3,7,13,17)] <- NA
alh300 <- arima(lh, order = c(3,0,0))
alh311 <- arima(lh, order = c(3,1,1))
ao300 <- arima(lho, order = c(3,0,0))
ao301 <- arima(lho, order = c(3,0,1))
## AIC/BIC for *different* data rarely makes sense ... want warning:
tools::assertWarning(AA <- AIC(alh300,alh311, ao300,ao301))
tools::assertWarning(BB <- BIC(alh300,alh311, ao300,ao301))
fmLst <- list(alh300,alh311, ao300,ao301)
## nobs() did not "work" in R < 3.2.0:
stopifnot(sapply(fmLst, nobs) == c(48,47, 44,44))
lls <- lapply(fmLst, logLik)
str(lapply(lls, unclass))# -> 'df' and 'nobs'
## 'manual BIC' via generalized AIC:
stopifnot(all.equal(BB[,"BIC"],
sapply(fmLst, function(fm) AIC(fm, k = log(nobs(fm))))))
## BIC() was NA unnecessarily in R < 3.2.0; nobs() was not available either
## as.integer() close and beyond maximal integer
MI <- .Machine$integer.max
stopifnot(identical( MI, as.integer( MI + 0.99)),
identical(-MI, as.integer(-MI - 0.99)),
is.na(as.integer(as.character( 100*MI))),
is.na(as.integer(as.character(-100*MI))))
## The two cases with positive numbers failed in R <= 3.2.0
## Ensure that sort() works with a numeric vector "which is an object":
stopifnot(is.object(y <- freeny$y))
stopifnot(diff(sort(y)) > 0)
## order() and hence sort() failed here badly for a while around 2015-04-16
## NAs in data frame names:
dn <- list(c("r1", NA), c("V", NA))
d11 <- as.data.frame(matrix(c(1, 1, 1, 1), ncol = 2, dimnames = dn))
stopifnot(identical(names(d11), dn[[2]]),
identical(row.names(d11), dn[[1]]))
## as.data.frame() failed in R-devel for a couple of hours ..
## note that format(d11) does fail currently, and hence print(), too
## Ensure R -e .. works on Unix
if(.Platform$OS.type == "unix" &&
file.exists(Rc <- file.path(R.home("bin"), "R")) &&
file.access(Rc, mode = 1) == 0) { # 1: executable
cmd <- paste(Rc, "-q --vanilla -e 1:3")
ans <- system(cmd, intern=TRUE)
stopifnot(length(ans) >= 3,
identical(ans[1:2], c("> 1:3",
"[1] 1 2 3")))
}
## (failed for < 1 hr, in R-devel only)
## Parsing large exponents of floating point numbers, PR#16358
set.seed(12)
lrg <- sprintf("%.0f", round(exp(10*(2+abs(rnorm(2^10))))))
head(huge <- paste0("1e", lrg))
micro <- paste0("1e-", lrg)
stopifnot(as.numeric(huge) == Inf,
as.numeric(micro) == 0)
## Both failed in R <= 3.2.0
## vcov() failed on manova() results, PR#16380
tear <- c(6.5, 6.2, 5.8, 6.5, 6.5, 6.9, 7.2, 6.9, 6.1, 6.3, 6.7, 6.6, 7.2, 7.1, 6.8, 7.1, 7.0, 7.2, 7.5, 7.6)
gloss <- c(9.5, 9.9, 9.6, 9.6, 9.2, 9.1, 10.0, 9.9, 9.5, 9.4, 9.1, 9.3, 8.3, 8.4, 8.5, 9.2, 8.8, 9.7, 10.1, 9.2)
opacity <- c(4.4, 6.4, 3.0, 4.1, 0.8, 5.7, 2.0, 3.9, 1.9, 5.7, 2.8, 4.1, 3.8,1.6, 3.4, 8.4, 5.2, 6.9, 2.7, 1.9)
Y <- cbind(tear, gloss, opacity)
rate <- factor(gl(2,10), labels = c("Low", "High"))
fit <- manova(Y ~ rate)
vcov(fit)
## Gave error because coef.aov() turned matrix of coefficients into a vector
## Unary / Binary uses of logic operations, PR#16385
tools::assertError(`&`(FALSE))
tools::assertError(`|`(TRUE))
## Did not give errors in R <= 3.2.0
E <- tryCatch(`!`(), error = function(e)e)
stopifnot(grepl("0 arguments .*\\<1", conditionMessage(E)))
## Gave wrong error message in R <= 3.2.0
stopifnot(identical(!matrix(TRUE), matrix(FALSE)),
identical(!matrix(FALSE), matrix(TRUE)))
## was wrong for while in R 3.2.0 patched
## cummax(<integer>)
iNA <- NA_integer_
x <- c(iNA, 1L)
stopifnot(identical(cummin(x), c(iNA, iNA)),
identical(cummax(x), c(iNA, iNA)))
## an initial NA was not propagated in R <= 3.2.0
## summaryRprof failed for very short profile, PR#16395
profile <- tempfile()
writeLines(c(
'memory profiling: sample.interval=20000',
':145341:345360:13726384:0:"stdout"',
':208272:345360:19600000:0:"stdout"'), profile)
summaryRprof(filename = profile, memory = "both")
unlink(profile)
## failed when a matrix was downgraded to a vector
## option(OutDec = *) -- now gives a warning when not 1 character
op <- options(OutDec = ".", digits = 7, # <- default
warn = 2)# <- (unexpected) warnings become errors
stopifnot(identical("3.141593", fpi <- format(pi)))
options(OutDec = ",")
stopifnot(identical("3,141593", cpi <- format(pi)))
## warnings, but it "works" (for now):
tools::assertWarning(options(OutDec = ".1."))
stopifnot(identical("3.1.141593", format(pi)))
tools::assertWarning(options(OutDec = ""))
tools::assertWarning(stopifnot(identical("3141593", format(pi))))
options(op)# back to sanity
## No warnings in R versions <= 3.2.1
## format(*, decimal.mark=".") when OutDec != "." (PR#16411)
op <- options(OutDec = ",")
stopifnot(identical(fpi, format(pi, decimal.mark=".")))
## failed in R <= 3.2.1
## model.frame() removed ts attributes on original data (PR#16436)
orig <- class(EuStockMarkets)
mf <- model.frame(EuStockMarkets ~ 1, na.action=na.fail)
stopifnot(identical(orig, class(EuStockMarkets)))
## ts class lost in R <= 3.2.1
##
foo <- as.expression(1:3)
matrix(foo, 3, 3) # always worked
matrix(foo, 3, 3, byrow = TRUE)
## failed in R <= 3.1.2
## labels.dendrogram(), dendrapply(), etc -- see comment #15 of PR#15215 :
(D <- as.dendrogram(hclust(dist(cbind(setNames(c(0,1,4), LETTERS[1:3]))))))
stopifnot(
identical(labels(D), c("C", "A", "B")),
## has been used in "CRAN package space"
identical(suppressWarnings(dendrapply(D, labels)),
list("C", list("A", "B"), "C")))
## dendrapply(D, labels) failed in R-devel for a day or two
## poly() / polym() predict()ion
library(datasets)
alm <- lm(stack.loss ~ poly(Air.Flow, Water.Temp, degree=3), stackloss)
f20 <- fitted(alm)[1:20] # "correct" prediction values [1:20]
stopifnot(all.equal(unname(f20[1:4]), c(39.7703378, 39.7703378, 35.8251359, 21.5661761)),
all.equal(f20, predict(alm, stackloss) [1:20] , tolerance = 1e-14),
all.equal(f20, predict(alm, stackloss[1:20, ]), tolerance = 1e-14))
## the second prediction went off in R <= 3.2.1
## PR#16478
kkk <- c("a\tb", "3.14\tx")
z1 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c("numeric", "character"))
z2 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(b = "character", a = "numeric"))
stopifnot(identical(z1, z2))
z3 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(b = "character"))
stopifnot(identical(z1, z3))
z4 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(c = "integer", b = "character", a = "numeric"))
stopifnot(identical(z1, z4))
## z2 and z4 used positional matching (and failed) in R < 3.3.0.
## PR#16484
z <- regexpr("(.)", NA_character_, perl = TRUE)
stopifnot(is.na(attr(z, "capture.start")), is.na(attr(z, "capture.length")))
## Result was random integers in R <= 3.2.2.
## PR#14861
if(.Platform$OS.type == "unix") { # no 'ls /' on Windows
con <- pipe("ls /", open = "rt")
data <- readLines(con)
z <- close(con)
print(z)
stopifnot(identical(z, 0L))
}
## was NULL in R <= 3.2.2
## Sam Steingold: compiler::enableJIT(3) not working in ~/.Rprofile anymore
stopifnot(identical(topenv(baseenv()),
baseenv()))
## accidentally globalenv in R 3.2.[12] only
## widths of unknown Unicode characters
stopifnot(nchar("\u200b", "w") == 0)
## was -1 in R 3.2.2
| 30,965 | gpl-2.0 |
92e46fe42a90017c6e9da12fb58c8c57064d69a6 | FujitsuLaboratories/COMEVIZZ | app/R/run.R | library(shiny)
#' Start COMEVIZZ
#'
#' Builds the Shiny application from the package-level `ui` and `server`
#' objects and serves it on all network interfaces at port 3838.
#' @importFrom shiny runApp shinyApp
#' @export
run <- function() {
  app <- shiny::shinyApp(ui, server)
  shiny::runApp(app, host = "0.0.0.0", port = 3838)
}
| 192 | apache-2.0 |
3f81c50745c0a090adc1f45154855ee20a161ad4 | andy-thomason/little_r | test/R-tests/reg-tests-1c.R | ## Regression tests for R >= 3.0.0
pdf("reg-tests-1c.pdf", encoding = "ISOLatin1.enc")
## mapply with classed objects with length method
## was not documented to work in 2.x.y
setClass("A", representation(aa = "integer"))
a <- new("A", aa = 101:106)
setMethod("length", "A", function(x) length(x@aa))
setMethod("[[", "A", function(x, i, j, ...) x@aa[[i]])
(z <- mapply(function(x, y) {x * y}, a, rep(1:3, 2)))
stopifnot(z == c(101, 204, 309, 104, 210, 318))
## reported as a bug (which it was not) by H. Pages in
## https://stat.ethz.ch/pipermail/r-devel/2012-November/065229.html
## recycling in split()
## https://stat.ethz.ch/pipermail/r-devel/2013-January/065700.html
x <- 1:6
y <- split(x, 1:2)
class(x) <- "ABC" ## class(x) <- "A" creates an invalid object
yy <- split(x, 1:2)
stopifnot(identical(y, yy))
## were different in R < 3.0.0
## dates with fractional seconds after 2038 (PR#15200)
## Extremely speculative!
z <- as.POSIXct(2^31+c(0.4, 0.8), origin=ISOdatetime(1970,1,1,0,0,0,tz="GMT"))
zz <- format(z)
stopifnot(zz[1] == zz[2])
## printed form rounded not truncated in R < 3.0.0
## origin coerced in tz and not GMT by as.POSIXct.numeric()
x <- as.POSIXct(1262304000, origin="1970-01-01", tz="EST")
y <- as.POSIXct(1262304000, origin=.POSIXct(0, "GMT"), tz="EST")
stopifnot(identical(x, y))
## Handling records with quotes in names
x <- c("a b' c",
"'d e' f g",
"h i 'j",
"k l m'")
y <- data.frame(V1 = c("a", "d e", "h"), V2 = c("b'", "f", "i"), V3 = c("c", "g", "j\nk l m"))
f <- tempfile()
writeLines(x, f)
stopifnot(identical(count.fields(f), c(3L, 3L, NA_integer_, 3L)))
stopifnot(identical(read.table(f), y))
stopifnot(identical(scan(f, ""), as.character(t(as.matrix(y)))))
## docu always said 'length 1 is sorted':
stopifnot(!is.unsorted(NA))
## str(.) for large factors should be fast:
u <- as.character(runif(1e5))
t1 <- max(0.001, system.time(str(u))[[1]]) # get a baseline > 0
uf <- factor(u)
(t2 <- system.time(str(uf))[[1]]) / t1 # typically around 1--2
stopifnot(t2 / t1 < 30)
## was around 600--850 for R <= 3.0.1
## ftable(<array with unusual dimnames>)
(m <- matrix(1:12, 3,4, dimnames=list(ROWS=paste0("row",1:3), COLS=NULL)))
ftable(m)
## failed to format (and hence print) because of NULL 'COLS' dimnames
## regression test formerly in kmeans.Rd, but result differs by platform
## Artificial example [was "infinite loop" on x86_64; PR#15364]
rr <- c(rep(-0.4, 5), rep(-0.4- 1.11e-16, 14), -.5)
r. <- signif(rr, 12)
k3 <- kmeans(rr, 3, trace=2) ## Warning: Quick-Transfer.. steps exceed
try ( k. <- kmeans(r., 3) ) # after rounding, have only two distinct points
k. <- kmeans(r., 2) # fine
## PR#15376
stem(c(1, Inf))
## hung in 3.0.1
## PR#15377, very long variable names
x <- 1:10
y <- x + rnorm(10)
z <- y + rnorm(10)
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy <- y
fit <- lm(cbind(yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, z) ~ x)
## gave spurious error message in 3.0.1.
## PR#15341 singular complex matrix in rcond()
set.seed(11)
n <- 5
A <- matrix(runif(n*n),nrow=n)
B <- matrix(runif(n*n),nrow=n)
B[n,] <- (B[n-1,]+B[n-2,])/2
rcond(B)
B <- B + 0i
rcond(B)
## gave error message (OK) in R 3.0.1: now returns 0 as in real case.
## Misuse of formatC as in PR#15303
days <- as.Date(c("2012-02-02", "2012-03-03", "2012-05-05"))
(z <- formatC(days))
stopifnot(!is.object(z), is.null(oldClass(z)))
## used to copy over class in R < 3.0.2.
## PR15219
val <- sqrt(pi)
fun <- function(x) (-log(x))^(-1/2)
(res <- integrate(fun, 0, 1, rel.tol = 1e-4))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, 1, rel.tol = 1e-6))
stopifnot(abs(res$value - val) < res$abs.error)
res <- integrate(fun, 0, 1, rel.tol = 1e-8)
stopifnot(abs(res$value - val) < res$abs.error)
fun <- function(x) x^(-1/2)*exp(-x)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-4))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-6))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-8))
stopifnot(abs(res$value - val) < res$abs.error)
## sometimes exceeded reported error in 2.12.0 - 3.0.1
## Unary + should coerce
x <- c(TRUE, FALSE, NA, TRUE)
stopifnot(is.integer(+x))
## +x was logical in R <= 3.0.1
## Attributes of value of unary operators
# +x, -x were ts, !x was not in 3.0.2
x <- ts(c(a=TRUE, b=FALSE, c=NA, d=TRUE), frequency = 4, start = 2000)
x; +x; -x; !x
stopifnot(is.ts(!x), !is.ts(+x), !is.ts(-x))
# +x, -x were ts, !x was not in 3.0.2
x <- ts(c(a=1, b=2, c=0, d=4), frequency = 4, start = 2010)
x; +x; -x; !x
stopifnot(!is.ts(!x), is.ts(+x), is.ts(-x))
##
## regression test incorrectly in colorRamp.Rd
bb <- colorRampPalette(2)(4)
stopifnot(bb[1] == bb)
## special case, invalid in R <= 2.15.0:
## Setting NAMED on ... arguments
## (...) evaluates the single dots argument; the assignment into 'x' must
## copy rather than modify the promise's value in place, so the second
## (...) still sees the original 3, not the mutated 7.
f <- function(...) { x <- (...); x[1] <- 7; (...) }
stopifnot(f(1+2) == 3)
## was 7 in 3.0.1
## copying attributes from only one arg of a binary operator.
A <- array(c(1), dim = c(1L,1L), dimnames = list("a", 1))
x <- c(a = 1)
B <- A/(pi*x)
stopifnot(is.null(names(B)))
## was wrong in R-devel in Aug 2013
## needed an un-NAMED rhs.
## lgamma(x) for very small negative x
X <- 3e-308; stopifnot(identical(lgamma(-X), lgamma(X)))
## lgamma(-X) was NaN in R <= 3.0.1
## PR#15413
z <- subset(data.frame(one = numeric()), select = one)
stopifnot(nrow(z) == 0L)
## created a row prior to 3.0.2
## https://stat.ethz.ch/pipermail/r-devel/2013-September/067524.html
dbeta(0.9, 9.9e307, 10)
dbeta(0.1, 9, 9.9e307)
dbeta(0.1, 9.9e307, 10)
## first two hung in R <= 3.0.2
## PR#15465
provideDimnames(matrix(nrow = 0, ncol = 1))
provideDimnames(table(character()))
as.data.frame(table(character()))
## all failed in 3.0.2
## PR#15004
n <- 10
s <- 3
l <- 10000
m <- 20
x <- data.frame(x1 = 1:n, x2 = 1:n)
by <- data.frame(V1 = factor(rep(1:3, n %/% s + 1)[1:n], levels = 1:s))
for(i in 1:m) {
by[[i + 1]] <- factor(rep(l, n), levels = 1:l)
}
agg <- aggregate.data.frame(x, by, mean)
stopifnot(nrow(unique(by)) == nrow(agg))
## rounding caused groups to be falsely merged
## PR#15454
set.seed(357)
z <- matrix(c(runif(50, -1, 1), runif(50, -1e-190, 1e-190)), nrow = 10)
contour(z)
## failed because rounding made crossing tests inconsistent
## Various cases where zero length vectors were not handled properly
## by functions in base and utils, including PR#15499
y <- as.data.frame(list())
format(y)
format(I(integer()))
gl(0, 2)
z <- list(numeric(0), 1)
stopifnot(identical(relist(unlist(z), z), z))
summary(y)
## all failed in 3.0.2
## PR#15518 Parser catching errors in particular circumstance:
(ee <- tryCatch(parse(text = "_"), error= function(e)e))
stopifnot(inherits(ee, "error"))
## unexpected characters caused the parser to segfault in 3.0.2
## nonsense value of nmax
unique(1:3, nmax = 1)
## infinite-looped in 3.0.2, now ignored.
## besselI() (and others), now using sinpi() etc:
stopifnot(all.equal(besselI(2.125,-5+1/1024),
0.02679209380095711, tol= 8e-16),
all.equal(lgamma(-12+1/1024), -13.053274367453049, tol=8e-16))
## rel.error was 1.5e-13 / 7.5e-14 in R <= 3.0.x
ss <- sinpi(2*(-10:10)-2^-12)
tt <- tanpi( (-10:10)-2^-12)
stopifnot(ss == ss[1], tt == tt[1], # as internal arithmetic must be exact here
all.equal(ss[1], -0.00076699031874270453, tol=8e-16),
all.equal(tt[1], -0.00076699054434309260, tol=8e-16))
## (checked via Rmpfr) The above failed during development
## PR#15535 c() "promoted" raw vectors to bad logical values
stopifnot( c(as.raw(11), TRUE) == TRUE )
## as.raw(11) became a logical value coded as 11,
## and did not test equal to TRUE.
## PR#15564
fit <- lm(rnorm(10) ~ I(1:10))
predict(fit, interval = "confidence", scale = 1)
## failed in <= 3.0.2 with object 'w' not found
## PR#15534 deparse() did not produce reparseable complex vectors
## Check that deparse() output survives a parse/eval round trip: the
## re-evaluated value must be identical() to the original 'sexp', else an
## error is signalled. A NULL round-trip result is (deliberately) treated
## as a parse failure, so this helper is not meant for sexp == NULL.
assert.reparsable <- function(sexp) {
  txt <- paste(deparse(sexp), collapse = " ")
  roundtrip <- tryCatch(eval(parse(text = txt)[[1]]),
                        error = function(e) NULL)
  if (is.null(roundtrip)) {
    stop(sprintf("Deparsing produced invalid syntax: %s", txt))
  }
  if (!identical(roundtrip, sexp)) {
    stop(sprintf("Deparsing produced change: value is not %s", roundtrip))
  }
}
assert.reparsable(1)
assert.reparsable("string")
assert.reparsable(2+3i)
assert.reparsable(1:10)
assert.reparsable(c(NA, 12, NA, 14))
assert.reparsable(as.complex(NA))
assert.reparsable(complex(real=Inf, i=4))
assert.reparsable(complex(real=Inf, i=Inf))
assert.reparsable(complex(real=Inf, i=-Inf))
assert.reparsable(complex(real=3, i=-Inf))
assert.reparsable(complex(real=3, i=NaN))
assert.reparsable(complex(r=NaN, i=0))
assert.reparsable(complex(real=NA, i=1))
assert.reparsable(complex(real=1, i=NA))
## last 7 all failed
## PR#15621 backticks could not be escaped
stopifnot(deparse(as.name("`"), backtick=TRUE) == "`\\``")
assign("`", TRUE)
`\``
tools::assertError(parse("```"))
##
## We document tanpi(0.5) etc to be NaN
stopifnot(is.nan(tanpi(c(0.5, 1.5, -0.5, -1.5))))
## That is not required for system implementations, and some give +/-Inf
## PR#15642 segfault when parsing overflowing reals
as.double("1e1000")
ll <- ml <- list(1,2); dim(ml) <- 2:1
ali <- all.equal(list( ), identity) # failed in R-devel for ~ 30 hours
al1 <- all.equal(list(1), identity) # failed in R < 3.1.0
stopifnot(length(ali) == 3, grepl("list", ali[1]),
grepl("length", ali[2], ignore.case=TRUE),
is.character(al1), length(al1) >= 2,
all.equal(ml, ml),
all.equal(ll, ml, check.attributes=FALSE))
## PR#15699 aggregate failed when there were no grouping variables
dat <- data.frame(Y = runif(10), X = sample(LETTERS[1:3], 10, TRUE))
aggregate(Y ~ 1, FUN = mean, data = dat)
## merge() with duplicated column names, similar to PR#15618
X <- data.frame(Date = c("1967-02-01", "1967-02-02", "1967-02-03"),
Settle.x = c(NA, NA, NA), Settle.y = c(NA, NA, NA),
Settle = c(35.4, 35.15, 34.95))
Y <- data.frame(Date = c("2013-12-10", "2013-12-11", "2013-12-12"),
Settle = c(16.44, 16.65, 16.77))
merge(X, Y, by = "Date", all = TRUE)
## failed in R < 3.1.0: now warns (correctly).
## PR#15679
## Recursively build a nested list of the given 'depth': each level is a
## one-element list whose single component (named 'key') holds the next
## level wrapped in list(); the innermost component is an empty list().
badstructure <- function(depth, key) {
  inner <- if (depth == 1L) {
    list()
  } else {
    list(badstructure(depth - 1L, key))
  }
  out <- list()
  out[[key]] <- inner
  out
}
badstructure(20, "children")
## overran, segfaulted for the original reporter.
## PR#15702 and PR#15703
d <- as.dendrogram(hclust(dist(sin(1:7))))
(dl <- d[[c(2,1,2)]]) # single-leaf dendrogram
stopifnot(inherits(dl, "dendrogram"), is.leaf(dl),
identical(order.dendrogram(dl), as.vector(dl)),
identical(d, as.dendrogram(d)))
## as.dendrogram() was hidden; order.*() failed for leaf
## using *named* method
hw <- hclust(dist(sqrt(1:5)), method=c(M = "ward"))
## failed for 2 days in R-devel/-alpha
## PR#15758
my_env <- new.env(); my_env$one <- 1L
save(one, file = tempfile(), envir = my_env)
## failed in R < 3.1.1.
## Conversion to numeric in boundary case
ch <- "0x1.ffa0000000001p-1"
rr <- type.convert(ch, numerals = "allow.loss")
rX <- type.convert(ch, numerals = "no.loss")
stopifnot(is.numeric(rr), identical(rr, rX),
all.equal(rr, 0.999267578125),
all.equal(type.convert(ch, numerals = "warn"),
type.convert("0x1.ffap-1",numerals = "warn"), tol = 5e-15))
## type.convert(ch) was not numeric in R 3.1.0
##
ch <- "1234567890123456789"
rr <- type.convert(ch, numerals = "allow.loss")
rX <- type.convert(ch, numerals = "no.loss")
rx <- type.convert(ch, numerals = "no.loss", as.is = TRUE)
tools::assertWarning(r. <- type.convert(ch, numerals = "warn.loss"))
stopifnot(is.numeric(rr), identical(rr, r.), all.equal(rr, 1.234567890e18),
is.factor(rX), identical(rx, ch))
## PR#15764: integer overflow could happen without a warning or giving NA
tools::assertWarning(ii <- 1980000020L + 222000000L)
stopifnot(is.na(ii))
tools::assertWarning(ii <- (-1980000020L) + (-222000000L))
stopifnot(is.na(ii))
tools::assertWarning(ii <- (-1980000020L) - 222000000L)
stopifnot(is.na(ii))
tools::assertWarning(ii <- 1980000020L - (-222000000L))
stopifnot(is.na(ii))
## first two failed for some version of clang in R < 3.1.1
## PR#15735: formulae with exactly 32 variables
myFormula <- as.formula(paste(c("y ~ x0", paste0("x", 1:30)), collapse = "+"))
ans <- update(myFormula, . ~ . - w1)
stopifnot(identical(ans, myFormula))
updateArgument <-
as.formula(paste(c(". ~ . ", paste0("w", 1:30)), collapse = " - "))
ans2 <- update(myFormula, updateArgument)
stopifnot(identical(ans2, myFormula))
## PR#15753
0x110p-5L
stopifnot(.Last.value == 8.5)
## was 272 with a garbled message in R 3.0.0 - 3.1.0.
## numericDeriv failed to duplicate variables in
## the expression before modifying them. PR#15849
x <- 10; y <- 10
d1 <- numericDeriv(quote(x+y),c("x","y"))
x <- y <- 10
d2 <- numericDeriv(quote(x+y),c("x","y"))
stopifnot(identical(d1,d2))
## The second gave the wrong answer
## prettyNum(x, zero.print = .) failed when x had NAs
pp <- sapply(list(TRUE, FALSE, ".", " "), function(.)
prettyNum(c(0:1,NA), zero.print = . ))
stopifnot(identical(pp[1,], c("0", " ", ".", " ")),
pp[2:3,] == c("1","NA"))
## all 4 prettyNum() would error out
## checking all.equal() with externalptr
library(methods) # getClass()'s versionKey is an e.ptr
cA <- getClass("ANY")
stopifnot(all.equal(cA, cA),
is.character(all.equal(cA, getClass("S4"))))
# both all.equal() failed in R <= 3.1.1
## as.hexmode(x), as.octmode(x) when x is double
x <- c(NA, 1)
stopifnot(identical(x == x,
as.hexmode(x) == as.octmode(x)))
p <- c(1, pi)
tools::assertError(as.hexmode(p))
tools::assertError(as.octmode(p))
## where all "wrong" in R <= 3.1.1
## PR#15935
y <- 1:3
drop1(lm(y ~ 1))
drop1(glm(y ~ 1))
stats:::drop1.default(glm(y ~ 1))
## gave error in R < 3.1.2
## getAnywhere() wrongly dealing with namespace hidden list object
nm <- deparse(body(pbinom)[[2]])# == "C_pbinom" currently
gg <- getAnywhere(nm)
stopifnot(length(gg$objs) == 1)
## was 4 and printed "4 differing objects matching ‘C_pbinom’ ..." in R <= 3.1.1
## 0-length consistency of options(), PR#15979
stopifnot(identical(options(list()), options(NULL)))
## options(list()) failed in R <= 3.1.1
## merge.dendrogram(), PR#15648
## Build an n-leaf dendrogram with labels <lab>1 .. <lab>n from a random
## dissimilarity matrix; 'rGen' supplies the raw matrix entries.
mkDend <- function(n, lab, rGen = function(n) 1+round(16*abs(rnorm(n)))) {
stopifnot(is.numeric(n), length(n) == 1, n >= 1, is.character(lab))
a <- matrix(rGen(n*n), n, n)
colnames(a) <- rownames(a) <- paste0(lab, 1:n)
## a + t(a) symmetrizes the matrix, as required for a valid dist object
as.dendrogram(hclust(as.dist(a + t(a))))
}
set.seed(7)
da <- mkDend(4, "A")
db <- mkDend(3, "B")
d.ab <- merge(da, db)
hcab <- as.hclust(d.ab)
stopifnot(hcab$order == c(2, 4, 1, 3, 7, 5, 6),
hcab$labels == c(paste0("A", 1:4), paste0("B", 1:3)))
## was wrong in R <= 3.1.1
## bw.SJ() and similar with NA,Inf values, PR#16024
try(bw.SJ (c(NA,2,3)))
try(bw.bcv(c(-Inf,2,3)))
try(bw.ucv(c(1,NaN,3,4)))
## seg.faulted in 3.0.0 <= R <= 3.1.1
## as.dendrogram() with wrong input
x <- rbind(c( -6, -9), c( 0, 13),
c(-15, 6), c(-14, 0), c(12,-10))
dx <- dist(x,"manhattan")
hx <- hclust(dx)
hx$merge <- matrix(c(-3, 1, -2, 3,
-4, -5, 2, 3), 4,2)
tools::assertError(as.dendrogram(hx))
## 8 member dendrogram and memory explosion for larger examples in R <= 3.1.2
## abs with named args failed, PR#16047
abs(x=1i)
## Complained that the arg should be named z
## Big exponents overflowed, PR#15976
x <- 0E4933
y <- 0x0p100000
stopifnot(x == 0, y == 0)
##
## drop.terms() dropped some attributes, PR#16029
test <- model.frame(Employed ~ Year + poly(GNP,3) + Population, data=longley)
mterm <- terms(test)
mterm2 <- drop.terms(mterm, 3)
predvars <- attr(mterm2, "predvars")
dataClasses <- attr(mterm2, "dataClasses")
factors <- attr(mterm2, "factors")
stopifnot(is.language(predvars), length(predvars) == length(dataClasses)+1,
all(names(dataClasses) == rownames(factors)))
## Previously dropped predvars and dataClasses
## prompt() did not escape percent signs properly
fn <- function(fmt = "%s") {}
f <- tempfile(fileext = ".Rd")
prompt(fn, filename = f)
rd <- tools::parse_Rd(f)
## Gave syntax errors because the percent sign in Usage
## was taken as the start of a comment.
## power.t.test() failure for very large n (etc): PR#15792
(ptt <- power.t.test(delta = 1e-4, sd = .35, power = .8))
(ppt <- power.prop.test(p1 = .5, p2 = .501, sig.level=.001, power=0.90, tol=1e-8))
stopifnot(all.equal(ptt$n, 192297000, tol = 1e-5),
all.equal(ppt$n, 10451937, tol = 1e-7))
## call to uniroot() did not allow n > 1e7
## save(*, ascii=TRUE): PR#16137
x0 <- x <- c(1, NA, NaN)
save(x, file=(sf <- tempfile()), ascii = TRUE)
load(sf)
stopifnot(identical(x0, x))
## x had 'NA' instead of 'NaN'
## PR#16205
stopifnot(length(glob2rx(character())) == 0L)
## was "^$" in R < 3.1.3
### Bugs fixed in R 3.2.0
## Bugs reported by Radford Neal
x <- pairlist(list(1, 2))
x[[c(1, 2)]] <- NULL # wrongly gave an error, referring to misuse
# of the internal SET_VECTOR_ELT procedure
stopifnot(identical(x, pairlist(list(1))))
a <- pairlist(10, 20, 30, 40, 50, 60)
dim(a) <- c(2, 3)
dimnames(a) <- list(c("a", "b"), c("x", "y", "z"))
# print(a) # doesn't print names, not fixed
a[["a", "x"]] <- 0
stopifnot(a[["a", "x"]] == 0)
## First gave a spurious error, second caused a seg.fault
## Radford (R-devel, June 24, 2014); M.Maechler
m <- matrix(1:2, 1,2); v <- 1:3
stopifnot(identical(crossprod(2, v), t(2) %*% v),
identical(crossprod(m, v), t(m) %*% v),
identical(5 %*% v, 5 %*% t(v)),
identical(tcrossprod(m, 1:2), m %*% 1:2) )
## gave error "non-conformable arguments" in R <= 3.2.0
## list <--> environment
L0 <- list()
stopifnot(identical(L0, as.list(as.environment(L0))))
## as.env..() did not work, and as.list(..) gave non-NULL names in R 3.1.x
## all.equal() for environments and refClass()es
RR <- setRefClass("Ex", fields = list(nr = "numeric"))
m1 <- RR$new(); m2 <- RR$new(); m3 <- RR$new(nr = pi); m4 <- RR$new(nr=3.14159)
ee <- emptyenv(); e2 <- new.env()
stopifnot(all.equal(ee,ee), identical(ee,ee), !identical(ee,e2), all.equal(ee,e2),
identical(m3,m3), !identical(m1,m2),
all.equal(m1,m2), !isTRUE(all.equal(m1,m3)), !isTRUE(all.equal(m1,m4)),
all.equal(m3,m4, tol=1e-6), grepl("relative difference", all.equal(m3,m4)),
TRUE)
## did not work in R 3.1.x
e3 <- new.env()
e3$p <- "p"; e2$p <- "p"; ae.p <- all.equal(e2,e3)
e3$q <- "q"; ae.q <- all.equal(e2,e3)
e2$q <- "Q"; ae.Q <- all.equal(e2,e3)
stopifnot(ae.p, grepl("^Length", ae.q), grepl("string mismatch", ae.Q))
e2$q <- "q"; e2$r <- pi; e3$r <- 3.14159265
stopifnot(all.equal(e2, e3),
grepl("relative difference", all.equal(e2, e3, tol=1e-10)))
g <- globalenv() # so it now contains itself
l <- list(e = g)
stopifnot(all.equal(g, g),
all.equal(l, l))
## these ran into infinite recursion error.
## missing() did not propagate through '...', PR#15707
check <- function(x,y,z) c(missing(x), missing(y), missing(z))
check1 <- function(...) check(...)
check2 <- function(...) check1(...)
stopifnot(identical(check2(one, , three), c(FALSE, TRUE, FALSE)))
## missing() was unable to handle recursive promises
## envRefClass prototypes are a bit special -- broke all.equal() for baseenv()
rc <- getClass("refClass")
rp <- rc@prototype
str(rp) ## failed
rp ## show() failed ..
(ner <- new("envRefClass")) # show() failed
stopifnot(all.equal(rp,rp), all.equal(ner,ner))
be <- baseenv()
system.time(stopifnot(all.equal(be,be)))## <- takes a few sec's
stopifnot(
grepl("not identical.*character", print(all.equal(rp, ner))),
grepl("not identical.*character", print(all.equal(ner, rp))))
system.time(stopifnot(all.equal(globalenv(), globalenv())))
## Much of the above failed in R <= 3.2.0
## while did not protect its argument, which caused an error
## under gctorture, PR#15990
gctorture()
suppressWarnings(while(c(FALSE, TRUE)) 1)
gctorture(FALSE)
## gave an error because the test got released when the warning was generated.
## hist(x, breaks =) with too large bins, PR#15988
set.seed(5); x <- runif(99)
Hist <- function(x, b) hist(x, breaks = b, plot = FALSE)$counts
for(k in 1:5) {
b0 <- seq_len(k-1)/k
H.ok <- Hist(x, c(-10, b0, 10))
for(In in c(1000, 1e9, Inf))
stopifnot(identical(Hist(x, c(-In, b0, In)), H.ok),
identical(Hist(x, c( 0, b0, In)), H.ok))
}
## "wrong" results for k in {2,3,4} in R 3.1.x
## eigen(*, symmetric = <default>) with asymmetric dimnames, PR#16151
m <- matrix(c(83,41), 5, 4,
dimnames=list(paste0("R",1:5), paste0("C",1:4)))[-5,] + 3*diag(4)
stopifnot( all.equal(eigen(m, only.values=TRUE) $ values,
c(251, 87, 3, 3), tol=1e-14) )
## failed, using symmetric=FALSE and complex because of the asymmetric dimnames()
## match.call() re-matching '...'
test <- function(x, ...) test2(x, 2, ...)
test2 <- function(x, ...) match.call(test2, sys.call())
stopifnot(identical(test(1, 3), quote(test2(x=x, 2, 3))))
## wrongly gave test2(x=x, 2, 2, 3) in R <= 3.1.2
## callGeneric not forwarding dots in call (PR#16141)
setGeneric("foo", function(x, ...) standardGeneric("foo"))
setMethod("foo", "character",
function(x, capitalize = FALSE) if (capitalize) toupper(x) else x)
setMethod("foo", "factor",
function(x, capitalize = FALSE) { x <- as.character(x); callGeneric() })
toto1 <- function(x, ...) foo(x, ...)
stopifnot(identical(toto1(factor("a"), capitalize = TRUE), "A"))
## wrongly did not capitalize in R <= 3.1.2
## Accessing non existing objects must be an error
tools::assertError(base :: foobar)
tools::assertError(base :::foobar)
tools::assertError(stats:::foobar)
tools::assertError(stats:: foobar)
## lazy data only via '::', not ':::' :
stopifnot( nrow(datasets:: swiss) == 47)
tools::assertError(datasets:::swiss)
## The ::: versions gave NULL in certain development versions of R
stopifnot(identical(stats4::show -> s4s,
get("show", asNamespace("stats4") -> ns4)),
s4s@package == "methods",
is.null(ns4[["show"]]) # not directly in stats4 ns
)
## stats4::show was NULL for 4 hours in R-devel
## mode<- did too much evaluation (PR#16215)
x <- y <- quote(-2^2)
x <- as.list(x)
mode(y) <- "list"
stopifnot(identical(x, y))
## y ended up containing -4, not -2^2
## besselJ()/besselY() with too large order
besselJ(1, 2^64) ## NaN with a warning
besselY(1, c(2^(60:70), Inf))
## seg.faulted in R <= 3.1.2
## besselJ()/besselY() with nu = k + 1/2; k in {-1,-2,..}
besselJ(1, -1750.5) ## Inf, with only one warning...
stopifnot(is.finite(besselY(1, .5 - (1500 + 0:10))))
## last gave NaNs; both: more warnings in R <= 3.1.x
## BIC() for arima(), also with NA's
lho <- lh; lho[c(3,7,13,17)] <- NA
alh300 <- arima(lh, order = c(3,0,0))
alh311 <- arima(lh, order = c(3,1,1))
ao300 <- arima(lho, order = c(3,0,0))
ao301 <- arima(lho, order = c(3,0,1))
## AIC/BIC for *different* data rarely makes sense ... want warning:
tools::assertWarning(AA <- AIC(alh300,alh311, ao300,ao301))
tools::assertWarning(BB <- BIC(alh300,alh311, ao300,ao301))
fmLst <- list(alh300,alh311, ao300,ao301)
## nobs() did not "work" in R < 3.2.0:
stopifnot(sapply(fmLst, nobs) == c(48,47, 44,44))
lls <- lapply(fmLst, logLik)
str(lapply(lls, unclass))# -> 'df' and 'nobs'
## 'manual BIC' via generalized AIC:
stopifnot(all.equal(BB[,"BIC"],
sapply(fmLst, function(fm) AIC(fm, k = log(nobs(fm))))))
## BIC() was NA unnecessarily in R < 3.2.0; nobs() was not available eiher
## as.integer() close and beyond maximal integer
MI <- .Machine$integer.max
stopifnot(identical( MI, as.integer( MI + 0.99)),
identical(-MI, as.integer(-MI - 0.99)),
is.na(as.integer(as.character( 100*MI))),
is.na(as.integer(as.character(-100*MI))))
## The two cases with positive numbers failed in R <= 3.2.0
## Ensure that sort() works with a numeric vector "which is an object":
stopifnot(is.object(y <- freeny$y))
stopifnot(diff(sort(y)) > 0)
## order() and hence sort() failed here badly for a while around 2015-04-16
## NAs in data frame names:
dn <- list(c("r1", NA), c("V", NA))
d11 <- as.data.frame(matrix(c(1, 1, 1, 1), ncol = 2, dimnames = dn))
stopifnot(identical(names(d11), dn[[2]]),
identical(row.names(d11), dn[[1]]))
## as.data.frame() failed in R-devel for a couple of hours ..
## note that format(d11) does fail currently, and hence print(), too
## Ensure R -e .. works on Unix
if(.Platform$OS.type == "unix" &&
file.exists(Rc <- file.path(R.home("bin"), "R")) &&
file.access(Rc, mode = 1) == 0) { # 1: executable
cmd <- paste(Rc, "-q --vanilla -e 1:3")
ans <- system(cmd, intern=TRUE)
stopifnot(length(ans) >= 3,
identical(ans[1:2], c("> 1:3",
"[1] 1 2 3")))
}
## (failed for < 1 hr, in R-devel only)
## Parsing large exponents of floating point numbers, PR#16358
set.seed(12)
lrg <- sprintf("%.0f", round(exp(10*(2+abs(rnorm(2^10))))))
head(huge <- paste0("1e", lrg))
micro <- paste0("1e-", lrg)
stopifnot(as.numeric(huge) == Inf,
as.numeric(micro) == 0)
## Both failed in R <= 3.2.0
## vcov() failed on manova() results, PR#16380
tear <- c(6.5, 6.2, 5.8, 6.5, 6.5, 6.9, 7.2, 6.9, 6.1, 6.3, 6.7, 6.6, 7.2, 7.1, 6.8, 7.1, 7.0, 7.2, 7.5, 7.6)
gloss <- c(9.5, 9.9, 9.6, 9.6, 9.2, 9.1, 10.0, 9.9, 9.5, 9.4, 9.1, 9.3, 8.3, 8.4, 8.5, 9.2, 8.8, 9.7, 10.1, 9.2)
opacity <- c(4.4, 6.4, 3.0, 4.1, 0.8, 5.7, 2.0, 3.9, 1.9, 5.7, 2.8, 4.1, 3.8,1.6, 3.4, 8.4, 5.2, 6.9, 2.7, 1.9)
Y <- cbind(tear, gloss, opacity)
rate <- factor(gl(2,10), labels = c("Low", "High"))
fit <- manova(Y ~ rate)
vcov(fit)
## Gave error because coef.aov() turned matrix of coefficients into a vector
## Unary / Binary uses of logic operations, PR#16385
tools::assertError(`&`(FALSE))
tools::assertError(`|`(TRUE))
## Did not give errors in R <= 3.2.0
E <- tryCatch(`!`(), error = function(e)e)
stopifnot(grepl("0 arguments .*\\<1", conditionMessage(E)))
## Gave wrong error message in R <= 3.2.0
stopifnot(identical(!matrix(TRUE), matrix(FALSE)),
identical(!matrix(FALSE), matrix(TRUE)))
## was wrong for while in R 3.2.0 patched
## cummax(<integer>)
iNA <- NA_integer_
x <- c(iNA, 1L)
stopifnot(identical(cummin(x), c(iNA, iNA)),
identical(cummax(x), c(iNA, iNA)))
## an initial NA was not propagated in R <= 3.2.0
## summaryRprof failed for very short profile, PR#16395
profile <- tempfile()
writeLines(c(
'memory profiling: sample.interval=20000',
':145341:345360:13726384:0:"stdout"',
':208272:345360:19600000:0:"stdout"'), profile)
summaryRprof(filename = profile, memory = "both")
unlink(profile)
## failed when a matrix was downgraded to a vector
## option(OutDec = *) -- now gives a warning when not 1 character
op <- options(OutDec = ".", digits = 7, # <- default
warn = 2)# <- (unexpected) warnings become errors
stopifnot(identical("3.141593", fpi <- format(pi)))
options(OutDec = ",")
stopifnot(identical("3,141593", cpi <- format(pi)))
## warnings, but it "works" (for now):
tools::assertWarning(options(OutDec = ".1."))
stopifnot(identical("3.1.141593", format(pi)))
tools::assertWarning(options(OutDec = ""))
tools::assertWarning(stopifnot(identical("3141593", format(pi))))
options(op)# back to sanity
## No warnings in R versions <= 3.2.1
## format(*, decimal.mark=".") when OutDec != "." (PR#16411)
op <- options(OutDec = ",")
stopifnot(identical(fpi, format(pi, decimal.mark=".")))
## failed in R <= 3.2.1
## model.frame() removed ts attributes on original data (PR#16436)
orig <- class(EuStockMarkets)
mf <- model.frame(EuStockMarkets ~ 1, na.action=na.fail)
stopifnot(identical(orig, class(EuStockMarkets)))
## ts class lost in R <= 3.2.1
##
foo <- as.expression(1:3)
matrix(foo, 3, 3) # always worked
matrix(foo, 3, 3, byrow = TRUE)
## failed in R <= 3.1.2
## labels.dendrogram(), dendrapply(), etc -- see comment #15 of PR#15215 :
(D <- as.dendrogram(hclust(dist(cbind(setNames(c(0,1,4), LETTERS[1:3]))))))
stopifnot(
identical(labels(D), c("C", "A", "B")),
## has been used in "CRAN package space"
identical(suppressWarnings(dendrapply(D, labels)),
list("C", list("A", "B"), "C")))
## dendrapply(D, labels) failed in R-devel for a day or two
## poly() / polym() predict()ion
library(datasets)
alm <- lm(stack.loss ~ poly(Air.Flow, Water.Temp, degree=3), stackloss)
f20 <- fitted(alm)[1:20] # "correct" prediction values [1:20]
stopifnot(all.equal(unname(f20[1:4]), c(39.7703378, 39.7703378, 35.8251359, 21.5661761)),
all.equal(f20, predict(alm, stackloss) [1:20] , tolerance = 1e-14),
all.equal(f20, predict(alm, stackloss[1:20, ]), tolerance = 1e-14))
## the second prediction went off in R <= 3.2.1
## PR#16478
kkk <- c("a\tb", "3.14\tx")
z1 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c("numeric", "character"))
z2 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(b = "character", a = "numeric"))
stopifnot(identical(z1, z2))
z3 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(b = "character"))
stopifnot(identical(z1, z3))
z4 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(c = "integer", b = "character", a = "numeric"))
stopifnot(identical(z1, z4))
## z2 and z4 used positional matching (and failed) in R < 3.3.0.
## PR#16484
z <- regexpr("(.)", NA_character_, perl = TRUE)
stopifnot(is.na(attr(z, "capture.start")), is.na(attr(z, "capture.length")))
## Result was random integers in R <= 3.2.2.
## PR#14861
if(.Platform$OS.type == "unix") { # no 'ls /' on Windows
con <- pipe("ls /", open = "rt")
data <- readLines(con)
z <- close(con)
print(z)
stopifnot(identical(z, 0L))
}
## was NULL in R <= 3.2.2
## Sam Steingold: compiler::enableJIT(3) not working in ~/.Rprofile anymore
stopifnot(identical(topenv(baseenv()),
baseenv()))
## accidentally globalenv in R 3.2.[12] only
## widths of unknown Unicode characters
stopifnot(nchar("\u200b", "w") == 0)
## was -1 in R 3.2.2
| 30,965 | mit |
d2a208b782f2034359bc4b0bdc90e667df7a6f90 | rmunoz12/chromeR | R/chrome.R | # chrome.R
# Graphing chromosome IBD segments
# Copyright (c) 2015 Richard Munoz
#
# This file is part of chromeR.
#
# chromeR is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# chromeR is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# chromeR If not, see http://www.gnu.org/licenses/.
# library(ggplot2)
# library(dplyr)
# library(tidyr)
# Based on
# http://stackoverflow.com/questions/25995257/r-shift-values-in-single-column-of-dataframe-up
shift <- function(v, n) {
  # Shift the elements of a vector by `n` positions, padding with NA.
  #
  # v: vector (i.e. column of dataframe)
  # n: number of positions to shift by:
  #    negative shifts toward lower row numbers (elements move up)
  #    positive shifts toward higher row numbers (elements move down)
  #
  # Returns a vector the same length as `v`; vacated positions are NA.
  # Stops with an error if |n| exceeds length(v).
  v_len <- length(v)
  if (v_len < abs(n)) {
    stop("Shift length greater than vector length.")
  }
  if (v_len == 0 || n == 0) {
    # Nothing to move: empty input or zero shift returns the input as-is.
    return(v)
  }
  pad <- rep(NA, abs(n))
  if (n < 0) {
    # Drop the first |n| elements; pad the tail.
    # seq_len() is used (rather than seq()) so the index set is always valid.
    c(v[-seq_len(-n)], pad)
  } else if (v_len == n) {
    # Entire vector shifted out of view.
    pad
  } else {
    # Pad the head; keep the first (v_len - n) elements.
    c(pad, v[seq_len(v_len - n)])
  }
}
get_gap_ahead <- function(bp_end, b_next, n, n_next, c, c_next, cend) {
  # Length of the unshared region after a segment ending at `bp_end`.
  #
  # If the following row (b_next/n_next/c_next) belongs to the same key AND
  # the same chromosome, the gap runs to the start of that next segment.
  # Otherwise the gap runs to the chromosome end `cend`, or is NA when the
  # segment already reaches (or passes) the chromosome end.
  same_key <- !is.na(n_next) && n == n_next
  same_chrome <- !is.na(c_next) && c == c_next
  if (same_key && same_chrome) {
    b_next - bp_end
  } else if (bp_end < cend) {
    cend - bp_end
  } else {
    NA
  }
}
get_gap_behind <- function(bp_start, n, n_prev, c, c_prev) {
  # Length of the unshared region before a segment starting at `bp_start`.
  #
  # When the previous row belongs to the same key (or there is no previous
  # row), a leading gap of `bp_start` is reported only on entering a new
  # chromosome; within the same chromosome the preceding segment's
  # gap_ahead already covers it, so NA is returned.  When the key changes,
  # the gap from position 0 is reported (NA if bp_start is 0).
  same_key <- is.na(n_prev) || n == n_prev
  if (same_key) {
    new_chrome <- is.na(c_prev) || c != c_prev
    if (new_chrome) bp_start else NA
  } else if (bp_start > 0) {
    bp_start
  } else {
    NA
  }
}
#' @importFrom dplyr %>%
# For every key in `d`, append a placeholder row for each of chromosomes
# 1:22 that has no rows at all, so downstream plots show all 22 autosomes.
# Each placeholder is a single "gap_ahead" row with bp_start = 0 and
# len = chromeR::chrome_bp[chromosome] (presumably the chromosome's total
# bp length -- consistent with its use as `cend` in chrome_map_helper()).
fill_missing_chromes <- function(d) {
  # Takes input from chrome_map with columns:
  # 1: key
  # 2: chromosome
  # 3: bp_start
  # 4: type
  # 5: len
  # Unique keys present in the data (group_by + empty summarise).
  n <- d %>%
    dplyr::group_by_(names(d)[1]) %>%
    dplyr::summarise()
  # Full key x chromosome grid: 22 rows per key.
  # NOTE: `c` shadows base::c for the remainder of this function.
  c <- data.frame(key=rep(n[[1]], each=22),
                  chromosome=1:22,
                  stringsAsFactors=FALSE)
  # Key/chromosome combinations that actually occur in `d`.  `count` is the
  # constant 1 (length of a single column name) -- only its presence or
  # absence after the join below matters.
  y <- d %>%
    dplyr::group_by_(names(d)[1], names(d)[2]) %>%
    dplyr::summarise_(count=length(names(d)[2]))
  # Left join the full grid against observed combos; missing ones get NA.
  c <- merge(c, y, by.x=names(c)[1:2], by.y=names(y)[1:2], all.x=TRUE)
  c <- subset(c, is.na(c$count))
  if (dim(c)[1] > 0) {
    c$count <- NULL
    # Build placeholder columns (bp_start = 0, type = "gap_ahead",
    # len = chromosome length looked up by chromosome number) and append.
    c <- cbind(c,
               data.frame(0, "gap_ahead",
                          chromeR::chrome_bp[c[ , names(c)[2]]],
                          stringsAsFactors=FALSE))
    names(c) <- names(d)
    d <- rbind(d, c)
  }
  return(d)
}
#' @importFrom dplyr %>%
# Core computation behind chrome_map(): for each IBD segment compute its
# shared length plus the unshared gaps before/after it, then reshape to
# long format with one row per (key, chromosome, bp_start, type, len).
chrome_map_helper <- function(d, threshold) {
  # Expects the following column order, which is passed
  # by chrome_map():
  #
  # 1: key
  # 2: chromosome
  # 3: bp_start
  # 4: bp_end
  orig_cols <- names(d)
  # Use positional names (X1..X4) so the numeric column indices below are
  # stable regardless of the caller's column names.
  names(d) <- make.names(seq(ncol(d)))
  # Sort by key, chromosome, start position; the shift()-based lag/lead
  # columns below depend on this ordering.
  d <- d[order(d[1], d[2], d[3]), ]
  row.names(d) <- NULL
  # Chromosome end position (bp) for each row's chromosome.
  d <- cbind(d, cend=chromeR::chrome_bp[d[, 2]]) # 5
  # Previous/next row context (key, chromosome, next start) for the per-row
  # gap calculations.
  d$n_prev <- shift(d[ , 1], 1) # 6
  d$n_next <- shift(d[ , 1], -1) # 7
  d$c_prev <- shift(d[ , 2], 1) # 8
  d$c_next <- shift(d[ , 2], -1) # 9
  d$b_next <- shift(d[ , 3], -1) # 10
  # Unshared length after each segment (to next segment or chromosome end).
  d$gap_ahead <- mapply(d[ , 4], d$b_next, d[ , 1], d$n_next,
                        d[ , 2], d$c_next, d$cend,
                        FUN=get_gap_ahead) # 11
  # Unshared length before the first segment on each chromosome.
  d$gap_behind <- mapply(d[ , 3], d[ , 1], d$n_prev,
                         d[ , 2], d$c_prev,
                         FUN=get_gap_behind) # 12
  # Shared (IBD) length of the segment itself.
  d$share <- mapply(d[ , 4], d[ , 3],
                    FUN=function(end, start) end - start) # 13
  # Keep key/chromosome/bp_start plus the three computed length columns.
  d <- d[c(1:3, 11:13)]
  cols <- names(d)
  # Long format: one row per length type; drop NA (absent) gaps.
  m <- d %>% tidyr::gather_("type", "len", colnames(d)[4:6])
  m <- na.omit(m)
  # Order rows for one segment in genomic order: gap_behind, share,
  # gap_ahead (factor level order drives the arrange below).
  m$type <- factor(m$type, levels=c("gap_behind", "share", "gap_ahead"))
  m <- dplyr::arrange_(m, cols[1], cols[2], cols[3], "type")
  # Relabel small within-chromosome gaps as "merge": a gap_ahead row whose
  # next row is on the same chromosome and whose length is <= threshold.
  m$c_next <- shift(m[, cols[2]], -1)
  m$type <- as.character(m$type)
  m$type[m$type == "gap_ahead" &
         m[ , cols[2]] == m$c_next &
         m$len <= threshold] <- "merge"
  m$type <- factor(m$type, levels=c("gap_ahead", "gap_behind", "share", "merge"))
  m$c_next <- NULL
  # Pad chromosomes with no segments so every key covers chromosomes 1:22.
  m <- fill_missing_chromes(m)
  names(m)[1:3] <- orig_cols[1:3]
  return(m)
}
#' Calculate shared and not shared lengths.
#'
#' \code{chrome_map} returns a dataframe with shared and not shared regions.
#'
#' Based on a set of IBD segments, \code{chrome_map} calculates the lengths (in
#' basepairs) of the shared segments as well as any unshared areas in each
#' chromosome (i.e., gaps). In addition, the \code{threshold} parameter can be
#' used to identify areas that are small (which \code{chrome_map} will then
#' classify as "merge").
#'
#' @export
#'
#' @param data Dataframe with \code{key}, \code{chromosome}, \code{bp_start},
#' and \code{bp_end} columns. Each row describes an IBD segment.
#' @param key String identifying the column in \code{data} that uniquely
#' identifies the pair of individuals corresponding to each IBD segment.
#' @param chromosome String identifying the column in \code{data} that
#' indicates the chromosome of each IBD segment.
#' @param bp_start String identifying the column in \code{data} that
#' indicates the starting basepair of each IBD segment.
#' @param bp_end String identifying the column in \code{data} that
#' indicates the ending basepair of each IBD segment.
#' @param threshold Numeric value, greater than or equal to 0 for which a gap
#' should be classified as a "merge."
#'
#' @examples
#' data(test_ibd_segments)
#' chrome_map(test_ibd_segments, "result_id", "chromosome", "bp_start", "bp_end")
chrome_map <- function(data, key, chromosome, bp_start, bp_end,
                       threshold=2e5) {
  # Pull out the four required columns in the fixed positional order the
  # helper expects (key, chromosome, bp_start, bp_end), then delegate.
  segment_cols <- data[c(key, chromosome, bp_start, bp_end)]
  chrome_map_helper(segment_cols, threshold)
}
#' IBD segments plot
#'
#' Plots the output of \code{chrome_map}.
#'
#' @export
#'
#' @param d Dataframe output from \code{chrome_map}.
#' @param key String that identifies the column in \code{d} that uniquely
#' identifies the pair of individuals corresponding to each row.
#' @param chromosome String identifying the column in \code{d} that
#' indicates the chromosome of each IBD segment.
#' @param length String identifying the column in \code{d} that
#' indicates the length of each IBD segment, gap, or merge, calculated by
#' \code{chrome_map}.
#' @param type String identifying the column in \code{d} that
#' indicates the type of each value, calculated by \code{chrome_map}.
#' @param title String for the plot's title.
#'
#' @examples
#' data(test_ibd_segments)
#' df <- chrome_map(test_ibd_segments, "result_id", "chromosome", "bp_start", "bp_end")
#' chrome_plot(df, "result_id", "chromosome", "len", "type", "Test IBD Matches")
# Render chrome_map() output as stacked horizontal bars, one bar per
# chromosome, faceted by pair key. Returns the ggplot object.
chrome_plot <- function(d, key, chromosome, length, type, title) {
  # chromosome <- which(names(d) == chromosome)
  # length <- which(names(d) == length)
  # type <- which(names(d) == type)
  # Relabel factor levels for display; both gap levels collapse to
  # "Not shared". Assumes d$type carries the four levels produced by
  # chrome_map() in the order gap_ahead, gap_behind, share, merge --
  # TODO confirm if d was modified after chrome_map().
  levels(d$type) <- c("Not shared", "Not shared", "Shared", "Merge")
  p <- ggplot2::ggplot(d, ggplot2::aes_string(x=chromosome, y=length, fill=type)) +
    ggplot2::geom_bar(stat="identity", alpha=.95) +  # stack lengths per chromosome
    ggplot2::coord_flip() +                          # horizontal bars
    ggplot2::scale_x_reverse() +                     # reverse chromosome axis
    ggplot2::scale_y_continuous("base pair") +
    ggplot2::scale_fill_manual("", values=c("#e9a3c9", "#a1d76a", "#ffffb3")) +
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle=90, vjust=0.5, hjust=1)) +
    ggplot2::ggtitle(title) +
    ggplot2::facet_wrap(as.formula(paste0("~", key)))  # one panel per key
  return(p)
}
| 8,356 | gpl-3.0 |
e6b88a9458ee80e01c652bd346efc46bc75c83eb | junwucs/h2o-3 | h2o-r/tests/testdir_autoGen/runit_complexFilterTest_allyears2k_headers_38.R | ##
# Author: Autogenerated on 2013-12-18 17:01:19
# gitHash: 2581a0dfa12a51892283830529a5126ea49f0cb9
# SEED: 2481425483200553751
##
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
# Autogenerated munge test: exercises '<=' row filtering (with and without
# column subsetting) on the allyears2k_headers airlines dataset running on
# an H2O cluster. Log.info/locate/testEnd come from the sourced h2o test
# harness; no result values are asserted -- the test passes if no call errors.
complexFilterTest_allyears2k_headers_38 <- function() {
  Log.info("A munge-task R unit test on data <allyears2k_headers> testing the functional unit <['', '<=']> ")
  Log.info("Uploading allyears2k_headers")
  # Import the zipped CSV into the cluster as an H2O frame.
  hex <- h2o.importFile(locate("smalldata/airlines/allyears2k_headers.zip"), "rallyears2k_headers.hex")
  Log.info("Performing compound task ( ( hex[,c(\"Cancelled\")] <= 0.252596694009 )) on dataset <allyears2k_headers>")
  # Row filter on one column; all columns kept.
  filterHex <- hex[( ( hex[,c("Cancelled")] <= 0.252596694009 )) ,]
  Log.info("Performing compound task ( ( hex[,c(\"Diverted\")] <= 0.952276526236 )) on dataset allyears2k_headers, and also subsetting columns.")
  # Same filter style, keeping only a named subset of columns.
  filterHex <- hex[( ( hex[,c("Diverted")] <= 0.952276526236 )) , c("ActualElapsedTime","DepDelay","Diverted","DayOfWeek","Distance","TaxiIn","TaxiOut","CRSElapsedTime","ArrTime","CarrierDelay","CRSArrTime","DayofMonth","Year")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  # Repeat with the complementary column set; each assignment overwrites
  # filterHex, so only the last result survives.
  filterHex <- hex[( ( hex[,c("Diverted")] <= 0.952276526236 )) , c("ArrDelay","SecurityDelay","Month","DepTime","Origin","CancellationCode","FlightNum","LateAircraftDelay","WeatherDelay","Cancelled","TailNum","AirTime","IsArrDelayed","CRSDepTime","IsDepDelayed","Dest","UniqueCarrier","NASDelay")]
  testEnd()
}
doTest("compoundFilterTest_ on data allyears2k_headers unit= ['', '<=']", complexFilterTest_allyears2k_headers_38)
| 1,639 | apache-2.0 |
e6b88a9458ee80e01c652bd346efc46bc75c83eb | datachand/h2o-3 | h2o-r/tests/testdir_autoGen/runit_complexFilterTest_allyears2k_headers_38.R | ##
# Author: Autogenerated on 2013-12-18 17:01:19
# gitHash: 2581a0dfa12a51892283830529a5126ea49f0cb9
# SEED: 2481425483200553751
##
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
# Autogenerated munge test (byte-identical duplicate of the same test in the
# junwucs/h2o-3 copy above): exercises '<=' row filtering with and without
# column subsetting on the allyears2k_headers airlines dataset on an H2O
# cluster. No values are asserted -- the test passes if no call errors.
complexFilterTest_allyears2k_headers_38 <- function() {
  Log.info("A munge-task R unit test on data <allyears2k_headers> testing the functional unit <['', '<=']> ")
  Log.info("Uploading allyears2k_headers")
  # Import the zipped CSV into the cluster as an H2O frame.
  hex <- h2o.importFile(locate("smalldata/airlines/allyears2k_headers.zip"), "rallyears2k_headers.hex")
  Log.info("Performing compound task ( ( hex[,c(\"Cancelled\")] <= 0.252596694009 )) on dataset <allyears2k_headers>")
  # Row filter on one column; all columns kept.
  filterHex <- hex[( ( hex[,c("Cancelled")] <= 0.252596694009 )) ,]
  Log.info("Performing compound task ( ( hex[,c(\"Diverted\")] <= 0.952276526236 )) on dataset allyears2k_headers, and also subsetting columns.")
  # Same filter style, keeping only a named subset of columns.
  filterHex <- hex[( ( hex[,c("Diverted")] <= 0.952276526236 )) , c("ActualElapsedTime","DepDelay","Diverted","DayOfWeek","Distance","TaxiIn","TaxiOut","CRSElapsedTime","ArrTime","CarrierDelay","CRSArrTime","DayofMonth","Year")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  # Repeat with the complementary column set; each assignment overwrites
  # filterHex, so only the last result survives.
  filterHex <- hex[( ( hex[,c("Diverted")] <= 0.952276526236 )) , c("ArrDelay","SecurityDelay","Month","DepTime","Origin","CancellationCode","FlightNum","LateAircraftDelay","WeatherDelay","Cancelled","TailNum","AirTime","IsArrDelayed","CRSDepTime","IsDepDelayed","Dest","UniqueCarrier","NASDelay")]
  testEnd()
}
doTest("compoundFilterTest_ on data allyears2k_headers unit= ['', '<=']", complexFilterTest_allyears2k_headers_38)
| 1,639 | apache-2.0 |
a4f5cc4f3150be8ea7ef3fd04fbc2387c8f429d1 | compops/phd-thesis | example-philips/ex-philips-posteriors.R | ##############################################################################
##############################################################################
# Example 3.13
# Particle Metropolis-Hastings for Swedish inflation/unemployment
# Reproduces Figure 3.9
#
# Copyright (c) 2016 Johan Dahlin [ johan.dahlin (at) liu.se ]
# Distributed under the MIT license.
#
##############################################################################
##############################################################################
#setwd("~/projects/phd-thesis/thesis-figures/ex-philips")
# Setup plot colors
library("RColorBrewer")
plotColors = brewer.pal(6, "Dark2");   # qualitative Dark2 palette (6 colors)
# Settings for plotting
nMCMC = 15000;   # last trace iteration used (indexing burnin:nMCMC below)
burnin = 5000;   # iterations discarded before density estimation
# PMH posterior trace; columns th0..th3 are plotted below as phi, alpha,
# beta and sigma_v respectively.
dpmh <- read.table("pmh0/philips_th_posterior.csv",header=T,sep=",");
cairo_pdf("ex-philips-pmh0.pdf", height = 5, width = 8)
layout(matrix(1:4, 2, 2, byrow = TRUE))
par(mar=c(4,5,1,1))
# Histograms with kernel density estimates
hist(dpmh$th0[burnin:nMCMC],breaks=floor(sqrt(nMCMC-burnin)),main="",freq=F,col=rgb(t(col2rgb(plotColors[1]))/256,alpha=0.25),border=NA,xlab=expression(phi),ylab="posterior estimate",xlim=c(0.5,1),ylim=c(0,8));
abline(v=mean(dpmh$th0[burnin:nMCMC]),lty="dotted");
lines(density(dpmh$th0[burnin:nMCMC],kernel="e",from=0.5,to=1),lwd=2,col=plotColors[1])
# Prior for phi
grid = seq(0.5,1,0.01);
dist = dnorm(grid,0.8,0.10)
lines(grid,dist,lwd=1,col="grey30")
hist(dpmh$th1[burnin:nMCMC],breaks=floor(sqrt(nMCMC-burnin)),main="",freq=F,col=rgb(t(col2rgb(plotColors[2]))/256,alpha=0.25),border=NA,xlab=expression(alpha),ylab="posterior estimate",xlim=c(0.0,1.0),ylim=c(0,3));
abline(v=mean(dpmh$th1[burnin:nMCMC]),lty="dotted");
lines(density(dpmh$th1[burnin:nMCMC],kernel="e",from=0.0,to=1.0),lwd=2,col=plotColors[2])
# Prior for alpha
grid = seq(0.0,1,0.01);
dist = dnorm(grid,0.5,0.2)
lines(grid,dist,lwd=1,col="grey30")
hist(dpmh$th2[burnin:nMCMC],breaks=floor(sqrt(nMCMC-burnin)),main="",freq=F,col=rgb(t(col2rgb(plotColors[3]))/256,alpha=0.25),border=NA,xlab=expression(beta),ylab="posterior estimate",xlim=c(-0.05,0.05),ylim=c(0,100));
abline(v=mean(dpmh$th2[burnin:nMCMC]),lty="dotted");
lines(density(dpmh$th2[burnin:nMCMC],kernel="e",from=-0.04,to=0.04),lwd=2,col=plotColors[3])
# Prior for beta
grid = seq(-0.04,0.1,0.04);
dist = dnorm(grid,0,0.1)
lines(grid,dist,lwd=1,col="grey30")
hist(dpmh$th3[burnin:nMCMC],breaks=floor(sqrt(nMCMC-burnin)),main="",freq=F,col=rgb(t(col2rgb(plotColors[4]))/256,alpha=0.25),border=NA,xlab=expression(sigma[v]),ylab="posterior estimate",xlim=c(0.2,0.35),ylim=c(0,50));
abline(v=mean(dpmh$th3[burnin:nMCMC]),lty="dotted");
lines(density(dpmh$th3[burnin:nMCMC],kernel="e",from=0.2,to=0.35),lwd=2,col=plotColors[4])
# Prior for sigma_v
grid = seq(0.2,0.35,0.01);
dist = dgamma(grid,shape=2.0,rate=4.0)
lines(grid,dist,lwd=1,col="grey30")
dev.off() | 2,852 | gpl-3.0 |
ec7e8af056f78fb51239536c0c9610b9eeea7fa2 | pauEscarcia/BigData-Zacatecas | Rprogramming/SubsettingVector.R | #the [ operator always returns an object of the same class as the original.
#the [[ operator is used to extract elements of a list or a data frame
#The $ operator is used to extract elements of a list or data frame by literal name.
# Demo: subsetting a character vector with [.
x <- c ("a","b","c","d","a")
x[1]          # first element: "a"
x[2]          # second element: "b"
x[1:4]        # elements 1 through 4
x[c(1,2,3)]   # elements selected via an integer index vector
u <- x>"a"    # logical mask: TRUE where the element sorts after "a"
x[x>"a"]      # logical subsetting keeps only masked elements: "b" "c" "d"
| 310 | cc0-1.0 |
bfc083a45411c68190b80187e702923c14040a65 | GeoscienceAustralia/ptha | R/examples/austptha_template/DATA/HAZARD_POINTS/make_gridded_hazard_points.R | # Add some gridded hazard points
# Build a regular lon/lat grid of offshore points (depth < -50 m) lying near
# the existing irregular hazard points, and write them to
# OUTPUTS/GRIDDED_POINTS/gridded_points.csv.
library(raster)
# Patched DEM used to keep only sufficiently deep grid cells.
r1 = raster('../ELEV/merged_dem/merged_gebco_ga250_dem_patched.tif')
#
# Get other hazard points -- note the files have latitude before longitude,
# which we correct by swapping the first two columns.
#
haz_GA250_20 = read.table('OUTPUTS/OUTPUT_GA250_20m/haz_pts_W-180.txt',
    skip=1, header=FALSE)
haz_GA250_20[,1:2] = haz_GA250_20[,2:1]
haz_GA250_100 = read.table('OUTPUTS/OUTPUT_GA250_100m/haz_pts_W-180.txt',
    skip=1, header=FALSE)
haz_GA250_100[,1:2] = haz_GA250_100[,2:1]
haz_GA250_1000 = read.table('OUTPUTS/OUTPUT_GA250_1000m/haz_pts_W-180.txt',
    skip=1, header=FALSE)
haz_GA250_1000[,1:2] = haz_GA250_1000[,2:1]
haz_GEBCO2014_100 = read.table('OUTPUTS/OUTPUT_GEBCO2014_100m/haz_pts_W-180.txt',
    skip=1, header=FALSE)
haz_GEBCO2014_100[,1:2] = haz_GEBCO2014_100[,2:1]
# Points in GA250
existing_hp_GA250 = rbind(haz_GA250_20, haz_GA250_100, haz_GA250_1000)
# The above misses heard/macdonald, so let's add them back in
hp_keep = (haz_GEBCO2014_100[,1] > 70 & haz_GEBCO2014_100[,1] < 75 &
    haz_GEBCO2014_100[,2] > -55 & haz_GEBCO2014_100[,2] < -50)
existing_hp = rbind(existing_hp_GA250, haz_GEBCO2014_100[hp_keep,])
# We have too many points around the other 'GA250' regions, so lets remove them
library(rgdal)
removal_polygon = readOGR(dsn='INPUTS/GRID_CLIP_LAYER/grid_clip_layer.shp',
    layer='grid_clip_layer')
removal_polygon = removal_polygon@polygons[[1]]@Polygons[[1]]@coords
hp_remove = (point.in.polygon(existing_hp[,1], existing_hp[,2], removal_polygon[,1], removal_polygon[,2]) == 1)
# Logical subsetting here: `existing_hp[-which(hp_remove), ]` would drop
# EVERY row when no point falls inside the polygon, because which() then
# returns integer(0).
nearby_hp = existing_hp[!hp_remove, ]
#
# Make a regular lonlat grid, keeping only cells deeper than 50 m.
#
grid_dlonlat = 1/3
g1 = expand.grid(seq(-40, 320, by=grid_dlonlat), seq(-65, 72, by=grid_dlonlat))
g1_elev = extract(r1, g1)
g1_keep = which(g1_elev < -50 & (!is.na(g1_elev)))
g1 = g1[g1_keep,]
#
# Find distance between g1 and nearby_hp, in lon/lat units
#
library(FNN)
hp_nearest_g1 = get.knnx(as.matrix(nearby_hp[,1:2]), as.matrix(g1[,1:2]), k=1)
nearest_lon = nearby_hp[hp_nearest_g1$nn.index,1]
nearest_lat = nearby_hp[hp_nearest_g1$nn.index,2]
# Remove points with lon-lat distance > 1 degree from the nearest hazard point
dlonlat = 1
g1_keep = which( (abs(g1[,1] - nearest_lon) < dlonlat) & abs(g1[,2] - nearest_lat) < dlonlat)
g1 = g1[g1_keep,]
names(g1) = c('lon', 'lat')
# seq_len() is safe when g1 ends up with zero rows (1:nrow would give c(1, 0)).
g1 = cbind(g1, data.frame(id = seq_len(nrow(g1))))
dir.create('OUTPUTS/GRIDDED_POINTS', showWarnings=FALSE)
write.csv(g1, file='OUTPUTS/GRIDDED_POINTS/gridded_points.csv', row.names=FALSE)
| 2,471 | bsd-3-clause |
21d743e11769dae93ff21e6959b89a9641a13e46 | raysinensis/seq-for100mats | stats/KS.R | c0<-read.table("C:/Users/Rui/Desktop/nc0.csv",sep=",",header=TRUE)
# Expand each (Val, Freq) table into a raw sample and compare the two
# samples with a two-sample Kolmogorov-Smirnov test.
c0$Freq=c0$Freq/100       # rescale counts; assumes Freq/100 is a whole
                          # number (rep() repeat counts) -- TODO confirm
c0vec=rep(c0$Val,c0$Freq) # repeat each value Freq times -> raw sample
c2<-read.table("C:/Users/Rui/Desktop/nc2.csv",sep=",",header=TRUE)
c2$Freq=c2$Freq/100
c2vec=rep(c2$Val,c2$Freq)
cresult=ks.test(c0vec,c2vec,exact=TRUE)  # exact=TRUE requests the exact p-value
cresult$p.value           # print the p-value when run interactively
| 290 | mit |
ba7d778f75310c033f52cbce05d6f7a50c2f4e96 | distributions-io/gumbel-cdf | test/fixtures/test.number.R | options( digits = 16 )
# Generate a JSON test fixture of Gumbel CDF values for the given mu/beta.
library( jsonlite )
library( FAdist )
mu = 0      # location parameter
beta = 2    # scale parameter
x = c( -5, -2.5, 0, 2.5, 5 )
# NOTE(review): beta is passed before mu, which matches FAdist::pgumbel's
# positional order only if its signature is (q, scale, location) -- verify
# against the FAdist documentation.
y = pgumbel( x, beta, mu )
cat( y, sep = ",\n" )   # echo the expected values for manual inspection
# Serialize inputs and expected outputs for the test suite.
data = list(
    mu = mu,
    beta = beta,
    data = x,
    expected = y
)
write( toJSON( data, digits = 16, auto_unbox = TRUE ), "./test/fixtures/number.json" )
| 310 | mit |
7cf1791a7ffae5d694ec4fe843e69cfc1a5c2adb | SchlossLab/Sze_FollowUps_Microbiome_2017 | code/reduced_srn/srn_reduced_run_78_RF.R | ### Build the best lesion model possible
### Try XG-Boost, RF, Logit (GLM), C5.0, SVM
### Find the best based on Jenna Wiens suggestions on test and training
## Marc Sze
#Load needed libraries
source('code/functions.R')
loadLibs(c("dplyr", "caret","scales", "doMC"))
load("exploratory/srn_RF_reduced_features_model_setup.RData")
# Set i variable
i = 78
#################################################################################
# #
# #
# Model Training and Parameter Tuning #
# #
#################################################################################
# Call number of processors to use
registerDoMC(cores = 4)
#Set up lists to store the data
test_tune_list <- list()
test_predictions <- list()
#Get test data
# Rows selected by split column i form the training portion of this split.
train_test_data <- test_data_imps[eighty_twenty_splits[, i], ]
#Train the model
train_name <- paste("data_split", i, sep = "")
set.seed(3457)   # reproducible resampling inside caret::train
# NOTE(review): assign() also creates a duplicate global copy under
# train_name; only the list entry appears to be used afterwards.
test_tune_list[[paste("data_split", i, sep = "")]] <- assign(train_name,
      train(lesion ~ ., data = train_test_data,
            method = "rf",
            ntree = 2000,
            trControl = fitControl,
            metric = "ROC",
            verbose = FALSE))
# Score the held-out rows (complement of the split) with the tuned model.
test_test_data <- test_data[-eighty_twenty_splits[, i], ]
test_predictions[[paste("data_split", i, sep = "")]] <-
  predict(test_tune_list[[paste("data_split", i, sep = "")]],
          test_test_data)
# Save image with data and relevant parameters
save.image(paste("exploratory/srn_Reducedfeatures_RF_model_", i, ".RData", sep=""))
| 1,796 | mit |
afb27d1041ef57c40ba864e613151756dcf69203 | andrewcparnell/simms_course | ap_notes/revision_of_R/Revision_of_R.R | ############################################################################################################
# Section 0:
# First a reminder about RStudio, and getting help with ? and ??, = vs <-, and comments, and setting the working directory
############################################################################################################
############################################################################################################
# Section 1:
# Object and variable types -----------------------------------------------
# c, character, vector, matrix, list, data frame,
# Numeric, integer, factors, L notation
############################################################################################################
# 1.1 Vectors
x = c(2.3, -4.6, 5.7)
str(x) # A numeric vector
x = c(1, 4, 6) # Still numeric
str(x)
x = c(1L, 4L, 6L) # Force it to be an integer
str(x)
x = c(FALSE, TRUE, TRUE) # Boolean/logical
str(x)
# 1.2 Matrices
x = matrix(1:4, ncol = 2, nrow = 2) # An integer matrix
str(x)
x = matrix(runif(4), ncol = 2, nrow = 2) # A numeric matrix
str(x)
# 1.3 Indexing
x[1, ] # The first row of x
x[1, , drop = FALSE] # The first row, but keep as a matrix
x[2, 1] # The second row and the first column
str(x[1,])
str(x[1, , drop = FALSE])
# 1.4 Can add row names of column names if required
colnames(x)
colnames(x) = c('one', 'two')
x
# 1.5 Lists
x = list(p = 1L, q = runif(4), r = c(2 + 3i, 7 - 2i))
x$r
x[[2]] # Alternative ways to index
x[[2]][1:2]
x$q[3:4]
# 1.6 Factors
x = factor(rep(1:3, length = 12),
labels = c('low', 'medium', 'high'))
str(x)
x + 2 # Gives a warning
# An ordered factor
x = factor(rep(1:3, length = 12),
labels = c('low', 'medium', 'high'),
ordered = TRUE)
str(x)
# Change levels
levels(x) = c('small', 'medium', 'big')
# (Can make a difference with various plotting functions and some statistical models)
# 1.7 Data frames
x = data.frame(a = 1:4,
col2 = runif(4),
z = factor(rep(1:2,2),
labels = c('no','yes')))
str(x)
# A data frame is just a list but can also be referenced like a matrix
x$col2[1:3]
x[,2:3] # Second and third columns
# Can even have long names but referencing can get a big messy
names(x) = c('A long name', 'A very long name', 'An even longer name')
x[1:3,1:2]
x$`A very long name`
############################################################################################################
# Section 2:
# Writing functions -------------------------------------------------------
############################################################################################################
# 2.1 The most basic
sq = function(x) return(x^2)
sq(2)
# 2.2 Multiple arguments
pow = function(x, p) return(x^p)
pow(2,2)
# 2.3 Multiple lines
pow = function(x, p) {
return(x^p)
}
pow(2,2)
# Return is optional but highly recommended
# 2.4 Default arguments
pow = function(x, p = 2) {
return(x^p)
}
pow(2)
# 2.5 Can also name them if specifying in weird order
# (Note the move from the default value for p in 2.4)
pow(p = 3, x = 4)
# 2.6 Advisable to use invisible if you don't want it to print
pow = function(x, p = 2) {
invisible(x^p)
# x^p
}
pow(3)
y = pow(3)
# 2.7 If returning multiple objects use a list
pow = function(x, p = 2) {
return(list(arguments = c(x, p), output = x^p))
}
pow(2)
# 2.8 Most functions are automatically vectorised
pow(1:10)
# .. but you need to be a little bit careful
pow(x = 1:3, p = 1:3) # Works ok
pow(x = 1:3, p = 1:2) # Was that what you wanted? (Values are recycled)
############################################################################################################
# Section 3:
# Ifelse ------------------------------------------------------------------
############################################################################################################
# 3.1 if, ifelse
x = runif(1)
if(x < 0.5) print('x < 0.5!')
# 3.2 Can make these more complicated
x = runif(1)
if(x < 0.5) {
y = rnorm(1)
print(x*y)
}
# 3.3 Can have compound statements using & (and) and | (or)
x = runif(1)
y = runif(1)
if( (x + y < 1) | (x*y < 0.2) ) {
print(x + y)
print(x*y)
}
# Make sure to add in parentheses
# 3.4 Can add in else and else if statements
x = runif(1)
if(x < 0.5) {
print('x < 0.5')
} else {
print('x > 0.5')
}
# Or else-if statements
x = runif(1)
y = runif(1)
if(x < 0.5) {
print('x < 0.5')
} else if(y < 0.5) {
print('x > 0.5 and y < 0.5')
} else {
print('x > 0.5 and y > 0.5')
}
# 3.5 If you just have something very simple can use ifelse
x = runif(1)
ifelse(x < 0.5, 2, 1)
# ?ifelse
# You can also easily store this in a variable
y = ifelse(x < 0.5, 2, 1)
# It can also be extended to compound statements like if above
############################################################################################################
# Section 4:
# Loops -------------------------------------------------------------------
# for, repeat, while
############################################################################################################
# 4.1 for loops
for (i in 1:10) print(i)
# 4.2 Can expand with curly brackets
for (i in 1:5) {
current = i^2
cat('i =',current, '\n')
}
# 4.2 Can nest loops together
for (i in 1:5) {
for (j in 1:5) {
cat('i =', i, 'j =', j, ', i*j = ',i*j, '\n')
}
}
# 4.3 Doesn't have to be a sequence in the loop
x = runif(10)
for (i in x) {
print(i < 0.5) # Note the logical statement here - evaluates to T or F
}
# 4.4 While loops slightly different
x = 1
while(x < 10) {
print(x)
x = x + 1
}
# All of the same rules as for loops apply
# Don't forget to adjust the boolean statement in the loop so you don't get stuck
# 4.5 Perhaps even more basic is the repeat loop
x = 1
repeat {
print(x)
x = x + 1
if(x == 10) break
}
# Don't forget the break statement!
# I don't often use repeat loops
############################################################################################################
# Section 5:
# Plots -------------------------------------------------------------------
############################################################################################################
# 5.1 Use the prostate data
prostate = read.table('https://web.stanford.edu/~hastie/ElemStatLearn/datasets/prostate.data', header = TRUE)
# Look at the top of the data:
head(prostate)
# 5.2 A basic scatter plot
plot(prostate$age, prostate$lcavol)
with(prostate, plot(age, lcavol))
# Change the axis labels and add a title
plot(prostate$age, prostate$lcavol,
xlab = 'Age',
ylab = 'Log(cancer volume)',
main = 'Scatter plot')
# Use e.g. paste or paste0 to add in objects
var_1 = 'age'
var_2 = 'lcavol'
plot(prostate[,var_1], prostate[,var_2],
xlab = var_1,
ylab = var_2,
main = paste('Scatter plot of',var_1,'vs',var_2))
# Change the plotting type
plot(prostate[,var_1], prostate[,var_2],
pch = 19)
plot(prostate[,var_1], prostate[,var_2],
type = 'b') # Yuck
?pch # Look at different point types
# Changing colours
plot(prostate[,var_1], prostate[,var_2],
pch = 19, col = 'blue')
# Transparency
plot(prostate[,var_1], prostate[,var_2],
pch = 19, col = rgb(1, 0, 0, alpha = 0.2))
# Changing some options with par
par(mar = c(2, 2, 2, 2), las = 1) # Margins - see ?par for more
plot(prostate[,var_1], prostate[,var_2])
# Be careful - these options are persistent
par(mar = c(5, 4, 4, 2) + 0.1)
# 5.3 Add to plots with points and lines
plot(prostate[,var_1], prostate[,var_2], type = 'n')
points(prostate[,var_1], prostate[,var_2], col='red')
lines(prostate[,var_1], prostate[,var_2], col='green')
# Add a legend
legend('topleft', legend = c('males', 'females'),
pch = c(1, -1),
lty = c(-1, 1),
col = c('red', 'green'))
# 5.4 Histograms
hist(prostate$lweight)
# Better bins:
hist(prostate$lweight, breaks = 30)
hist(prostate$lweight, breaks = seq(2,5, by = 0.2))
# Better x axis
hist(prostate$lweight,
breaks = seq(2,5, by = 0.2),
xaxt = 'n')
axis(side = 1, at = seq(2, 5, by = 0.2))
# 5.5 Bar charts (called bar plots)
table(prostate$gleason)
barplot(table(prostate$gleason)) # No axis labels
barplot(table(prostate$gleason), horiz = TRUE)
# Boxplots
boxplot(prostate$lpsa)
# Careful - this does not give you what you might expect
boxplot(prostate$gleason, prostate$lpsa)
# Proper way
boxplot(prostate$lpsa ~ prostate$gleason) # this is a formula
# Lots of extra options regarding direction, style, shape, colour, etc.
# 5.6 pairs - matrix scatter plots
pairs(prostate)
############################################################################################################
# Section 6:
# Regression -------------------------------------------------------
############################################################################################################
# 6.1 Fit a simple regression model
?faithful
head(faithful)
lm(eruptions ~ waiting, data = faithful)
# Save as an object and then can manipulate
model_1 = lm(eruptions ~ waiting,
data = faithful)
summary(model_1)
# model_1 is now a list
str(model_1)
# Some of the useful things in it
model_1$coefficients
model_1$residuals
model_1$fitted.values
# Can also directly plot but a bit confusing
plot(model_1)
# Given the above though it's easy to plot the output
plot(faithful$waiting, faithful$eruptions)
lines(faithful$waiting, model_1$fitted.values)
# We can make a prediction for the
# duration of an eruption, given waiting time of 70
newdata1 = data.frame(waiting = 70)
predict(model_1, newdata1, type='response')
# 6.2 A binomial glm:
?mtcars
head(mtcars)
# Fit the model:
model_2 = glm(vs ~ disp,
family=binomial,
data=mtcars)
summary(model_2)
# Prediction for a car with a disp of 180 inches^3
newdata2 = data.frame(disp = 180)
predict(model_2, newdata2, type='response')
# Plotting this:
plot(mtcars$disp, mtcars$vs)
points(mtcars$disp, model_2$fitted.values, col='red')
# R's default plots:
plot(model_2)
# Adding in extra covariates
model_3 <- glm(vs ~ disp + mpg,
family=binomial,
data=mtcars)
summary(model_3)
# Add in interactions
model_4a <- glm(vs ~ disp + mpg + disp:mpg,
family=binomial,
data=mtcars)
model_4b <- glm(vs ~ disp*mpg,
family=binomial,
data=mtcars)
summary(model_4a)
# 6.3 A Poisson glm:
?quakes
head(quakes)
model_3 = glm(stations ~ mag,
family = poisson,
data=quakes)
summary(model_3)
# Prediction for number of stations reporting an earthquake of mag 4:
newdata3 = data.frame(mag = 4)
predict(model_3, newdata3, type='response')
# Plotting this:
plot(quakes$mag, quakes$stations)
points(quakes$mag, model_3$fitted.values, col='red', pch=19)
# R's default plots:
plot(model_3)
############################################################################################################
# Section 7:
# Opening and saving files ------------------------------------------------
# read/write.csv, read.table, load, save, scan, saving plots with e.g. pdf
############################################################################################################
# 7.1 Most useful function is probably read.csv
#prostate = read.csv('data/prostate.csv')
# More general version is read.table
prostate = read.table('https://web.stanford.edu/~hastie/ElemStatLearn/datasets/prostate.data')
# Useful options: header, sep, stringsAsFactors, skip, nrows
# Load in the first 50 rows but skip the first 10
prostate2 = read.table('https://web.stanford.edu/~hastie/ElemStatLearn/datasets/prostate.data',
nrows = 50,
skip = 10)
str(prostate2)
# 7.2 If you're dealing with people who only use R often safer to save directly as R format
save(prostate, file = 'my_prostate_file.rda')
save(prostate, file = 'path/to/my_prostate_file.rda')
# 7.3 Load it back in
load('path/to/my_prostate_file.rda')
# New: recommended way of saving objects
saveRDS(prostate, file = 'my_prostate_file.rds')
prostate = readRDS(file = 'my_prostate_file.rds')
# 7.4 To save plots, use e.g. the pdf or jpeg function, and follow with dev.off()
pdf(file = 'my_plot.pdf', width = 8, height = 6)
plot(prostate$lcavol, prostate$lpsa)
dev.off()
# Can also do this from plot window
############################################################################################################
# Section 8 (if time):
# Some other useful R functions ---------------------------------------------
############################################################################################################
# 8.1 library/require
library(MASS)
help(package = 'MASS')
# Note the difference
library(bla) # Error
require(bla) # Warning
# 8.2 Installing
install.packages('bla')
install.packages(c('bla', 'bla2'))
# 8.3 apply
x = matrix(runif(20), ncol = 2, nrow = 10)
apply(x, 1, sum) # rows
apply(x, 2, sum) # columns
# Lots of different versions for e.g. lists/dataframe (lapply), tapply (ragged arrays), sapply (simple version of lapply)
lapply(prostate, 'prod')
# Use unlist to collapse the list result into a named vector
unlist(lapply(prostate, 'prod'))
apply(prostate, 2, 'prod')
# 8.4 head/tail
head(prostate)
head(prostate, 5)
tail(prostate, 2)
# 8.5 aggregate
aggregate(prostate$lpsa, by = list(prostate$gleason), 'mean')
aggregate(prostate$lpsa,
by = list(prostate$gleason, prostate$age),
'length')
aggregate(prostate$lpsa,
by = list(prostate$gleason),
function(x) return(c(mean(x),sd(x))))
# 8.6 Find all the locations where it matches
good = 6
which(prostate$gleason == good)
# 8.7 Find the min/max
which.min(prostate$age) # Only gives the first match
# 8.8 Just find out if the value is in there
good %in% prostate$gleason
# 8.9 subset
prostate3 = subset(prostate, age < 60)
# 8.10 Or more complicated
prostate4 = subset(prostate, (age < 60) & (gleason == 6))
# 8.11 scale
par(mfrow=c(1,2))
hist(prostate$lpsa, breaks = 30)
hist(scale(prostate$lpsa), breaks = 30)
par(mfrow=c(1,1))
# Just subtract off the mean
scale(prostate$lpsa, scale = FALSE)
# 8.12 cat/paste/print
print("Hello world!")
cat("Hello world!\n") # Slightly neater - \n for a new line (also \t for tab, \r for remove)
word = 'World'
cat("Hello", word, '\n')
result = paste("Hello", word, '\n')
cat(result)
result = paste("Hello", word, '\n', sep = ',')
cat(result)
# 8.13 order
plot(prostate$age, prostate$lcavol, type = 'l') # Messy
ord = order(prostate$age)
plot(prostate$age[ord], prostate$lcavol[ord],
type = 'l')
| 14,631 | gpl-2.0 |
maxormin <- function(mat) {
  # Find the smallest and largest values in a numeric matrix (or vector),
  # skipping NA entries.
  #
  # Args:
  #   mat: numeric matrix or vector; may contain NAs.
  #
  # Returns:
  #   Named numeric vector c(min = ..., max = ...).
  #
  # Replaces the original element-by-element double loop (which used the
  # `is.na(x) == TRUE` anti-idiom and shadowed base min/max with local
  # variables) with vectorized subsetting + range(). Behavior is identical
  # for any input containing at least one non-NA value; for all-NA input
  # the original raised an obscure "object 'min' not found" error, whereas
  # this version fails with a clear message.
  vals <- mat[!is.na(mat)]
  if (length(vals) == 0) {
    stop("'mat' contains no non-NA values", call. = FALSE)
  }
  output <- range(vals)
  names(output) <- c("min", "max")
  return(output)
}
| 509 | mit |
16ee8fb805a413fcead7a5adbfae762576617853 | awohns/selection | analysis/polymap_rename.R | #Rename the populations as we want to use them for the
#polygenic analysis.
#Don't forget to create symlinks to the snp and geno files.
data <- read.table("~/data/v6/use/v61kg_europe2names.ind", as.is=TRUE)
polymap <- read.table("~/data/v6/use/polymap.txt", as.is=TRUE)
for(i in 1:NROW(polymap)){
data[data[,3]==polymap[i,1],3] <- polymap[i,2]
}
write.table(data, "~/data/v6/use/v61kg_polynames.ind", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
| 466 | mit |
06279bfde8ad1e3909ad44cab4feea0354cf9a6f | aronlindberg/hub_turkr | Hub_Miner/R/orgs.R | #' List all public organizations for an unauthenticated user. Lists private and public organizations for authenticated users.
#'
#' @param user the user
#'
#' @param ctx the githb context object
#'
#' @return the list of organizations
get.user.organizations <- function(user, ctx = get.github.context()) {
  # GET /users/:user/orgs -- public orgs for the given user.
  path <- c("users", user, "orgs")
  .api.get.request(ctx, path)
}
#' List all organizations for the current user.
#'
#' @param ctx the githb context object
#'
#' @return the list of organizations
get.my.organizations <- function(ctx = get.github.context()) {
  # GET /user/orgs -- orgs of the authenticated user.
  endpoint <- c("user", "orgs")
  .api.get.request(ctx, endpoint)
}
#' Get an organization
#'
#' @param org the organization name
#'
#' @param ctx the githb context object
#'
#' @return the organization information
get.organization <- function(org, ctx = get.github.context())
.api.get.request(ctx, c("orgs", org))
#' modify an organization
#'
#' @param org the organization name
#'
#' @param content the JSON object describing the organization. See http://developer.github.com/v3/orgs/#edit-an-organization for details.
#'
#' @param ctx the github context object
#'
#' @return the organization information
modify.organization <- function(org, content, ctx = get.github.context())
.api.patch.request(ctx, c("orgs", org), body=content)
################################################################################
# members
#' list all members from an organization. List all users who are members of an organization. A member is a user that belongs to at least 1 team in the organization. If the authenticated user is also an owner of this organization then both concealed and public members will be returned. If the requester is not an owner of the organization the query will be redirected to the public members list.
#'
#' @param org the organization name
#'
#' @param ctx the github context object
#'
#' @return the member list
get.organization.members <- function(org, ctx = get.github.context())
.api.get.request(ctx, c("orgs", org, "members"))
#' test if user is a member of an organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return TRUE if user is a member of organization
is.member.of.organization <- function(org, user, ctx = get.github.context())
.api.test.request(ctx, c("orgs", org, "members", user))
#' delete user from an organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return none
delete.member.from.organization <- function(org, user, ctx = get.github.context())
.api.delete.request(ctx, c("orgs", org, "members", user))
#' list public members of organization
#'
#' @param org the organization name
#'
#' @param ctx the github context object
#'
#' @return the list of public members
get.organization.public.members <- function(org, ctx = get.github.context())
.api.get.request(ctx, c("orgs", org, "public_members"))
#' test if user is a public member of organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return TRUE if user is a public member of organization
is.public.member.of.organization <- function(org, user, ctx = get.github.context())
.api.test.request(ctx, c("orgs", org, "public_members", user))
#' publicize user membership in an organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return None
publicize.member.of.organization <- function(org, user, ctx = get.github.context())
.api.put.request(ctx, c("orgs", org, "public_members", user), expect.code=204)
#' conceal user membership in an organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return None
conceal.member.of.organization <- function(org, user, ctx = get.github.context())
.api.delete.request(ctx, c("orgs", org, "public_members", user))
################################################################################
# teams
#' list teams from an organization
#'
#' @param org the organization name
#'
#' @param ctx the github context object
#'
#' @return the list of organization teams
get.organization.teams <- function(org, ctx = get.github.context())
.api.get.request(ctx, c("orgs", org, "teams"))
#' get team information
#'
#' @param id the id of the team
#'
#' @param ctx the github context object
#'
#' @return team information
get.team <- function(id, ctx = get.github.context())
.api.get.request(ctx, c("teams", id))
#' create a team in an organization
#'
#' @param org the organization name
#'
#' @param content the JSON object describing the team. See http://developer.github.com/v3/orgs/teams/#create-team for details.
#'
#' @param ctx the github context object
#'
#' @return team information
create.team <- function(org, content, ctx = get.github.context())
.api.post.request(ctx, c("orgs", org, "teams"), body=content)
#' edit a team in an organization
#'
#' @param id team id
#'
#' @param content the JSON object describing the team. See http://developer.github.com/v3/orgs/teams/#create-team for details.
#'
#' @param ctx the github context object
#'
#' @return team information
modify.team <- function(id, content, ctx = get.github.context())
.api.patch.request(ctx, c("teams", id), body=content)
#' delete a team in an organization
#'
#' @param id team id
#'
#' @param ctx the github context object
#'
#' @return none
delete.team <- function(id, ctx = get.github.context())
.api.delete.request(ctx, c("teams", id))
#' list team members
#'
#' @param id team id
#'
#' @param ctx the github context object
#'
#' @return team member list
get.team.members <- function(id, ctx = get.github.context())
.api.get.request(ctx, c("teams", id, "members"))
#' test if user is a member of team
#'
#' @param id team id
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return TRUE if user is member of team
is.member.in.team <- function(id, user, ctx = get.github.context())
.api.test.request(ctx, c("teams", id, "members", user))
#' add member to team
#'
#' @param id team id
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return the response of the PUT request (team membership state)
# NOTE(review): the previous roxygen said "TRUE if user is member of team",
# which looks copy-pasted from is.member.in.team; this function issues a PUT
# to the team "memberships" endpoint rather than performing a membership test.
add.member.to.team <- function(id, user, ctx = get.github.context())
.api.put.request(ctx, c("teams", id, "memberships", user))
#' remove member from team
#'
#' @param id team id
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return none
# NOTE(review): this hits the legacy "members" endpoint while
# add.member.to.team uses "memberships"; GitHub deprecated the former --
# confirm whether DELETE /teams/:id/memberships/:user should be used here.
delete.member.from.team <- function(id, user, ctx = get.github.context())
.api.delete.request(ctx, c("teams", id, "members", user))
#' list repositories of a team
#'
#' @param id team id
#'
#' @param ctx the github context object
#'
#' @return the repository list
get.team.repositories <- function(id, ctx = get.github.context())
.api.get.request(ctx, c("teams", id, "repos"))
#' add repository to team
#'
#' @param id team id
#'
#' @param owner the repo owner
#'
#' @param repo the repo name
#'
#' @param ctx the github context object
#'
#' @return none
add.repository.to.team <- function(id, owner, repo, ctx = get.github.context())
.api.put.request(ctx, c("teams", id, "repos", owner, repo), expect.code=204)
#' remove repository from team
#'
#' @param id team id
#'
#' @param owner the repo owner
#'
#' @param repo the repo name
#'
#' @param ctx the github context object
#'
#' @return none
delete.repository.from.team <- function(id, owner, repo, ctx = get.github.context())
.api.delete.request(ctx, c("teams", id, "repos", owner, repo))
| 7,650 | gpl-2.0 |
16ee8fb805a413fcead7a5adbfae762576617853 | mathii/europe_selection | analysis/polymap_rename.R | #Rename the populations as we want to use them for the
#polygenic analysis.
#Don't forget to create symlinks to the snp and geno files.
data <- read.table("~/data/v6/use/v61kg_europe2names.ind", as.is=TRUE)
polymap <- read.table("~/data/v6/use/polymap.txt", as.is=TRUE)
for(i in 1:NROW(polymap)){
data[data[,3]==polymap[i,1],3] <- polymap[i,2]
}
write.table(data, "~/data/v6/use/v61kg_polynames.ind", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
| 466 | mit |
06279bfde8ad1e3909ad44cab4feea0354cf9a6f | aronlindberg/rgithub | R/orgs.R | #' List all public organizations for an unauthenticated user. Lists private and public organizations for authenticated users.
#'
#' @param user the user
#'
#' @param ctx the githb context object
#'
#' @return the list of organizations
get.user.organizations <- function(user, ctx = get.github.context())
.api.get.request(ctx, c("users", user, "orgs"))
#' List all organizations for the current user.
#'
#' @param ctx the githb context object
#'
#' @return the list of organizations
get.my.organizations <- function(ctx = get.github.context())
.api.get.request(ctx, c("user", "orgs"))
#' Get an organization
#'
#' @param org the organization name
#'
#' @param ctx the githb context object
#'
#' @return the organization information
get.organization <- function(org, ctx = get.github.context())
.api.get.request(ctx, c("orgs", org))
#' modify an organization
#'
#' @param org the organization name
#'
#' @param content the JSON object describing the organization. See http://developer.github.com/v3/orgs/#edit-an-organization for details.
#'
#' @param ctx the github context object
#'
#' @return the organization information
modify.organization <- function(org, content, ctx = get.github.context())
.api.patch.request(ctx, c("orgs", org), body=content)
################################################################################
# members
#' list all members from an organization. List all users who are members of an organization. A member is a user that belongs to at least 1 team in the organization. If the authenticated user is also an owner of this organization then both concealed and public members will be returned. If the requester is not an owner of the organization the query will be redirected to the public members list.
#'
#' @param org the organization name
#'
#' @param ctx the github context object
#'
#' @return the member list
get.organization.members <- function(org, ctx = get.github.context())
.api.get.request(ctx, c("orgs", org, "members"))
#' test if user is a member of an organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return TRUE if user is a member of organization
is.member.of.organization <- function(org, user, ctx = get.github.context())
.api.test.request(ctx, c("orgs", org, "members", user))
#' delete user from an organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return none
delete.member.from.organization <- function(org, user, ctx = get.github.context())
.api.delete.request(ctx, c("orgs", org, "members", user))
#' list public members of organization
#'
#' @param org the organization name
#'
#' @param ctx the github context object
#'
#' @return the list of public members
get.organization.public.members <- function(org, ctx = get.github.context())
.api.get.request(ctx, c("orgs", org, "public_members"))
#' test if user is a public member of organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return TRUE if user is a public member of organization
is.public.member.of.organization <- function(org, user, ctx = get.github.context())
.api.test.request(ctx, c("orgs", org, "public_members", user))
#' publicize user membership in an organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return None
publicize.member.of.organization <- function(org, user, ctx = get.github.context())
.api.put.request(ctx, c("orgs", org, "public_members", user), expect.code=204)
#' conceal user membership in an organization
#'
#' @param org the organization name
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return None
conceal.member.of.organization <- function(org, user, ctx = get.github.context())
.api.delete.request(ctx, c("orgs", org, "public_members", user))
################################################################################
# teams
#' list teams from an organization
#'
#' @param org the organization name
#'
#' @param ctx the github context object
#'
#' @return the list of organization teams
get.organization.teams <- function(org, ctx = get.github.context())
.api.get.request(ctx, c("orgs", org, "teams"))
#' get team information
#'
#' @param id the id of the team
#'
#' @param ctx the github context object
#'
#' @return team information
get.team <- function(id, ctx = get.github.context())
.api.get.request(ctx, c("teams", id))
#' create a team in an organization
#'
#' @param org the organization name
#'
#' @param content the JSON object describing the team. See http://developer.github.com/v3/orgs/teams/#create-team for details.
#'
#' @param ctx the github context object
#'
#' @return team information
create.team <- function(org, content, ctx = get.github.context())
.api.post.request(ctx, c("orgs", org, "teams"), body=content)
#' edit a team in an organization
#'
#' @param id team id
#'
#' @param content the JSON object describing the team. See http://developer.github.com/v3/orgs/teams/#create-team for details.
#'
#' @param ctx the github context object
#'
#' @return team information
modify.team <- function(id, content, ctx = get.github.context())
.api.patch.request(ctx, c("teams", id), body=content)
#' delete a team in an organization
#'
#' @param id team id
#'
#' @param ctx the github context object
#'
#' @return none
delete.team <- function(id, ctx = get.github.context())
.api.delete.request(ctx, c("teams", id))
#' list team members
#'
#' @param id team id
#'
#' @param ctx the github context object
#'
#' @return team member list
get.team.members <- function(id, ctx = get.github.context())
.api.get.request(ctx, c("teams", id, "members"))
#' test if user is a member of team
#'
#' @param id team id
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return TRUE if user is member of team
is.member.in.team <- function(id, user, ctx = get.github.context())
.api.test.request(ctx, c("teams", id, "members", user))
#' add member to team
#'
#' @param id team id
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return the response of the PUT request (team membership state)
# NOTE(review): the previous roxygen said "TRUE if user is member of team",
# which looks copy-pasted from is.member.in.team; this function issues a PUT
# to the team "memberships" endpoint rather than performing a membership test.
add.member.to.team <- function(id, user, ctx = get.github.context())
.api.put.request(ctx, c("teams", id, "memberships", user))
#' remove member from team
#'
#' @param id team id
#'
#' @param user the user name
#'
#' @param ctx the github context object
#'
#' @return none
# NOTE(review): this hits the legacy "members" endpoint while
# add.member.to.team uses "memberships"; GitHub deprecated the former --
# confirm whether DELETE /teams/:id/memberships/:user should be used here.
delete.member.from.team <- function(id, user, ctx = get.github.context())
.api.delete.request(ctx, c("teams", id, "members", user))
#' list repositories of a team
#'
#' @param id team id
#'
#' @param ctx the github context object
#'
#' @return the repository list
get.team.repositories <- function(id, ctx = get.github.context())
.api.get.request(ctx, c("teams", id, "repos"))
#' add repository to team
#'
#' @param id team id
#'
#' @param owner the repo owner
#'
#' @param repo the repo name
#'
#' @param ctx the github context object
#'
#' @return none
add.repository.to.team <- function(id, owner, repo, ctx = get.github.context())
.api.put.request(ctx, c("teams", id, "repos", owner, repo), expect.code=204)
#' remove repository from team
#'
#' @param id team id
#'
#' @param owner the repo owner
#'
#' @param repo the repo name
#'
#' @param ctx the github context object
#'
#' @return none
delete.repository.from.team <- function(id, owner, repo, ctx = get.github.context())
.api.delete.request(ctx, c("teams", id, "repos", owner, repo))
| 7,650 | mit |
06279bfde8ad1e3909ad44cab4feea0354cf9a6f | akhmed1/rgithub | R/orgs.R | #' List all public organizations for an unauthenticated user. Lists private and public organizations for authenticated users.
#' List organizations for a user.
#'
#' Public organizations for an unauthenticated user; private and public
#' organizations for the authenticated user.
#'
#' @param user the user
#' @param ctx the github context object
#' @return the list of organizations
get.user.organizations <- function(user, ctx = get.github.context())
  .api.get.request(ctx, c("users", user, "orgs"))
#' List all organizations for the current user.
#'
#' @param ctx the github context object
#' @return the list of organizations
get.my.organizations <- function(ctx = get.github.context())
  .api.get.request(ctx, c("user", "orgs"))
#' Get an organization.
#'
#' @param org the organization name
#' @param ctx the github context object
#' @return the organization information
get.organization <- function(org, ctx = get.github.context())
  .api.get.request(ctx, c("orgs", org))
#' Modify an organization.
#'
#' @param org the organization name
#' @param content the JSON object describing the organization. See
#'   http://developer.github.com/v3/orgs/#edit-an-organization for details.
#' @param ctx the github context object
#' @return the organization information
modify.organization <- function(org, content, ctx = get.github.context())
  .api.patch.request(ctx, c("orgs", org), body=content)
################################################################################
# members
#' List all members of an organization.
#'
#' A member is a user who belongs to at least one team in the organization.
#' If the authenticated user is an owner of the organization, both concealed
#' and public members are returned; otherwise the query is redirected to the
#' public members list.
#'
#' @param org the organization name
#' @param ctx the github context object
#' @return the member list
get.organization.members <- function(org, ctx = get.github.context())
  .api.get.request(ctx, c("orgs", org, "members"))
#' Test whether a user is a member of an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return \code{TRUE} if the user is a member of the organization
is.member.of.organization <- function(org, user, ctx = get.github.context())
  .api.test.request(ctx, c("orgs", org, "members", user))
#' Delete a user from an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return none
delete.member.from.organization <- function(org, user, ctx = get.github.context())
  .api.delete.request(ctx, c("orgs", org, "members", user))
#' List the public members of an organization.
#'
#' @param org the organization name
#' @param ctx the github context object
#' @return the list of public members
get.organization.public.members <- function(org, ctx = get.github.context())
  .api.get.request(ctx, c("orgs", org, "public_members"))
#' Test whether a user is a public member of an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return \code{TRUE} if the user is a public member of the organization
is.public.member.of.organization <- function(org, user, ctx = get.github.context())
  .api.test.request(ctx, c("orgs", org, "public_members", user))
#' Publicize a user's membership in an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return none (the API answers 204 No Content on success)
publicize.member.of.organization <- function(org, user, ctx = get.github.context())
  .api.put.request(ctx, c("orgs", org, "public_members", user), expect.code=204)
#' Conceal a user's membership in an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return none
conceal.member.of.organization <- function(org, user, ctx = get.github.context())
  .api.delete.request(ctx, c("orgs", org, "public_members", user))
################################################################################
# teams
#' List the teams of an organization.
#'
#' @param org the organization name
#' @param ctx the github context object
#' @return the list of organization teams
get.organization.teams <- function(org, ctx = get.github.context())
  .api.get.request(ctx, c("orgs", org, "teams"))
#' Get team information.
#'
#' @param id the id of the team
#' @param ctx the github context object
#' @return team information
get.team <- function(id, ctx = get.github.context())
  .api.get.request(ctx, c("teams", id))
#' Create a team in an organization.
#'
#' @param org the organization name
#' @param content the JSON object describing the team. See
#'   http://developer.github.com/v3/orgs/teams/#create-team for details.
#' @param ctx the github context object
#' @return team information
create.team <- function(org, content, ctx = get.github.context())
  .api.post.request(ctx, c("orgs", org, "teams"), body=content)
#' Edit a team.
#'
#' @param id team id
#' @param content the JSON object describing the team. See
#'   http://developer.github.com/v3/orgs/teams/#create-team for details.
#' @param ctx the github context object
#' @return team information
modify.team <- function(id, content, ctx = get.github.context())
  .api.patch.request(ctx, c("teams", id), body=content)
#' Delete a team.
#'
#' @param id team id
#' @param ctx the github context object
#' @return none
delete.team <- function(id, ctx = get.github.context())
  .api.delete.request(ctx, c("teams", id))
#' List team members.
#'
#' @param id team id
#' @param ctx the github context object
#' @return team member list
get.team.members <- function(id, ctx = get.github.context())
  .api.get.request(ctx, c("teams", id, "members"))
#' Test whether a user is a member of a team.
#'
#' @param id team id
#' @param user the user name
#' @param ctx the github context object
#' @return \code{TRUE} if the user is a member of the team
is.member.in.team <- function(id, user, ctx = get.github.context())
  .api.test.request(ctx, c("teams", id, "members", user))
#' Add a member to a team.
#'
#' Issues a PUT against the team memberships endpoint.
#' NOTE(review): earlier docs said "TRUE if user is member of team", but this
#' call adds the member rather than testing membership.
#'
#' @param id team id
#' @param user the user name
#' @param ctx the github context object
#' @return the API response for the membership request
add.member.to.team <- function(id, user, ctx = get.github.context())
  .api.put.request(ctx, c("teams", id, "memberships", user))
#' Remove a member from a team.
#'
#' @param id team id
#' @param user the user name
#' @param ctx the github context object
#' @return none
delete.member.from.team <- function(id, user, ctx = get.github.context())
  .api.delete.request(ctx, c("teams", id, "members", user))
#' List the repositories of a team.
#'
#' @param id team id
#' @param ctx the github context object
#' @return the repository list
get.team.repositories <- function(id, ctx = get.github.context())
  .api.get.request(ctx, c("teams", id, "repos"))
#' Add a repository to a team.
#'
#' @param id team id
#' @param owner the repo owner
#' @param repo the repo name
#' @param ctx the github context object
#' @return none (the API answers 204 No Content on success)
add.repository.to.team <- function(id, owner, repo, ctx = get.github.context())
  .api.put.request(ctx, c("teams", id, "repos", owner, repo), expect.code=204)
#' Remove a repository from a team.
#'
#' @param id team id
#' @param owner the repo owner
#' @param repo the repo name
#' @param ctx the github context object
#' @return none
delete.repository.from.team <- function(id, owner, repo, ctx = get.github.context())
  .api.delete.request(ctx, c("teams", id, "repos", owner, repo))
| 7,650 | mit |
556b518abb4b87ba596acc66fd319b09491f246c | adib/BankConverter | csv-ocbc.R | #!/usr/bin/env Rscript --vanilla
#
# Reformat OCBC 360 Saving Account pseudo-CSV export into a proper CSV file that is ready for import into a personal finance software.
#
# OCBC's CSV file format starts with five lines of header information containing:
# - the account number
# - account balances
# - header information
#
# Furthermore a record may span more than one line in the `Description` column. Apparently the first row of the
# `Description` column is the transaction type whereas the second row contains free-text comments about the transaction.
# However not all records are expressed in two lines; those without a free-text field has only one line in the CSV dump.
#
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(lubridate))
Sys.setenv(TZ='Asia/Singapore')
# The date format to export.
dateFormat <- "%Y-%m-%d"
#
# Read the OCBC 360 CSV export and return a cleaned data frame.
#
# inputFileName: path to the raw export. The file has five header lines,
#   then records; a record's free-text continuation appears as an extra row
#   whose Transaction.date column is empty (see the file header comment).
#
# Returns one row per transaction, with dates parsed, thousands separators
# stripped from the amounts, and any continuation text joined back onto its
# record in the Description.Extended column (NA when there is none).
readTable <- function(inputFileName) {
  # The first five rows are header information, we skip it
  inputTable <- read.csv(inputFileName, skip = 5)
  # seq_len() is safe for an empty table, unlike the 1:nrow(...) idiom,
  # which would yield c(1, 0) and fail the column assignment.
  inputTable$row.number <- seq_len(nrow(inputTable))
  # Rows that carry a transaction date are the actual records.
  mainRecords <- filter(inputTable, Transaction.date != "") %>%
    rename(
      Withdrawals = Withdrawals..SGD.,
      Deposits = Deposits..SGD.
    ) %>%
    mutate(
      Transaction.date = dmy(Transaction.date),
      Value.date = dmy(Value.date),
      Withdrawals = as.numeric(gsub(",", "", Withdrawals)),
      Deposits = as.numeric(gsub(",", "", Deposits))
    )
  # Rows without a date hold the free-text continuation of the row above;
  # mainRow points each continuation back at its parent record.
  suppRecords <- filter(inputTable, Transaction.date == "")
  left_join(mainRecords,
    mutate(suppRecords, mainRow = row.number - 1) %>%
      rename(Description.Extended = Description) %>%
      select(mainRow, Description.Extended),
    by = c("row.number" = "mainRow")
  )
}
# Write the cleaned transaction table as a proper CSV file.
#
# resultTable: data frame produced by readTable().
# outputFileName: destination path for the formatted CSV.
#
# Rows are sorted by transaction date; Description and Description.Extended
# are merged (tab-separated) into a single Memo column.
writeTable <- function(resultTable, outputFileName) {
  formatDate <- function(dv) {
    # BUG FIX: format.Date's argument is `format=`; the original passed
    # `fmt=`, which was silently swallowed by `...` and ignored, so a
    # customized dateFormat would never have taken effect.
    format(dv, format = dateFormat)
  }
  formatNumber <- function(nv) {
    # format() renders missing amounts as the string "NA"; blank them out.
    sub("NA", "", format(nv, trim = TRUE))
  }
  formattedTable <- arrange(resultTable, Transaction.date) %>%
    mutate(
      Transaction.date = formatDate(Transaction.date),
      Value.date = formatDate(Value.date),
      Withdrawals = formatNumber(Withdrawals),
      Deposits = formatNumber(Deposits),
      Memo = paste(Description, Description.Extended, sep = "\t")
    ) %>%
    select(Transaction.date, Value.date, Memo, Withdrawals, Deposits)
  write.csv(formattedTable, outputFileName, row.names = FALSE)
}
#
# Main: validate the command line, then convert input to output.
#
args <- commandArgs(trailingOnly = TRUE)
if (length(args) != 2) {
  # Recover the script path from the full command line for the usage message.
  initial.options <- commandArgs(trailingOnly = FALSE)
  file.arg.name <- "--file="
  script.name <- sub(file.arg.name, "", initial.options[grep(file.arg.name, initial.options)])
  cat("Reformat OCBC 360 Saving Account pseudo-CSV export into a proper CSV file.\nUsage:\n")
  cat(paste("  ", script.name, "{input-file} {output-file}\nWhere:\n",
            "{input-file}\tThe file obtained from OCBC 360's `Export to CSV` function.\n",
            "{output-file}\tWhere to write the properly-formatted CSV output file.\n"), sep = "")
  quit(save = "no", status = 3)
}
inputFileName <- args[1]
outputFileName <- args[2]
if (!file.exists(inputFileName)) {
  cat(paste("Input file '", inputFileName, "' not found.\n", sep = ""))
  quit(save = "no", status = 3)
}
if (file.exists(outputFileName)) {
  # BUG FIX: this message previously said "Input file" although it refers to
  # the output file; refuse to clobber an existing output.
  cat(paste("Output file '", outputFileName, "' already exists.\n", sep = ""))
  quit(save = "no", status = 3)
}
# Process
writeTable(readTable(inputFileName), outputFileName)
quit(save = "no", status = 0)
| 3,788 | bsd-2-clause |
06279bfde8ad1e3909ad44cab4feea0354cf9a6f | bilakhiaricky/hub_turkr | Hub_Miner/R/orgs.R | #' List all public organizations for an unauthenticated user. Lists private and public organizations for authenticated users.
#' List organizations for a user.
#'
#' Public organizations for an unauthenticated user; private and public
#' organizations for the authenticated user.
#'
#' @param user the user
#' @param ctx the github context object
#' @return the list of organizations
get.user.organizations <- function(user, ctx = get.github.context())
  .api.get.request(ctx, c("users", user, "orgs"))
#' List all organizations for the current user.
#'
#' @param ctx the github context object
#' @return the list of organizations
get.my.organizations <- function(ctx = get.github.context())
  .api.get.request(ctx, c("user", "orgs"))
#' Get an organization.
#'
#' @param org the organization name
#' @param ctx the github context object
#' @return the organization information
get.organization <- function(org, ctx = get.github.context())
  .api.get.request(ctx, c("orgs", org))
#' Modify an organization.
#'
#' @param org the organization name
#' @param content the JSON object describing the organization. See
#'   http://developer.github.com/v3/orgs/#edit-an-organization for details.
#' @param ctx the github context object
#' @return the organization information
modify.organization <- function(org, content, ctx = get.github.context())
  .api.patch.request(ctx, c("orgs", org), body=content)
################################################################################
# members
#' List all members of an organization.
#'
#' A member is a user who belongs to at least one team in the organization.
#' If the authenticated user is an owner of the organization, both concealed
#' and public members are returned; otherwise the query is redirected to the
#' public members list.
#'
#' @param org the organization name
#' @param ctx the github context object
#' @return the member list
get.organization.members <- function(org, ctx = get.github.context())
  .api.get.request(ctx, c("orgs", org, "members"))
#' Test whether a user is a member of an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return \code{TRUE} if the user is a member of the organization
is.member.of.organization <- function(org, user, ctx = get.github.context())
  .api.test.request(ctx, c("orgs", org, "members", user))
#' Delete a user from an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return none
delete.member.from.organization <- function(org, user, ctx = get.github.context())
  .api.delete.request(ctx, c("orgs", org, "members", user))
#' List the public members of an organization.
#'
#' @param org the organization name
#' @param ctx the github context object
#' @return the list of public members
get.organization.public.members <- function(org, ctx = get.github.context())
  .api.get.request(ctx, c("orgs", org, "public_members"))
#' Test whether a user is a public member of an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return \code{TRUE} if the user is a public member of the organization
is.public.member.of.organization <- function(org, user, ctx = get.github.context())
  .api.test.request(ctx, c("orgs", org, "public_members", user))
#' Publicize a user's membership in an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return none (the API answers 204 No Content on success)
publicize.member.of.organization <- function(org, user, ctx = get.github.context())
  .api.put.request(ctx, c("orgs", org, "public_members", user), expect.code=204)
#' Conceal a user's membership in an organization.
#'
#' @param org the organization name
#' @param user the user name
#' @param ctx the github context object
#' @return none
conceal.member.of.organization <- function(org, user, ctx = get.github.context())
  .api.delete.request(ctx, c("orgs", org, "public_members", user))
################################################################################
# teams
#' List the teams of an organization.
#'
#' @param org the organization name
#' @param ctx the github context object
#' @return the list of organization teams
get.organization.teams <- function(org, ctx = get.github.context())
  .api.get.request(ctx, c("orgs", org, "teams"))
#' Get team information.
#'
#' @param id the id of the team
#' @param ctx the github context object
#' @return team information
get.team <- function(id, ctx = get.github.context())
  .api.get.request(ctx, c("teams", id))
#' Create a team in an organization.
#'
#' @param org the organization name
#' @param content the JSON object describing the team. See
#'   http://developer.github.com/v3/orgs/teams/#create-team for details.
#' @param ctx the github context object
#' @return team information
create.team <- function(org, content, ctx = get.github.context())
  .api.post.request(ctx, c("orgs", org, "teams"), body=content)
#' Edit a team.
#'
#' @param id team id
#' @param content the JSON object describing the team. See
#'   http://developer.github.com/v3/orgs/teams/#create-team for details.
#' @param ctx the github context object
#' @return team information
modify.team <- function(id, content, ctx = get.github.context())
  .api.patch.request(ctx, c("teams", id), body=content)
#' Delete a team.
#'
#' @param id team id
#' @param ctx the github context object
#' @return none
delete.team <- function(id, ctx = get.github.context())
  .api.delete.request(ctx, c("teams", id))
#' List team members.
#'
#' @param id team id
#' @param ctx the github context object
#' @return team member list
get.team.members <- function(id, ctx = get.github.context())
  .api.get.request(ctx, c("teams", id, "members"))
#' Test whether a user is a member of a team.
#'
#' @param id team id
#' @param user the user name
#' @param ctx the github context object
#' @return \code{TRUE} if the user is a member of the team
is.member.in.team <- function(id, user, ctx = get.github.context())
  .api.test.request(ctx, c("teams", id, "members", user))
#' Add a member to a team.
#'
#' Issues a PUT against the team memberships endpoint.
#' NOTE(review): earlier docs said "TRUE if user is member of team", but this
#' call adds the member rather than testing membership.
#'
#' @param id team id
#' @param user the user name
#' @param ctx the github context object
#' @return the API response for the membership request
add.member.to.team <- function(id, user, ctx = get.github.context())
  .api.put.request(ctx, c("teams", id, "memberships", user))
#' Remove a member from a team.
#'
#' @param id team id
#' @param user the user name
#' @param ctx the github context object
#' @return none
delete.member.from.team <- function(id, user, ctx = get.github.context())
  .api.delete.request(ctx, c("teams", id, "members", user))
#' List the repositories of a team.
#'
#' @param id team id
#' @param ctx the github context object
#' @return the repository list
get.team.repositories <- function(id, ctx = get.github.context())
  .api.get.request(ctx, c("teams", id, "repos"))
#' Add a repository to a team.
#'
#' @param id team id
#' @param owner the repo owner
#' @param repo the repo name
#' @param ctx the github context object
#' @return none (the API answers 204 No Content on success)
add.repository.to.team <- function(id, owner, repo, ctx = get.github.context())
  .api.put.request(ctx, c("teams", id, "repos", owner, repo), expect.code=204)
#' Remove a repository from a team.
#'
#' @param id team id
#' @param owner the repo owner
#' @param repo the repo name
#' @param ctx the github context object
#' @return none
delete.repository.from.team <- function(id, owner, repo, ctx = get.github.context())
  .api.delete.request(ctx, c("teams", id, "repos", owner, repo))
| 7,650 | gpl-2.0 |
ea30cb4bba62029f0a9ad6d5bdeee9ba0ec700a8 | Myasuka/systemml | system-ml/src/test/scripts/functions/aggregate/RowMins.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
# Install matrixStats on demand. requireNamespace() is the recommended way
# to test whether a package is installed; scanning installed.packages() is
# slow and fragile.
if (!requireNamespace("matrixStats", quietly = TRUE)) {
  install.packages("matrixStats")
}
library("Matrix")
library("matrixStats")
# args[1]: directory holding the input matrix A.mtx; args[2]: output directory.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
# Row-wise minima of A; B is written out by the writeMM call that follows.
B <- rowMins(A);
writeMM(as(B, "CsparseMatrix"), paste(args[2], "B", sep="")); | 1,046 | apache-2.0 |
ea30cb4bba62029f0a9ad6d5bdeee9ba0ec700a8 | aloknsingh/systemml | system-ml/src/test/scripts/functions/aggregate/RowMins.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
# Install matrixStats on demand. requireNamespace() is the recommended way
# to test whether a package is installed; scanning installed.packages() is
# slow and fragile.
if (!requireNamespace("matrixStats", quietly = TRUE)) {
  install.packages("matrixStats")
}
library("Matrix")
library("matrixStats")
# args[1]: directory holding the input matrix A.mtx; args[2]: output directory.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
# Row-wise minima of A; B is written out by the writeMM call that follows.
B <- rowMins(A);
writeMM(as(B, "CsparseMatrix"), paste(args[2], "B", sep="")); | 1,046 | apache-2.0 |
ea30cb4bba62029f0a9ad6d5bdeee9ba0ec700a8 | ckadner/systemml | system-ml/src/test/scripts/functions/aggregate/RowMins.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
# Install matrixStats on demand. requireNamespace() is the recommended way
# to test whether a package is installed; scanning installed.packages() is
# slow and fragile.
if (!requireNamespace("matrixStats", quietly = TRUE)) {
  install.packages("matrixStats")
}
library("Matrix")
library("matrixStats")
# args[1]: directory holding the input matrix A.mtx; args[2]: output directory.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
# Row-wise minima of A; B is written out by the writeMM call that follows.
B <- rowMins(A);
writeMM(as(B, "CsparseMatrix"), paste(args[2], "B", sep="")); | 1,046 | apache-2.0 |
ea30cb4bba62029f0a9ad6d5bdeee9ba0ec700a8 | fmakari/systemml | system-ml/src/test/scripts/functions/aggregate/RowMins.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
# Install matrixStats on demand. requireNamespace() is the recommended way
# to test whether a package is installed; scanning installed.packages() is
# slow and fragile.
if (!requireNamespace("matrixStats", quietly = TRUE)) {
  install.packages("matrixStats")
}
library("Matrix")
library("matrixStats")
# args[1]: directory holding the input matrix A.mtx; args[2]: output directory.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
# Row-wise minima of A; B is written out by the writeMM call that follows.
B <- rowMins(A);
writeMM(as(B, "CsparseMatrix"), paste(args[2], "B", sep="")); | 1,046 | apache-2.0 |
ea30cb4bba62029f0a9ad6d5bdeee9ba0ec700a8 | wjuncdl/systemml | system-ml/src/test/scripts/functions/aggregate/RowMins.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
# Install matrixStats on demand. requireNamespace() is the recommended way
# to test whether a package is installed; scanning installed.packages() is
# slow and fragile.
if (!requireNamespace("matrixStats", quietly = TRUE)) {
  install.packages("matrixStats")
}
library("Matrix")
library("matrixStats")
# args[1]: directory holding the input matrix A.mtx; args[2]: output directory.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
# Row-wise minima of A; B is written out by the writeMM call that follows.
B <- rowMins(A);
writeMM(as(B, "CsparseMatrix"), paste(args[2], "B", sep="")); | 1,046 | apache-2.0 |
ea30cb4bba62029f0a9ad6d5bdeee9ba0ec700a8 | dusenberrymw/systemml_old | system-ml/src/test/scripts/functions/aggregate/RowMins.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
# Install matrixStats on demand. requireNamespace() is the recommended way
# to test whether a package is installed; scanning installed.packages() is
# slow and fragile.
if (!requireNamespace("matrixStats", quietly = TRUE)) {
  install.packages("matrixStats")
}
library("Matrix")
library("matrixStats")
# args[1]: directory holding the input matrix A.mtx; args[2]: output directory.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
# Row-wise minima of A; B is written out by the writeMM call that follows.
B <- rowMins(A);
writeMM(as(B, "CsparseMatrix"), paste(args[2], "B", sep="")); | 1,046 | apache-2.0 |
33a053fce99e7bf650e5be8af20810388e143ff5 | cxxr-devel/cxxr-svn-mirror | src/library/Recommended/rpart/R/rpartcallback.R | ## This routine sets up the callback code for user-written split
## routines in rpart
##
rpartcallback <- function(mlist, nobs, init)
{
if (length(mlist) < 3L)
stop("User written methods must have 3 functions")
if (!is.function(mlist$init))
stop("User written method does not contain an 'init' function")
if (!is.function(mlist$split))
stop("User written method does not contain a 'split' function")
if (!is.function(mlist$eval))
stop("User written method does not contain an 'eval' function")
user.eval <- mlist$eval
user.split <- mlist$split
numresp <- init$numresp
numy <- init$numy
parms <- init$parms
##
## expr2 is an expression that will call the user "evaluation"
## function, and check that what comes back is valid
## expr1 does the same for the user "split" function
##
## For speed in the C interface, yback, xback, and wback are
## fixed S vectors of a fixed size, and nback tells us how
## much of the vector is actually being used on this particular
## callback.
##
if (numy == 1L) {
expr2 <- quote({
temp <- user.eval(yback[1:nback], wback[1:nback], parms)
if (length(temp$label) != numresp)
stop("User 'eval' function returned invalid label")
if (length(temp$deviance) != 1L)
stop("User 'eval' function returned invalid deviance")
as.numeric(as.vector(c(temp$deviance, temp$label)))
})
expr1 <- quote({
if (nback < 0L) { #categorical variable
n2 <- -nback
temp <- user.split(yback[1L:n2], wback[1L:n2],
xback[1L:n2], parms, FALSE)
ncat <- length(unique(xback[1L:n2]))
if (length(temp$goodness) != ncat - 1L ||
length(temp$direction) != ncat)
stop("Invalid return from categorical 'split' function")
} else {
temp <- user.split(yback[1L:nback], wback[1L:nback],
xback[1L:nback], parms, TRUE)
if (length(temp$goodness) != (nback - 1L))
stop("User 'split' function returned invalid goodness")
if (length(temp$direction) != (nback - 1L))
stop("User 'split' function returned invalid direction")
}
as.numeric(as.vector(c(temp$goodness, temp$direction)))
})
} else {
expr2 <- quote({
tempy <- matrix(yback[1L:(nback * numy)], ncol = numy)
temp <- user.eval(tempy, wback[1L:nback], parms)
if (length(temp$label) != numresp)
stop("User 'eval' function returned invalid label")
if (length(temp$deviance) != 1L)
stop("User 'eval' function returned invalid deviance")
as.numeric(as.vector(c(temp$deviance, temp$label)))
})
expr1 <- quote({
if (nback < 0L) { #categorical variable
n2 <- -nback
tempy <- matrix(yback[1L:(n2 * numy)], ncol = numy)
temp <- user.split(tempy, wback[1L:n2], xback[1L:n2],
parms, FALSE)
ncat <- length(unique(xback[1L:n2]))
if (length(temp$goodness) != ncat - 1L ||
length(temp$direction) != ncat)
stop("Invalid return from categorical 'split' function")
} else {
tempy <- matrix(yback[1L:(nback * numy)], ncol = numy)
temp <- user.split(tempy, wback[1:nback], xback[1L:nback],
parms, TRUE)
if (length(temp$goodness) != (nback - 1L))
stop("User 'split' function returned invalid goodness")
if (length(temp$direction) != (nback - 1L))
stop("User 'split' function returned invalid direction")
}
as.numeric(as.vector(c(temp$goodness, temp$direction)))
})
}
##
## The vectors nback, wback, xback and yback will have their
## contents constantly re-inserted by C code. It's one way to make
## things very fast. It is dangerous to do this, so they
## are tossed into a separate frame to isolate them. Evaluations of
## the above expressions occur in that frame.
##
rho <- new.env()
assign("nback", integer(1L), envir = rho)
assign("wback", double(nobs), envir = rho)
assign("xback", double(nobs), envir = rho)
assign("yback", double(nobs * numy), envir = rho)
assign("user.eval", user.eval, envir = rho)
assign("user.split", user.split, envir = rho)
assign("numy", numy, envir = rho)
assign("numresp", numresp, envir = rho)
assign("parms", parms, envir = rho)
.Call(C_init_rpcallback, rho, as.integer(numy), as.integer(numresp),
expr1, expr2)
list(expr1 = expr1, expr2 = expr2, rho = rho)
}
| 5,028 | gpl-2.0 |
33a053fce99e7bf650e5be8af20810388e143ff5 | andeek/rpart | R/rpartcallback.R | ## This routine sets up the callback code for user-written split
## routines in rpart
##
## Arguments:
##   mlist - user-supplied method list; must provide `init`, `split`
##           and `eval` functions
##   nobs  - number of observations; fixes the size of the scratch
##           vectors that the C code refills on every callback
##   init  - result of the user's init function; supplies numresp
##           (length of a node label), numy (columns of the response)
##           and parms (opaque user parameters)
## Returns a list(expr1, expr2, rho): the split/eval callback
## expressions and the environment they evaluate in.
rpartcallback <- function(mlist, nobs, init)
{
    if (length(mlist) < 3L)
        stop("User written methods must have 3 functions")
    if (!is.function(mlist$init))
        stop("User written method does not contain an 'init' function")
    if (!is.function(mlist$split))
        stop("User written method does not contain a 'split' function")
    if (!is.function(mlist$eval))
        stop("User written method does not contain an 'eval' function")
    user.eval <- mlist$eval
    user.split <- mlist$split
    numresp <- init$numresp
    numy <- init$numy
    parms <- init$parms
    ##
    ## expr2 is an expression that will call the user "evaluation"
    ## function, and check that what comes back is valid
    ## expr1 does the same for the user "split" function
    ##
    ## For speed in the C interface, yback, xback, and wback are
    ## fixed S vectors of a fixed size, and nback tells us how
    ## much of the vector is actually being used on this particular
    ## callback.
    ##
    if (numy == 1L) {
        expr2 <- quote({
            temp <- user.eval(yback[1:nback], wback[1:nback], parms)
            if (length(temp$label) != numresp)
                stop("User 'eval' function returned invalid label")
            if (length(temp$deviance) != 1L)
                stop("User 'eval' function returned invalid deviance")
            as.numeric(as.vector(c(temp$deviance, temp$label)))
        })
        expr1 <- quote({
            ## A negative nback marks a categorical predictor; its
            ## magnitude is the number of observations in the node.
            if (nback < 0L) { #categorical variable
                n2 <- -nback
                temp <- user.split(yback[1L:n2], wback[1L:n2],
                                   xback[1L:n2], parms, FALSE)
                ncat <- length(unique(xback[1L:n2]))
                if (length(temp$goodness) != ncat - 1L ||
                    length(temp$direction) != ncat)
                    stop("Invalid return from categorical 'split' function")
            } else {
                temp <- user.split(yback[1L:nback], wback[1L:nback],
                                   xback[1L:nback], parms, TRUE)
                if (length(temp$goodness) != (nback - 1L))
                    stop("User 'split' function returned invalid goodness")
                if (length(temp$direction) != (nback - 1L))
                    stop("User 'split' function returned invalid direction")
            }
            as.numeric(as.vector(c(temp$goodness, temp$direction)))
        })
    } else {
        ## Multivariate response: yback carries the nback x numy values
        ## and is reshaped into a matrix before calling the user code.
        expr2 <- quote({
            tempy <- matrix(yback[1L:(nback * numy)], ncol = numy)
            temp <- user.eval(tempy, wback[1L:nback], parms)
            if (length(temp$label) != numresp)
                stop("User 'eval' function returned invalid label")
            if (length(temp$deviance) != 1L)
                stop("User 'eval' function returned invalid deviance")
            as.numeric(as.vector(c(temp$deviance, temp$label)))
        })
        expr1 <- quote({
            if (nback < 0L) { #categorical variable
                n2 <- -nback
                tempy <- matrix(yback[1L:(n2 * numy)], ncol = numy)
                temp <- user.split(tempy, wback[1L:n2], xback[1L:n2],
                                   parms, FALSE)
                ncat <- length(unique(xback[1L:n2]))
                if (length(temp$goodness) != ncat - 1L ||
                    length(temp$direction) != ncat)
                    stop("Invalid return from categorical 'split' function")
            } else {
                tempy <- matrix(yback[1L:(nback * numy)], ncol = numy)
                temp <- user.split(tempy, wback[1:nback], xback[1L:nback],
                                   parms, TRUE)
                if (length(temp$goodness) != (nback - 1L))
                    stop("User 'split' function returned invalid goodness")
                if (length(temp$direction) != (nback - 1L))
                    stop("User 'split' function returned invalid direction")
            }
            as.numeric(as.vector(c(temp$goodness, temp$direction)))
        })
    }
    ##
    ## The vectors nback, wback, xback and yback will have their
    ## contents constantly re-inserted by C code. It's one way to make
    ## things very fast. It is dangerous to do this, so they
    ## are tossed into a separate frame to isolate them. Evaluations of
    ## the above expressions occur in that frame.
    ##
    rho <- new.env()
    assign("nback", integer(1L), envir = rho)
    assign("wback", double(nobs), envir = rho)
    assign("xback", double(nobs), envir = rho)
    assign("yback", double(nobs * numy), envir = rho)
    assign("user.eval", user.eval, envir = rho)
    assign("user.split", user.split, envir = rho)
    assign("numy", numy, envir = rho)
    assign("numresp", numresp, envir = rho)
    assign("parms", parms, envir = rho)
    .Call(C_init_rpcallback, rho, as.integer(numy), as.integer(numresp),
          expr1, expr2)
    list(expr1 = expr1, expr2 = expr2, rho = rho)
}
| 5,028 | gpl-3.0 |
0772f096c3e45fe7eb6e16b4548fca3746a53ad1 | cxxr-devel/cxxr | src/extra/testr/filtered-test-suite/round/tc_round_3.R | expected <- structure(list(lowerNorm = c(1, 0.707, 0.704, 0.634, 0.629, 0.559,
0.554, 0.548, 0.47, 0.398, 0.391, 0.383, 0.374, 0.277, 0.186,
0.176, 0.153, 0.138, 0.122), upperNorm = c(1, 1, 1, 0.98, 0.984,
0.947, 0.952, 0.958, 0.91, 0.857, 0.864, 0.872, 0.881, 0.799,
0.71, 0.72, 0.744, 0.758, 0.775), lowerNormS = c(0.911, 0.683,
0.68, 0.614, 0.61, 0.544, 0.538, 0.533, 0.46, 0.391, 0.384, 0.376,
0.367, 0.275, 0.189, 0.179, 0.155, 0.141, 0.124), upperNorms = c(1,
0.997, 1, 0.97, 0.975, 0.938, 0.943, 0.949, 0.903, 0.852, 0.859,
0.867, 0.875, 0.797, 0.713, 0.722, 0.746, 0.761, 0.777)), .Names = c("lowerNorm",
"upperNorm", "lowerNormS", "upperNorms"), row.names = c(NA, -19L
), class = "data.frame")
test(id=2, code={
argv <- list(structure(list(lowerNorm = c(1, 0.7074793118252, 0.703783359109958,
0.633667085530785, 0.629171386131588, 0.55900804989023, 0.553693829615336,
0.547917347996141, 0.470383100744677, 0.397621760007547, 0.390548442517381,
0.382779091361949, 0.374191514453686, 0.276654053495554, 0.186268067402784,
0.176381170003996, 0.152703583557352, 0.138281755556403, 0.121518607618675
), upperNorm = c(1, 1, 1, 0.979778292620476, 0.984273992019672,
0.946874303050947, 0.952188523325841, 0.957965004945035, 0.910009056118068,
0.857280200776766, 0.864353518266933, 0.872122869422365, 0.880710446330628,
0.798976198605286, 0.710090476014583, 0.719977373413371, 0.743654959860015,
0.758076787860964, 0.774839935798692), lowerNormS = c(0.910985448809634,
0.683392923012911, 0.679522139376878, 0.614273605024573, 0.609653530675358,
0.543887035370979, 0.538488520130148, 0.532620411085642, 0.459604218176941,
0.390811451215735, 0.383715321807271, 0.375920913978781, 0.367305641565109,
0.274783502246108, 0.188735721130942, 0.178848823732154, 0.15517123728551,
0.140749409284561, 0.123986261346832), upperNorms = c(1, 0.996879185830627,
1, 0.969960088452818, 0.974580162802033, 0.937905681715855, 0.943304196956687,
0.949172306001193, 0.902674026454245, 0.851952320959801, 0.859048450368266,
0.866842858196755, 0.875458130610427, 0.797245309278501, 0.712558129742741,
0.722445027141529, 0.746122613588173, 0.760544441589122, 0.77730758952685
)), .Names = c("lowerNorm", "upperNorm", "lowerNormS", "upperNorms"
), row.names = c(NA, -19L), class = "data.frame"), 3)
do.call('round', argv);
}, o = expected);
| 2,345 | gpl-2.0 |
920e286c46518ec79050600232fbef892e1cbce8 | pkprabhat/ML-and-Finance | Deep Learning Analysis/airbCCFunction.R | library("neuralnet")
set.seed(10000)
EAD = 200000
LGD = 0.5
R = 0.04
#i = 0:100
#PD = 0.000 to 0.200
M = 2
eq0 <- function(x) {x/500}
eq3 <- function(PD) {(LGD*pnorm(((1/(1-R))^0.5)*qnorm(PD)+((R/(1-R))^0.5)*qnorm(0.999))-LGD*PD)}
eq4 <- function(K) {K*12.5*EAD}
response = matrix(data=NA, nrow=100,ncol=1)
attribute <- as.data.frame(seq(0,0.2,length=100),ncol=1)
for (i in 1:100){
response[i,1] = eq3(attribute[i,1])
#RWA[i] = eq4(K[i])
}
data <- cbind(attribute, response)
colnames(data) <- c("attribute","response")
fit<-neuralnet(response~attribute,
data = data,
hidden = c(3,3),
threshold = 0.01
)
resp = matrix(data=NA, nrow=40,ncol=1)
testdata <- as.matrix(seq(0.000,0.200,length=40),ncol=1)
for (i in 1:40){
resp[i,1] = eq3(testdata[i,1])
#RWA[i] = eq4(K[i])
}
pred <- compute(fit,testdata)
result <- cbind(testdata,pred$net.result,resp)
colnames(result) <- c("Attribute", "Predicted_K", "Actual_K")
round(result,4)
plot(testdata,pred$net.result, type="l", col="red", main="Exposure for Credit Card (Prediction)", cex.main = 2.0, cex.lab = 1.5, cex = 1.5, lwd = 3,
xlab = "Probability of Default (with fixed LGD = 50%)", ylab = "Capital Requirement (K)")
lines(testdata,resp, type="l", col="green", main="Exposure for Credit Card (Actual)", cex.main = 2.0, cex.lab = 1.5, cex = 1.5, lwd = 3,
xlab = "Probability of Default (with fixed LGD = 50%)", ylab = "Capital Requirement (K)")
legend('bottomright', c("ANN Prediction","Actual Calculculation"), lty=c(1,1), lwd=c(1,1),col=c("red","green")) | 1,573 | gpl-3.0 |
0772f096c3e45fe7eb6e16b4548fca3746a53ad1 | kmillar/cxxr | src/extra/testr/filtered-test-suite/round/tc_round_3.R | expected <- structure(list(lowerNorm = c(1, 0.707, 0.704, 0.634, 0.629, 0.559,
0.554, 0.548, 0.47, 0.398, 0.391, 0.383, 0.374, 0.277, 0.186,
0.176, 0.153, 0.138, 0.122), upperNorm = c(1, 1, 1, 0.98, 0.984,
0.947, 0.952, 0.958, 0.91, 0.857, 0.864, 0.872, 0.881, 0.799,
0.71, 0.72, 0.744, 0.758, 0.775), lowerNormS = c(0.911, 0.683,
0.68, 0.614, 0.61, 0.544, 0.538, 0.533, 0.46, 0.391, 0.384, 0.376,
0.367, 0.275, 0.189, 0.179, 0.155, 0.141, 0.124), upperNorms = c(1,
0.997, 1, 0.97, 0.975, 0.938, 0.943, 0.949, 0.903, 0.852, 0.859,
0.867, 0.875, 0.797, 0.713, 0.722, 0.746, 0.761, 0.777)), .Names = c("lowerNorm",
"upperNorm", "lowerNormS", "upperNorms"), row.names = c(NA, -19L
), class = "data.frame")
test(id=2, code={
argv <- list(structure(list(lowerNorm = c(1, 0.7074793118252, 0.703783359109958,
0.633667085530785, 0.629171386131588, 0.55900804989023, 0.553693829615336,
0.547917347996141, 0.470383100744677, 0.397621760007547, 0.390548442517381,
0.382779091361949, 0.374191514453686, 0.276654053495554, 0.186268067402784,
0.176381170003996, 0.152703583557352, 0.138281755556403, 0.121518607618675
), upperNorm = c(1, 1, 1, 0.979778292620476, 0.984273992019672,
0.946874303050947, 0.952188523325841, 0.957965004945035, 0.910009056118068,
0.857280200776766, 0.864353518266933, 0.872122869422365, 0.880710446330628,
0.798976198605286, 0.710090476014583, 0.719977373413371, 0.743654959860015,
0.758076787860964, 0.774839935798692), lowerNormS = c(0.910985448809634,
0.683392923012911, 0.679522139376878, 0.614273605024573, 0.609653530675358,
0.543887035370979, 0.538488520130148, 0.532620411085642, 0.459604218176941,
0.390811451215735, 0.383715321807271, 0.375920913978781, 0.367305641565109,
0.274783502246108, 0.188735721130942, 0.178848823732154, 0.15517123728551,
0.140749409284561, 0.123986261346832), upperNorms = c(1, 0.996879185830627,
1, 0.969960088452818, 0.974580162802033, 0.937905681715855, 0.943304196956687,
0.949172306001193, 0.902674026454245, 0.851952320959801, 0.859048450368266,
0.866842858196755, 0.875458130610427, 0.797245309278501, 0.712558129742741,
0.722445027141529, 0.746122613588173, 0.760544441589122, 0.77730758952685
)), .Names = c("lowerNorm", "upperNorm", "lowerNormS", "upperNorms"
), row.names = c(NA, -19L), class = "data.frame"), 3)
do.call('round', argv);
}, o = expected);
| 2,345 | gpl-2.0 |
0772f096c3e45fe7eb6e16b4548fca3746a53ad1 | krlmlr/cxxr | src/extra/testr/filtered-test-suite/round/tc_round_3.R | expected <- structure(list(lowerNorm = c(1, 0.707, 0.704, 0.634, 0.629, 0.559,
0.554, 0.548, 0.47, 0.398, 0.391, 0.383, 0.374, 0.277, 0.186,
0.176, 0.153, 0.138, 0.122), upperNorm = c(1, 1, 1, 0.98, 0.984,
0.947, 0.952, 0.958, 0.91, 0.857, 0.864, 0.872, 0.881, 0.799,
0.71, 0.72, 0.744, 0.758, 0.775), lowerNormS = c(0.911, 0.683,
0.68, 0.614, 0.61, 0.544, 0.538, 0.533, 0.46, 0.391, 0.384, 0.376,
0.367, 0.275, 0.189, 0.179, 0.155, 0.141, 0.124), upperNorms = c(1,
0.997, 1, 0.97, 0.975, 0.938, 0.943, 0.949, 0.903, 0.852, 0.859,
0.867, 0.875, 0.797, 0.713, 0.722, 0.746, 0.761, 0.777)), .Names = c("lowerNorm",
"upperNorm", "lowerNormS", "upperNorms"), row.names = c(NA, -19L
), class = "data.frame")
test(id=2, code={
argv <- list(structure(list(lowerNorm = c(1, 0.7074793118252, 0.703783359109958,
0.633667085530785, 0.629171386131588, 0.55900804989023, 0.553693829615336,
0.547917347996141, 0.470383100744677, 0.397621760007547, 0.390548442517381,
0.382779091361949, 0.374191514453686, 0.276654053495554, 0.186268067402784,
0.176381170003996, 0.152703583557352, 0.138281755556403, 0.121518607618675
), upperNorm = c(1, 1, 1, 0.979778292620476, 0.984273992019672,
0.946874303050947, 0.952188523325841, 0.957965004945035, 0.910009056118068,
0.857280200776766, 0.864353518266933, 0.872122869422365, 0.880710446330628,
0.798976198605286, 0.710090476014583, 0.719977373413371, 0.743654959860015,
0.758076787860964, 0.774839935798692), lowerNormS = c(0.910985448809634,
0.683392923012911, 0.679522139376878, 0.614273605024573, 0.609653530675358,
0.543887035370979, 0.538488520130148, 0.532620411085642, 0.459604218176941,
0.390811451215735, 0.383715321807271, 0.375920913978781, 0.367305641565109,
0.274783502246108, 0.188735721130942, 0.178848823732154, 0.15517123728551,
0.140749409284561, 0.123986261346832), upperNorms = c(1, 0.996879185830627,
1, 0.969960088452818, 0.974580162802033, 0.937905681715855, 0.943304196956687,
0.949172306001193, 0.902674026454245, 0.851952320959801, 0.859048450368266,
0.866842858196755, 0.875458130610427, 0.797245309278501, 0.712558129742741,
0.722445027141529, 0.746122613588173, 0.760544441589122, 0.77730758952685
)), .Names = c("lowerNorm", "upperNorm", "lowerNormS", "upperNorms"
), row.names = c(NA, -19L), class = "data.frame"), 3)
do.call('round', argv);
}, o = expected);
| 2,345 | gpl-2.0 |
46b9697333829fa089211f4f366be78b13c43121 | ldebrot/flashcards | studyblue_csv_to_rda.R | ##############################################################################
### STUDYBLUE CSV TO RDA CONVERTER ###
##############################################################################
###
###
### README : This is a small script to convert CSV files exported by Studyblue into clean Rda files.
### Please note that it takes into account the particular pattern of such files.
### Read.csv does not work for these files as line breaks in the flashcard text is wrongfully interpreted as a separation between cards.
### Therefore, the readlines function and a number of string manipulations are required.
###
### REQUIREMENTS : This script installs and uses the stringr package
###
### LEGAL : For the legal people out there, this script is not linked in any way to Studyblue. It just happens that I wrote it to handle files exported from this platform.
# Install stringr package if necessary, then load it
list.of.packages <- c("stringr")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] # check which packages are installed
if(length(new.packages)) install.packages(new.packages) # if not install, install
for (i in 1:length(list.of.packages)) library(list.of.packages[i],character.only = TRUE) # load them
# Send working directory to the one in which flashcardreader is located
setwd(dirname(sys.frame(1)$ofile))
cat("\n Working directory set to the one in which this script is located \n")
# Set error messages for further use
errmsg1 <- "ERROR 1 - This is not a correct choice - choose a number corresponding to a file"
errmsg2 <- "ERROR 2 - Something went wrong with the identification of the starting and ending elements, despite 100 sincere attempts. Sorry, I failed you... :("
# File choice
cat("\n *** \n \n Available CSV files in your working directory : \n")
filepattern <- "\\.csv$" # Define file extension pattern
print(dir(pattern=filepattern)) # Show csv files in working directory
cat("\n Which file do you want to read? \n")
x <- (readline()) # Ask for input
# Check user input
if(is.na(as.numeric(x))) stop(errmsg1)
x<-as.numeric(x)
if(x>length(dir(pattern=filepattern)) || x<1) stop(errmsg1)
# If input correct (=corresponding to an actual file)
fnumber <- x
fcname <- dir(pattern=filepattern)[fnumber]
flines <- readLines(con=fcname)
# Check whether first line starts with a quotation mark (as Studyblue provides two types of csv files...)
tmp_1stchar <- substr(flines[1],1,1)
# Apply restrictive patterns
fpat_start <- '[,]{1}["]{1}' # starts with a quotation mark (old pattern : '^"')
fpat_end <- '[^"]["]{1}$' # ends with a quotation mark
fpat_start <- '^"' # starts with a quotation mark
fpat_end <- '"$' # ends with a quotation mark
# Identify starting and ending elements in the read elements
startelements <- grep(pattern=fpat_start,x=flines)
endelements <- grep(pattern=fpat_end,x=flines)
# Check whether equal numbers of start- and endelements
safetycount <- 0
checkreport <- "" #REPORT
while (length(startelements)!=length(endelements)) {
safetycount <- safetycount + 1
if (safetycount>100) stop(errmsg2)
tmp_counts <- max(c(length(startelements),length(endelements)))
for (i in 1:tmp_counts) {
if(endelements[i]<startelements[i]) { #If endelement is below or equal to the next starting element, there is a missing endelement
newreport <- paste("\n \n Unequal number of start- and endelements at element #",i," which is startelement #",startelements[i]," and endelement #",endelements[i]," \n \n ",collapse="")
checkreport <- c(checkreport,newreport)
# Check whether the previous endelement was right
tmp_pos <- endelements[i-1] #Get the position of the last endelement
tmp_nxt <- flines[tmp_pos+1] #Get the element after the position of the last endelement
# Check whether this looks like a start element
tmp_eval <- 0
if(length(grep(pattern='[,]{1}["]{1}',x=tmp_nxt))==1) tmp_eval <- tmp_eval + 1
if(length(grep(pattern='^"[A-Z]',x=tmp_nxt))==1) tmp_eval <- tmp_eval + 1
if(tmp_eval!=2) { #the next element is NOT a starting element, so the endelement is wrong !
newreport <- paste("Endelement wrong, next element is NOT a starting element. Endelement deleted.")
checkreport <- c(checkreport,newreport)
oldendelements <- endelements
endelements <- NULL
endelements[1:(i-2)] <- oldendelements [1:(i-2)]
endelements[(i-1):(length(oldendelements)-1)] <- oldendelements[i:length(oldendelements)]
}
if(tmp_eval==2) { #the next element IS INDEED a starting element, so the startelement is wrong
newreport <- paste("Startelement wrong, element following last endelement IS a starting element. Startelement added.")
checkreport <- c(checkreport,newreport)
oldstartelements <- startelements
startelements <- NULL
startelements[1:(i-1)] <- oldstartelements [1:(i-1)]
startelements[i] <- tmp_pos+1
startelements[(i+1):(length(oldstartelements)+1)] <- oldstartelements[i:length(oldstartelements)]
}
break
}
}
}
#REPORT
newreport <- "\n \n Conversion all fine."
checkreport <- c(checkreport,newreport)
elementslength <- length(startelements)
elements <- list()
fcards <- data.frame()
for(i in 1:elementslength) {
f_question <- NULL
f_answer <- NULL
elements[[i]] <- seq(from=startelements[i],to=endelements[i],by=1) # make a sequence from the starting to the ending element
f_question <- str_split(string=flines[startelements[i]],pattern='","')[[1]][1] #take first part before the pattern (",") in the starting element
f_answer <- str_split(string=flines[startelements[i]],pattern='","')[[1]][2] #take second part after the pattern (",") in the starting element. This is usually the first part of the answer.
if(length(elements[[i]])>1) {
for (ii in 2:length(elements[[i]])) f_answer <- rbind(f_answer,flines[elements[[i]][ii]]) #add the other elements, which should contain only answer content
}
f_question <- cbind(f_question,"question",i) # make a row of the question, mark it as a question and assign it a number
f_answer <- cbind(f_answer,"answer",i) # make a data frame of the answer element(s), mark it as answer content and assign it the same number
f_colnames <- c("content","type","number") # name the columns appropriately
colnames(f_question) <- (f_colnames)
colnames(f_answer) <- (f_colnames)
fcards <- rbind(fcards,f_question,f_answer) # produce a clean data frame containing all content
}
## Set classes right
fcards$type <- as.character(fcards$type)
fcards$content <- as.character(fcards$content)
fcards$number <- as.numeric(as.character(fcards$number))
## Save it as a RDA file
fcrdaname <- sub(pattern=".csv",replacement=".Rda",x=fcname)
save(fcards,file=fcrdaname)
cat(checkreport)
cat("\n \n *** \n Saved a RDA file containing the flashcard content in your working directory \n \n ") | 7,252 | gpl-3.0 |
74cecd23ba013cc6d997579cc7b00097bc54f6e4 | tylermorganwall/skpr | R/pipeimport.R | #' re-export magrittr pipe operator
#'
#' @importFrom magrittr %>%
#' @name %>%
#' @rdname pipe
#' @export
NULL
| 112 | gpl-3.0 |
74cecd23ba013cc6d997579cc7b00097bc54f6e4 | GeorgeMKhoury/skpr | R/pipeimport.R | #' re-export magrittr pipe operator
#'
#' @importFrom magrittr %>%
#' @name %>%
#' @rdname pipe
#' @export
NULL
| 112 | gpl-3.0 |
0772f096c3e45fe7eb6e16b4548fca3746a53ad1 | kmillar/rho | src/extra/testr/filtered-test-suite/round/tc_round_3.R | expected <- structure(list(lowerNorm = c(1, 0.707, 0.704, 0.634, 0.629, 0.559,
0.554, 0.548, 0.47, 0.398, 0.391, 0.383, 0.374, 0.277, 0.186,
0.176, 0.153, 0.138, 0.122), upperNorm = c(1, 1, 1, 0.98, 0.984,
0.947, 0.952, 0.958, 0.91, 0.857, 0.864, 0.872, 0.881, 0.799,
0.71, 0.72, 0.744, 0.758, 0.775), lowerNormS = c(0.911, 0.683,
0.68, 0.614, 0.61, 0.544, 0.538, 0.533, 0.46, 0.391, 0.384, 0.376,
0.367, 0.275, 0.189, 0.179, 0.155, 0.141, 0.124), upperNorms = c(1,
0.997, 1, 0.97, 0.975, 0.938, 0.943, 0.949, 0.903, 0.852, 0.859,
0.867, 0.875, 0.797, 0.713, 0.722, 0.746, 0.761, 0.777)), .Names = c("lowerNorm",
"upperNorm", "lowerNormS", "upperNorms"), row.names = c(NA, -19L
), class = "data.frame")
test(id=2, code={
argv <- list(structure(list(lowerNorm = c(1, 0.7074793118252, 0.703783359109958,
0.633667085530785, 0.629171386131588, 0.55900804989023, 0.553693829615336,
0.547917347996141, 0.470383100744677, 0.397621760007547, 0.390548442517381,
0.382779091361949, 0.374191514453686, 0.276654053495554, 0.186268067402784,
0.176381170003996, 0.152703583557352, 0.138281755556403, 0.121518607618675
), upperNorm = c(1, 1, 1, 0.979778292620476, 0.984273992019672,
0.946874303050947, 0.952188523325841, 0.957965004945035, 0.910009056118068,
0.857280200776766, 0.864353518266933, 0.872122869422365, 0.880710446330628,
0.798976198605286, 0.710090476014583, 0.719977373413371, 0.743654959860015,
0.758076787860964, 0.774839935798692), lowerNormS = c(0.910985448809634,
0.683392923012911, 0.679522139376878, 0.614273605024573, 0.609653530675358,
0.543887035370979, 0.538488520130148, 0.532620411085642, 0.459604218176941,
0.390811451215735, 0.383715321807271, 0.375920913978781, 0.367305641565109,
0.274783502246108, 0.188735721130942, 0.178848823732154, 0.15517123728551,
0.140749409284561, 0.123986261346832), upperNorms = c(1, 0.996879185830627,
1, 0.969960088452818, 0.974580162802033, 0.937905681715855, 0.943304196956687,
0.949172306001193, 0.902674026454245, 0.851952320959801, 0.859048450368266,
0.866842858196755, 0.875458130610427, 0.797245309278501, 0.712558129742741,
0.722445027141529, 0.746122613588173, 0.760544441589122, 0.77730758952685
)), .Names = c("lowerNorm", "upperNorm", "lowerNormS", "upperNorms"
), row.names = c(NA, -19L), class = "data.frame"), 3)
do.call('round', argv);
}, o = expected);
| 2,345 | gpl-2.0 |
0772f096c3e45fe7eb6e16b4548fca3746a53ad1 | ArunChauhan/cxxr | src/extra/testr/filtered-test-suite/round/tc_round_3.R | expected <- structure(list(lowerNorm = c(1, 0.707, 0.704, 0.634, 0.629, 0.559,
0.554, 0.548, 0.47, 0.398, 0.391, 0.383, 0.374, 0.277, 0.186,
0.176, 0.153, 0.138, 0.122), upperNorm = c(1, 1, 1, 0.98, 0.984,
0.947, 0.952, 0.958, 0.91, 0.857, 0.864, 0.872, 0.881, 0.799,
0.71, 0.72, 0.744, 0.758, 0.775), lowerNormS = c(0.911, 0.683,
0.68, 0.614, 0.61, 0.544, 0.538, 0.533, 0.46, 0.391, 0.384, 0.376,
0.367, 0.275, 0.189, 0.179, 0.155, 0.141, 0.124), upperNorms = c(1,
0.997, 1, 0.97, 0.975, 0.938, 0.943, 0.949, 0.903, 0.852, 0.859,
0.867, 0.875, 0.797, 0.713, 0.722, 0.746, 0.761, 0.777)), .Names = c("lowerNorm",
"upperNorm", "lowerNormS", "upperNorms"), row.names = c(NA, -19L
), class = "data.frame")
test(id=2, code={
argv <- list(structure(list(lowerNorm = c(1, 0.7074793118252, 0.703783359109958,
0.633667085530785, 0.629171386131588, 0.55900804989023, 0.553693829615336,
0.547917347996141, 0.470383100744677, 0.397621760007547, 0.390548442517381,
0.382779091361949, 0.374191514453686, 0.276654053495554, 0.186268067402784,
0.176381170003996, 0.152703583557352, 0.138281755556403, 0.121518607618675
), upperNorm = c(1, 1, 1, 0.979778292620476, 0.984273992019672,
0.946874303050947, 0.952188523325841, 0.957965004945035, 0.910009056118068,
0.857280200776766, 0.864353518266933, 0.872122869422365, 0.880710446330628,
0.798976198605286, 0.710090476014583, 0.719977373413371, 0.743654959860015,
0.758076787860964, 0.774839935798692), lowerNormS = c(0.910985448809634,
0.683392923012911, 0.679522139376878, 0.614273605024573, 0.609653530675358,
0.543887035370979, 0.538488520130148, 0.532620411085642, 0.459604218176941,
0.390811451215735, 0.383715321807271, 0.375920913978781, 0.367305641565109,
0.274783502246108, 0.188735721130942, 0.178848823732154, 0.15517123728551,
0.140749409284561, 0.123986261346832), upperNorms = c(1, 0.996879185830627,
1, 0.969960088452818, 0.974580162802033, 0.937905681715855, 0.943304196956687,
0.949172306001193, 0.902674026454245, 0.851952320959801, 0.859048450368266,
0.866842858196755, 0.875458130610427, 0.797245309278501, 0.712558129742741,
0.722445027141529, 0.746122613588173, 0.760544441589122, 0.77730758952685
)), .Names = c("lowerNorm", "upperNorm", "lowerNormS", "upperNorms"
), row.names = c(NA, -19L), class = "data.frame"), 3)
do.call('round', argv);
}, o = expected);
| 2,345 | gpl-2.0 |
74cecd23ba013cc6d997579cc7b00097bc54f6e4 | sakrejda/parse-stan | R/00-import.R | #' re-export magrittr pipe operator
#'
#' @importFrom magrittr %>%
#' @name %>%
#' @rdname pipe
#' @export
NULL
| 112 | gpl-3.0 |
0772f096c3e45fe7eb6e16b4548fca3746a53ad1 | rho-devel/rho | src/extra/testr/filtered-test-suite/round/tc_round_3.R | expected <- structure(list(lowerNorm = c(1, 0.707, 0.704, 0.634, 0.629, 0.559,
0.554, 0.548, 0.47, 0.398, 0.391, 0.383, 0.374, 0.277, 0.186,
0.176, 0.153, 0.138, 0.122), upperNorm = c(1, 1, 1, 0.98, 0.984,
0.947, 0.952, 0.958, 0.91, 0.857, 0.864, 0.872, 0.881, 0.799,
0.71, 0.72, 0.744, 0.758, 0.775), lowerNormS = c(0.911, 0.683,
0.68, 0.614, 0.61, 0.544, 0.538, 0.533, 0.46, 0.391, 0.384, 0.376,
0.367, 0.275, 0.189, 0.179, 0.155, 0.141, 0.124), upperNorms = c(1,
0.997, 1, 0.97, 0.975, 0.938, 0.943, 0.949, 0.903, 0.852, 0.859,
0.867, 0.875, 0.797, 0.713, 0.722, 0.746, 0.761, 0.777)), .Names = c("lowerNorm",
"upperNorm", "lowerNormS", "upperNorms"), row.names = c(NA, -19L
), class = "data.frame")
test(id=2, code={
argv <- list(structure(list(lowerNorm = c(1, 0.7074793118252, 0.703783359109958,
0.633667085530785, 0.629171386131588, 0.55900804989023, 0.553693829615336,
0.547917347996141, 0.470383100744677, 0.397621760007547, 0.390548442517381,
0.382779091361949, 0.374191514453686, 0.276654053495554, 0.186268067402784,
0.176381170003996, 0.152703583557352, 0.138281755556403, 0.121518607618675
), upperNorm = c(1, 1, 1, 0.979778292620476, 0.984273992019672,
0.946874303050947, 0.952188523325841, 0.957965004945035, 0.910009056118068,
0.857280200776766, 0.864353518266933, 0.872122869422365, 0.880710446330628,
0.798976198605286, 0.710090476014583, 0.719977373413371, 0.743654959860015,
0.758076787860964, 0.774839935798692), lowerNormS = c(0.910985448809634,
0.683392923012911, 0.679522139376878, 0.614273605024573, 0.609653530675358,
0.543887035370979, 0.538488520130148, 0.532620411085642, 0.459604218176941,
0.390811451215735, 0.383715321807271, 0.375920913978781, 0.367305641565109,
0.274783502246108, 0.188735721130942, 0.178848823732154, 0.15517123728551,
0.140749409284561, 0.123986261346832), upperNorms = c(1, 0.996879185830627,
1, 0.969960088452818, 0.974580162802033, 0.937905681715855, 0.943304196956687,
0.949172306001193, 0.902674026454245, 0.851952320959801, 0.859048450368266,
0.866842858196755, 0.875458130610427, 0.797245309278501, 0.712558129742741,
0.722445027141529, 0.746122613588173, 0.760544441589122, 0.77730758952685
)), .Names = c("lowerNorm", "upperNorm", "lowerNormS", "upperNorms"
), row.names = c(NA, -19L), class = "data.frame"), 3)
do.call('round', argv);
}, o = expected);
| 2,345 | gpl-2.0 |
92c773655c0228d6ae94eea7d1d30dfb5b15ef9e | ellispatrick/sydSeq | Examples/pMim/processmiRNAcounts.R | countsMi = as.matrix(read.csv('../../Data/pMim/miRNAcounts.csv',row.names = 1))
load("ensembl.gene.RData")
mir = ensembl.gene[ensembl.gene$ensembl_gene_id %in% rownames(countsMi), ]$mirbase_id
names(mir) = ensembl.gene[ensembl.gene$ensembl_gene_id %in% rownames(countsMi), ]$ensembl_gene_id
mir = unlist(lapply(strsplit(mir, "-"), function(x) if (length(x) > 2) paste(x[1:3],
collapse = "-")))
smir = split(names(mir), mir)
lmir = unlist(lapply(smir, length))
cMi = countsMi[unlist(smir[lmir == 1]), ]
rownames(cMi) = names(which(lmir == 1))
y = NULL
for (i in names(which(lmir > 1))) {
x = (countsMi[smir[[i]], ])
x = x[which.max(rowSums(x)),]
y = rbind(y, x)
}
rownames(y) = names(which(lmir > 1))
cMi = rbind(cMi, y)
countsMi = cMi
counts = as.matrix(read.csv('../../Data/pMim/mRNAcounts.csv',row.names = 1))
save(counts,countsMi,file = 'counts.RData')
| 943 | mit |
82de513da89c45f58ca0843b7eb1e3968dda0c68 | sam-s/ESS | test/literate/misc.R |
##### Indentation
### 1 ----------------------------------------------------------------
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
##! "C-M-q"
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
##! "C-u"
##! "C-M-q"
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
| 358 | gpl-2.0 |
b74e0cbf1edfd67b2ed58e74f3a3455f30f096cb | nielsrhansen/ppstat | pkg/R/MarkedPointProcess.R | setMethod("markedPointProcess", c("data.frame", "ContinuousProcess"),
function(pointData, continuousData, markVar = 'markType', coarsen = NULL, ...) {
## Initialize an empty object - bypasses validity
## check. Validity function called before returning object
## at the end of the contructor.
pointProcess <- new("MarkedPointProcess")
if(continuousData@idVar %in% names(pointData)) {
id <- pointData[ , continuousData@idVar]
if(!identical(levels(id), levels(getId(continuousData))))
id <- factor(id, levels = levels(getId(continuousData)))
} else {
id <- factor(rep(getId(continuousData)[1], dim(pointData)[1]))
}
## Sorting the rows if needed.
if(is.unsorted(id)) {
ord <- order(id)
pointData <- pointData[ord, , drop = FALSE]
id <- id[ord]
}
if(continuousData@positionVar %in% names(pointData)) {
position <- pointData[ , continuousData@positionVar]
} else {
stop(paste("pointData needs a column named", pointProcess@positionVar))
}
ip <- split(seq_along(id), id)
if(any(sapply(names(ip), function(i) is.unsorted(position[ip[[i]]])))) {
ord <- sapply(names(ip),
function(i) order(position[ip[[i]]]), simplify = FALSE)
ord <- unlist(lapply(names(ip),
function(i) ip[[i]][ord[[i]]]), use.names=FALSE)
pointData <- pointData[ord, , drop = FALSE]
position <- position[ord]
id <- id[ord]
ip <- split(seq_along(id), id)
}
if(markVar %in% names(pointData)) {
markType <- pointData[, markVar]
} else {
markType <- rep("point", dim(pointData)[1])
}
markValue <- pointData[, !(names(pointData) %in%
c(continuousData@idVar,
continuousData@positionVar,
markVar)),
drop = FALSE]
pointProcessEnv <- new.env(parent = .GlobalEnv)
pointProcessEnv$id <- id
pointProcessEnv$markType <- as.factor(markType)
for(v in names(markValue)) {
assign(paste("var_", v, sep = ""),
markValue[,v], envir = pointProcessEnv)
}
pointProcess@iPointSubset <- -1L
pointProcess@jPointSubset <- -1L
ic <- split(iSubset(continuousData), getId(continuousData))
contPosition <- getPosition(continuousData)
ii <- sapply(names(ic),
function(i) {
findInterval(position[ip[[i]]],
contPosition[ic[[i]]])
}, simplify = FALSE
)
## Set any 0-pointer to point to the left-most end point.
## Computes the pointers in terms of the full data set.
l <- cumsum(c(0L, sapply(ic, length)[-length(ic)]))
leftPoints <- which(unlist(ii) == 0L)
ii <- as.integer(unlist(lapply(seq_along(l),
function(i) ii[[i]] + l[[i]])),
use.names = FALSE)
ii[leftPoints] <- ii[leftPoints] + 1L
if(is.null(coarsen)) {
## Finding exactly duplicated position entries.
dup <- duplicated(position)
iii <- !(position %in% contPosition[ii]) & !dup
## Setting pointer information.
pointPointer <- ii + cumsum(iii)
pointPointer[leftPoints] <- pointPointer[leftPoints] - 1L
## Creating copy of the continuous data part if necessary.
if(any(iii)) {
i <- c(unlist(ic, use.names = FALSE), ii[iii])
iSubset(continuousData) <- sort(i)
as(pointProcess, "ContinuousProcess") <- continuousProcess(continuousData)
## TODO: Move pointer to environment, remove points from pointprocess.
pointProcess@valueEnv$position[pointPointer[!dup]] <- position[!dup]
} else {
as(pointProcess, "ContinuousProcess") <- continuousData
}
} else if(coarsen == 'right') {
## Setting pointer information
pointPointer <- pmin(ii + 1L, dim(continuousData)[1])
pointPointer[leftPoints] <- pointPointer[leftPoints] - 1L
as(pointProcess, "ContinuousProcess") <- continuousData
} else if(coarsen == 'left') {
## Setting pointer information
pointPointer <- ii
as(pointProcess, "ContinuousProcess") <- continuousData
} else {
stop("Argument 'coarsen' can only be 'NULL', 'left' or 'right'")
}
pointProcessEnv$pointPointer <- pointPointer
pointProcess@pointPointer <- -1L
pointProcess@pointProcessEnv <- pointProcessEnv
lockEnvironment(pointProcess@pointProcessEnv, bindings = TRUE)
pointProcess@markColNames <-levels(getMarkType(pointProcess))
pointProcess@markValueColNames <- names(markValue)
lockEnvironment(pointProcess@valueEnv, bindings = TRUE)
validObject(pointProcess)
return(pointProcess)
}
)
setMethod("markedPointProcess", c("MarkedPointProcess", "missing"),
function(pointData, continuousData, ...) {
continuousData <- continuousProcess(pointData)
tmp <- data.frame(getPointId(pointData),
getPointPosition(pointData),
getMarkType(pointData))
names(tmp) <- c(pointData@idVar,
pointData@positionVar,
"markType")
pointData <- cbind(tmp, getMarkValue(pointData))
if(dim(pointData)[1] > 0) {
return(callGeneric(pointData, continuousData, ...))
} else {
warning("No point process data in marked point process, returning a 'ContinuousProcess'", call = FALSE)
return(continuousData)
}
}
)
setMethod("markedPointProcess", c("data.frame", "data.frame"),
function(pointData, continuousData, ..., markVar = 'markType', coarsen = NULL) {
callGeneric(pointData = pointData,
continuousData = continuousProcess(continuousData, ...),
markVar = markVar, coarsen = coarsen)
}
)
setMethod("markedPointProcess", c("data.frame", "factor.frame"),
function(pointData, continuousData, ..., markVar = 'markType', coarsen = NULL) {
callGeneric(pointData = pointData,
continuousData = continuousProcess(continuousData, ...),
markVar = markVar, coarsen = coarsen)
}
)
## Dispatch when the continuous data are a bare vector of positions.  Without
## an id column in 'pointData' the vector becomes a one-column position
## frame; with an id column the position vector is replicated once per id
## level so every unit shares the same grid.
setMethod("markedPointProcess", c("data.frame", "vector"),
          function(pointData, continuousData, positionVar = 'time', idVar = 'id', markVar = 'markType', ...) {
            if(!(idVar %in% names(pointData))) {
              continuousData <- data.frame(continuousData)
              names(continuousData) <- positionVar
              callGeneric(pointData = pointData,
                          continuousData = continuousData,
                          positionVar = positionVar, idVar = idVar,
                          markVar = markVar, ...)
            } else {
              ## NOTE(review): assumes pointData[, idVar] is a factor --
              ## levels() returns NULL for a character column; confirm
              ## upstream guarantees this.
              id <- factor(rep(levels(pointData[, idVar]),
                               each = length(continuousData)))
              ## One copy of the position grid per id level, stacked.
              continuousData <- rep(continuousData, length(levels(pointData[, idVar])))
              continuousData <- data.frame(id, continuousData)
              names(continuousData) <- c(idVar, positionVar)
              callGeneric(pointData = pointData,
                          continuousData = continuousData,
                          positionVar = positionVar, idVar = idVar,
                          markVar = markVar, ...)
            }
          }
          )
## Construct an evenly spaced evaluation grid covering the positions in 'x'.
## Without an id column a single grid is produced; with one, a separate grid
## is built per id level and the pieces are stacked.  The number of grid
## points is 2^(ceiling(log2(nrow)) + k), and the range is padded on both
## sides by a small data-dependent fudge amount.
computeGrid <- function(x, positionVar, idVar, k = 4, ...) {
  if (!(idVar %in% names(x))) {
    posRange <- range(x[, positionVar])
    pad <- sum(posRange) / (50 * (diff(posRange) + 1))
    nPow <- ceiling(log2(nrow(x)))
    grid <- data.frame(seq(posRange[1] - pad,
                           posRange[2] + pad,
                           length.out = 2^(nPow + k)))
    names(grid) <- positionVar
    return(grid)
  }
  idCol <- x[, idVar]
  idLevels <- levels(idCol)
  ## One grid per id level; note the per-id padding divisor (10) differs
  ## from the pooled case (50), mirroring the original implementation.
  pieces <- lapply(seq_along(idLevels), function(i) {
    sub <- x[idCol == idLevels[i], ]
    posRange <- range(sub[, positionVar])
    pad <- sum(posRange) / (10 * (diff(posRange) + 1))
    nGrid <- 2^(ceiling(log2(nrow(sub))) + k)
    piece <- data.frame(seq(posRange[1] - pad,
                            posRange[2] + pad,
                            length.out = nGrid),
                        factor(rep(idLevels[i], nGrid), levels = idLevels))
    names(piece) <- c(positionVar, idVar)
    piece
  })
  do.call(rbind, pieces)
}
## Dispatch for point data alone: generate a continuous evaluation grid
## from the observed point positions via computeGrid() and re-dispatch.
setMethod("markedPointProcess", c("data.frame", "missing"),
          function(pointData, continuousData, positionVar = 'time',
                   idVar = 'id', markVar = 'markType', ...) {
            if (!(positionVar %in% names(pointData))) {
              stop(paste("pointData must have a column named", positionVar))
            }
            grid <- computeGrid(x = pointData,
                                positionVar = positionVar,
                                idVar = idVar, ...)
            callGeneric(pointData = pointData,
                        continuousData = grid,
                        positionVar = positionVar,
                        idVar = idVar,
                        markVar = markVar, ...)
          }
          )
## Dispatch when the point data come as a bare vector of positions:
## promote the vector to a one-column data frame and re-dispatch.
setMethod("markedPointProcess", c("vector", "ANY"),
          function(pointData, continuousData, positionVar = 'time', idVar = 'id', markVar = 'markType',...) {
            pointFrame <- data.frame(pointData)
            names(pointFrame) <- positionVar
            callGeneric(pointData = pointFrame,
                        continuousData = continuousData,
                        positionVar = positionVar,
                        idVar = idVar,
                        markVar = markVar, ...)
          }
          )
## setMethod("markedPointProcess", c("vector", "vector"),
## function (pointData, continuousData, ...)
## {
## cVar <- deparse(substitute(continuousData))
## .local <- function(pointData, continuousData, positionVar = "time",
## idVar = "id", markVar = "markType", ...) {
## pointData <- data.frame(pointData)
## names(pointData) <- positionVar
## continuousData <- data.frame(continuousData)
## names(continuousData) <- cVar
## callGeneric(pointData = pointData, continuousData, positionVar = positionVar,
## idVar = idVar, markVar = markVar, ...)
## }
## .local(pointData, continuousData, ...)
## }
## )
## Same promotion as the (vector, ANY) method, kept as a separate signature
## so (vector, data.frame) dispatch remains unambiguous.
setMethod("markedPointProcess", c("vector", "data.frame"),
          function(pointData, continuousData, positionVar = 'time', idVar = 'id', markVar = 'markType', ...) {
            pointFrame <- data.frame(pointData)
            names(pointFrame) <- positionVar
            callGeneric(pointData = pointFrame,
                        continuousData = continuousData,
                        positionVar = positionVar,
                        idVar = idVar,
                        markVar = markVar, ...)
          }
          )
## Column names: the inherited continuous-process columns followed by the
## mark and mark-value columns, honouring any active column subset
## (sentinel -1L in the slot means "no subset").
setMethod("colNames", c("MarkedPointProcess", "missing"),
          function(object, ...) {
            pointCols <- c(object@markColNames, object@markValueColNames)
            if (!identical(object@jPointSubset[1], -1L)) {
              pointCols <- pointCols[object@jPointSubset]
            }
            return(c(callNextMethod(object), pointCols))
          }
          )
## Column names filtered by type: "mark" and "markValue" are handled here;
## any other type string is delegated to the ContinuousProcess method.
setMethod("colNames", c("MarkedPointProcess", "character"),
          function(object, type, ...) {
            allNames <- callGeneric(object = object, ...)
            if (type == "mark") {
              return(allNames[allNames %in% object@markColNames])
            }
            if (type == "markValue") {
              return(allNames[allNames %in% object@markValueColNames])
            }
            return(callGeneric(as(object, "ContinuousProcess"), type = type, ...))
          }
          )
## Dimensions: rows come from the continuous process; the column count is
## extended by the (possibly subsetted) mark and mark-value columns.
setMethod("dim", "MarkedPointProcess",
          function(x) {
            baseDim <- callNextMethod(x)
            extraCols <- if (identical(x@jPointSubset[1], -1L)) {
              length(x@markColNames) + length(x@markValueColNames)
            } else {
              length(x@jPointSubset)
            }
            baseDim[2] <- baseDim[2] + extraCols
            return(baseDim)
          }
          )
## Integrate the marked point process into a cumulative (possibly compound)
## jump process.  'f' may be a constant or a function of the point
## positions; 'jumpVar' optionally names a mark-value column supplying jump
## sizes.  With result = 'numeric' the raw integrated values are returned;
## otherwise a JumpProcess object is constructed.
setMethod("integrator", "MarkedPointProcess",
          function(object, f = 1, jumpVar = '', result = 'JumpProcess', ...)
          {
            id <- getPointId(object)
            jump <- NULL
            ## The counting process: a 0/1 indicator at each grid position,
            ## accumulated within each id.
            process <- rep(0,length(getId(object)))
            process[getPointPointer(object)] <- 1
            process <- tapply(process, getId(object), cumsum)
            if(is.function(f))
              f <- f(getPointPosition(object), ...)
            ## Determine jump sizes: the mark-value column, f times that
            ## column, or f itself; 'jump' left NULL means unit jumps
            ## (a pure counting process).
            if(length(f) == 1 && f == 1 && jumpVar %in% colnames(getMarkValue(object))) {
              jump <- getMarkValue(object)[ , jumpVar]
            } else if(length(f) > 1 || any(f != 1)) {
              if(jumpVar %in% colnames(getMarkValue(object))) {
                jump <- f*getMarkValue(object)[ , jumpVar]
              } else {
                jump <- f*rep(1, length(id))
              }
            }
            if(is.null(jump)){
              jump <- rep(1, length(id))
              process <- unlist(process, use.names = FALSE)
            } else {
              ## Compound process: per-id cumulative sums of the jumps,
              ## indexed by the counting process (the leading 0 makes
              ## index 'count + 1' the value after 'count' jumps).
              compound <- tapply(jump, id, function(s) cumsum(c(0,s)))
              process <- unlist(lapply(levels(id), function(i) compound[[i]][process[[i]]+1]))
            }
            if(result == 'numeric')
              return(process)
            ## Wrap the integrated values as a ContinuousProcess and the
            ## jumps as point data, combined into a JumpProcess.
            CP <- continuousProcess(c(as(object[ , FALSE], "list"),
                                      list(integrated = process)),
                                    idVar = object@idVar,
                                    positionVar = object@positionVar,
                                    unitData = getUnitData(object))
            MP <- data.frame(id, getPointPosition(object), "integrated", jump)
            names(MP) <- c(object@idVar, object@positionVar, "markType", "jump")
            return(jumpProcess(MP, CP, idVar = object@idVar,
                               positionVar = object@positionVar))
          }
          )
## Row indices of the active point subset; the -1L sentinel in the slot
## means "all rows", in which case the full index sequence is synthesised.
setMethod("iPointSubset", "MarkedPointProcess",
          function(object) {
            if (identical(object@iPointSubset[1], -1L)) {
              return(seq_along(object@pointProcessEnv$id))
            }
            return(object@iPointSubset)
          }
          )
## Column indices of the active point-column subset; -1L in the slot is the
## "no subset" sentinel.  Consistency fix: use the same
## identical(slot[1], -1L) sentinel test as the sibling accessors
## (iPointSubset, colNames, dim); the previous isTRUE(slot == -1L) form
## behaved identically but only via vector recycling.
setMethod("jPointSubset", "MarkedPointProcess",
          function(object) {
            j <- seq_len(length(object@markColNames) + length(object@markValueColNames))
            if(!identical(object@jPointSubset[1], -1L))
              j <- j[object@jPointSubset]
            return(j)
          }
          )
## Set the point row subset; storing the full index sequence collapses to
## the -1L sentinel so "no subset" has a canonical representation.
setReplaceMethod("iPointSubset", c("MarkedPointProcess", "ANY"),
                 function(object, value) {
                   keep <- value[!is.na(value)]
                   nAll <- length(object@pointProcessEnv$id)
                   isFull <- length(keep) == nAll &&
                     identical(keep, seq_len(nAll))
                   object@iPointSubset <- if (isFull) -1L else keep
                   return(object)
                 }
                 )
## Set the point column subset; the full column sequence collapses to the
## -1L sentinel.
setReplaceMethod("jPointSubset", c("MarkedPointProcess", "ANY"),
                 function(object, value) {
                   keep <- value[!is.na(value)]
                   nCols <- length(object@markColNames) + length(object@markValueColNames)
                   isFull <- length(keep) == nCols &&
                     identical(keep, seq_len(nCols))
                   object@jPointSubset <- if (isFull) -1L else keep
                   return(object)
                 }
                 )
## Ids of the observed points.  With no active row subset (sentinel -1L in
## the slot) the stored id factor is returned as is.
setMethod("getPointId", "MarkedPointProcess",
          function(object, drop = TRUE, ...){
            if(isTRUE(object@iPointSubset == -1L)) {
              value <- object@pointProcessEnv$id
            } else {
              ## Factor subsetting with 'drop = TRUE' removes unused levels;
              ## when the caller asks for drop = FALSE the full level set
              ## from getId() is restored afterwards.
              value <- object@pointProcessEnv$id[iPointSubset(object), drop = TRUE]
              if(!drop)
                value <- factor(value, levels = levels(getId(object)))
            }
            return(value)
          }
          )
## Positions of the observed points: look up the point pointers (optionally
## restricted to a mark passed through '...') in the position vector.
setMethod("getPointPosition", "MarkedPointProcess",
          function(object, ...){
            pointers <- getPointPointer(object, ...)
            positions <- getPosition(object)
            return(positions[pointers])
          }
          )
## Convenience alias: point times are the point positions.
setMethod("getPointTime", "MarkedPointProcess",
          function(object, ...) {
            return(getPointPosition(object, ...))
          }
          )
## Mark types of the observed points.  With no active row subset (sentinel
## -1L in the slot) the stored factor is returned as is.
setMethod("getMarkType", "MarkedPointProcess",
          function(object, drop = TRUE, ...){
            if(isTRUE(object@iPointSubset == -1L)) {
              value <- object@pointProcessEnv$markType
            } else {
              ## Factor subsetting with 'drop = TRUE' removes unused levels;
              ## when the caller asks for drop = FALSE the full set of mark
              ## columns is restored as the level set.
              value <- object@pointProcessEnv$markType[iPointSubset(object), drop = TRUE]
              if(!drop)
                value <- factor(value, levels = colNames(object, "mark"))
            }
            return(value)
          }
          )
## Mark-value columns as a data frame.  When no mark-value columns are
## selected, a zero-column data frame with one row per point is returned so
## that cbind() with it is still well defined.
setMethod("getMarkValue", "MarkedPointProcess",
          function(object, ...){
            j <- colNames(object, "markValue")
            if(length(j) > 0) {
              value <- as.data.frame(getColumns(object, j, drop = FALSE))
            } else {
              ## data.frame()[i, ] yields a 0-column frame with length(i) rows.
              value <- data.frame()[seq_along(getPointId(object)),]
            }
            return(value)
          }
          )
## Assemble plotting data for a MarkedPointProcess.  The continuous part is
## delegated to the ContinuousProcess method; the observed points are added
## as a separate data frame in the 'pointPlotData' slot.  'y' selects what
## the points' plotted values represent: a numeric constant, a column name,
## the id variable, or one of the special tokens '@mark', '@top',
## '@bottom', '@variable'.
setMethod("getPlotData", "MarkedPointProcess",
          function(object, y = '@mark', nPoints = 200, allUnitData = FALSE,
                   allMarkValueData = isTRUE(y %in% names(getMarkValue(object))), ...){
            if(length(getMarkType(object)) == 0) {
              ## No points at all: plain continuous-process plot data.
              plotData <- callGeneric(object = as(object, "ContinuousProcess"),
                                      nPoints = nPoints,
                                      allUnitData = allUnitData, ...)
            } else {
              pointPlotData <- data.frame(id = getPointId(object),
                                          position = getPointPosition(object),
                                          variable = factor(getMarkType(object)))
              names(pointPlotData)[1] <- object@idVar
              plotData <- callGeneric(object = as(object, "ContinuousProcess"),
                                      nPoints = nPoints,
                                      allUnitData = allUnitData,
                                      selectPoints = getPointPointer(object), ...)
              if(allMarkValueData)
                pointPlotData <- cbind(pointPlotData, getMarkValue(object))
              if(isTRUE(allUnitData))
                pointPlotData <- cbind(pointPlotData, getUnitData(object)[as.numeric(getPointId(object)), , drop = FALSE])
              pointPlotData$type <- as.factor('Track')
              if(is.numeric(y)) {
                pointPlotData$value <- y
                ## Bug fix: 'breaks' and 'labels' were free (undefined)
                ## variables here, giving an "object not found" error for
                ## numeric 'y'; extend the slots on 'plotData' instead.
                plotData@breaks <- c(plotData@breaks, y)
                plotData@labels <- c(plotData@labels, as.character(y))
              } else {
                pointPlotData$value <- as.factor(y)
              }
              if(y == "@mark")
                pointPlotData$value <- pointPlotData$variable
              ## NOTE(review): the id column was renamed to object@idVar
              ## above, so '$id' relies on partial matching / idVar == "id";
              ## confirm behaviour for non-default id variable names.
              if(y == object@idVar)
                pointPlotData$value <- pointPlotData$id
              if(isTRUE(y %in% names(getMarkValue(object))))
                pointPlotData$value <- getColumns(object, y)
              if(y == "@top") {
                pointPlotData$value <- pointPlotData$type
                plotData@position <- "top"
              }
              if(y == "@bottom") {
                pointPlotData$value <- pointPlotData$type
                plotData@position <- "bottom"
              }
              if("value" %in% names(plotData@continuousPlotData)) {
                if(y == "@variable") {
                  ## Evaluate each continuous variable at the points carrying
                  ## the corresponding mark.
                  pointPlotData$value <- numeric(dim(pointPlotData)[1])
                  for(variable in levels(pointPlotData$variable)) {
                    pointPlotData$value[pointPlotData$variable == variable] <-
                      getColumns(object[getPointPointer(object, variable),
                                        colNames(as(object, "ContinuousProcess"))],
                                 variable)
                  }
                } else if(isTRUE(y %in% levels(plotData@continuousPlotData$variable))) {
                  pointPlotData$value <- getColumns(object, y)[getPointPointer(object)]
                }
              }
              plotData@pointPlotData <- pointPlotData
            }
            return(plotData)
          }
          )
## Default plot: show the points by their mark type.
setMethod("plot", c("MarkedPointProcess", "missing"),
          function(x, y, ...) {
            callGeneric(x = x, y = '@mark', ...)
          }
          )
## Plot against a named column or special token: build the plot data object
## and hand it to the plot method for that class.
setMethod("plot", c("MarkedPointProcess", "character"),
          function(x, y, nPoints = 200, ...){
            pd <- getPlotData(object = x, y = y, nPoints = nPoints, ...)
            return(plot(pd, ...))
          }
          )
## subset() with an extra 'pointSubset' expression evaluated with
## non-standard evaluation: bare column names in the expression are resolved
## against the object's columns before falling back to the caller's frame.
setMethod("subset", "MarkedPointProcess",
          function(x, ... , pointSubset) {
            y <- callNextMethod()
            if (missing(pointSubset))
              r <- TRUE
            else {
              e <- substitute(pointSubset)
              variables <- all.vars(e)
              ## Bind every referenced column into a scratch environment so
              ## the expression can use bare column names.
              tmpEnv <- new.env(parent = .GlobalEnv)
              for(v in variables) {
                assign(v, getColumns(x, v), envir = tmpEnv)
              }
              r <- eval(e, tmpEnv, parent.frame())
              if (!is.logical(r))
                stop("'pointSubset' must evaluate to logical")
              ## NA results are treated as "exclude".
              r <- r & !is.na(r)
            }
            if(!all(r)) {
              iPointSubset(y) <- iPointSubset(y)[r]
              setPointPointer(y) <- getPointPointer(y)[r]
            }
            return(y)
          }
          )
## Extract columns by name.  Continuous-process columns are delegated to the
## parent method; a mark column yields the positions of the points carrying
## that mark, and a mark-value column is fetched from the point-process
## environment (honouring any active point row subset).
setMethod("getColumns", c("MarkedPointProcess", "character"),
          function(object, j, drop = TRUE) {
            checkColumns <- j %in% colNames(object)
            if(!all(checkColumns))
              stop(paste(c("No column '", j[!checkColumns][1], "' in the object."),
                         collapse = ""), call. = FALSE)
            ## Which of the requested names live on the continuous part?
            contj <- j %in% colNames(as(object, "ContinuousProcess"))
            if(drop && length(j) == 1) {
              if(contj) {
                column <- callNextMethod(object, j, drop = TRUE)
              } else {
                if(j %in% object@markColNames) {
                  ## Mark column: positions of the points with that mark.
                  column <- getPointPosition(object, j)
                } else {
                  ## Mark-value column, stored in the environment as
                  ## 'var_<name>'.
                  column <- get(paste("var_", j, sep = ""),
                                envir = object@pointProcessEnv)
                }
                if(!identical(object@iPointSubset[1], -1L))
                  column <- column[object@iPointSubset]
              }
            } else {
              ## Multiple columns: fetch the continuous ones in bulk and
              ## append the point columns one by one.
              column <- callNextMethod(object, j[contj], drop = drop)
              for(jj in j[!contj]) {
                if(jj %in% object@markColNames) {
                  column[[jj]] <- getPointPosition(object, jj)
                } else {
                  column[[jj]] <- get(paste("var_", jj, sep = ""),
                                      envir = object@pointProcessEnv)
                }
                if(!identical(object@iPointSubset[1], -1L))
                  column[[jj]] <- column[[jj]][object@iPointSubset]
              }
            }
            return(column)
          }
          )
## Row subsetting by integer indices: subset the continuous part, then remap
## the point pointers into the new row numbering, discarding points whose
## rows were dropped.  With drop = TRUE, mark columns whose mark type no
## longer occurs are removed as well.
setMethod("[", c(x = "MarkedPointProcess", i = "integer", j = "missing"),
          function(x, i, j, ... , drop = FALSE) {
            ## Absolute (unsubsetted) row numbers of the current points,
            ## taken before the continuous part is re-subsetted.
            orgPoint <- iSubset(x)[getPointPointer(x)]
            as(x, "ContinuousProcess") <- callGeneric(as(x, "ContinuousProcess"), i, , )
            ## Positions of the surviving points in the new row ordering;
            ## NA marks points whose rows were dropped.
            newPointPointer <- match(orgPoint, iSubset(x))
            notNA <- which(!is.na(newPointPointer))
            iPointSubset(x) <- iPointSubset(x)[notNA]
            setPointPointer(x) <- newPointPointer[notNA]
            if(drop) {
              marks <- colNames(x, "mark")
              dropCol <- colNames(x) %in% marks[!(marks %in% levels(getMarkType(x)))]
              if(any(dropCol))
                x <- x[ , !dropCol, drop = TRUE]
            }
            return(x)
          }
          )
## Column subsetting by name: subset the continuous part and restrict the
## points to those whose mark type is among the requested columns.  With
## drop = TRUE a single column collapses to its values, and an empty mark
## selection degrades to the bare ContinuousProcess.
setMethod("[", c(x = "MarkedPointProcess", i = "missing", j = "character"),
          function(x, i, j, ... , drop = FALSE) {
            if(drop && length(j) == 1) {
              x <- getColumns(x, j)
            } else {
              as(x, "ContinuousProcess") <- callGeneric(as(x, "ContinuousProcess"), , j)
              ## Points whose mark type is among the selected columns.
              i <- getMarkType(x) %in% j
              if(!any(i)) {
                if(drop) {
                  x <- as(x, "ContinuousProcess")
                } else {
                  iPointSubset(x) <- integer()
                  jPointSubset(x) <- which(c(x@markColNames, x@markValueColNames) %in% j)
                }
              } else {
                iPointSubset(x) <- iPointSubset(x)[i]
                setPointPointer(x) <- getPointPointer(x)[i]
                jPointSubset(x) <- which(c(x@markColNames, x@markValueColNames) %in% j)
              }
            }
            return(x)
          }
          )
## Pointers from the point observations into the position/id vectors; the
## -1L sentinel in the slot means the unsubsetted pointers stored in the
## environment apply.
setMethod("getPointPointer", c("MarkedPointProcess", "missing"),
          function(object, mark, ...) {
            if (identical(object@pointPointer[1], -1L)) {
              return(object@pointProcessEnv$pointPointer)
            }
            return(object@pointPointer)
          }
          )
## Pointers restricted to the points whose mark type is among 'mark'.
setMethod("getPointPointer", c("MarkedPointProcess", "character"),
          function(object, mark, ...) {
            keep <- getMarkType(object) %in% mark
            getPointPointer(object)[keep]
          }
          )
## Approximate memory footprint: sizes of everything bound in the slot
## environment plus the size of the continuous part.
setMethod("object.size", "ProcessData",
          function(x) {
            ## NOTE(review): the slot name 'vpointProcessEnv' appears nowhere
            ## else in this file (other methods use 'pointProcessEnv' or
            ## 'valueEnv') -- confirm the slot exists on 'ProcessData'.
            size <- sum(sapply(ls(x@vpointProcessEnv),
                               function(name) object.size(get(name, envir = x@vpointProcessEnv))))
            size <- size + object.size(as(x, "ContinuousProcess"))
            return(structure(size, class = "object_size"))
          }
          )
## Store explicit point pointers only while some row subset is active; with
## no subsets at all the -1L sentinel defers to the pointers stored in the
## environment.
setReplaceMethod("setPointPointer", c("MarkedPointProcess", "ANY"),
                 function(object, value) {
                   noSubset <- identical(object@iSubset[1], -1L) &&
                     identical(object@iPointSubset[1], -1L)
                   object@pointPointer <- if (noSubset) -1L else value
                   return(object)
                 }
                 )
## Per-id summary table.  Extends the inherited summary with point counts
## per mark type (lumping the least frequent types into an 'Other' column
## when there are more than 'maxsum' mark types) and with per-id summaries
## of the mark-value columns (mean for numeric, modal value otherwise).
## Bug fix: the dots formal was misspelled '....' (four dots), which
## declares an ordinary argument named '....' rather than the '...'
## expected when the method is matched against the generic.
setMethod("summarizeData", "MarkedPointProcess",
          function(object, maxsum = 100, ...) {
            summaryById <- callNextMethod()
            if(length(colNames(object, "mark")) > 0) {
              id <- getPointId(object, drop = FALSE)
              splitEntries <- split(seq_along(id), id)
              names(splitEntries) <- NULL
              ## Count occurrences of each mark type within each id.
              pointSummary <- do.call("rbind", lapply(splitEntries, function(e) table(getMarkType(object, drop = FALSE)[e])))
              colnames(pointSummary) <- paste("#", colnames(pointSummary), sep = "")
              if (ncol(pointSummary) > maxsum) {
                ## Keep the most frequent mark types and fold the remainder
                ## into a single 'Other' column.
                drop <- maxsum:ncol(pointSummary)
                tt <- colSums(pointSummary)
                o <- order(tt, decreasing = TRUE)
                pointSummary <- cbind(pointSummary[, o[-drop], drop = FALSE],
                                      data.frame(Other = rowSums(pointSummary[, o[drop], drop = FALSE]))
                                      )
              }
              summaryById <- cbind(summaryById, pointSummary)
              sumVal <- list()
              for(j in colNames(object, "markValue")) {
                column <- getColumns(object, j)
                if(is.numeric(column)) {
                  sumVal[[paste("mean(", j ,")", sep ="")]] <-
                    sapply(splitEntries, function(e) mean(column[e]))
                } else {
                  sumVal[[paste("most freq. ", j , sep ="")]] <-
                    sapply(splitEntries, function(e) names(which.max(table(column[e]))))
                }
              }
              if(length(sumVal) > 0)
                summaryById <- cbind(summaryById,
                                     as.data.frame(sumVal, optional = TRUE))
            }
            return(summaryById)
          }
          )
## Drop all subsetting, restoring the object to its full data view.
setMethod("unsubset", "MarkedPointProcess",
          function(x, ...) {
            as(x, "ContinuousProcess") <- callGeneric(as(x, "ContinuousProcess"))
            ## Reset every point-level subset slot to the -1L sentinel.
            for (slotName in c("iPointSubset", "jPointSubset", "pointPointer")) {
              slot(x, slotName) <- -1L
            }
            return(x)
          }
          )
| 30,452 | gpl-2.0 |
82de513da89c45f58ca0843b7eb1e3968dda0c68 | emacs-ess/ESS | test/literate/misc.R |
##### Indentation
### 1 ----------------------------------------------------------------
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
##! "C-M-q"
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
##! "C-u"
##! "C-M-q"
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
| 358 | gpl-3.0 |
82de513da89c45f58ca0843b7eb1e3968dda0c68 | emacsmirror/ess | test/literate/misc.R |
##### Indentation
### 1 ----------------------------------------------------------------
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
##! "C-M-q"
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
##! "C-u"
##! "C-M-q"
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
| 358 | gpl-3.0 |
294ac8f487f7ccc02cb4ca70156b68ccb34a935c | madmax983/h2o-3 | h2o-r/tests/testdir_algos/gbm/runit_GBM_groupsplit_bigcat.R |
library(gbm)
# GBM classification on a predictor with 100 categorical levels.  Odd
# categories (cat001, cat003, ...) perfectly predict y = 1 and even ones
# (cat002, cat004, ...) predict y = 0; the test fits two single-tree GBMs
# and collects their performance on the training frame.
test.GBM.bigcat <- function() {
  Log.info("Importing bigcat_5000x2.csv data...\n")
  train_hex <- h2o.uploadFile(locate("smalldata/gbm_test/bigcat_5000x2.csv"), destination_frame = "bigcat.hex")
  train_hex$y <- as.factor(train_hex$y)
  Log.info("Summary of bigcat_5000x2.csv from H2O:\n")
  print(summary(train_hex))

  # First model ("naive split" in name only; group splitting is always on
  # by default, so both fits use identical parameters)
  Log.info("H2O GBM (Naive Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
  model_naive <- h2o.gbm(x = "X", y = "y", training_frame = train_hex, ntrees = 1, max_depth = 1, nbins = 100, distribution = "bernoulli")
  print(model_naive)
  perf_naive <- h2o.performance(model_naive, train_hex)

  # Second model ("group split")
  Log.info("H2O GBM (Group Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
  model_group <- h2o.gbm(x = "X", y = "y", training_frame = train_hex, ntrees = 1, max_depth = 1, nbins = 100, distribution = "bernoulli")
  print(model_group)
  perf_group <- h2o.performance(model_group, train_hex)

  # Check AUC and overall prediction error at least as good with group split than without
  #expect_true(h2o.auc(perf_group) >= h2o.auc(perf_naive))
  #expect_true(h2o.accuracy(perf_group, 0.5) <= h2o.accuracy(perf_naive, 0.5))
}
doTest("GBM Test: Classification with 100 categorical level predictor", test.GBM.bigcat)
| 1,670 | apache-2.0 |
82de513da89c45f58ca0843b7eb1e3968dda0c68 | dellison/ESS | test/literate/misc.R |
##### Indentation
### 1 ----------------------------------------------------------------
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
##! "C-M-q"
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
##! "C-u"
##! "C-M-q"
{
fun¶_call(
argument
) +
stuff1
} +
stuff2
| 358 | gpl-2.0 |
294ac8f487f7ccc02cb4ca70156b68ccb34a935c | pchmieli/h2o-3 | h2o-r/tests/testdir_algos/gbm/runit_GBM_groupsplit_bigcat.R |
library(gbm)
# GBM classification on a predictor with 100 categorical levels.  Odd
# categories (cat001, cat003, ...) perfectly predict y = 1 and even ones
# (cat002, cat004, ...) predict y = 0; the test fits two single-tree GBMs
# and collects their performance on the training frame.
test.GBM.bigcat <- function() {
  Log.info("Importing bigcat_5000x2.csv data...\n")
  train_hex <- h2o.uploadFile(locate("smalldata/gbm_test/bigcat_5000x2.csv"), destination_frame = "bigcat.hex")
  train_hex$y <- as.factor(train_hex$y)
  Log.info("Summary of bigcat_5000x2.csv from H2O:\n")
  print(summary(train_hex))

  # First model ("naive split" in name only; group splitting is always on
  # by default, so both fits use identical parameters)
  Log.info("H2O GBM (Naive Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
  model_naive <- h2o.gbm(x = "X", y = "y", training_frame = train_hex, ntrees = 1, max_depth = 1, nbins = 100, distribution = "bernoulli")
  print(model_naive)
  perf_naive <- h2o.performance(model_naive, train_hex)

  # Second model ("group split")
  Log.info("H2O GBM (Group Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
  model_group <- h2o.gbm(x = "X", y = "y", training_frame = train_hex, ntrees = 1, max_depth = 1, nbins = 100, distribution = "bernoulli")
  print(model_group)
  perf_group <- h2o.performance(model_group, train_hex)

  # Check AUC and overall prediction error at least as good with group split than without
  #expect_true(h2o.auc(perf_group) >= h2o.auc(perf_naive))
  #expect_true(h2o.accuracy(perf_group, 0.5) <= h2o.accuracy(perf_naive, 0.5))
}
doTest("GBM Test: Classification with 100 categorical level predictor", test.GBM.bigcat)
| 1,670 | apache-2.0 |
008b0d2fec659e7f0e637f5025835680ac1ef88b | jirikadlec2/rushvalley | R/demo.R | #example R-script for plotting values from HydroServer
#this example compares ndvi for 'mammal' and 'no mammal'
#to run this script, you must install the packages devtools, XML, RCurl, plyr, ggplot2
require(XML)
require(RCurl)
require(plyr)
require(ggplot2)
#install the development version of the waterml package!
library(waterml)
#########################################################################
# Example: Get daily max NDVI's by mammal treatment in the long format #
# change startDate, endDate, variable to show different plot !! #
#########################################################################
server <- "http://worldwater.byu.edu/app/index.php/rushvalley/services/cuahsi_1_1.asmx"
startDate = "2014-10-07"
endDate = "2014-10-15"
variable = "SRS_Nr_NDVI"
#get the sites
all_sites <- GetSites(server)
#get the variables
all_variables <- GetVariables(server)

#get the values from all sites that measure NDVI.
#Collect each site's data in a list and bind once at the end: growing a
#data frame with rbind() inside the loop copies it on every iteration.
data_list <- vector("list", nrow(all_sites))
for (i in seq_len(nrow(all_sites))) {
  sitecode <- all_sites$SiteCode[i]
  new_data <- GetValues(server, sitecode, variable, startDate, endDate, daily="max")
  #skip sites that don't have NDVI data in this time period
  if (is.null(new_data)) {
    print ('no data!')
    next
  }
  #we add the site and treatment column to identify the site and the treatment
  new_data$site <- sitecode
  #to identify mammal/no mammal treatment we use the 5th character of the SiteCode
  new_data$treatment <- substring(sitecode, 5, 5)
  data_list[[i]] <- new_data
}
#rbind() ignores NULL elements, so skipped sites simply drop out
data <- do.call(rbind, data_list)
##################################################################################
# Example: Plot the error bar plot, mammals vs. no mammals                       #
# see: http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/ #
##################################################################################
#we summarize the data by day and treatment (mean and sd of the daily max NDVI)
data.summarized <- ddply(data, ~time+treatment, summarise, mean=mean(DataValue), sd=sd(DataValue))
#rename the third column (the mean) to a more descriptive name
names(data.summarized)[3] <- "daily.max.NDVI"
# Daily plot: no error bars
ggplot(data.summarized, aes(x=time, y=daily.max.NDVI, colour=treatment)) + 
    geom_line() +
    geom_point()
# Daily plot: errorbars (+/- one standard deviation)
# The errorbars overlapped, so we use position_dodge to move them horizontally
pd <- position_dodge(0.25) # move them 0.25 to the left and right
ggplot(data.summarized, aes(x=time, y=daily.max.NDVI, ymax=max(daily.max.NDVI), colour=treatment)) + 
    geom_errorbar(aes(ymin=daily.max.NDVI-sd, ymax=daily.max.NDVI+sd), width=.5, position=pd) +
    geom_line(position=pd) +
    geom_point(position=pd)
| 2,764 | mit |
e5be6ecb99c40021d2f8c22d214c5614866892bb | iainmstott/popdemo | 1.3-0/popdemo/R/sens.R | ################################################################################
#' Calculate sensitivity matrix
#'
#' @description
#' Calculate the sensitivity matrix for a population matrix projection model
#' using eigenvectors.
#'
#' @param A a square, non-negative numeric matrix of any dimension
#' @param eval the eigenvalue to evaluate. Default is \code{eval="max"}, which
#' evaluates the dominant eigenvalue (the eigenvalue with largest REAL value:
#' for imprimitive or reducible matrices this may not be the first eigenvalue).
#' Otherwise, specifying e.g. \code{eval=2} will evaluate sensitivity of the
#' eigenvalue with second-largest modulus.
#' @param all (optional) if \code{FALSE}, then only sensitivity values for
#' observed transitions (nonzero entries in \code{A}) are returned.
#'
#' @details
#' \code{sens} uses the eigenvectors of \code{A} to calculate the sensitivity
#' matrix of the specified eigenvalue, see section 9.1 in Caswell (2001).
#' Same method as \code{sensitivity} in \code{popbio} but can also evaluate
#' subdominant eigenvalues.
#'
#' @return
#' A numeric (real or complex) matrix of equal dimension to \code{A}.
#'
#' @references
#' \itemize{
#' \item Caswell (2001) Matrix Population Models 2nd ed. Sinauer.
#' }
#'
#' @family PerturbationAnalyses
#'
#' @examples
#' # Create a 3x3 PPM
#' ( A <- matrix(c(0,1,2,0.5,0.1,0,0,0.6,0.6), byrow=TRUE, ncol=3) )
#'
#' # Calculate sensitivities of dominant eigenvalue
#' sens(A)
#'
#' # Calculate sensitivities of first subdominant eigenvalue,
#' # only for observed transitions
#' sens(A, eval=2, all=FALSE)
#'
#' @concept
#' perturbation sensitivity elasticity
#'
#' @export sens
#'
sens <-
function(A,eval="max",all=FALSE){
    ## Sensitivity matrix of an eigenvalue of A via left/right eigenvectors:
    ## S = conj(v) w' / <v, w> (Caswell 2001, section 9.1).
    if(any(length(dim(A))!=2,dim(A)[1]!=dim(A)[2])) stop("A must be a square matrix")
    reigs<-eigen(A)      # right eigenvectors (w)
    leigs<-eigen(t(A))   # left eigenvectors of A = right eigenvectors of t(A) (v)
    ## eval="max" selects the eigenvalue with the largest real part;
    ## otherwise 'eval' is used directly as a column index.
    if(eval=="max"){
        val<-which.max(Re(reigs$values))
    }
    else{
        val<-eval
    }
    w<-as.matrix(reigs$vectors[,val])
    v<-as.matrix(leigs$vectors[,val])
    S<-(Conj(v)%*%t(w))/as.vector(Conj(t(v))%*%w)
    ## With all=FALSE, zero out sensitivities of unobserved transitions.
    if(!all) S[A==0]<-0
    ## Return the complex matrix only when a positive imaginary part is
    ## present, otherwise drop to the real part.
    ## NOTE(review): a genuinely complex S whose imaginary parts are all
    ## <= 0 would be silently realified here -- confirm this edge case.
    if(max(Im(S))>0) return(S) else(return(Re(S)))
}
| 2,139 | gpl-3.0 |
e5be6ecb99c40021d2f8c22d214c5614866892bb | iainmstott/popdemo | 1.3-1/popdemo/R/sens.R | ################################################################################
#' Calculate sensitivity matrix
#'
#' @description
#' Calculate the sensitivity matrix for a population matrix projection model
#' using eigenvectors.
#'
#' @param A a square, non-negative numeric matrix of any dimension
#' @param eval the eigenvalue to evaluate. Default is \code{eval="max"}, which
#' evaluates the dominant eigenvalue (the eigenvalue with largest REAL value:
#' for imprimitive or reducible matrices this may not be the first eigenvalue).
#' Otherwise, specifying e.g. \code{eval=2} will evaluate sensitivity of the
#' eigenvalue with second-largest modulus.
#' @param all (optional) if \code{FALSE}, then only sensitivity values for
#' observed transitions (nonzero entries in \code{A}) are returned.
#'
#' @details
#' \code{sens} uses the eigenvectors of \code{A} to calculate the sensitivity
#' matrix of the specified eigenvalue, see section 9.1 in Caswell (2001).
#' Same method as \code{sensitivity} in \code{popbio} but can also evaluate
#' subdominant eigenvalues.
#'
#' @return
#' A numeric (real or complex) matrix of equal dimension to \code{A}.
#'
#' @references
#' \itemize{
#' \item Caswell (2001) Matrix Population Models 2nd ed. Sinauer.
#' }
#'
#' @family PerturbationAnalyses
#'
#' @examples
#' # Create a 3x3 PPM
#' ( A <- matrix(c(0,1,2,0.5,0.1,0,0,0.6,0.6), byrow=TRUE, ncol=3) )
#'
#' # Calculate sensitivities of dominant eigenvalue
#' sens(A)
#'
#' # Calculate sensitivities of first subdominant eigenvalue,
#' # only for observed transitions
#' sens(A, eval=2, all=FALSE)
#'
#' @concept
#' perturbation sensitivity elasticity
#'
#' @export sens
#'
sens <-
function(A,eval="max",all=FALSE){
    ## Sensitivity matrix of an eigenvalue of A via left/right eigenvectors:
    ## S = conj(v) w' / <v, w> (Caswell 2001, section 9.1).
    if(any(length(dim(A))!=2,dim(A)[1]!=dim(A)[2])) stop("A must be a square matrix")
    reigs<-eigen(A)      # right eigenvectors (w)
    leigs<-eigen(t(A))   # left eigenvectors of A = right eigenvectors of t(A) (v)
    ## eval="max" selects the eigenvalue with the largest real part;
    ## otherwise 'eval' is used directly as a column index.
    if(eval=="max"){
        val<-which.max(Re(reigs$values))
    }
    else{
        val<-eval
    }
    w<-as.matrix(reigs$vectors[,val])
    v<-as.matrix(leigs$vectors[,val])
    S<-(Conj(v)%*%t(w))/as.vector(Conj(t(v))%*%w)
    ## With all=FALSE, zero out sensitivities of unobserved transitions.
    if(!all) S[A==0]<-0
    ## Return the complex matrix only when a positive imaginary part is
    ## present, otherwise drop to the real part.
    ## NOTE(review): a genuinely complex S whose imaginary parts are all
    ## <= 0 would be silently realified here -- confirm this edge case.
    if(max(Im(S))>0) return(S) else(return(Re(S)))
}
| 2,139 | gpl-3.0 |
60d17629885b6e52dea70ed6f785e136bfc928bb | mbjoseph/gmix_networks | R/rwalk.R | # simulation of observations at one location
library(igraph)
# Random network: 5-20 nodes generated by igraph's preferential-attachment
# model, with adjacency matrix Atrue and node degrees s.
n_nodes <- sample(5:20, 1)
Anet <- sample_pa(n_nodes, directed=FALSE)
plot(Anet)
Atrue <- as.matrix(as_adj(Anet))
s <- colSums(Atrue)
# maximum number of observations
zmax <- 1000
# b: node visited at each observation; timesteps: observation times;
# clus: index of the observation bout each record belongs to
b <- rep(NA, zmax)
timesteps <- rep(NA, zmax)
clus <- rep(NA, zmax)
# initialize the first observation at a uniformly chosen start node
time <- 1
z <- 1
start_node <- sample(n_nodes, 1)
timesteps[1] <- time
b[1] <- start_node
clus[1] <- 1
while(z < zmax){
# how many steps in the random walk
nsteps <- rpois(1, 15)
focal_node <- b[z]
while(nsteps > 0){
if (b[z] == focal_node){
# probabilities of walking to each node
p <- Atrue[b[z], ] / s[b[z]]
visit <- sample(n_nodes, 1, prob=p)
z <- z + 1
timesteps[z] <- timesteps[z - 1] + 1
nsteps <- nsteps - 1
clus[z] <- clus[z - 1]
b[z] <- visit
} else { # the RW is in a neighbor of the focal node
stopifnot(b[z] %in% which(Atrue[focal_node, ] > 0))
# define common neighbors
Ac <- Atrue[focal_node, ] * Atrue[b[z], ]
if (sum(Ac) == 0){
visit <- focal_node
} else {
p <- Ac / sum(Ac)
visit <- sample(n_nodes, 1, prob=p)
}
z <- z + 1
timesteps[z] <- timesteps[z - 1] + 1
nsteps <- nsteps - 1
clus[z] <- clus[z - 1]
b[z] <- visit
}
}
if (z < zmax){
z <- z + 1
timesteps[z] <- timesteps[z - 1] + rpois(1, 200)
clus[z] <- clus[z - 1] + 1
b[z] <- sample(n_nodes, 1)
}
}
# collect the simulated observations and plot the visit sequence (colored
# by node) next to the generating network
d <- data.frame(b, timesteps)
par(mfrow=c(3, 2))
plot(timesteps, col=b, pch=19, 
     xlab='z = 1, ..., Z', 
     ylab=expression(paste(t == t[1], ", ...,", t[Z])))
legend('topleft', col=1:n_nodes, pch=19, 
       legend = paste('Individual', 1:n_nodes), 
       bty='n')
plot(Anet)
# one indicator column per bout (filled in by the following loop)
clusmat <- array(dim=c(length(b), max(clus)))
for (i in 1:max(clus)){
clusmat[, i] <- as.numeric(clus == i)
} | 1,899 | gpl-2.0 |
d47eeba53adc596d97cc8cc4412fd187c3862578 | b-steve/sscr | R/RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin auto-generated binding (see header: regenerated by
# Rcpp::compileAttributes, do not edit by hand): forwards its arguments to
# the compiled C++ routine '_sscr_make_toa_ssq' in the sscr package.
make_toa_ssq <- function(capt, dists, sound_speed) {
    .Call('_sscr_make_toa_ssq', PACKAGE = 'sscr', capt, dists, sound_speed)
}
| 259 | gpl-3.0 |
24f0a6493d27f1d7154e95ca367828a7ea6440ea | variani/bigcov | R/bigcov.R | #' @export
bigcov <- function(data, center = TRUE, scale = FALSE,
  num_splits = 2,
  verbose = 0, ...)
{
  # Public alias: forward every argument, fully named, to the canonical
  # implementation in big_cov().
  big_cov(data = data, center = center, scale = scale,
    num_splits = num_splits,
    verbose = verbose, ...)
}
#' @export
big_cov <- function(data, center = TRUE, scale = FALSE,
  num_splits = 2,
  verbose = 0, ...)
{
  ## Covariance matrix of `data`, computed blockwise: the cross-product is
  ## delegated to big_crossprod() (split into `num_splits` pieces) and then
  ## divided by the usual n - 1 denominator.
  n <- nrow(data)

  ## scale() also accepts numeric vectors for `center`/`scale`; test each
  ## argument against the literal FALSE instead of the former
  ## `center | scale`, which produced a vector `if` condition (an error in
  ## modern R) for vector-valued arguments.
  transform <- !identical(center, FALSE) || !identical(scale, FALSE)
  if(transform) {
    data <- scale(data, center = center, scale = scale)
  }

  ## Blockwise t(data) %*% data.
  covmat <- big_crossprod(data,
    num_splits = num_splits, verbose = verbose, ...)
  covmat <- covmat / (n - 1)

  return(covmat)
}
| 675 | gpl-3.0 |
bb9c9341cdb6f8428629459449e6c13daf731c2a | USGS-R/sbtools | R/query_sb_spatial.R | #' @title Query SB based on spatial extent
#'
#' @inheritParams query_sb
#' @param bbox An sp spatial data object. The bounding box of the object is used for the query.
#' @param long A vector of longitude values that will define the boundaries of a bounding box. Min and Max of supplied longitudes are used. (alternate option to bbox).
#' @param lat A vector of latitude values that will define the boundaries of a bounding box. Min and Max of supplied latitude are used. (alternate option to bbox).
#' @param bb_wkt A character string using the Well Known Text (WKT) standard for defining spatial data. Must be a POLYGON WKT object.
#'
#' @description
#' Queries ScienceBase based on a spatial bounding box. Accepts either an sp spatial data object
#' (uses the spatial object's bounding box) or long/lat coordinates defining the bounding box limits.
#'
#'
#' @examples \donttest{
#' #specify the latitude and longitude points to define the bounding box range.
#' # This is simply bottom left and top right points
#' query_sb_spatial(long=c(-104.4, -95.1), lat=c(37.5, 41.0), limit=3)
#'
#' #use a pre-formatted WKT polygon to grab data
#' query_sb_spatial(bb_wkt="POLYGON((-104.4 41.0,-95.1 41.0,-95.1 37.5,-104.4 37.5,-104.4 41.0))",
#' limit=3)
#' }
#' @export
#'
## Build a WKT bounding-box polygon from one of three possible inputs
## (an sp object, long/lat vectors, or a ready-made WKT string) and
## delegate the search to query_sb().
query_sb_spatial = function(bbox, long, lat, bb_wkt, ..., limit=20, session=current_session()){
	## NOTE(review): '...' and 'session' are accepted but never forwarded to
	## query_sb() below -- confirm whether they should be passed on.
	if(!missing(bbox)){
		message("bbox must be from an sp object in WGS84 Lon/Lat")
		bb_wkt = make_wkt(bbox@bbox)
	}else if(!missing(long) & !missing(lat)){
		## Build a 2x2 bbox matrix in the sp layout: rows are lon/lat,
		## columns are min/max.
		bb = matrix(nrow=2, ncol=2)
		bb[1,1] = min(long)
		bb[2,1] = min(lat)
		bb[1,2] = max(long)
		bb[2,2] = max(lat)
		bb_wkt = make_wkt(bb)
	}else if(!missing(bb_wkt)){
		#We'll just assume this is fine then and will be passed to query_sb
	}else{
		stop('Must supply either bbox sp object or bb_min and bb_max vectors')
	}
	query_sb(list(spatialQuery = bb_wkt), limit=limit)
}
make_wkt = function(bb){
  # Build a closed WKT POLYGON ring from a 2x2 extent matrix:
  # row 1 = x (lon), row 2 = y (lat); col 1 = min, col 2 = max.
  xmin <- bb[1,1]
  xmax <- bb[1,2]
  ymin <- bb[2,1]
  ymax <- bb[2,2]
  # Trace the ring NW -> NE -> SE -> SW and close it back at NW.
  xs <- c(xmin, xmax, xmax, xmin, xmin)
  ys <- c(ymax, ymax, ymin, ymin, ymax)
  ring <- paste(paste(xs, ys), collapse = ',')
  paste0('POLYGON((', ring, '))')
}
| 2,148 | cc0-1.0 |
7f927d531465f221e4834eeeab984a9bef450f07 | kogalur/randomForestSRC | src/main/resources/cran/R/vimp.rfsrc.R | vimp.rfsrc <- function(object,
                        xvar.names,
                        m.target = NULL,
                        importance = c("anti", "permute", "random"),
                        block.size = 10,
                        joint = FALSE,
                        seed = NULL,
                        do.trace = FALSE,
                        ...)
{
  ## Compute variable importance (VIMP) for a grown randomForestSRC forest.
  ## Validates `object` and resolves the importance specification, then
  ## delegates the actual computation to generic.predict.rfsrc() and returns
  ## its result unchanged.
  ##
  ## object     : an object of class (rfsrc, grow) or (rfsrc, forest).
  ## xvar.names : x-variables to compute importance for (forwarded to the
  ##              predict call as importance.xvar).
  ## m.target   : target outcome for multivariate families (forwarded).
  ## importance : importance type; only the first element is used, and it is
  ##              validated against the choices listed in match.arg() below.
  ## block.size : forwarded to generic.predict.rfsrc().
  ## joint      : if TRUE, rewrite the importance type into its ".joint" form.
  ## seed, do.trace, ... : forwarded to generic.predict.rfsrc().
  ##
  ## incoming parameter checks - all are fatal
  if (missing(object)) {
    stop("object is missing")
  }
  if (object$family == "unsupv") {
    stop("vimp does not apply to unsupervised forests: consider using max.subtree and var.select")
  }
  ## inherits(..., which = TRUE) returns the match position of each class;
  ## equality with c(1, 2) requires "rfsrc" first and "grow"/"forest" second
  ## in the class vector.
  if (sum(inherits(object, c("rfsrc", "grow"), TRUE) == c(1, 2)) != 2 &
      sum(inherits(object, c("rfsrc", "forest"), TRUE) == c(1, 2)) != 2) {
    stop("This function only works for objects of class `(rfsrc, grow)' or '(rfsrc, forest)'")
  }
  ## process the importance specification
  if (!is.logical(joint)) {
    stop("joint must be a logical value")
  }
  importance <- importance[1]
  ## When joint importance is requested, rewrite e.g. "permute" into
  ## "permute.joint" (or a dotted "a.b" form into "a.joint.b") before the
  ## match.arg() validation below.
  ## NOTE(review): given the choices validated below, `importance` can never
  ## equal "none" here, so the != "none" guard looks vestigial -- confirm.
  if (joint & importance != "none") {
    i.str <- unlist(strsplit(importance, "\\."))
    if (length(i.str) == 1) {
      importance <- paste(i.str[1], ".joint", sep = "")
    }
    else if (length(i.str) == 2) {
      importance <- paste(i.str[1], ".joint.", i.str[2], sep = "")
    }
  }
  importance <- match.arg(as.character(importance),
                          c("anti", "permute", "random", "anti.joint", "permute.joint", "random.joint"))
  ## grow objects under non-standard bootstrapping are devoid of performance values
  ## (the bootstrap setting lives on the forest for grow objects, and on the
  ## object itself for forest objects).
  if (sum(inherits(object, c("rfsrc", "grow"), TRUE) == c(1, 2)) == 2) {
    if (is.null(object$forest)) {
      stop("The forest is empty. Re-run rfsrc (grow) call with forest=TRUE")
    }
    else {
      bootstrap <- object$forest$bootstrap
    }
  }
  else {
    bootstrap <- object$bootstrap
  }
  if (bootstrap == "none" || bootstrap == "by.node") {
    stop("grow objects under non-standard bootstrapping are devoid of performance values")
  }
  ## make the call to generic predict
  result <- generic.predict.rfsrc(object,
                                  m.target = m.target,
                                  importance = importance,
                                  block.size = block.size,
                                  importance.xvar = xvar.names,
                                  seed = seed,
                                  do.trace = do.trace,
                                  membership = FALSE,
                                  ...)
  return(result)
}
## Public alias: vimp() dispatches to the rfsrc implementation.
vimp <- vimp.rfsrc
| 2,530 | gpl-3.0 |
01d56c476c111bf7140192d3e38675502a549cd0 | cities-lab/tci | ExploreCutoff/sensitivity_analysis_output.R | # This script prepares the workspace and file directories for calculating travel time
# and travel cost with employment centres methods
# Set workspace
# NOTE(review): setwd() to an absolute home path makes this script
# non-portable -- confirm it is only run on the intended cluster account.
setwd("~/tci")
# Snapshot of names currently in the workspace; used by the rm() at the end
# to remove everything this script created.
var_list.0 <- ls()
# settings.R is assumed to define INTERMEDIATE_DIR (and SAVE.INTERMEDIARIES,
# used by the later variants below) -- TODO confirm.
source("code/cluster/settings.R")
# define density cutoff and total cutoff
# Percentile cutoffs explored in the sensitivity analysis.
den.cutoff <- c(50,60,70,80,90,95)
tot.cutoff <- c(25,50,75)
all.df <- NULL
# For every (density, total) cutoff combination: load the previously saved
# aggregated travel-cost results, keep a renamed copy per combination, and
# column-bind the raw result columns into one wide data.frame.
for (dc in den.cutoff) {
  for (tc in tot.cutoff) {
    # load results of specific cutoffs
    den.tot.cutoff <- paste(dc, tc, sep="_")
    # load() injects minpeakAggCost.Zi, weightedAggTpCost.Zi, etc. into the
    # workspace; names are assumed to match those saved by the aggregation
    # step -- TODO confirm.
    load(file.path(INTERMEDIATE_DIR, paste("results", den.tot.cutoff, ".RData", sep="")))
    # rename the calculation results with specific cutoffs
    # (e.g. "minpeakAggCost50_25.Zi") so results from different cutoff
    # combinations can coexist in the workspace.
    minpeakAggCost.Zi.name <- paste("minpeakAggCost", den.tot.cutoff, ".Zi", sep="")
    assign(minpeakAggCost.Zi.name, minpeakAggCost.Zi)
    minoffpeakAggCost.Zi.name <- paste("minoffpeakAggCost", den.tot.cutoff, ".Zi", sep="")
    assign(minoffpeakAggCost.Zi.name, minoffpeakAggCost.Zi)
    minAggTpCost.Zi.name <- paste("minAggCost", den.tot.cutoff, ".Zi", sep="")
    assign(minAggTpCost.Zi.name, minAggTpCost.Zi)
    weightedpeakAggCost.Zi.name <- paste("weightedpeakAggCost", den.tot.cutoff, ".Zi", sep="")
    assign(weightedpeakAggCost.Zi.name, weightedpeakAggCost.Zi)
    weightedoffpeakAggCost.Zi.name <- paste("weightedoffpeakAggCost", den.tot.cutoff, ".Zi", sep="")
    assign(weightedoffpeakAggCost.Zi.name, weightedoffpeakAggCost.Zi)
    weightedAggTpCost.Zi.name <- paste("weightedAggCost", den.tot.cutoff, ".Zi", sep="")
    assign(weightedAggTpCost.Zi.name, weightedAggTpCost.Zi)
    # convert array into data.frame
    df <- data.frame(minpeakAggCost.Zi, minoffpeakAggCost.Zi, minAggTpCost.Zi,
                     weightedpeakAggCost.Zi, weightedoffpeakAggCost.Zi, weightedAggTpCost.Zi)
    colnames(df) <- c(minpeakAggCost.Zi.name, minoffpeakAggCost.Zi.name, minAggTpCost.Zi.name,
                      weightedpeakAggCost.Zi.name, weightedoffpeakAggCost.Zi.name, weightedAggTpCost.Zi.name)
    df.name <- paste("df", den.tot.cutoff, sep="")
    assign(df.name, df)
    # Drop the loaded objects so the next iteration's load() starts clean.
    remove(minpeakAggCost.ZiIcPr, minpeakAggCost.Zi, minoffpeakAggCost.ZiIcPr, minoffpeakAggCost.Zi,
           weightedpeakAggCost.ZiIcPr, weightedpeakAggCost.Zi, weightedoffpeakAggCost.ZiIcPr, weightedoffpeakAggCost.Zi,
           minAggTpCost.Zi, weightedAggTpCost.Zi)
    # store all data.frame into one data.frame
    if(is.null(all.df)) {
      all.df <- df
    }
    else{
      all.df <- cbind(all.df, df)
    }
  }
}
# use stargazer function to export out put
require(stargazer)
intm.file <- file.path(INTERMEDIATE_DIR, "sensiti_analy_output.txt")
# Write descriptive statistics of every column to a plain-text file.
stargazer(all.df, type = "text", title="Descriptive statistics",
          summary.stat = c("n","min", "p25", "median", "mean","sd", "p75", "max"), digits=4, out=intm.file)
# Remove every object created by this script, restoring the workspace to the
# snapshot taken in var_list.0.
var_list.1 <- ls()
rm(list=var_list.1[!(var_list.1 %in% var_list.0)])
rm(var_list.1)
#
# df <- data.frame(density.cutoff= dc, total.cutoff=tc,
# minpeakAggCost.Zi, minoffpeakAggCost.Zi, minAggTpCost.Zi,
# weightedpeakAggCost.Zi, weightedoffpeakAggCost.Zi, weightedAggTpCost.Zi)
## Another method
# This script prepares the workspace and file directories for calculating travel time
# and travel cost with employment centres methods
# Set workspace
setwd("~/tci")
var_list.0 <- ls()
# settings.R is assumed to define INTERMEDIATE_DIR and SAVE.INTERMEDIARIES -- TODO confirm.
source("code/cluster/settings.R")
# load required packages
require(stargazer)
# define density cutoff and total cutoff
den.cutoff <- c(50,60,70,80,90,95)
tot.cutoff <- c(25,50,75)
all.df <- NULL
# Variant of the first loop: instead of keeping the raw result columns, each
# cutoff combination is summarised with stargazer and only the formatted
# text summary (one column per combination) is kept.
for (dc in den.cutoff) {
  for (tc in tot.cutoff) {
    # load results of specific cutoffs
    den.tot.cutoff <- paste(dc, tc, sep="_")
    load(file.path(INTERMEDIATE_DIR, paste("results", den.tot.cutoff, ".RData", sep="")))
    # convert array into data.frame
    df <- data.frame(minpeakAggCost.Zi, minoffpeakAggCost.Zi, minAggTpCost.Zi,
                     weightedpeakAggCost.Zi, weightedoffpeakAggCost.Zi, weightedAggTpCost.Zi)
    title.name <- paste("density.cutoff = ", dc, "and total.cutoff = ", tc, sep=" ")
    # stargazer() prints the table and returns it as a character vector,
    # captured here in ds.
    ds <- stargazer(df, type = "text", title=title.name,
                    summary.stat = c("n","min", "p25", "median", "mean","sd", "p75", "max"), digits=4)
    #df.name <- paste("df", den.tot.cutoff, sep="")
    #assign(df.name, df)
    # Drop the loaded objects so the next iteration's load() starts clean.
    remove(minpeakAggCost.ZiIcPr, minpeakAggCost.Zi, minoffpeakAggCost.ZiIcPr, minoffpeakAggCost.Zi,
           weightedpeakAggCost.ZiIcPr, weightedpeakAggCost.Zi, weightedoffpeakAggCost.ZiIcPr, weightedoffpeakAggCost.Zi,
           minAggTpCost.Zi, weightedAggTpCost.Zi)
    # Keep the formatted summary text as a one-column data.frame.
    df <- data.frame(ds)
    colnames(df) <- "Descriptive statistics"
    # store all data.frame into one data.frame
    # NOTE(review): cbind() requires every summary to have the same number of
    # text rows -- confirm that holds for all cutoff combinations.
    if(is.null(all.df)) {
      all.df <- df
    }
    else{
      all.df <- cbind(all.df, df)
    }
  }
}
# print all.df
all.df
if (SAVE.INTERMEDIARIES) {
  intm.file <- file.path(INTERMEDIATE_DIR, "all_df.RData")
  save(all.df, file=intm.file)
}
# combine all total cutoff values into array
# Density-cutoff values by trip purpose (hbw/hbs/hbr/hbo) at each percentile;
# units and provenance are not shown here -- TODO confirm.
den.cutoff.values <- data.frame(hbw=c(170, 316,529,1010,2249,4138),hbs=c(30,54,102,218,545,1060),
                                hbr=c(1085,1375,1761,2238,3372,5494), hbo=c(251,353,496,766,1473,2553), row.names=c("50","60","70","80","90","95"))
str(den.cutoff.values)
# Exploratory scratch expression (prints the 50th-percentile hbw value * 5).
den.cutoff.values["50","hbw"]*5
# whole reproducible
# combine all total cutoff values into array
# Hard-coded total-cutoff values per trip purpose (hbw/hbs/hbr/hbo);
# rows = density-cutoff percentile, cols = total-cutoff percentile.
hbw.tot.cutoff.values <- rbind(
  "50"=c("25"=667, "50"=2229, "75"=5285),
  "60"=c("25"=1550, "50"=3903, "75"=14188),
  "70"=c("25"=2534, "50"=4819, "75"=9672),
  "80"=c("25"=2840, "50"=5165, "75"=12083),
  "90"=c("25"=3780, "50"=6584, "75"=10805),
  "95"=c("25"=8171, "50"=7805, "75"=9899))
hbs.tot.cutoff.values <- rbind(
  "50"=c("25"=600, "50"=1100, "75"=1594),
  "60"=c("25"=322, "50"=815, "75"=1660),
  "70"=c("25"=458, "50"=1134, "75"=3240),
  "80"=c("25"=593, "50"=1030, "75"=2616),
  "90"=c("25"=955, "50"=1845, "75"=3828),
  "95"=c("25"=694, "50"=1725, "75"=3124))
hbr.tot.cutoff.values <- rbind(
  "50"=c("25"=4567, "50"=7723, "75"=22201),
  "60"=c("25"=3777, "50"=7306, "75"=13519),
  "70"=c("25"=3239, "50"=6483, "75"=13077),
  "80"=c("25"=2850, "50"=6128, "75"=11056),
  "90"=c("25"=2746, "50"=6278, "75"=13772),
  "95"=c("25"=6848, "50"=9089, "75"=12932))
hbo.tot.cutoff.values <- rbind(
  "50"=c("25"=1433, "50"=2209, "75"=7268),
  "60"=c("25"=1395, "50"=2439, "75"=6592),
  "70"=c("25"=1874, "50"=3532, "75"=9055),
  "80"=c("25"=1348, "50"=3399, "75"=7554),
  "90"=c("25"=2089, "50"=3390, "75"=6886),
  "95"=c("25"=2768, "50"=3684, "75"=6601))
# 6 x 3 x 4 array indexed by [density percentile, total percentile, purpose].
tot.cutoff.values <- array(c(hbw.tot.cutoff.values, hbs.tot.cutoff.values, hbr.tot.cutoff.values, hbo.tot.cutoff.values),
                           dim=c(6,3,4), dimnames=list(c("50","60","70","80","90","95"), c("25","50","75"), c("hbw","hbs", "hbr", "hbo")))
# definde the percentile of cutoffs
den.cutoff.percentage <- c("50", "60", "70", "80", "90","95")
tot.cutoff.percentage <- c("25","50","75")
all.df <- NULL
# Fully reproducible variant: recompute the results for every cutoff
# combination via the sourced pipeline scripts instead of loading previously
# saved .RData files, optionally saving intermediates along the way.
for (dc in den.cutoff.percentage) {
  for (tc in tot.cutoff.percentage) {
    den.tot.cutoff.percentage <- paste(dc, tc, sep="_")
    # The sourced scripts are assumed to read den.tot.cutoff.percentage (and
    # dc/tc) from the global environment and to create the *AggCost* objects
    # used below -- TODO confirm.
    source("ExploreCutoff/cluster/identify_centers.R")
    source("ExploreCutoff/cluster/compute_md_prob_trips.R")
    source("ExploreCutoff/cluster/compute_tcost.R")
    source("ExploreCutoff/cluster/aggregate_tcost.R")
    if (SAVE.INTERMEDIARIES) {
      intm.file <- file.path(INTERMEDIATE_DIR, paste("results", den.tot.cutoff.percentage, ".RData", sep=""))
      save(minpeakAggCost.Zi, minoffpeakAggCost.Zi, minAggTpCost.Zi,
           weightedpeakAggCost.Zi, weightedoffpeakAggCost.Zi, weightedAggTpCost.Zi, file=intm.file)
    }
    # convert array into data.frame
    df <- data.frame(minpeakAggCost.Zi, minoffpeakAggCost.Zi, minAggTpCost.Zi,
                     weightedpeakAggCost.Zi, weightedoffpeakAggCost.Zi, weightedAggTpCost.Zi)
    title.name <- paste("density.cutoff = ", dc, "and total.cutoff = ", tc, sep=" ")
    # stargazer() prints the table and returns it as a character vector.
    ds <- stargazer(df, type = "text", title=title.name,
                    summary.stat = c("n","min", "p25", "median", "mean","sd", "p75", "max"), digits=4)
    #df.name <- paste("df", den.tot.cutoff, sep="")
    #assign(df.name, df)
    # Drop the computed objects so the next iteration starts clean.
    remove(minpeakAggCost.ZiIcPr, minpeakAggCost.Zi, minoffpeakAggCost.ZiIcPr, minoffpeakAggCost.Zi,
           weightedpeakAggCost.ZiIcPr, weightedpeakAggCost.Zi, weightedoffpeakAggCost.ZiIcPr, weightedoffpeakAggCost.Zi,
           minAggTpCost.Zi, weightedAggTpCost.Zi)
    # Keep the formatted summary text as a one-column data.frame.
    df <- data.frame(ds)
    colnames(df) <- "Descriptive statistics"
    # store all data.frame into one data.frame
    if(is.null(all.df)) {
      all.df <- df
    }
    else{
      all.df <- cbind(all.df, df)
    }
  }
}
print(all.df)
if (SAVE.INTERMEDIARIES) {
  intm.file <- file.path(INTERMEDIATE_DIR, "all_df.RData")
  save(all.df,file=intm.file)
}
| 9,045 | gpl-2.0 |
b7f3c8c694197985fcea42efeabafeb5484fd763 | zhangyaonju/Global_GPP_VPM_NCEP_C3C4 | NCEP/climate_data_ncep_aggregate.R | climate_data_ncep_aggregate<-function(year){
  # Aggregate NCEP reanalysis fields for one year into 46 eight-day
  # composites and write them out as NetCDF:
  #   - air.2m temperature: per grid cell, a daily value is formed as
  #     0.25*min + 0.75*max of the four 6-hourly values (after shifting the
  #     window toward local time), then averaged over each 8-day period.
  #     NOTE(review): the 0.25/0.75 min/max weighting is unexplained here --
  #     confirm it is the intended daytime-weighted temperature.
  #   - dswrf radiation: plain mean over each 8-day period.
  # Relies on ncdf4 functions (nc_open/ncvar_get/ncdim_def/...), which must
  # be available in the calling environment.  Input/output paths hard-coded.
  # NOTE(review): the 192x94 grid matches the NCEP Gaussian grid -- confirm
  # for the files actually used.
  air.temp.file<-nc_open(paste('/data/eomf/users/yzhang/NCEP/origin/air.2m.gauss.',year,'.nc',sep=''))
  radiation.file<-nc_open(paste('/data/eomf/users/yzhang/NCEP/origin/dswrf.sfc.gauss.',year,'.nc',sep=''))
  outfile<-c(paste('/data/eomf/users/yzhang/NCEP/temperature/air.2m.8day.',year,'.nc',sep=''),
             paste('/data/eomf/users/yzhang/NCEP/radiation/rad.8day.',year,'.nc',sep=''))
  air.temp.raw.data<-ncvar_get(air.temp.file,'air')
  # Scratch array for the (up to) 8 daily values of the current period, and
  # the 46-period output array.
  air.temp.day.data<-array(0,dim=c(192,94,8))
  air.temp.8day.data<-array(0,dim=c(192,94,46))
  # Per-longitude shift (in 6-hour steps) from UTC toward local solar time.
  # Longitudes run 0..360E, so values >= 180 are treated as western
  # hemisphere via (360 - lon); eastern longitudes yield negative offsets.
  timezone.offset<--round((air.temp.file$var[[1]]$dim[[1]]$vals*(air.temp.file$var[[1]]$dim[[1]]$vals<180)-
                             (360-air.temp.file$var[[1]]$dim[[1]]$vals)*(air.temp.file$var[[1]]$dim[[1]]$vals>179))/15/6)
  # Number of 6-hourly observations in the file; obs.num/4 = number of days.
  obs.num<-dim(air.temp.raw.data)[3]
  # Periods 2..45: each period spans 32 six-hourly observations (8 days x 4),
  # offset per longitude.
  # NOTE(review): the guard 1:min(8,obs.num/4-i*8) uses i*8 rather than
  # (i-1)*8, so for a 365-day year period 45 averages only its first 5 daily
  # values -- looks like an off-by-one; confirm intended window coverage.
  for(x in 1:192){
    for (y in 1:94){
      for (i in 2:45){
        for (j in 1:8){
          air.temp.day.data[x,y,j]<-min(air.temp.raw.data[x,y,(timezone.offset[x]+(i-1)*8*4+(j-1)*4+1):(timezone.offset[x]+(i-1)*8*4+4*j)])*0.25+
            max(air.temp.raw.data[x,y,(timezone.offset[x]+(i-1)*8*4+(j-1)*4+1):(timezone.offset[x]+(i-1)*8*4+4*j)])*0.75
        }
        air.temp.8day.data[x,y,i]<-mean(air.temp.day.data[x,y,1:min(8,obs.num/4-i*8)])
      }
    }
  }
  # Period 1 handled separately: negative (eastern) offsets would index
  # before the start of the file, hence the max(1, ...) clamp on the lower
  # bound of each daily window.
  for(x in 1:192){
    for (y in 1:94){
      for(j in 1:8){
        air.temp.day.data[x,y,j]<-min(air.temp.raw.data[x,y,max(1,(timezone.offset[x]+(j-1)*4+1)):(timezone.offset[x]+j*4)])*0.25+
          max(air.temp.raw.data[x,y,max(1,(timezone.offset[x]+(j-1)*4+1)):(timezone.offset[x]+j*4)])*0.75
      }
      air.temp.8day.data[x,y,1]<-mean(air.temp.day.data[x,y,1:8])
    }
  }
  # Period 46 (the short year-end period): only obs.num/4 - 360 days remain
  # after day 360; the min(obs.num, ...) clamp keeps the upper bound inside
  # the file.
  for(x in 1:192){
    for (y in 1:94){
      for(j in 1:(obs.num/4-360)){
        air.temp.day.data[x,y,j]<-min(air.temp.raw.data[x,y,(timezone.offset[x]+1440+(j-1)*4+1):min(obs.num,(timezone.offset[x]+1440+j*4))])*0.25+
          max(air.temp.raw.data[x,y,(timezone.offset[x]+1440+(j-1)*4+1):min(obs.num,(timezone.offset[x]+1440+j*4))])*0.75
      }
      air.temp.8day.data[x,y,46]<-mean(air.temp.day.data[x,y,1:(obs.num/4-360)])
    }
  }
  # Time stamps for each 8-day period: first 6-hourly time stamp of each
  # 32-observation block.
  time.output<-air.temp.file$var[[1]]$dim[[3]]$vals[(0:45)*32+1]
  x<-ncdim_def('Lon','degreesE',air.temp.file$var[[1]]$dim[[1]]$vals)
  y<-ncdim_def('Lat','degreesN',air.temp.file$var[[1]]$dim[[2]]$vals)
  t<-ncdim_def("Time",'hours since 1800-01-01 00:00:0.0',time.output,unlim=TRUE)
  ncair<-ncvar_def('air',"degK",list(x,y,t),missval=-9.96920996838687e+36,longname='8-day 2m air temperature',prec='float')
  ncair.out<-nc_create(outfile[1],ncair)
  ncvar_put(ncair.out,'air',air.temp.8day.data)
  nc_close(ncair.out)
  # Radiation: the dswrf file is indexed 8 values per 8-day period below,
  # i.e. it appears to be daily rather than 6-hourly -- TODO confirm.
  radiation.raw.data<-ncvar_get(radiation.file,'dswrf')
  radiation.8day.data<-array(0,dim=c(192,94,46))
  obs.num<-dim(radiation.raw.data)[3]
  # Periods 1..45: mean of the 8 daily values (8*i-7 .. 8*i).
  for(x in 1:192){
    for (y in 1:94){
      for (i in 1:45){
        radiation.8day.data[x,y,i]<-mean(radiation.raw.data[x,y,(8*i-7):(i*8)])
      }
    }
  }
  # Period 46: mean of whatever days remain after day 360.
  for(x in 1:192){
    for (y in 1:94){
      radiation.8day.data[x,y,46]<-mean(radiation.raw.data[x,y,361:obs.num])
    }
  }
  # Time stamps for the radiation output: first daily time stamp of each
  # 8-day block.
  time.output<-radiation.file$var[[1]]$dim[[3]]$vals[(0:45)*8+1]
  x<-ncdim_def('Lon','degreesE',radiation.file$var[[1]]$dim[[1]]$vals)
  y<-ncdim_def('Lat','degreesN',radiation.file$var[[1]]$dim[[2]]$vals)
  t<-ncdim_def("Time",'hours since 1800-01-01 00:00:0.0',time.output,unlim=TRUE)
  ncrad<-ncvar_def('dswrf',"W/m^2",list(x,y,t),missval=-9.96920996838687e+36,longname='8-day Downward Solar Radiation Flux at surface',prec='float')
  ncrad.out<-nc_create(outfile[2],ncrad)
  ncvar_put(ncrad.out,'dswrf',radiation.8day.data)
  nc_close(ncrad.out)
  # Release the large intermediate arrays before returning.
  gc()
}
| 3,847 | apache-2.0 |
339b46a0563c5e5a9d15449a940f7d20b62b18de | Shians/Glimma | man-roxygen/return_glMDSPlot.R | #' @return Draws a two-panel interactive MDS plot in an html page. The left
#' panel contains the plot between two MDS dimensions, with annotations
#' displayed on hover. The right panel contains a bar plot of the eigenvalues
#' of each dimension, clicking on any of the bars will plot the corresponding
#' dimension against the next dimension.
| 353 | lgpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.