id stringlengths 40 40 | repo_name stringlengths 5 110 | path stringlengths 2 233 | content stringlengths 0 1.03M ⌀ | size int32 0 60M ⌀ | license stringclasses 15 values |
|---|---|---|---|---|---|
3a87d5a1e840ec26f787802422192ee3a415c41f | cran/dissUtils | R/neighbor_identify.R | neighbors.identify <- function(neighbor.matrix, all.dists){
    # Map each neighbor distance in neighbor.matrix back to the index of the
    # point it came from by looking the value up inside all.dists.
    #   neighbor.matrix : matrix of distances, one row per query point
    #   all.dists       : either a "dist" object or a full distance matrix
    # NOTE(review): lookup is done with match() on floating-point values, so
    # duplicated distances or rounding differences could return the wrong
    # index -- verify upstream guarantees uniqueness.
    indices <- matrix(integer(length(neighbor.matrix)),
			  nrow(neighbor.matrix),
			  dimnames = dimnames(neighbor.matrix));
    if(any("dist" %in% class(all.dists))){
	# "dist" input: locate the value in the flat lower triangle, recover
	# the (row, col) pair via diss.index(), and keep whichever member of
	# the pair is not the query row i itself
	dist.size <- attributes(all.dists)$Size;
	for(i in 1:nrow(indices)){
	    for(j in 1:ncol(indices)){
		tmp <- match(neighbor.matrix[i,j],
			     all.dists);
		index.choices <- diss.index(tmp, dist.size);
		indices[i,j] <- index.choices[index.choices != i];
	    }
	}
    }
    else if(is.matrix(all.dists)){
	# full-matrix input: each row i of all.dists holds the distances from
	# point i, so the match position is the neighbor's index directly
	for(i in 1:nrow(indices)){
	    active.lookup <- all.dists[i,];
	    for(j in 1:ncol(indices)){
		indices[i,j] <- match(neighbor.matrix[i,j],
					  active.lookup);
	    }
	}
    }
    # invisible so printing is suppressed when called for side-effect-style use
    return(invisible(indices));
}
| 1,021 | gpl-2.0 |
91e4bfdb8803e23ccebcec3e5f30bf2007365fd8 | mattwatts/marxan.io | marxan24/ingest_marxan_data.R | # marxan.io
# input is a zip file provided by user
# unzip to a temporary location
# detect pulayer
# simplify geometry
# detect field PUID or PU_ID
# rename as PUID
# drop all fields except PUID
# save as pulayer.shp
# detect puoutline if exists
# simplify geometry
# save as puoutline.shp
# scan input.dat
# detect pu.dat
# detect spec.dat
# detect bound.dat
# detect puvsp.dat
# identify common errors
# convert matrix to sparse
# create puorder.dat and sporder.dat
# create pulayer.Rdata
# run Marxan
# create cluster.Rdata
# return error condition to Marxan web app
# if stop error condition, return useful error string
# return list of warning messages to Marxan web app
library(foreign)
library(rgdal)
library(PBSmapping)
library(sqldf)
library(sp)
library(tools)
library(maptools)
library(foreach)
library(doMC)
require(vegan)
require(labdsv)
PadInt <- function(iRunNumber)
{
  # Zero-pad a Marxan run number to (at least) 5 digits, matching the
  # suffixes Marxan uses in its output file names (e.g. 7 -> "00007").
  # BUG FIX: the old rep("0", 5 - nchar(...)) crashed with an "invalid times
  # argument" error for numbers with more than 5 digits; sprintf() pads to a
  # minimum width and simply passes longer numbers through unchanged.
  return(sprintf("%05d", as.integer(iRunNumber)))
}
GetOutputFileext <- function(sMarxanDir,sParam)
{
  # Look up an output-format switch (e.g. SAVESUMSOLN) in input.dat and map
  # its integer value to the file extension Marxan writes:
  # 1 = .dat, 2 = .txt, 3 = .csv (anything else yields NULL).
  sInputDat <- readLines(paste0(sMarxanDir,"/input.dat"))
  iLine <- which(regexpr(sParam,sInputDat)==1)
  sValue <- unlist(strsplit(sInputDat[iLine], split=" "))[2]
  switch(as.integer(sValue), ".dat", ".txt", ".csv")
}
# create a temporary directory in a specified directory
CreateTempDir <- function(sTempPath)
{
  # Create a uniquely (randomly) named sub-directory of sTempPath and return
  # its path.
  # BUG FIX: the random folder name used to be generated ONCE before the
  # retry loop, so on a collision every one of the 100000 retries tested the
  # same existing path; the name is now regenerated on each attempt.
  for (ii in 1:100000)
  {
    Gfold <- sprintf("%s",round(runif(1)*1000000))
    sPath <- sprintf("%s/%s",sTempPath,Gfold)
    if(!file.exists(sPath))
    {
      # dir.create() is a portable replacement for system("mkdir ...")
      dir.create(sPath)
      break()
    }
  }
  return(sPath)
}
GetParamValue <- function(inputdat,sParam)
{
  # Return the value (second whitespace-separated token) of the line(s) in
  # the input.dat text vector that START with sParam, or "" if none match.
  iHits <- grep(paste0("^",sParam), inputdat)
  if (length(iHits) == 0)
  {
    return("")
  }
  vapply(strsplit(inputdat[iHits], " "), "[", character(1), 2)
}
smart_read <- function(sInFile)
{
  # Read a tabular file whose delimiter (comma, tab, or space) is detected
  # automatically from the first line. Returns a data.frame with a header row.
  cat(paste0("smart_read reading file ",sInFile,"\n"))
  # automatically detect the delimeter type: comma, tab, or space
  # BUG FIX: the original three independent if blocks meant the LAST matching
  # delimiter won (e.g. a CSV whose header contained a space was re-read as
  # space-delimited), and a file matching none left InTable undefined.
  # Use an exclusive chain with comma > tab > space precedence instead.
  sLine <- readLines(sInFile,1)
  if (grepl(",",sLine))
  {
    InTable <- read.csv(sInFile,stringsAsFactors=FALSE)
  } else if (grepl("\t",sLine))
  {
    InTable <- read.delim(sInFile,stringsAsFactors=FALSE)
  } else if (grepl(" ",sLine))
  {
    # BUG FIX: header=TRUE added so space-delimited files get column names
    # like the CSV/tab branches (downstream code indexes by column name)
    InTable <- read.table(sInFile,stringsAsFactors=FALSE,sep=" ",header=TRUE)
  } else {
    stop(paste0("smart_read cannot detect the delimiter in ",sInFile))
  }
  cat(paste0("smart_read file read ",sInFile,"\n"))
  return(InTable)
}
ParseMarxanZip <- function(sInputZipFile,sTempPath,sShinyUserPath,sDataPath,sUserName)
{
  # Unpack a user-supplied Marxan zip file, validate its contents, and build
  # a normalised Marxan database under <sTempPath>/marxan (input files,
  # pulayer shapefile, pulayer.Rdata).
  # Returns c(sPath, sLogFile, WarningMsg..., ErrorMsg...); callers scan the
  # tail of the vector for "Warning"/"ERROR" strings (see ReadParseErrors).
  # Note: the previous version had an unreachable stop() after every
  # early return; those dead statements have been removed.
  sPath <- sTempPath
  sLogFile <- paste0(sPath,"/ParseMarxanZip.log")
  WarningMsg <- c()
  ErrorMsg <- c()
  write(paste0("ParseMarxanZip log start ",date()),file=sLogFile)
  write(paste0("temp path ",sPath," ",date()),file=sLogFile,append=TRUE)
  cat(paste0("temp path ",sPath,"\n"))
  # sPath is the temporary directory
  cat(paste0(">",sInputZipFile,"< >",paste0(sPath,"/data.zip"),"<\n"))
  file.copy(sInputZipFile,paste0(sPath,"/data.zip"))
  cat(paste0("copy done\n"))
  # NOTE(review): the zip path is interpolated into a shell command, so file
  # names containing spaces or shell metacharacters will break; consider
  # utils::unzip() or shQuote()
  system(paste0("unzip ",sInputZipFile," -d ",sPath))
  cat(paste0("unzip done\n"))
  # create Marxan directories
  dir.create(paste0(sPath,"/marxan"))
  dir.create(paste0(sPath,"/marxan/input"))
  dir.create(paste0(sPath,"/marxan/output"))
  dir.create(paste0(sPath,"/marxan/pulayer"))
  # find input.dat
  sInputDat <- list.files(path = sPath, recursive = TRUE, pattern = "^input\\.dat$", ignore.case = TRUE, full.names = TRUE)
  if (length(sInputDat) == 0)
  {
    # STOP ERROR
    sErrorMsg <- "ERROR: input.dat not found"
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (length(sInputDat) > 1)
  {
    # STOP ERROR
    sErrorMsg <- "ERROR: more than 1 input.dat found"
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  # pkgType "source" is used as a Linux heuristic: normalise line endings
  if (.Platform$pkgType == "source")
  {
    system(paste0("dos2unix ",sInputDat))
  }
  cat(paste0("reading input.dat ",sInputDat,"\n"))
  inputdat <- readLines(sInputDat)
  write(paste0("input.dat read ",sInputDat," ",date()),file=sLogFile,append=TRUE)
  cat(paste0("input.dat read ",sInputDat,"\n"))
  # pull the four data-file names out of input.dat
  sPUNAME <- GetParamValue(inputdat,"PUNAME")
  sSPECNAME <- GetParamValue(inputdat,"SPECNAME")
  sPUVSPRNAME <- GetParamValue(inputdat,"PUVSPRNAME")
  sBOUNDNAME <- GetParamValue(inputdat,"BOUNDNAME")
  if (sPUNAME == "")
  {
    # STOP ERROR
    sErrorMsg <- "ERROR: PUNAME not found in input.dat"
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (sSPECNAME == "")
  {
    # STOP ERROR
    sErrorMsg <- "ERROR: SPECNAME not found in input.dat"
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (sPUVSPRNAME == "")
  {
    # STOP ERROR
    sErrorMsg <- "ERROR: PUVSPRNAME not found in input.dat"
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (sBOUNDNAME == "")
  {
    # warning message but not critical
    sWarningMsg <- "Warning: BOUNDNAME not found in input.dat"
    write(paste0(sWarningMsg," ",date()),file=sLogFile,append=TRUE)
    WarningMsg <- c(WarningMsg,sWarningMsg)
    # BUG FIX: this cat() used to sit AFTER the return and never executed
    cat(paste0(sWarningMsg,"\n"))
    # NOTE(review): returning here means a dataset without BOUNDNAME is not
    # ingested at all even though the message claims it is non-critical; a
    # deeper fix would continue without a boundary file
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  # locate the four data files anywhere inside the unpacked zip
  sPuDat <- list.files(path = sPath, recursive = TRUE, pattern = sPUNAME, ignore.case = TRUE, full.names = TRUE)
  sSpecDat <- list.files(path = sPath, recursive = TRUE, pattern = sSPECNAME, ignore.case = TRUE, full.names = TRUE)
  sPuvsprDat <- list.files(path = sPath, recursive = TRUE, pattern = sPUVSPRNAME, ignore.case = TRUE, full.names = TRUE)
  sBoundDat <- list.files(path = sPath, recursive = TRUE, pattern = sBOUNDNAME, ignore.case = TRUE, full.names = TRUE)
  write(paste0("paths searched ",date()),file=sLogFile,append=TRUE)
  cat("paths searched\n")
  if (length(sPuDat) == 0)
  {
    # STOP ERROR
    sErrorMsg <- paste0("ERROR: pu.dat file ",sPUNAME," not found ")
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (length(sSpecDat) == 0)
  {
    # STOP ERROR
    sErrorMsg <- paste0("ERROR: spec.dat file ",sSPECNAME," not found ")
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (length(sPuvsprDat) == 0)
  {
    # STOP ERROR
    sErrorMsg <- paste0("ERROR: puvspr.dat file ",sPUVSPRNAME," not found ")
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (length(sBoundDat) == 0)
  {
    # warning message but not critical
    sWarningMsg <- paste0("Warning: bound.dat file ",sBOUNDNAME," not found ")
    write(paste0(sWarningMsg," ",date()),file=sLogFile,append=TRUE)
    WarningMsg <- c(WarningMsg,sWarningMsg)
    cat(paste0(sWarningMsg,"\n"))
  }
  if (length(sPuDat) > 1)
  {
    # STOP ERROR
    sErrorMsg <- paste0("ERROR: more than 1 pu.dat file ",sPUNAME," found ")
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (length(sSpecDat) > 1)
  {
    # STOP ERROR
    sErrorMsg <- paste0("ERROR: more than 1 spec.dat file ",sSPECNAME," found ")
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (length(sPuvsprDat) > 1)
  {
    # STOP ERROR
    sErrorMsg <- paste0("ERROR: more than 1 puvspr.dat file ",sPUVSPRNAME," found ")
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (length(sBoundDat) > 1)
  {
    # STOP ERROR
    sErrorMsg <- paste0("ERROR: more than 1 bound.dat file ",sBOUNDNAME," found ")
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (.Platform$pkgType == "source")
  {
    system(paste0("dos2unix ",sPuDat))
    system(paste0("dos2unix ",sSpecDat))
    system(paste0("dos2unix ",sPuvsprDat))
    if (length(sBoundDat) == 1)
    {
      system(paste0("dos2unix ",sBoundDat))
    }
  }
  write(paste0("reading input files ",date()),file=sLogFile,append=TRUE)
  cat("reading input files\n")
  # are the files CSV, Tab, or comma delimeted?
  pu.dat <- smart_read(sPuDat)
  spec.dat <- smart_read(sSpecDat)
  puvspr.dat <- smart_read(sPuvsprDat)
  if (length(sBoundDat) == 1)
  {
    bound.dat <- smart_read(sBoundDat)
  }
  write(paste0("input files read ",date()),file=sLogFile,append=TRUE)
  cat("input files read\n")
  write.csv(pu.dat,paste0(sPath,"/marxan/input/pu.dat"),quote=FALSE,row.names=FALSE)
  # if spec.dat has a name field, make the name field the last field
  # this will reduce possible errors where users include delimeter characters in the feature names
  # NOTE(review): grepl("name", ...) also matches columns merely CONTAINING
  # "name"; those columns would be dropped by the reordering below -- confirm
  # spec.dat only ever carries a single "name"-like column
  cnames <- colnames(spec.dat)
  iName <- which(grepl("name",cnames))
  if (length(iName) > 0)
  {
    spec.dat <- spec.dat[c(cnames[-iName],"name")]
  }
  write.csv(spec.dat,paste0(sPath,"/marxan/input/spec.dat"),quote=FALSE,row.names=FALSE)
  if (length(sBoundDat) == 1)
  {
    write.csv(bound.dat,paste0(sPath,"/marxan/input/bound.dat"),quote=FALSE,row.names=FALSE)
  }
  if (ncol(puvspr.dat) == 3)
  {
    # This is a sparse matrix. Ensure it is sorted in the correct order.
    puorder.dat <- puvspr.dat[order(puvspr.dat$pu),]
    write.csv(puorder.dat,paste0(sPath,"/marxan/input/puorder.dat"),quote=FALSE,row.names=FALSE)
    sporder.dat <- puvspr.dat[order(puvspr.dat$species),]
    write.csv(sporder.dat,paste0(sPath,"/marxan/input/sporder.dat"),quote=FALSE,row.names=FALSE)
    # If the matrix was not already sorted by pu, warn the user.
    if (is.unsorted(puvspr.dat$pu))
    {
      # warning message but not critical
      sWarningMsg <- paste0("Warning: puvspr.dat file ",sPuvsprDat," was not ordered from lowest to highest pu ")
      write(paste0(sWarningMsg," ",date()),file=sLogFile,append=TRUE)
      WarningMsg <- c(WarningMsg,sWarningMsg)
      cat(paste0(sWarningMsg,"\n"))
    }
  } else {
    # we need to convert this dense matrix to a sparse one
    # (first column is the planning unit id, remaining columns are features,
    # with the species id encoded after a leading character, e.g. "X123")
    # BUG FIX: this branch previously indexed the base R function `matrix`
    # instead of the puvspr.dat data just read, so it always failed
    # generate puorder.dat
    sPuOrder <- paste0(sPath,"/marxan/input/puorder.dat")
    write('species,pu,amount',file=sPuOrder)
    for (j in seq_len(nrow(puvspr.dat)))
    {
      for (i in 2:ncol(puvspr.dat))
      {
        rAmount <- puvspr.dat[j,i]
        if (rAmount > 0)
        {
          iPUID <- puvspr.dat[j,1]
          iSPID <- substring(colnames(puvspr.dat)[i],2)
          write(paste(iSPID,iPUID,rAmount,sep=","),
                file=sPuOrder,append=TRUE)
        }
      }
    }
    # generate sporder.dat
    sSpOrder <- paste0(sPath,"/marxan/input/sporder.dat")
    write('species,pu,amount',file=sSpOrder)
    for (i in 2:ncol(puvspr.dat))
    {
      for (j in seq_len(nrow(puvspr.dat)))
      {
        rAmount <- puvspr.dat[j,i]
        if (rAmount > 0)
        {
          iPUID <- puvspr.dat[j,1]
          iSPID <- substring(colnames(puvspr.dat)[i],2)
          write(paste(iSPID,iPUID,rAmount,sep=","),
                file=sSpOrder,append=TRUE)
        }
      }
    }
    # Warn user their matrix wasn't in the correct format.
    # warning message but not critical
    sWarningMsg <- paste0("puvspr.dat file ",sPuvsprDat," was not in sparse matrix format ")
    write(paste0(sWarningMsg," ",date()),file=sLogFile,append=TRUE)
    WarningMsg <- c(WarningMsg,sWarningMsg)
    cat(paste0(sWarningMsg,"\n"))
  }
  # save the input.dat (template shipped with the app, with/without bound.dat)
  if (length(sBoundDat) == 1)
  {
    inputdat <- readLines(paste0(sDataPath,"/input.dat"))
  } else {
    inputdat <- readLines(paste0(sDataPath,"/no_bound_input.dat"))
  }
  writeLines(inputdat,con=paste0(sPath,"/marxan/input.dat"))
  write(paste0("marxan files processed ",date()),file=sLogFile,append=TRUE)
  cat("marxan files processed\n")
  # find shapefiles
  ShapeFiles <- list.files(path = sPath, recursive = TRUE, pattern = "*.shp", ignore.case = TRUE, full.names = TRUE)
  # ignore any .xml sidecar files if they are present
  WhichXml <- regexpr(".xml",ShapeFiles) > 0
  FilterShapes <- ShapeFiles[!WhichXml]
  # BUG FIX: when no shapefile was present, sPuLayer was never assigned and
  # the readOGR() call below crashed with an uninformative error; report it
  # as a proper STOP ERROR instead
  if (length(FilterShapes) == 0)
  {
    # STOP ERROR
    sErrorMsg <- "ERROR: no planning unit shapefile found"
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  if (length(FilterShapes) == 1)
  {
    # we have 1 shapefile and assume this is the planning unit layer
    sPuLayer <- FilterShapes[1]
    sOutlineLayer <- ""
  } else {
    if (length(FilterShapes) == 2)
    {
      # we have 2 shapefiles: assume one is planning unit layer and other is outline
      # guess which is which
      WhichShapefile <- regexpr("pulayer.shp",FilterShapes) > 0
      if (sum(WhichShapefile) != 1)
      {
        # Error, can't guess which shapefile is the pulayer
        # STOP ERROR
        sErrorMsg <- "ERROR: 2 shapefiles found, can't guess which one is the pulayer"
        write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
        ErrorMsg <- c(ErrorMsg,sErrorMsg)
        cat(paste0(sErrorMsg,"\n"))
        return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
      }
      if (WhichShapefile[1] == TRUE)
      {
        sPuLayer <- FilterShapes[1]
        sOutlineLayer <- FilterShapes[2]
      } else {
        sPuLayer <- FilterShapes[2]
        sOutlineLayer <- FilterShapes[1]
      }
    } else {
      # We have more than 2 shapefiles. This is an error condition.
      # STOP ERROR
      sErrorMsg <- "ERROR: more than 2 shapefiles found"
      write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
      ErrorMsg <- c(ErrorMsg,sErrorMsg)
      cat(paste0(sErrorMsg,"\n"))
      return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
    }
  }
  # load the planning unit shapefile and save a normalised copy
  pushapefile <- readOGR(dirname(sPuLayer),file_path_sans_ext(basename(sPuLayer)))
  writeOGR(pushapefile,paste0(sPath,"/marxan/pulayer"),"pulayer",driver="ESRI Shapefile",overwrite_layer=TRUE)
  write(paste0("pulayer read ",sPuLayer," ",date()),file=sLogFile,append=TRUE)
  cat(paste0("pulayer read ",sPuLayer,"\n"))
  putable <- read.dbf(paste0(dirname(sPuLayer),"/",file_path_sans_ext(basename(sPuLayer)),".dbf"))
  dim(putable)
  # guess which field is PUID (case-insensitive match on PUID / PU_ID,
  # which also covers mixed-case variants the old exact checks missed)
  iPUIDfield <- which(toupper(colnames(putable)) %in% c("PUID","PU_ID"))
  if (length(iPUIDfield) > 1)
  {
    iPUIDfield <- iPUIDfield[1]
  }
  if (length(iPUIDfield) == 0)
  {
    # We can't find PUID field in the planning unit shapefile.
    # STOP ERROR
    sErrorMsg <- "ERROR: can't find PUID field in the planning unit shapefile "
    write(paste0("STOP ",sErrorMsg," ",date()),file=sLogFile,append=TRUE)
    ErrorMsg <- c(ErrorMsg,sErrorMsg)
    cat(paste0(sErrorMsg,"\n"))
    return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
  }
  # rename as PUID and drop every other attribute field
  colnames(putable)[iPUIDfield] <- "PUID"
  putable <- sqldf("SELECT PUID FROM putable")
  dim(putable)
  write.dbf(putable,paste0(sPath,"/marxan/pulayer/pulayer.dbf"))
  # load the outline shapefile
  if (sOutlineLayer != "")
  {
    outlineshapefile <- readOGR(dirname(sOutlineLayer),file_path_sans_ext(basename(sOutlineLayer)))
    write(paste0("outline loaded ",sOutlineLayer," ",date()),file=sLogFile,append=TRUE)
    cat(paste0("outline loaded ",sOutlineLayer,"\n"))
    writeOGR(outlineshapefile,paste0(sPath,"/marxan/pulayer"),"puoutline",driver="ESRI Shapefile",overwrite_layer=TRUE)
    puoutline <- SpatialPolygons2PolySet(outlineshapefile)
    fOutline <- TRUE
  } else {
    fOutline <- FALSE
    puoutline <- NA
    # Warn user no outline layer.
    # warning message but not critical
    sWarningMsg <- "Warning: no outline layer."
    write(paste0(sWarningMsg," ",date()),file=sLogFile,append=TRUE)
    WarningMsg <- c(WarningMsg,sWarningMsg)
    cat(paste0(sWarningMsg,"\n"))
  }
  write(paste0("shapefiles processed ",date()),file=sLogFile,append=TRUE)
  cat("shapefiles processed\n")
  # create the pulayer.Rdata file consumed by the web app
  # NOTE(review): assumes pu.dat has a "status" column -- verify upstream
  pustatus_ <- unlist(pu.dat$status)
  # pulayer_ is deliberately assigned into the global environment (<<-);
  # other parts of the app read it directly
  pulayer_ <<- SpatialPolygons2PolySet(pushapefile)
  y_ <- bbox(pushapefile)[2,2] - bbox(pushapefile)[2,1]
  x_ <- bbox(pushapefile)[1,2] - bbox(pushapefile)[1,1]
  sRdata <- paste0(sPath,"/marxan/pulayer/pulayer.Rdata")
  save(fOutline,puoutline,pulayer_,pustatus_,x_,y_,file=sRdata)
  write(paste0("pulayer.Rdata created ",sRdata," ",date()),file=sLogFile,append=TRUE)
  cat(paste0("ParseMarxanZip end\n"))
  write(paste0("ParseMarxanZip log end ",date()),file=sLogFile,append=TRUE)
  return(c(sPath,sLogFile,WarningMsg,ErrorMsg))
}
ReadParseErrors <- function(ParseResult)
{
  # Scan a ParseMarxanZip() result vector (path, log file, then messages)
  # and publish any warning/error strings into the global Warnings/Errors
  # vectors for the web app to display.
  Warnings <<- c()
  Errors <<- c()
  if (length(ParseResult) > 2)
  {
    for (sMsg in ParseResult[3:length(ParseResult)])
    {
      if (grepl("Warning",sMsg))
      {
        Warnings <<- c(Warnings,sMsg)
      }
      if (grepl("ERROR",sMsg))
      {
        Errors <<- c(Errors,sMsg)
      }
    }
  }
}
IngestMarxanDatabase <- function(sPath,sShinyUserPath,sUserName,sDatabaseName)
{
  # Copy the ingested Marxan dataset from the temp area into the user's home
  # directory under <sShinyUserPath>/<sUserName>/<sDatabaseName>.
  sUserDir <- paste0(sShinyUserPath,"/",sUserName)
  sTargetDir <- paste0(sUserDir,"/",sDatabaseName)
  dir.create(sUserDir)
  dir.create(sTargetDir)
  system(paste0("cp -r ",sPath,"/marxan/ ",sTargetDir))
}
RunMarxan <- function(sMarxanDir,sExecutable,iCores,iRepsPerCore)
{
  # Run Marxan in parallel: one working directory per core, each core running
  # iRepsPerCore repeats with its own random seed and output scenario name.
  # NOTE(review): sShinyDataPath is read from the calling environment (it is
  # not a parameter) -- confirm it is defined before this is called, and that
  # a doMC backend has been registered for %dopar%.
  # BLM parameter
  inputdat <- readLines(paste0(sMarxanDir,"/input.dat"))
  # BUG FIX: draw one seed per core; the old runif(10) produced NA seeds
  # whenever more than 10 cores were requested
  randomseeds <- round(runif(iCores)*100000)
  # run Marxan
  foreach(i=1:iCores) %dopar%
  {
    dir.create(paste0(sMarxanDir,"/core",i))
    file.copy(paste0(sShinyDataPath,"/",sExecutable),paste0(sMarxanDir,"/core",i,"/",sExecutable))
    system(paste0("chmod +x ",sMarxanDir,"/core",i,"/",sExecutable))
    # set parameters for multi core: redirect I/O dirs, give each core its
    # own scenario name, repeat count, and random seed
    iINPUTDIRparam <- which(regexpr("INPUTDIR",inputdat)==1)
    iOUTPUTDIRparam <- which(regexpr("OUTPUTDIR",inputdat)==1)
    iSCENNAMEparam <- which(regexpr("SCENNAME",inputdat)==1)
    iNUMREPSparam <- which(regexpr("NUMREPS",inputdat)==1)
    iRANDSEEDparam <- which(regexpr("RANDSEED",inputdat)==1)
    inputdat[iINPUTDIRparam] <- paste0("INPUTDIR ",sMarxanDir,"/input")
    inputdat[iOUTPUTDIRparam] <- paste0("OUTPUTDIR ",sMarxanDir,"/output")
    inputdat[iSCENNAMEparam] <- paste0("SCENNAME output",i)
    inputdat[iNUMREPSparam] <- paste0("NUMREPS ",iRepsPerCore)
    inputdat[iRANDSEEDparam] <- paste0("RANDSEED ",randomseeds[i])
    writeLines(inputdat,paste0(sMarxanDir,"/core",i,"/input.dat"))
    setwd(paste0(sMarxanDir,"/core",i))
    system(paste0("./",sExecutable," -s"))
  }
  # tidy up the per-core input files once all runs have finished
  for (i in 1:iCores)
  {
    file.remove(paste0(sMarxanDir,"/core",i,"/input.dat"))
  }
}
JoinParallelResults <- function(sMarxanDir,iCores,iRepsPerCore)
{
  # Merge the per-core Marxan result files (output<i>_*) produced by
  # RunMarxan() into the single combined output_* files the app reads.
  iSolutions <- round(iCores*iRepsPerCore)
  # combine the summary tables and renumber the runs 1..iSolutions
  sumtable <- c()
  for (i in 1:iCores)
  {
    sumtable_ <- read.csv(paste0(sMarxanDir,"/output/output",i,"_sum.csv"))
    sumtable <- rbind(sumtable,sumtable_)
  }
  for (i in 1:iSolutions)
  {
    sumtable[i,1] <- i
  }
  write.csv(sumtable,
            paste0(sMarxanDir,"/output/output_sum.csv"),
            quote=FALSE,row.names=FALSE)
  # detect best solution (first run achieving the minimum score)
  iBest <- which(sumtable$Score==min(sumtable$Score))
  if (length(iBest) > 0)
  {
    iBest <- iBest[1]
  }
  # rename mv files and solution files to the global numbering
  iSol <- 0
  for (i in 1:iCores)
  {
    for (j in 1:iRepsPerCore)
    {
      iSol <- iSol + 1
      file.rename(paste0(sMarxanDir,"/output/output",i,"_mv",PadInt(j),".csv"),
                  paste0(sMarxanDir,"/output/output_mv",PadInt(iSol),".csv"))
      file.rename(paste0(sMarxanDir,"/output/output",i,"_r",PadInt(j),".csv"),
                  paste0(sMarxanDir,"/output/output_r",PadInt(iSol),".csv"))
    }
  }
  # copy _mvbest and _best files
  file.copy(paste0(sMarxanDir,"/output/output_mv",PadInt(iBest),".csv"),
            paste0(sMarxanDir,"/output/output_mvbest.csv"),
            overwrite=TRUE)
  file.copy(paste0(sMarxanDir,"/output/output_r",PadInt(iBest),".csv"),
            paste0(sMarxanDir,"/output/output_best.csv"),
            overwrite=TRUE)
  # join ssoln files (selection frequencies are summed across cores)
  # BUG FIX: this previously read output<i>_ssoln.csv with i left over from
  # the rename loop (i.e. the last core), so core 1 was never counted and the
  # last core was counted twice; start explicitly from core 1
  ssolntable <- read.csv(paste0(sMarxanDir,"/output/output1_ssoln.csv"))
  colnames(ssolntable)[2] <- "numberX"
  # BUG FIX: guard the loop so 2:iCores does not run backwards (2,1) when
  # only a single core was used
  if (iCores > 1)
  {
    for (i in 2:iCores)
    {
      ssolntable_ <- read.csv(paste0(sMarxanDir,"/output/output",i,"_ssoln.csv"))
      ssolntable <- sqldf("SELECT * from ssolntable LEFT JOIN ssolntable_ USING(planning_unit)")
      ssolntable$numberX <- ssolntable$numberX + ssolntable$number
      ssolntable <- sqldf("SELECT planning_unit, numberX from ssolntable")
    }
  }
  colnames(ssolntable)[2] <- "number"
  write.csv(ssolntable,
            paste0(sMarxanDir,"/output/output_ssoln.csv"),
            quote=FALSE,row.names=FALSE)
  # join cluster files: text parse
  outfile <- file(paste0(sMarxanDir,"/output/output_solutionsmatrix.csv"),"w")
  iRow <- 0
  for (i in 1:iCores)
  {
    infile <- file(paste0(sMarxanDir,"/output/output",i,"_solutionsmatrix.csv"),"r")
    # read header row
    sLine <- readLines(con=infile,n=1)
    # write header row if i == 1
    if (i == 1)
    {
      write(sLine,file=outfile)
    }
    for (j in 1:iRepsPerCore)
    {
      sLine <- readLines(con=infile,n=1)
      # BUG FIX: strip the per-core run label ("S<j>,") up to the first comma;
      # the old fixed-width substr() assumed exactly 10 reps per core and
      # corrupted rows for any other setting
      sLine <- sub("^[^,]*,", "", sLine)
      iRow <- iRow + 1
      write(paste0("S",iRow,",",sLine),file=outfile,append=TRUE)
    }
    close(infile)
  }
  close(outfile)
}
ImportOutputsCsvToShpDbf <- function(sPuShapeFileDbf, sMarxanDir, iNumberOfRuns, sPUID)
{
  # Imports the relevant contents of output files to the planning unit shape file dbf.
  #   sPuShapeFileDbf : path to the pulayer .dbf, rewritten in place
  #   sMarxanDir      : Marxan database directory containing output/
  #   iNumberOfRuns   : total repeats (used to derive the "not selected" count)
  #   sPUID           : name of the planning-unit id column in the dbf
  # load and prepare pu_table (keep only the id column, normalised to "PUID")
  pu_table <- read.dbf(sPuShapeFileDbf)
  pu_table <- sqldf(paste("SELECT ", sPUID, " from pu_table",sep=""))
  colnames(pu_table)[1] <- "PUID"
  pu_table$PUID <- as.integer(pu_table$PUID)
  # load and prepare ssoln_table (selection frequency per planning unit)
  ssoln_table <- read.csv(paste(sMarxanDir,"/output/output_ssoln",GetOutputFileext(sMarxanDir,"SAVESUMSOLN"),sep=""))
  colnames(ssoln_table)[1] <- "PUID"
  colnames(ssoln_table)[2] <- "SSOLN2"
  # SSOLN2 = times selected, SSOLN1 = times NOT selected
  ssoln_table$SSOLN1 <- as.integer(iNumberOfRuns - ssoln_table$SSOLN2)
  ssoln_table$SSOLN2 <- as.integer(ssoln_table$SSOLN2)
  # join pu_table and ssoln_table
  pu_table <- sqldf("SELECT * from pu_table LEFT JOIN ssoln_table USING(PUID)")
  # load and prepare best_table (membership in the best solution, 0/1 in the
  # file, stored as 1/2 so shapefile renderers can colour both states)
  best_table <- read.csv(paste(sMarxanDir,"/output/output_best",GetOutputFileext(sMarxanDir,"SAVEBEST"),sep=""))
  best_table$BESTSOLN <- as.integer(best_table$SOLUTION + 1)
  best_table <- sqldf("SELECT PUID, BESTSOLN from best_table")
  # join pu_table and best_table
  pu_table <- sqldf("SELECT * from pu_table LEFT JOIN best_table USING(PUID)")
  # save the new pu_table, restoring the caller's original id column name
  colnames(pu_table)[1] <- sPUID
  write.dbf(pu_table,sPuShapeFileDbf)
}
labelCol <- function(x)
{
  # Dendrogram node callback: colour the label of the best solution blue and
  # every other leaf label black.
  # we set iBest as a global in PrepareCluster_compute before calling labelCol
  if (is.leaf(x))
  {
    sBestLabel <- paste0("S", iBest, " (Best)")
    leafAttrs <- attributes(x)
    leafColour <- if (attr(x, "label") == sBestLabel) "blue" else "black"
    # append the colour to any nodePar already present on the leaf
    attr(x, "nodePar") <- c(leafAttrs$nodePar, lab.col = leafColour)
  }
  return(x)
}
PrepareCluster_compute <- function(sMarxanDir)
{
  # Build the cluster-analysis objects for the app's solution-comparison
  # plots (2D/3D NMDS ordination plus a dendrogram) and save them to
  # output/cluster.Rdata. Side effect: assigns the global iBest, which
  # labelCol() reads when colouring the dendrogram.
  # NOTE: we fail gracefully if there are not enough unique solutions
  # prepare the cluster analysis objects
  solutions_raw<-read.table(paste0(sMarxanDir,"/output/output_solutionsmatrix.csv"),header=TRUE, row.name=1, sep=",")
  thetable <- read.csv(paste0(sMarxanDir,"/output/output_sum.csv"))
  iBest <<- which.min(thetable$Score)
  # move the best solution to the first row and relabel it "S<i> (Best)"
  Best <- solutions_raw[iBest,]
  solutions_raw <- solutions_raw[-iBest,]
  solutions_join <- rbind(Best,solutions_raw)
  rownames(solutions_join) <- c(paste0("S",iBest," (Best)"),row.names(solutions_raw))
  plotlabels <- c(paste0("S",iBest," (Best)"),row.names(solutions_raw))
  # drop duplicate solutions; ordination needs > 2 unique ones
  solutions <- unique(solutions_join)
  iUniqueSolutions <- dim(solutions)[1]
  if (iUniqueSolutions > 2)
  {
    # render the 2d
    nmdscolours <- rep("black",each = iUniqueSolutions)
    nmdscolours[1] <- "blue"
    # NOTE(review): vegan::vegdist()'s argument is `method`, not `distance`;
    # verify this call actually applies the intended Bray-Curtis metric
    soldist<-vegdist(solutions,distance="bray")
    sol.mds<-nmds(soldist,2)
    h<-hclust(soldist, method="complete")
    # render the dendogram (labelCol colours the best solution's label blue)
    d <- dendrapply(as.dendrogram(h), labelCol)
    # render the 3d
    sol3d.mds <- nmds(soldist,3)
  } else {
    # not enough unique solutions: save NA placeholders so downstream
    # plotting code can detect the degenerate case
    sol.mds <- NA
    plotlabels <- NA
    nmdscolours <- NA
    d <- NA
    sol3d.mds <- NA
  }
  sRdata <- paste0(sMarxanDir,"/output/cluster.Rdata")
  save(sol.mds,plotlabels,nmdscolours,d,sol3d.mds,file=sRdata)
}
RunMarxan_1st <- function(sDatabasePath,sShinyDataPath,iCores,iRepsPerCore)
{
  # First full run for a freshly ingested database: select the platform
  # binary, run Marxan in parallel, merge the per-core results, import them
  # into the pulayer dbf and precompute the cluster objects.
  # pkgType "source" is used here as a Linux-vs-Mac heuristic
  if (.Platform$pkgType == "source")
  {
    # sExecutable is deliberately global (<<-): RunMarxan() and other parts
    # of the app read it
    sExecutable <<- "MarOpt_v243_Linux64"
  } else {
    sExecutable <<- "MarOpt_v243_Mac64"
  }
  # copy the executable to the Marxan path
  file.copy(paste0(sShinyDataPath,"/",sExecutable),paste0(sDatabasePath,"/",sExecutable))
  system(paste0("chmod +x ",sDatabasePath,"/",sExecutable))
  RunMarxan(sDatabasePath,sExecutable,iCores,iRepsPerCore)
  JoinParallelResults(sDatabasePath,iCores,iRepsPerCore)
  ImportOutputsCsvToShpDbf(paste0(sDatabasePath,"/pulayer/pulayer.dbf"),sDatabasePath,round(iCores*iRepsPerCore),"PUID")
  PrepareCluster_compute(sDatabasePath)
}
list.dirs <- function(path=".", pattern=NULL, all.dirs=FALSE,
                      full.names=FALSE, ignore.case=FALSE)
{
  # List the immediate sub-directories of `path` (non-recursive).
  # NOTE: masks base::list.dirs, which has a different signature.
  # list.files is asked for full names so file.info can stat each entry
  candidates <- list.files(path=path, pattern=pattern, all.files=all.dirs,
                           full.names=TRUE, recursive=FALSE,
                           ignore.case=ignore.case)
  dirs <- candidates[file.info(candidates)$isdir]
  # determine whether to return full names or just dir names
  if (isTRUE(full.names)) dirs else basename(dirs)
}
SafeDbName <- function(sDbName,sShinyUserPath,sUserName)
{
  # sDbName is the name the user wants to use
  # if it conflicts with another existing database, automatically fix it by
  # appending a random number until the name is unique
  UserDb <- list.dirs(paste0(sShinyUserPath,"/",sUserName))
  sFixName <- sDbName
  # BUG FIX: use exact string matching (%in%) instead of
  # grep(paste0("^",name,"$")), which broke for database names containing
  # regex metacharacters such as "." or "+"
  if (sFixName %in% UserDb)
  {
    # database exists, generate a replacement name
    for (i in 1:1000)
    {
      sFixName <- sprintf("%s%s",sDbName,round(runif(1)*1000))
      if (!(sFixName %in% UserDb))
      {
        break()
      }
    }
  }
  return(sFixName)
}
| 28,003 | agpl-3.0 |
f61f73f750e9d3d49eef2da80e93cc575781924f | R-Lum/Luminescence | R/calc_gSGC_feldspar.R | #'@title Calculate Global Standardised Growth Curve (gSGC) for Feldspar MET-pIRIR
#'
#'@description Implementation of the gSGC approach for feldspar MET-pIRIR by Li et al. (2015)
#'
#'@details ##TODO
#'
#'@param data [data.frame] (**required**): data frame with five columns per sample
#'`c("LnTn", "LnTn.error", "Lr1Tr1", "Lr1Tr1.error","Dr1")`
#'
#'@param gSGC.type [character] (*with default*): growth curve type to be selected
#'according to Table 3 in Li et al. (2015). Allowed options are
#'`"50LxTx"`, `"50Lx"`, `"50Tx"`, `"100LxTx"`, `"100Lx"`, `"100Tx"`, `"150LxTx"`,
#' `"150Lx"`, `"150Tx"`, `"200LxTx"`, `"200Lx"`, `"200Tx"`, `"250LxTx"`, `"250Lx"`,
#' `"250Tx"`
#'
#'@param gSGC.parameters [data.frame] (*optional*): an own parameter set for the
#'gSGC with the following columns `y1`, `y1_err`, `D1`, `D1_err`,
#'`y2`, `y2_err`, `D2`, `D2_err`, `y0`, `y0_err`.
#'
#'@param n.MC [numeric] (*with default*): number of Monte-Carlo runs for the
#'error calculation
#'
#'@param plot [logical] (*with default*): enables/disables the control plot output
#'
#'@return Returns an S4 object of type [RLum.Results-class].
#'
#' **`@data`**\cr
#' `$ df` ([data.frame]) \cr
#' `.. $DE` the calculated equivalent dose\cr
#' `.. $DE.ERROR` error on the equivalent dose, which is the standard deviation of the MC runs\cr
#' `.. $HPD95_LOWER` lower boundary of the highest probability density (95%)\cr
#' `.. $HPD95_UPPER` upper boundary of the highest probability density (95%)\cr
#' `$ m.MC` ([list]) numeric vector with results from the MC runs.\cr
#'
#' **`@info`**\cr
#' `$ call` ([call]) the original function call
#'
#' @section Function version: 0.1.0
#'
#' @author Harrison Gray, USGS (United States),
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Results-class], [get_RLum], [uniroot], [calc_gSGC]
#'
#' @references Li, B., Roberts, R.G., Jacobs, Z., Li, S.-H., Guo, Y.-J., 2015.
#' Construction of a “global standardised growth curve” (gSGC) for infrared
#' stimulated luminescence dating of K-feldspar 27, 119–130. \doi{10.1016/j.quageo.2015.02.010}
#'
#' @keywords datagen
#'
#' @examples
#'
#' ##test on a generated random sample
#' n_samples <- 10
#' data <- data.frame(
#' LnTn = rnorm(n=n_samples, mean=1.0, sd=0.02),
#' LnTn.error = rnorm(n=n_samples, mean=0.05, sd=0.002),
#' Lr1Tr1 = rnorm(n=n_samples, mean=1.0, sd=0.02),
#' Lr1Tr1.error = rnorm(n=n_samples, mean=0.05, sd=0.002),
#' Dr1 = rep(100,n_samples))
#'
#' results <- calc_gSGC_feldspar(
#' data = data, gSGC.type = "50LxTx",
#' plot = FALSE)
#'
#' plot_AbanicoPlot(results)
#'
#'@md
#'@export
calc_gSGC_feldspar <- function (
  data,
  gSGC.type = "50LxTx",
  gSGC.parameters,
  n.MC = 100,
  plot = FALSE
){
  # Apply the gSGC approach for feldspar MET-pIRIR (Li et al., 2015) to each
  # row of `data`, solving eq. 3 for the equivalent dose with uniroot() and
  # estimating its uncertainty by Monte-Carlo resampling of all inputs.
  # Integrity checks --------------------------------------------------------
  if (!is(data, "data.frame")) {
    stop("[calc_gSGC_feldspar()] 'data' needs to be of type data.frame.", call. = FALSE)
  }
  if (!is(gSGC.type[1], "character")) {
    stop("[calc_gSGC_feldspar()] 'gSGC.type' needs to be of type character.", call. = FALSE)
  }
  if (ncol(data) != 5) {
    stop("[calc_gSGC_feldspar()] Structure of 'data' does not fit the expectations.", call. = FALSE)
  }
  colnames(data) <- c("LnTn", "LnTn.error", "Lr1Tr1", "Lr1Tr1.error",
                      "Dr1")
  # Parametrize -------------------------------------------------------------
  params <- data.frame( # this is the data from Table 3 of Li et al., 2015
    Type = c("50LxTx", "50Lx", "50Tx", "100LxTx", "100Lx", "100Tx", "150LxTx", "150Lx", "150Tx", "200LxTx", "200Lx", "200Tx", "250LxTx", "250Lx", "250Tx"),
    y1 = c( 0.57, 0.36, 0.2, 0.39, 0.41, 0.28, 0.43, 0.4, 0.31, 0.3, 0.34, 0.37, 0.37, 0.17, 0.48),
    y1_err = c( 0.19, 0.25, 0.24, 0.12, 0.28, 0.22, 0.11, 0.27, 0.33, 0.06, 0.28, 0.28, 0.1, 0.12, 0.37),
    D1 = c( 241, 276, 259, 159, 304, 310, 177, 327, 372, 119, 316, 372, 142, 197, 410),
    D1_err = c( 66, 137, 279, 48, 131, 220, 41, 132, 300, 32, 145, 218, 35, 116, 210),
    y2 = c( 0.88, 1.37, 0.34, 0.91, 1.22, 0.42, 0.88, 1.26, 0.45, 0.95, 1.24, 0.43, 0.74, 1.32, 0.45),
    y2_err = c( 0.15, 0.19, 0.15, 0.1, 0.23, 0.26, 0.09, 0.23, 0.18, 0.05, 0.25, 0.24, 0.09, 0.1, 0.15),
    D2 = c( 1115, 1187, 1462, 741, 1146, 2715, 801, 1157, 2533, 661, 1023, 2792, 545, 830, 2175),
    D2_err = c( 344, 287, 191, 105, 288, 639, 109, 263, 608, 49, 205, 709, 62, 79, 420),
    y0 = c( 0.008, 0.003, 0.685, 0.018, 0.01, 0.64, 0.026, 0.015, 0.61, 0.034, 0.02, 0.573, 0.062, 0.028, 0.455),
    y0_err = c( 0.009, 0.009, 0.014, 0.008, 0.008, 0.015, 0.006, 0.007, 0.014, 0.006, 0.006, 0.013, 0.005, 0.005, 0.011),
    D0_2.3 = c( 2000, 2450, 1420, 1420, 2300, 2900, 1500, 2340, 2880, 1320, 2080, 2980, 1000, 1780, 2500),
    D0_3 = c( 2780, 3280, 2520, 1950, 3100, 4960, 2060, 3130, 4760, 1780, 2800, 5120, 1380, 2360, 4060)
  )
  # these are user specified parameters if they so desire
  if (!missing(gSGC.parameters)){
    y1 <- gSGC.parameters$y1
    y1_err <- gSGC.parameters$y1_err
    D1 <- gSGC.parameters$D1
    D1_err <- gSGC.parameters$D1_err
    y2 <- gSGC.parameters$y2
    y2_err <- gSGC.parameters$y2_err
    # BUG FIX: D2 and D2_err were never extracted from gSGC.parameters, so
    # the custom-parameter path always failed later with "object 'D2' not
    # found" when uniroot()/rnorm() were called
    D2 <- gSGC.parameters$D2
    D2_err <- gSGC.parameters$D2_err
    y0 <- gSGC.parameters$y0
    y0_err <- gSGC.parameters$y0_err
  } else {
    if (gSGC.type[1] %in% params$Type){
      # take the user input pIRSL temperature and assign the correct parameters
      index <- match(gSGC.type,params$Type)
      y1 <- params$y1[index]
      y1_err <- params$y1_err[index]
      D1 <- params$D1[index]
      D1_err <- params$D1_err[index]
      y2 <- params$y2[index]
      y2_err <- params$y2_err[index]
      D2 <- params$D2[index]
      D2_err <- params$D2_err[index]
      y0 <- params$y0[index]
      y0_err <- params$y0_err[index]
    } else {
      # give error if input is wrong
      stop(
        paste0("[calc_gSGC_feldspar()] 'gSGC.type' needs to be one of the accepted values, such as: ",
               paste(params$Type, collapse = ", ")),
        call. = FALSE)
    }
  }
  ##set function for uniroot
  ## function from Li et al., 2015 eq: 3
  ## function that equals zero when the correct De is found.
  ## This is so uniroot can find the correct value or 'root'
  f <- function(De, Dr1, Lr1Tr1, LnTn, y1, D1, y2, D2, y0){
    f_D <- y1 * (1 - exp(-De / D1)) + y2 * (1 - exp(-De / D2)) + y0
    f_Dr <- y1 * (1 - exp(-Dr1 / D1)) + y2 * (1 - exp(-Dr1 / D2)) + y0
    ##return(f_D/Lr1Tr1 - f_Dr/LnTn) ##TODO double check seems to be wrong
    return(f_Dr/Lr1Tr1 - f_D/LnTn)
  }
  # Run calculation ---------------------------------------------------------
  l <- lapply(seq_len(nrow(data)), function(i) {
    Lr1Tr1 <- data[i, "Lr1Tr1"] #assign user's input data
    Lr1Tr1.error <- data[i, "Lr1Tr1.error"]
    Dr1 <- data[i, "Dr1"]
    LnTn <- data[i, "LnTn"]
    LnTn.error <- data[i, "LnTn.error"]
    ## uniroot solution
    temp <- try({
      uniroot(
        f,
        interval = c(0.1, 3000),
        tol = 0.001,
        Dr1 = Dr1,
        Lr1Tr1 = Lr1Tr1,
        LnTn = LnTn,
        y1 = y1,
        D1 = D1,
        y2 = y2,
        D2 = D2,
        y0 = y0,
        extendInt = "yes",
        check.conv = TRUE,
        maxiter = 1000)
    }, silent = TRUE) # solve for the correct De
    ## in case the initial uniroot solve does not work
    if(inherits(temp, "try-error")) {
      try(stop(paste0("[calc_gSGC_feldspar()] No solution was found for dataset: #", i,"! NA returned"), call. = FALSE))
      return(NA)
    }
    De <- temp$root
    temp.MC.matrix <- matrix(nrow = n.MC, ncol = 8)
    # to estimate the error, use a monte carlo simulation. assume error in input data is gaussian
    # create a matrix
    colnames(temp.MC.matrix) <- c("LnTn", "Lr1Tr1","y1", "D1", "y2", "D2", "y0", "De")
    # simulate random values for each parameter
    temp.MC.matrix[, 1:7] <- matrix(
      rnorm(n.MC * 7,
            mean = c(LnTn, Lr1Tr1, y1, D1, y2, D2, y0),
            sd = c(LnTn.error, Lr1Tr1.error, y1_err, D1_err, y2_err, D2_err, y0_err)),
      ncol = 7,
      byrow = TRUE)
    # now use the randomly generated parameters to calculate De's with uniroot
    for (j in 1:n.MC){
      temp2 <- try({
        uniroot(
          f,
          interval = c(0.1, 3000),
          tol = 0.001,
          LnTn = temp.MC.matrix[j, 1],
          Lr1Tr1 = temp.MC.matrix[j, 2],
          y1 = temp.MC.matrix[j, 3],
          D1 = temp.MC.matrix[j, 4],
          y2 = temp.MC.matrix[j, 5],
          D2 = temp.MC.matrix[j, 6],
          y0 = temp.MC.matrix[j, 7],
          Dr1 = Dr1,
          extendInt = "yes",
          check.conv = TRUE,
          maxiter = 1000
        )
      }, silent = TRUE)
      if (!inherits(temp2, "try-error")){
        temp.MC.matrix[j,8] <- temp2$root
      } else {
        # give an NA if uniroot cannot find a root (usually due to bad random values)
        temp.MC.matrix[j,8] <- NA
      }
    }
    # set the De uncertainty as the standard deviations of the randomly generated des
    De.error <- sd(temp.MC.matrix[, 8], na.rm = TRUE)
    return(list(
      DE = De,
      DE.ERROR = De.error,
      m.MC = temp.MC.matrix))
  })
  # Plotting ----------------------------------------------------------------
  if(plot){
    old.par <- par(no.readonly = TRUE)
    on.exit(par(old.par))
    par(mfrow = c(mfrow = c(3,3)))
    for (i in seq_along(l)) {
      if(is.na(l[[i]][1])) next();
      y_max <- max(l[[i]]$m.MC[, 1:2])
      plot(NA, NA,
           xlab = "Dose [a.u.]",
           ylab = "Norm. Signal",
           xlim = c(0, 3000),
           main = paste0("Dataset #", i),
           ylim = c(0, y_max)
      )
      # overplot one growth curve per MC draw
      for(j in 1:nrow(l[[i]]$m.MC)){
        #y1 * (1 - exp(-De / D1)) + y2 * (1 - exp(-De / D2)) + y0
        x <- NA
        curve(
          l[[i]]$m.MC[j, 3] * (1 - exp(-x / l[[i]]$m.MC[j, 4])) +
            l[[i]]$m.MC[j, 5] * (1 - exp(-x / l[[i]]$m.MC[j, 6])) +
            l[[i]]$m.MC[j, 7],
          col = rgb(0,0,0,0.4),
          add = TRUE)
      }
      # overlay a histogram of the MC De values, rescaled to the plot height
      par(new = TRUE)
      hist <- hist(na.exclude(l[[i]]$m.MC[, 8]),
                   plot = FALSE
      )
      hist$counts <- ((y_max/max(hist$counts)) * hist$counts) / 2
      plot(
        hist,
        xlab = "",
        ylab = "",
        axes = FALSE,
        xlim = c(0, 3000),
        ylim = c(0, y_max),
        main = ""
      )
    }
  }
  # Return ------------------------------------------------------------------
  ##output matrix: De, De error, and the 95% HPD interval per dataset
  m <- matrix(ncol = 4, nrow = nrow(data))
  ##calculate a few useful parameters
  for(i in seq_len(nrow(m))){
    if(is.na(l[[i]][1])) next();
    m[i,1] <- l[[i]]$DE
    m[i,2] <- l[[i]]$DE.ERROR
    HPD <- .calc_HPDI(na.exclude(l[[i]]$m.MC[,8]))
    m[i,3] <- HPD[1,1]
    m[i,4] <- HPD[1,2]
  }
  df <- data.frame(
    DE = m[, 1],
    DE.ERROR = m[, 2],
    HPD95_LOWER = m[, 3],
    HPD95_UPPER = m[, 4]
  )
  return(
    set_RLum("RLum.Results",
             data = list(
               data = df,
               m.MC = lapply(l, function(x) {if(is.na(x[[1]])) {return(x)} else {x$m.MC} })
             ),
             info = list(
               call = sys.call()
             )
    ))
}
| 11,289 | gpl-3.0 |
c4d7f9d14701eec0bc7ba360afbeccc5a1551178 | andrewdefries/andrewdefries.github.io | FDA_Pesticide_Glossary/ethephon.R | library("knitr")
# rgl is needed so the 3-D figures referenced by ethephon.Rmd can render.
library("rgl")
# Earlier multi-step pipeline (knit -> markdown -> HTML -> PDF), kept for reference:
#knit("ethephon.Rmd")
#markdownToHTML('ethephon.md', 'ethephon.html', options=c("use_xhml"))
#system("pandoc -s ethephon.html -o ethephon.pdf")
# Knit the R Markdown source directly to HTML in a single step.
knit2html('ethephon.Rmd')
| 204 | mit |
neighbors.identify <- function(neighbor.matrix, all.dists){
    ## Map each nearest-neighbor distance in `neighbor.matrix` back to the
    ## index of the neighbor it came from.
    ##
    ## neighbor.matrix : numeric matrix; row i holds the neighbor distances
    ##   found for item i.
    ## all.dists : either a "dist" object over the same items, or a matrix
    ##   whose row i holds the distances from item i to every candidate.
    ##
    ## Returns (invisibly) an integer matrix shaped like `neighbor.matrix`
    ## whose [i, j] entry is the index of the j-th neighbor of item i.
    ## Distances are matched by exact equality (match()), so the values in
    ## `neighbor.matrix` must come verbatim from `all.dists`.
    indices <- matrix(integer(length(neighbor.matrix)),
                      nrow(neighbor.matrix),
                      dimnames = dimnames(neighbor.matrix))

    if(inherits(all.dists, "dist")){
        dist.size <- attr(all.dists, "Size")
        ## seq_len() (not 1:n) so zero-row/zero-column input is a no-op
        for(i in seq_len(nrow(indices))){
            for(j in seq_len(ncol(indices))){
                ## position of this distance within the flat "dist" vector
                tmp <- match(neighbor.matrix[i,j], all.dists)
                ## diss.index() recovers the (row, col) pair of indices for
                ## that position; the entry that is not i itself is the
                ## neighbor's index.
                index.choices <- diss.index(tmp, dist.size)
                indices[i,j] <- index.choices[index.choices != i]
            }
        }
    }
    else if(is.matrix(all.dists)){
        for(i in seq_len(nrow(indices))){
            ## distances from item i to every candidate
            active.lookup <- all.dists[i,]
            for(j in seq_len(ncol(indices))){
                indices[i,j] <- match(neighbor.matrix[i,j], active.lookup)
            }
        }
    }
    return(invisible(indices))
}
| 1,021 | gpl-2.0 |
fc3170fb48c726d27b4b5990d71b2ddb44a2b12f | mrwizard82d1/DBDA2Eprograms | Jags-Ymet-Xmet-Mrobust-Example.R | # Example for Jags-Ymet-Xmet-Mrobust.R
#-------------------------------------------------------------------------------
# Optional generic preliminaries:
graphics.off()    # This closes all of R's graphics windows.
rm(list = ls())   # Careful! This clears all of R's memory!
#-------------------------------------------------------------------------------
# Load the data file and specify the column names of x (predictor) and
# y (predicted):
myData <- read.csv(file = "HtWtData30.csv")
xName <- "height"
yName <- "weight"
fileNameRoot <- "HtWtData30-Jags-"
#............................................................................
# Alternative, larger data set (uncomment to use):
# myData <- read.csv(file = "HtWtData300.csv")
# xName <- "height"; yName <- "weight"
# fileNameRoot <- "HtWtData300-Jags-"
#............................................................................
graphFileType <- "eps"
#-------------------------------------------------------------------------------
# Load the relevant model into R's working memory:
source("Jags-Ymet-Xmet-Mrobust.R")
#-------------------------------------------------------------------------------
# Generate the MCMC chain:
# startTime <- proc.time()
mcmcCoda <- genMCMC(data = myData, xName = xName, yName = yName,
                    numSavedSteps = 20000, saveName = fileNameRoot)
# stopTime <- proc.time()
# duration <- stopTime - startTime
# show(duration)
#-------------------------------------------------------------------------------
# Display diagnostics of the chain, for every monitored parameter:
parameterNames <- varnames(mcmcCoda)  # get all parameter names
for (parName in parameterNames) {
  diagMCMC(codaObject = mcmcCoda, parName = parName,
           saveName = fileNameRoot, saveType = graphFileType)
}
#-------------------------------------------------------------------------------
# Get summary statistics of the chain:
summaryInfo <- smryMCMC(mcmcCoda,
                        compValBeta1 = 0.0, ropeBeta1 = c(-0.5, 0.5),
                        saveName = fileNameRoot)
show(summaryInfo)
# Display posterior information:
plotMCMC(mcmcCoda, data = myData, xName = xName, yName = yName,
         compValBeta1 = 0.0, ropeBeta1 = c(-0.5, 0.5),
         pairsPlot = TRUE, showCurve = FALSE,
         saveName = fileNameRoot, saveType = graphFileType)
#-------------------------------------------------------------------------------
| 2,451 | mit |
fc3170fb48c726d27b4b5990d71b2ddb44a2b12f | bdetweiler/stat-8416-final-project | DBDA2Eprograms/Jags-Ymet-Xmet-Mrobust-Example.R | # Example for Jags-Ymet-Xmet-Mrobust.R
#-------------------------------------------------------------------------------
# Optional generic preliminaries:
graphics.off()    # This closes all of R's graphics windows.
rm(list = ls())   # Careful! This clears all of R's memory!
#-------------------------------------------------------------------------------
# Load the data file and specify the column names of x (predictor) and
# y (predicted):
myData <- read.csv(file = "HtWtData30.csv")
xName <- "height"
yName <- "weight"
fileNameRoot <- "HtWtData30-Jags-"
#............................................................................
# Alternative, larger data set (uncomment to use):
# myData <- read.csv(file = "HtWtData300.csv")
# xName <- "height"; yName <- "weight"
# fileNameRoot <- "HtWtData300-Jags-"
#............................................................................
graphFileType <- "eps"
#-------------------------------------------------------------------------------
# Load the relevant model into R's working memory:
source("Jags-Ymet-Xmet-Mrobust.R")
#-------------------------------------------------------------------------------
# Generate the MCMC chain:
# startTime <- proc.time()
mcmcCoda <- genMCMC(data = myData, xName = xName, yName = yName,
                    numSavedSteps = 20000, saveName = fileNameRoot)
# stopTime <- proc.time()
# duration <- stopTime - startTime
# show(duration)
#-------------------------------------------------------------------------------
# Display diagnostics of the chain, for every monitored parameter:
parameterNames <- varnames(mcmcCoda)  # get all parameter names
for (parName in parameterNames) {
  diagMCMC(codaObject = mcmcCoda, parName = parName,
           saveName = fileNameRoot, saveType = graphFileType)
}
#-------------------------------------------------------------------------------
# Get summary statistics of the chain:
summaryInfo <- smryMCMC(mcmcCoda,
                        compValBeta1 = 0.0, ropeBeta1 = c(-0.5, 0.5),
                        saveName = fileNameRoot)
show(summaryInfo)
# Display posterior information:
plotMCMC(mcmcCoda, data = myData, xName = xName, yName = yName,
         compValBeta1 = 0.0, ropeBeta1 = c(-0.5, 0.5),
         pairsPlot = TRUE, showCurve = FALSE,
         saveName = fileNameRoot, saveType = graphFileType)
#-------------------------------------------------------------------------------
| 2,451 | mit |
1a1fcba062ea55b54c8b18979e4230df69cec07a | CompNet/TopicControl | data/CKM_Physicians_Innovation/conversion.R | # setwd("D:/Eclipse/workspaces/Networks/")
# source("properties.R")
library("igraph")
data.folder <- "TopicControl/data/"
# layerID nodeID nodeID weight=1
net.folder <- paste(data.folder,"CKM_Physicians_Innovation/",sep="")
file.in <- paste(net.folder,"CKM-Physicians-Innovation_multiplex.edges",sep="")
file.out <- paste(net.folder,"network.graphml",sep="")
directed <- TRUE
# read as a table
tab <- as.matrix(read.table(file.in))
# build igraph
edges <- as.vector(t(tab[,2:3]))
if(min(edges)==0)
edges <- edges + 1
n <- max(edges)
types <- tab[,1]
g <- graph.empty(n=n,directed=directed)
g <- add.edges(g,edges,type=types)
# export as graphml
write.graph(graph=g,file=file.out,format="graphml")
| 733 | gpl-2.0 |
e999a306fb5d4529a68647408bfe977e9d7078ce | radfordneal/pqR | src/library/base/R/sink.R | # File src/library/base/R/sink.R
# Part of the R package, http://www.R-project.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
# Divert R output to a connection.
#
# file   : NULL to end the current diversion; a file name (output sink
#          only), which is opened here; or an already open connection.
# append : when 'file' is a file name, append instead of overwriting.
# type   : "output" diverts normal output; "message" diverts the
#          message/stderr stream.
# split  : if TRUE, send output both to the sink and to the current
#          output stream (not available for the message stream).
sink <- function(file=NULL, append = FALSE, type = c("output", "message"),
                 split=FALSE)
{
    type <- match.arg(type)
    if(type == "message") {
        ## Message stream: NULL restores stderr(); otherwise only an
        ## already-open connection is accepted (it is not opened here).
        if(is.null(file)) file <- stderr()
        else if(!inherits(file, "connection") || !isOpen(file))
            stop("'file' must be NULL or an already open connection")
        if (split) stop("cannot split the message connection")
        .Internal(sink(file, FALSE, TRUE, FALSE))
    } else {
        closeOnExit <- FALSE
        ## NULL means "end the diversion"; passed to the internal code as -1L.
        if(is.null(file)) file <- -1L
        else if(is.character(file)) {
            ## A file name: open it here and have the internal code close it
            ## again when the diversion ends (closeOnExit = TRUE).
            file <- file(file, ifelse(append, "a", "w"))
            closeOnExit <- TRUE
        } else if(!inherits(file, "connection"))
            stop("'file' must be NULL, a connection or a character string")
        .Internal(sink(file, closeOnExit, FALSE,split))
    }
}
# Report how many diversions are currently in place for the chosen stream.
sink.number <- function(type = c("output", "message"))
{
    type <- match.arg(type)
    ## the internal call takes a logical: TRUE = output stack, FALSE = message
    .Internal(sink.number(type != "message"))
}
| 1,670 | gpl-2.0 |
e999a306fb5d4529a68647408bfe977e9d7078ce | hlin09/renjin | core/src/main/R/base/sink.R | # File src/library/base/R/sink.R
# Part of the R package, http://www.R-project.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
# Divert R output to a connection.
#
# file   : NULL to end the current diversion; a file name (output sink
#          only), which is opened here; or an already open connection.
# append : when 'file' is a file name, append instead of overwriting.
# type   : "output" diverts normal output; "message" diverts the
#          message/stderr stream.
# split  : if TRUE, send output both to the sink and to the current
#          output stream (not available for the message stream).
sink <- function(file=NULL, append = FALSE, type = c("output", "message"),
                 split=FALSE)
{
    type <- match.arg(type)
    if(type == "message") {
        ## Message stream: NULL restores stderr(); otherwise only an
        ## already-open connection is accepted (it is not opened here).
        if(is.null(file)) file <- stderr()
        else if(!inherits(file, "connection") || !isOpen(file))
            stop("'file' must be NULL or an already open connection")
        if (split) stop("cannot split the message connection")
        .Internal(sink(file, FALSE, TRUE, FALSE))
    } else {
        closeOnExit <- FALSE
        ## NULL means "end the diversion"; passed to the internal code as -1L.
        if(is.null(file)) file <- -1L
        else if(is.character(file)) {
            ## A file name: open it here and have the internal code close it
            ## again when the diversion ends (closeOnExit = TRUE).
            file <- file(file, ifelse(append, "a", "w"))
            closeOnExit <- TRUE
        } else if(!inherits(file, "connection"))
            stop("'file' must be NULL, a connection or a character string")
        .Internal(sink(file, closeOnExit, FALSE,split))
    }
}
# Report how many diversions are currently in place for the chosen stream.
sink.number <- function(type = c("output", "message"))
{
    type <- match.arg(type)
    ## the internal call takes a logical: TRUE = output stack, FALSE = message
    .Internal(sink.number(type != "message"))
}
| 1,670 | gpl-3.0 |
e999a306fb5d4529a68647408bfe977e9d7078ce | jukiewiczm/renjin | core/src/main/R/base/sink.R | # File src/library/base/R/sink.R
# Part of the R package, http://www.R-project.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
# Divert R output to a connection.
#
# file   : NULL to end the current diversion; a file name (output sink
#          only), which is opened here; or an already open connection.
# append : when 'file' is a file name, append instead of overwriting.
# type   : "output" diverts normal output; "message" diverts the
#          message/stderr stream.
# split  : if TRUE, send output both to the sink and to the current
#          output stream (not available for the message stream).
sink <- function(file=NULL, append = FALSE, type = c("output", "message"),
                 split=FALSE)
{
    type <- match.arg(type)
    if(type == "message") {
        ## Message stream: NULL restores stderr(); otherwise only an
        ## already-open connection is accepted (it is not opened here).
        if(is.null(file)) file <- stderr()
        else if(!inherits(file, "connection") || !isOpen(file))
            stop("'file' must be NULL or an already open connection")
        if (split) stop("cannot split the message connection")
        .Internal(sink(file, FALSE, TRUE, FALSE))
    } else {
        closeOnExit <- FALSE
        ## NULL means "end the diversion"; passed to the internal code as -1L.
        if(is.null(file)) file <- -1L
        else if(is.character(file)) {
            ## A file name: open it here and have the internal code close it
            ## again when the diversion ends (closeOnExit = TRUE).
            file <- file(file, ifelse(append, "a", "w"))
            closeOnExit <- TRUE
        } else if(!inherits(file, "connection"))
            stop("'file' must be NULL, a connection or a character string")
        .Internal(sink(file, closeOnExit, FALSE,split))
    }
}
# Report how many diversions are currently in place for the chosen stream.
sink.number <- function(type = c("output", "message"))
{
    type <- match.arg(type)
    ## the internal call takes a logical: TRUE = output stack, FALSE = message
    .Internal(sink.number(type != "message"))
}
| 1,670 | gpl-3.0 |
38a342059c35fa16d049fe80379154a6f74ac783 | ecor/RMAWGEN | inst/doc/examples/example_scripts_presentation/temperature-generator_jss.R | # file weather-generator.R
#
# This file contains a script example with two coupled temperature and precipitation stochastic generations
#
#
# author: Emanuele Cordano on 12-01-2012
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
rm(list=ls())
library(RMAWGEN)
set.seed(1222)
data(trentino)
station <- c("T0090","T0083") #,"T0099","T0001")
# Calibration period
# MONTHLY CLIMATOLOGY
TX_CLIMATE <- NULL #Tx_1961_1990[,station]
TN_CLIMATE <- NULL #Tn_1961_1990[,station]
PREC_CLIMATE <- NULL #prec_1961_1990[,station] # NULL # Adjusts prec_1961_1990 with days!!!!
# Calibration period
year_max <- 1990
year_min <- 1961
origin <- "1961-1-1"
# Simulation period (Stochastic Generation)
# MONTHLY CLIMATOLOGY
# specific parameter for model calibration
n_GPCA_iter <- 5
n_GPCA_iteration_residuals <- 5
p_test <- 1
p_temp <- 10
exogen <- NULL
exogen_sim <- exogen
generationP10GPCA_temp <- ComprehensiveTemperatureGenerator(station=station,Tx_all=TEMPERATURE_MAX,Tn_all=TEMPERATURE_MIN,year_min=year_min,year_max=year_max,p=p_temp,n_GPCA_iteration=n_GPCA_iter,n_GPCA_iteration_residuals=n_GPCA_iteration_residuals,exogen=exogen,exogen_sim=exogen_sim,sample="monthly",mean_climate_Tn=TN_CLIMATE,mean_climate_Tx=TX_CLIMATE)
generationP01GPCA_temp <- ComprehensiveTemperatureGenerator(station=station,Tx_all=TEMPERATURE_MAX,Tn_all=TEMPERATURE_MIN,year_min=year_min,year_max=year_max,p=p_test,n_GPCA_iteration=n_GPCA_iter,n_GPCA_iteration_residuals=n_GPCA_iteration_residuals,exogen=exogen,exogen_sim=exogen_sim,sample="monthly",mean_climate_Tn=TN_CLIMATE,mean_climate_Tx=TX_CLIMATE)
generationP10_temp <- ComprehensiveTemperatureGenerator(station=station,Tx_all=TEMPERATURE_MAX,Tn_all=TEMPERATURE_MIN,year_min=year_min,year_max=year_max,p=p_temp,n_GPCA_iteration=0,n_GPCA_iteration_residuals=0,exogen=exogen,exogen_sim=exogen_sim,sample="monthly",mean_climate_Tn=TN_CLIMATE,mean_climate_Tx=TX_CLIMATE)
generationP01_temp <- ComprehensiveTemperatureGenerator(station=station,Tx_all=TEMPERATURE_MAX,Tn_all=TEMPERATURE_MIN,year_min=year_min,year_max=year_max,p=p_test,n_GPCA_iteration=0,n_GPCA_iteration_residuals=0,exogen=exogen,exogen_sim=exogen_sim,sample="monthly",mean_climate_Tn=TN_CLIMATE,mean_climate_Tx=TX_CLIMATE)
# VAR select
VARselect(generationP01_temp$input$data_for_var,lag.max=20)
VARselect(generationP01GPCA_temp$var@GPCA_data$final_results,lag.max=20)
normality_test(generationP01_temp$var)
normality_test(generationP10_temp$var)
normality_test(generationP01GPCA_temp$var)
normality_test(generationP10GPCA_temp$var)
serial_test(generationP01_temp$var)
serial_test(generationP10_temp$var)
serial_test(generationP01GPCA_temp$var)
serial_test(generationP10GPCA_temp$var)
# Collecting the measured and generated time series
Tn_mes <- generationP01_temp$input$Tn_mes
Tx_mes <- generationP01_temp$input$Tx_mes
Tx_spline <- generationP01_temp$input$Tx_spline
Tn_spline <- generationP01_temp$input$Tn_spline
Tx_gen <- list(P10GPCA=generationP10GPCA_temp$output$Tx_gen,
P01GPCA=generationP01GPCA_temp$output$Tx_gen,
P10=generationP10GPCA_temp$output$Tx_gen,
P01=generationP01GPCA_temp$output$Tx_gen)
Tn_gen <- list(P10GPCA=generationP10GPCA_temp$output$Tn_gen,
P01GPCA=generationP01GPCA_temp$output$Tn_gen,
P10=generationP10GPCA_temp$output$Tn_gen,
P01=generationP01GPCA_temp$output$Tn_gen)
NDAY <- nrow(Tx_mes)
days <- list()
days$DJF <- extractmonths(data=1:NDAY,when=c("Dec","Jan","Feb"),origin=origin)
days$MAM <- extractmonths(data=1:NDAY,when=c("Mar","Apr","May"),origin=origin)
days$JJA <- extractmonths(data=1:NDAY,when=c("Jun","Jul","Aug"),origin=origin)
days$SON <- extractmonths(data=1:NDAY,when=c("Sep","Oct","Nov"),origin=origin)
# SET THE CORRECT PATH WHERE TO PLOT THE FIGURES
wpath <- "./"
station00 <- "T0090"
CEX <- 1.4
# For each season, write Q-Q plots (one PDF per quantity) comparing the
# measured series against every stochastic-generation scenario at station
# `station00`: daily maxima, minima, daily range, and anomalies.
for (it in names(days)) {
	str(it)
	name <- it
	# integer day indices belonging to the current season
	season <- days[[it]]
	# output PDF paths, one per plotted quantity
	pdf_Tx <- paste(wpath,"/tx_qqplot_",year_min,"_",year_max,"_",it,".pdf",sep="")
	pdf_Tn <- paste(wpath,"/tn_qqplot_",year_min,"_",year_max,"_",it,".pdf",sep="")
	pdf_deltaT <- paste(wpath,"/dt_qqplot_",year_min,"_",year_max,"_",it,".pdf",sep="")
	pdf_Tx_anom <- paste(wpath,"/tx_anom_qqplot_",year_min,"_",year_max,"_",it,".pdf",sep="")
	pdf_Tn_anom <- paste(wpath,"/tn_anom_qqplot_",year_min,"_",year_max,"_",it,".pdf",sep="")
	# plot titles, one entry per generation scenario
	main_tx <- paste("Tx",names(Tx_gen),station00,it,sep=" ")
	main_tn <- paste("Tn",names(Tn_gen),station00,it,sep=" ")
	main_deltat <- paste("dT",names(Tx_gen),station00,it,sep=" ")
	main_tx_anom <- paste("Tx_anom",names(Tx_gen),station00,it,sep=" ")
	main_tn_anom <- paste("Tn_anom",names(Tn_gen),station00,it,sep=" ")
	# absolute series
	qqplot_RMAWGEN_Tx(Tx_mes=Tx_mes,Tn_mes=Tn_mes,Tx_gen=Tx_gen,Tn_gen=Tn_gen,main=main_tx,station=station00,when=season,pdf=pdf_Tx,cex.main=CEX,cex.lab=CEX,cex.axis=CEX)
	qqplot_RMAWGEN_Tn(Tx_mes=Tx_mes,Tn_mes=Tn_mes,Tx_gen=Tx_gen,Tn_gen=Tn_gen,main=main_tn,station=station00,when=season,pdf=pdf_Tn,cex.main=CEX,cex.lab=CEX,cex.axis=CEX)
	qqplot_RMAWGEN_deltaT(Tx_mes=Tx_mes,Tn_mes=Tn_mes,Tx_gen=Tx_gen,Tn_gen=Tn_gen,main=main_deltat,station=station00,when=season,pdf=pdf_deltaT,cex.main=CEX,cex.lab=CEX,cex.axis=CEX)
	# anomalies: spline baselines supplied as well (presumably subtracted
	# inside the qqplot helpers -- confirm against the RMAWGEN docs)
	qqplot_RMAWGEN_Tx(Tx_mes=Tx_mes,Tn_mes=Tn_mes,Tx_gen=Tx_gen,Tn_gen=Tn_gen,Tx_spline=Tx_spline,Tn_spline=Tn_spline,main=main_tx_anom,station=station00,when=season,pdf=pdf_Tx_anom,cex.main=CEX,cex.lab=CEX,cex.axis=CEX)
	qqplot_RMAWGEN_Tn(Tx_mes=Tx_mes,Tn_mes=Tn_mes,Tx_gen=Tx_gen,Tn_gen=Tn_gen,Tx_spline=Tx_spline,Tn_spline=Tn_spline,main=main_tn_anom,station=station00,when=season,pdf=pdf_Tn_anom,cex.main=CEX,cex.lab=CEX,cex.axis=CEX)
}
print("acf")
# ACF Function
# Autocorrelation plots (lags up to 50 days) comparing the generated
# P10GPCA scenario with the measured series: Tx anomalies, Tn anomalies,
# and the daily temperature range Tx - Tn.  One PDF per plot.
pdf(paste(wpath,"acf_tx_anom_P10GPCA.pdf",sep="/"))
plot(acf(Tx_gen$P10GPCA-Tx_spline,lag=50),xlab="lag [day]")
dev.off()
pdf(paste(wpath,"acf_tx_anom_mes.pdf",sep="/"))
plot(acf(Tx_mes-Tx_spline,lag=50))
dev.off()
pdf(paste(wpath,"acf_tn_anom_P10GPCA.pdf",sep="/"))
plot(acf(Tn_gen$P10GPCA-Tn_spline,lag=50),xlab="lag [day]")
dev.off()
pdf(paste(wpath,"acf_tn_anom_mes.pdf",sep="/"))
plot(acf(Tn_mes-Tn_spline,lag=50))
dev.off()
pdf(paste(wpath,"acf_deltat_P10GPCA.pdf",sep="/"))
plot(acf(Tx_gen$P10GPCA-Tn_gen$P10GPCA,lag=50),xlab="lag [day]")
dev.off()
pdf(paste(wpath,"acf_deltat_mes.pdf",sep="/"))
plot(acf(Tx_mes-Tn_mes,lag=50))
dev.off()
# COMPUTING CORRELATION OF GAUSSIANIZED VECTORS ....
#qqplot(generationP01_temp$input$res_multigen,generationP01_temp$input$data_for_var)
#cor(generationP01_temp$output$res_multigen) #,generationP01_temp$input$data_for_var)
#cor(generationP01_temp$input$data_for_var)
#cor(generationP10_temp$input$data_for_var)
#VARselect(generationP01_temp$input$data_for_var,lag.max=20)
#VARselect(generationP01GPCA_temp$var@GPCA_data$final_results,lag.max=20)
#....
##
## pdf_Tx <- '/Users/ecor/Dropbox/IASMA_CRI_DAES/beamerposter/EGU2012_EC/images_article/fig_Tx.pdf'
## pdf_Tn <- '/Users/ecor/Dropbox/IASMA_CRI_DAES/beamerposter/EGU2012_EC/images_article/fig_Tn.pdf'
## pdf_DeltaT <- '/Users/ecor/Dropbox/IASMA_CRI_DAES/beamerposter/EGU2012_EC/images_article/fig_DeltaT.pdf'
## #station0 <- "T0090"
## main <- names(Tx_gen)
## #source('/Users/ecor/Dropbox/iasma/RMAWGENdev/RMAWGEN/inst/doc/private/additional_functions/plot_RMAWGENts.R')
## #source('/Users/ecor/Dropbox/iasma/RMAWGENdev/RMAWGEN/inst/doc/private/additional_functions/qqplot_RMAWGENts.R')
##
## season <- DJF
##
## qqplot_RMAWGEN_Tn(Tx_mes=Tx_mes,Tn_mes=Tn_mes,Tx_gen=Tx_gen,Tn_gen=Tn_gen,station="T0090",when=season)
## qqplot_RMAWGEN_Tx(Tx_mes=Tx_mes,Tn_mes=Tn_mes,Tx_gen=Tx_gen,Tn_gen=Tn_gen,station="T0090",when=season)
## qqplot_RMAWGEN_DeltaT(Tx_mes=Tx_mes,Tn_mes=Tn_mes,Tx_gen=Tx_gen,Tn_gen=Tn_gen,station="T0090",when=season)
##
## #qqplot_RMAWGEN_Tx(Tx_mes=Tx_mes,Tx_gen=Tx_gen,Tn_mes=Tn_mes,Tn_gen=Tn_gen,pdf=pdf_Tx,station=station0)
## #qqplot_RMAWGEN_Tn(Tx_mes=Tx_mes,Tx_gen=Tx_gen,Tn_mes=Tn_mes,Tn_gen=Tn_gen,pdf=pdf_Tn,station=station0)
## #qqplot_RMAWGEN_deltaT(Tx_mes=Tx_mes,Tx_gen=Tx_gen,Tn_mes=Tn_mes,Tn_gen=Tn_gen,pdf=pdf_DeltaT,station=station0)
## | 8,701 | gpl-3.0 |
4f2c2f75cd83e41e99ae0f85f5244ff7b3b0a81a | google/GeoexperimentsResearch | tests/testthat/test_072_getgeonames.R | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
context("GetGeoNames")
## Shared fixtures: four geos split evenly between two geo groups.
unique.geos <- c("G11", "G21", "G12", "G22")
## Geo-to-group assignment; kGeo / kGeoGroup are the package's canonical
## column-name constants.
df.ga <- structure(data.frame(unique.geos,
                              c(1, 2, 1, 2)),
                   names=c(kGeo, kGeoGroup))
obj.ga <- GeoAssignment(df.ga)
## Two weeks of daily 'sales' per geo, wrapped as a GeoTimeseries.
first.date <- as.Date("2016-09-26")
df.gts <- expand.grid(date=first.date + c(0:13),
                      geo=unique.geos)
df.gts[["sales"]] <- abs(rnorm(nrow(df.gts)))
obj.gts <- GeoTimeseries(df.gts, metrics="sales")
## Experiment data carrying the geo assignment but no periods / treatment.
obj.ged <- GeoExperimentData(obj.gts, periods=NULL,
                             geo.assignment=obj.ga,
                             treat.assignment=NULL)
## A Geos object with one 'sales' volume value per geo.
df.geos <- data.frame(geo=unique.geos, sales=seq_along(unique.geos))
names(df.geos)[1] <- kGeo
geos <- Geos(df.geos, volume="sales")
test_that("'groups' must be a valid geo group number", {
# The test should be done in the dispatcher.
expect_error(GetGeoNames(NULL, groups="1"))
expect_error(GetGeoNames(NULL, groups="all"))
expect_error(GetGeoNames(NULL, groups=integer(0)))
expect_error(GetGeoNames(NULL, groups=FALSE))
})
context("GetGeoNames.Geos")
test_that("an unique list of geos is returned", {
expect_is(GetGeoNames(geos), "character")
expect_true(setequal(GetGeoNames(geos), unique.geos))
})
test_that("the geos are in sorted order", {
expect_identical(GetGeoNames(geos), sort(unique.geos))
})
test_that("an error is thrown if groups!=NULL", {
expect_error(GetGeoNames(geos, groups=1),
regexp="No geo group available")
obj.gts[[kGeoGroup]] <- 1L
expect_error(GetGeoNames(geos, groups=1),
regexp="No geo group available")
})
context("GetGeoNames.GeoTimeseries")
test_that("an unique list of geos is returned", {
expect_is(GetGeoNames(obj.gts), "character")
expect_true(setequal(GetGeoNames(obj.gts), unique.geos))
})
test_that("the geos are in sorted order", {
expect_identical(GetGeoNames(obj.gts), sort(unique.geos))
})
test_that("an error is thrown if groups!=NULL", {
expect_error(GetGeoNames(obj.gts, groups=1),
regexp="No geo group available")
obj.gts[[kGeoGroup]] <- 1L
expect_error(GetGeoNames(obj.gts, groups=1),
regexp="No geo group available")
})
context("GetGeoNames.GeoExperimentData")
test_that("an unique list of geos is returned", {
expect_true(setequal(GetGeoNames(obj.ged), unique.geos))
})
test_that("the geos are in sorted order", {
expect_identical(GetGeoNames(obj.ged), sort(unique.geos))
})
test_that("'groups' selects the right set of geos", {
expect_identical(GetGeoNames(obj.ged, groups=1),
df.ga[[kGeo]][df.ga[[kGeoGroup]] %in% 1])
expect_identical(GetGeoNames(obj.ged, groups=2),
df.ga[[kGeo]][df.ga[[kGeoGroup]] %in% 2])
})
test_that("a nonexisting 'groups' returns an empty set of geos", {
expect_identical(GetGeoNames(obj.ged, group=3), character(0))
})
test_that("error is thrown if geo assignment doesn't exist & groups != NULL", {
obj.ged2 <- SetInfo(obj.ged, geo.assignment=NULL)
expect_error(GetGeoNames(obj.ged2, group=1),
regexp="Cannot match groups: there is no geo assignment")
})
test_that("'groups' can be NA", {
expect_error(GetGeoNames(obj.ged, groups=NA),
regexp=NA)
})
context("GetGeoNames.GeoAssignment")
test_that("an unique list of geos is returned by default", {
expect_is(GetGeoNames(obj.ga), "character")
expect_true(setequal(GetGeoNames(obj.ga), obj.ga[[kGeo]]))
})
test_that("the geos are in sorted order", {
expect_identical(GetGeoNames(obj.ga), sort(obj.ga[[kGeo]]))
})
test_that("geos can be extracted by group", {
expect_identical(GetGeoNames(obj.ga, group=1), c("G11", "G12"))
expect_identical(GetGeoNames(obj.ga, group=2), c("G21", "G22"))
})
test_that("nonexisting group reference yields an empty set of geos", {
expect_identical(GetGeoNames(obj.ga, group=3), character(0))
})
context("GetGeoNames.GeoStrata")
geo.strata <- ExtractGeoStrata(obj.ged)
test_that("an unique list of geos is returned by default", {
expect_is(GetGeoNames(geo.strata), "character")
expect_true(setequal(GetGeoNames(geo.strata), geo.strata[[kGeo]]))
})
test_that("the geos are in sorted order", {
expect_identical(GetGeoNames(geo.strata), sort(geo.strata[[kGeo]]))
})
test_that("geo assignments that are fixed can be extracted by group", {
SetGeoGroup(geo.strata) <- obj.ga
expect_identical(GetGeoNames(geo.strata, group=1), c("G11", "G12"))
expect_identical(GetGeoNames(geo.strata, group=2), c("G21", "G22"))
})
test_that("geos can be extracted by group, including NA", {
# Assign G11 to group 1, others remain NA.
SetGeoGroup(geo.strata) <- GeoAssignment(data.frame(geo="G11", geo.group=1))
expect_identical(GetGeoNames(geo.strata, group=1), "G11")
expect_identical(GetGeoNames(geo.strata, group=2), character(0))
expect_identical(GetGeoNames(geo.strata, group=NA),
sort(setdiff(unique.geos, "G11")))
})
test_that("nonexisting group reference yields an empty set of geos", {
expect_identical(GetGeoNames(geo.strata, group=3), character(0))
})
| 5,662 | apache-2.0 |
990b7e68ac7d85b80aa39872ad18f756a43454e7 | hadley/r-source | src/library/methods/R/SClasses.R | # File src/library/methods/R/SClasses.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
setClass <-
## Define Class to be an S4 class.
function(Class, representation = list(), prototype = NULL,
contains = character(), validity = NULL, access = list(),
where = topenv(parent.frame()), version = .newExternalptr(),
sealed = FALSE, package = getPackageName(where),
S3methods = FALSE, slots)
{
oldDef <- getClassDef(Class, where)
if(is(oldDef, "classRepresentation") && oldDef@sealed)
stop(gettextf("%s has a sealed class definition and cannot be redefined",
dQuote(Class)),
domain = NA)
if(!missing(slots)) {
## The modern version consistent with reference classes
## Arguments slots= and contains= are used, representation must not be
if(!missing(representation))
stop("Argument \"representation\" cannot be used if argument \"slots\" is supplied")
properties <- inferProperties(slots, "slot")
classDef <- makeClassRepresentation(Class, properties,contains, prototype, package,
validity, access, version, sealed, where = where)
superClasses <- names(classDef@contains)
}
else if(is(representation, "classRepresentation")) {
## supplied a class definition object
classDef <- representation
if(!(missing(prototype) && missing(contains) && missing(validity) && missing(access)
&& missing(version) && missing(package)))
stop("only arguments 'Class' and 'where' can be supplied when argument 'representation' is a 'classRepresentation' object")
if(length(classDef@package) == 0L)
classDef@package <- package # the default
superClasses <- allNames(classDef@contains)
}
else {
## catch the special case of a single class name as the representation
if(is.character(representation) && length(representation) == 1L &&
is.null(names(representation)))
representation <- list(representation)
slots <- nzchar(allNames(representation))
superClasses <- c(as.character(representation[!slots]), contains)
properties <- representation[slots]
classDef <- makeClassRepresentation(Class, properties,superClasses, prototype, package,
validity, access, version, sealed, where = where)
superClasses <- names(classDef@contains)
}
classDef <- completeClassDefinition(Class, classDef, where, doExtends = FALSE)
## uncache an old definition for this package, if one is cached
.uncacheClass(Class, classDef)
if(length(superClasses) > 0L) {
sealed <- classDef@sealed
classDef@sealed <- FALSE # to allow setIs to work anyway; will be reset later
assignClassDef(Class, classDef, where)
badContains <- character()
for(class2 in superClasses) {
if(is(try(setIs(Class, class2, classDef = classDef, where = where)), "try-error"))
badContains <- c(badContains, class2)
else { # update class definition
classDef <- getClassDef(Class, where = where)
if(is.null(classDef))
stop(sprintf("internal error: definition of class %s not properly assigned",
dQuote(Class)),
domain = NA)
}
}
if(length(badContains)) {
msg <- paste(.dQ(badContains), collapse = ", ")
if(is(try(removeClass(Class, where)), "try-error"))
stop(gettextf("error in contained classes (%s) for class %s and unable to remove definition from %s",
msg, dQuote(Class),
sQuote(getPackageName(where))),
domain = NA)
if(is.null(oldDef))
stop(gettextf("error in contained classes (%s) for class %s; class definition removed from %s",
msg, dQuote(Class),
sQuote(getPackageName(where))),
domain = NA)
else if(is(try(setClass(Class, oldDef, where=where)), "try-error"))
stop(gettextf("error in contained classes (%s) for class %s and unable to restore previous definition from %s",
msg, dQuote(Class),
sQuote(getPackageName(where))),
domain = NA)
else
stop(gettextf("error in contained classes (%s) for class %s; previous definition restored to %s",
msg, dQuote(Class),
sQuote(getPackageName(where))),
domain = NA)
}
if(length(attr(classDef@contains, "conflicts")) > 0)
.reportSuperclassConflicts(Class, classDef@contains, where)
.checkRequiredGenerics(Class, classDef, where)
if(sealed) {
classDef@sealed <- TRUE
}
}
if(S3methods)
classDef <- .setS3MethodsOn(classDef)
assignClassDef(Class, classDef, where)
invisible(classGeneratorFunction(classDef, where))
}
representation <-
    ## Representation of a class; that is, a list of named slots
    ## (name = class of the slot) and unnamed class names to be included
    ## in a class definition.  Validates the arguments and returns the
    ## list unchanged.
    function(...)
{
    value <- list(...)
    ## unlike the S-Plus function, this does not form the class representation,
    ## since set SClass works separately with the slots and extends arguments.
    anames <- allNames(value)
    ## every element must be a single class name
    for(i in seq_along(value)) {
        ei <- el(value, i)
        if(!is.character(ei) || length(ei) != 1L)
            stop(gettextf("element %d of the representation was not a single character string", i), domain = NA)
    }
    ## unnamed elements are superclasses; they must be distinct
    includes <- as.character(value[!nzchar(anames)])
    if(anyDuplicated(includes))
        stop(gettextf("duplicate class names among superclasses: %s",
                      paste(.dQ(includes[duplicated(includes)]),
                            collapse = ", ")),
             domain = NA)
    ## named elements are slots; names must be distinct too
    slots <- anames[nzchar(anames)]
    if(anyDuplicated(slots)) {
        dslots <- slots[duplicated(slots)]
        ## join with ", " (was "") so several duplicated names do not run
        ## together in the message; matches the superclass message above
        stop(sprintf(ngettext(length(dslots),
                              "duplicated slot name: %s",
                              "duplicated slot names: %s"),
                     paste(sQuote(dslots), collapse = ", ")),
             domain = NA)
    }
    value
}
### the version called prototype is the external interface. But functions with argument
### named prototype in R cannot call the prototype function (until there is a methods namespace
### to allow methods::prototype(...)
## Exported interface: forwards to the internal .prototype() so that code
## with an argument named `prototype` can still reach this functionality.
prototype <- function(...)
    .prototype(...)
## Build a "classPrototypeDef" from the arguments: the single unnamed
## argument (if any) supplies the data part of the prototype object, and
## each named argument initializes the slot of that name.
.prototype <- function(...) {
    props <- list(...)
    names <- allNames(props)
    data <- !nzchar(names)  # TRUE for the unnamed (data-part) argument(s)
    dataPart <- any(data)
    if(dataPart) {
        if(sum(data) > 1)
            stop("only one data object (unnamed argument to prototype) allowed")
        obj <- unclass(props[[seq_along(data)[data] ]])
        props <- props[!data]
        names <- names[!data]
    }
    else
        obj <- defaultPrototype()
    ## assign slots without checking (3rd arg FALSE): the class to which
    ## this prototype belongs need not be defined yet
    for(i in seq_along(names))
        slot(obj, names[[i]], FALSE) <- props[[i]]
    new("classPrototypeDef", object = obj, slots = names, dataPart = dataPart)
}
makeClassRepresentation <-
  ## Set the Class Definition.
  ## The formal definition of the class is set according to the arguments.
  ## Returns a "classRepresentation" object built from the supplied slots,
  ## superclasses, prototype, validity and access information.
  ##
  ## Users should call setClass instead of this function.
  function(name, slots = list(), superClasses = character(), prototype = NULL,
           package, validity = NULL, access = list(), version = .newExternalptr(),
           sealed = FALSE, virtual = NA, where)
{
    ## abnormal basic types get an extra data-part superclass added
    ## (see .addAbnormalDataType)
    if(any(superClasses %in% .AbnormalTypes))
      superClasses <- .addAbnormalDataType(superClasses)
    if(!is.null(prototype) || length(slots) || length(superClasses)) {
        ## collect information about slots, create prototype if needed
        pp <- reconcilePropertiesAndPrototype(name, slots, prototype, superClasses, where)
        slots <- pp$properties
        prototype <- pp$prototype
    }
    contains <- list()
    if(nzchar(package))
        packageSlot(name) <- package
    ## resolve each superclass to its definition and record the extension
    for(what in superClasses) {
        whatClassDef <-
            if(is(what, "classRepresentation"))
                what
            else if(is.null(packageSlot(what)))
                getClass(what, where = where)
            else
                getClass(what)
        what <- whatClassDef@className # includes package name as attribute
        ## Create the SClassExtension objects (will be simple, possibly dataPart).
        ## The slots are supplied explicitly, since `name' is currently an undefined class
        elNamed(contains, what) <- makeExtends(name, what, slots = slots,
                                               classDef2 = whatClassDef, package = package)
    }
    validity <- .makeValidityMethod(name, validity)
    if(is.na(virtual)) {
        ## infer virtual-ness when the caller did not specify it
        virtual <- testVirtual(slots, contains, prototype, where)
        if(virtual && !is.na(match("VIRTUAL", superClasses)))
            elNamed(contains, "VIRTUAL") <- NULL
    }
    # new() must return an S4 object, except perhaps for basic classes
    if(!is.null(prototype) && is.na(match(name, .BasicClasses)))
      prototype <- .asS4(prototype)
    if(".S3Class" %in% names(slots))
        prototype <- .addS3Class(name, prototype, contains, where)
    newClassRepresentation(className = name, slots = slots,
                           contains = contains,
                           prototype = prototype,
                           virtual = virtual,
                           validity = validity,
                           access = access,
                           package = package,
                           versionKey = version,
                           sealed = sealed)
}
getClassDef <-
    ## Get the definition of the class supplied as a string.
    ## Returns NULL (rather than signalling an error) when no definition is found.
    function(Class, where = topenv(parent.frame()), package = packageSlot(Class),
             inherits = TRUE)
{
    value <- if(inherits) #includes both the lookup and Class being already a definition
        .getClassFromCache(Class, where)
    ## else NULL # want to force a search for the metadata in this case (Why?)
    if(is.null(value)) {
        ## cache miss: look the metadata object up under its mangled name
        cname <-
            classMetaName(if(length(Class) > 1L)
                          ## S3 class; almost certainly has no packageSlot,
                          ## but we'll continue anyway
                          Class[[1L]] else Class)
        ## a string with a package slot strongly implies the class definition
        ## should be in that package.
        if(identical(nzchar(package), TRUE)) {
            whereP <- .requirePackage(package)
            value <- get0(cname, whereP, inherits = inherits) # NULL if not existing
        }
        if(is.null(value))
            value <- get0(cname, where, inherits = inherits) # NULL if not existing
    }
    value
}
getClass <-
    ## Get the complete definition of the class supplied as a string,
    ## including all slots, etc. in classes that this class extends.
    ## Unlike getClassDef(), an undefined class is an error here, unless
    ## .Force is TRUE, in which case a virtual stand-in definition is made.
    function(Class, .Force = FALSE,
             where = .classEnv(Class, topenv(parent.frame()), FALSE))
{
    value <- .getClassFromCache(Class, where) # the quick way
    if(is.null(value)) {
        value <- getClassDef(Class, where) # searches
        if(is.null(value)) {
            if(!.Force)
                stop(gettextf("%s is not a defined class",
                              dQuote(Class)),
                     domain = NA)
            else
                value <- makeClassRepresentation(Class, package = "base",
                                                 virtual = TRUE, where = where)
        }
    }
    value
}
slot <-
    ## Get the value of the named slot. This function does exact, not partial, matching of names,
    ## and the name must be one of the slot names specified in the class's definition.
    ##
    ## Because slots are stored as attributes, the validity check is not 100% guaranteed,
    ## but should be OK if nobody has "cheated" (e.g., by setting other attributes directly).
    function(object, name)
    .Call(C_R_get_slot, object, name)

"slot<-" <-
    ## Set the value of the named slot. Must be one of the slots in the class's definition.
    ## With check = TRUE (the default), the value is first validated/coerced
    ## against the slot's declared class via checkSlotAssignment().
    function(object, name, check = TRUE, value) {
        if(check)
            value <- checkSlotAssignment(object, name, value)
        .Call(C_R_set_slot, object, name, value)
        ## currently --> R_do_slot_assign() in ../../../main/attrib.c
    }

## ". - hidden" since one should typically rather use is(), extends() etc:
.hasSlot <- function(object, name)
    .Call(C_R_hasSlot, object, name)
checkSlotAssignment <- function(obj, name, value)
{
    ## Validate (and possibly coerce) `value` before it is stored in slot
    ## `name` of `obj`; called from "slot<-" when check = TRUE.
    cl <- class(obj)
    ClassDef <- getClass(cl) # fails if cl not a defined class (!)
    slotClass <- elNamed(ClassDef@slots, name)
    if(is.null(slotClass))
        stop(gettextf("%s is not a slot in class %s",
                      sQuote(name), dQuote(cl)),
             domain = NA)
    valueClass <- class(value)
    if(.identC(slotClass, valueClass))
        return(value)
    ## check the value, but be careful to use the definition of the slot's class from
    ## the class environment of obj (change validObject too if a better way is found)
    ok <- possibleExtends(valueClass, slotClass,
                          ClassDef2 = getClassDef(slotClass, where = .classEnv(ClassDef)))
    if(identical(ok, FALSE))
        stop(gettextf("assignment of an object of class %s is not valid for slot %s in an object of class %s; is(value, \"%s\") is not TRUE",
                      dQuote(valueClass), sQuote(name), dQuote(cl), slotClass),
             domain = NA)
    else if(identical(ok, TRUE))
        value
    else
        ## non-trivial extension object: coerce through it
        as(value, slotClass, strict=FALSE, ext = ok)
}
## slightly simpler version to be called from do_attrgets()
checkAtAssignment <- function(cl, name, valueClass)
{
    ## Like checkSlotAssignment() but checks only class compatibility:
    ## returns TRUE on success, signals an error otherwise; no coercion.
    ClassDef <- getClass(cl) # fails if cl not a defined class (!)
    slotClass <- elNamed(ClassDef@slots, name)
    if(is.null(slotClass))
        stop(gettextf("%s is not a slot in class %s",
                      sQuote(name), dQuote(cl)),
             domain = NA)
    if(.identC(slotClass, valueClass))
        return(TRUE)
    ## check the value, but be careful to use the definition of the slot's class from
    ## the class environment of obj (change validObject too if a better way is found)
    ok <- possibleExtends(valueClass, slotClass,
                          ClassDef2 = getClassDef(slotClass, where = .classEnv(ClassDef)))
    if(identical(ok, FALSE))
        stop(gettextf("assignment of an object of class %s is not valid for @%s in an object of class %s; is(value, \"%s\") is not TRUE",
                      dQuote(valueClass), sQuote(name), dQuote(cl), slotClass),
             domain = NA)
    TRUE
}
## Now a primitive in base
## "@<-" <-
## function(object, name, value) {
## arg <- substitute(name)
## if(is.name(arg))
## name <- as.character(arg)
## "slot<-"(object, name, TRUE, value)
## }
## The names of the class's slots. The argument is either the name
## of a class, or an object from the relevant class.
## NOTA BENE: .slotNames() shouldn't be needed,
## rather slotNames() should be changed (to work like .slotNames())!
slotNames <- function(x)
    if(is(x, "classRepresentation")) names(x@slots) else .slotNames(x)

.slotNames <- function(x)
{
    ## accepts a single class name (character) or any object; returns
    ## character() when the class has no formal definition
    classDef <- getClassDef(
        if(!isS4(x) && is.character(x) && length(x) == 1L) x else class(x))
    if(is.null(classDef))
        character()
    else
        names(classDef@slots)
}
removeClass <- function(Class, where = topenv(parent.frame())) {
    ## Remove the metadata defining class `Class`, also detaching it from
    ## the inheritance information of its known subclasses.
    ## Returns TRUE on success, FALSE (with a warning) if no definition found.
    if(missing(where)) {
        ## locate the environment(s) actually holding the definition
        classEnv <- .classEnv(Class, where, FALSE)
        classWhere <- findClass(Class, where = classEnv)
        if(length(classWhere) == 0L) {
            warning(gettextf("class definition for %s not found (no action taken)",
                             dQuote(Class)),
                    domain = NA)
            return(FALSE)
        }
        if(length(classWhere) > 1L)
            warning(gettextf("class %s has multiple definitions visible; only the first removed",
                             dQuote(Class)),
                    domain = NA)
        classWhere <- classWhere[[1L]]
    }
    else classWhere <- where
    classDef <- getClassDef(Class, where=classWhere)
    if(length(classDef@subclasses)) {
        ## drop `Class` from the superclass information of its subclasses
        subclasses <- names(classDef@subclasses)
        found <- vapply(subclasses, isClass, NA, where = where, USE.NAMES=TRUE)
        for(what in subclasses[found])
            .removeSuperClass(what, Class)
    }
    .removeSuperclassBackRefs(Class, classDef, classWhere)
    .uncacheClass(Class, classDef)
    .undefineMethod("initialize", Class, classWhere)
    what <- classMetaName(Class)
    rm(list=what, pos=classWhere)
    TRUE
}
isClass <-
    ## Is this a formally defined class?
    ## TRUE iff a class definition object for `Class` is visible from `where`.
    function(Class, formal=TRUE, where = topenv(parent.frame()))
    ## argument formal is for Splus compatibility & is ignored. (All classes that
    ## are defined must have a class definition object.)
    !is.null(getClassDef(Class, where))
### TODO s/Class/._class/ -- in order to allow 'Class' as regular slot name
new <-
    ## Generate an object from the specified class.
    ##
    ## Note that the basic vector classes, `"numeric"', etc. are implicitly defined,
    ## so one can use `new' for these classes.
    ##
    ## The ... arguments are passed on to the class's initialize() method.
    function(Class, ...)
{
    ClassDef <- getClass(Class, where = topenv(parent.frame()))
    value <- .Call(C_new_object, ClassDef) # raw object built from the prototype
    initialize(value, ...)
}
getClasses <-
    ## The names of all the classes formally defined on `where'.
    ## If called with no argument, all the classes currently known in the session
    ## (which does not include classes that may be defined on one of the attached
    ## libraries, but have not yet been used in the session).
    function(where = .externalCallerEnv(), inherits = missing(where))
{
    ## class definitions are stored under mangled names; search by prefix
    pat <- paste0("^",classMetaName(""))
    if(inherits) {
        ## collect over `where` and all of its parent environments
        evList <- .parentEnvList(where)
        clNames <- character()
        for(ev in evList)
            clNames <- c(clNames, objects(ev, pattern = pat, all.names = TRUE))
        clNames <- unique(clNames)
    }
    else
        clNames <- objects(where, pattern = pat, all.names = TRUE)
    ## strip off the leading pattern (this implicitly assumes the characters
    ## in classMetaName("") are either "." or not metacharacters
    substring(clNames, nchar(pat, "c"))
}
validObject <- function(object, test = FALSE, complete = FALSE)
{
    ## Check that `object` is a valid instance of its class: every slot
    ## present and of (a class extending) its declared class, and all
    ## applicable validity methods satisfied.  Returns TRUE when valid;
    ## otherwise signals an error or, when test = TRUE, returns the
    ## character vector of problems.  complete = TRUE validates the slots'
    ## contents recursively as well.
    Class <- class(object)
    classDef <- getClassDef(Class)
    where <- .classEnv(classDef)
    ## helper: a validity method returns TRUE or a character vector of problems
    anyStrings <- function(x) if(identical(x, TRUE)) character() else x
    ## perform, from bottom up, the default and any explicit validity tests
    ## First, validate the slots.
    errors <- character()
    slotTypes <- classDef@slots
    slotNames <- names(slotTypes)
    attrNames <- c(".Data", ".S3Class", names(attributes(object)))
    if(any(is.na(match(slotNames, attrNames)))) {
        ## slots declared in the class but physically missing from the object
        badSlots <- is.na(match(slotNames, attrNames))
        errors <-
            c(errors,
              paste("slots in class definition but not in object:",
                    paste0('"', slotNames[badSlots], '"', collapse = ", ")))
        slotTypes <- slotTypes[!badSlots]
        slotNames <- slotNames[!badSlots]
    }
    for(i in seq_along(slotTypes)) {
        classi <- slotTypes[[i]]
        classDefi <- getClassDef(classi, where = where)
        if(is.null(classDefi)) {
            errors <- c(errors,
                        paste0("undefined class for slot \"", slotNames[[i]],
                               "\" (\"", classi, "\")"))
            next
        }
        namei <- slotNames[[i]]
        sloti <- try(switch(namei,
                            ## .S3Class for S3 objects (e.g., "factor")
                            .S3Class = S3Class(object),
                            slot(object, namei)
                            ), silent = TRUE)
        if(inherits(sloti, "try-error")) {
            errors <- c(errors, sloti)
            next
        }
        ## note that the use of possibleExtends is shared with checkSlotAssignment(), in case a
        ## future revision improves on it!
        ok <- possibleExtends(class(sloti), classi, ClassDef2 = classDefi)
        if(identical(ok, FALSE)) {
            errors <- c(errors,
                        paste0("invalid object for slot \"", slotNames[[i]],
                               "\" in class \"", Class,
                               "\": got class \"", class(sloti),
                               "\", should be or extend class \"", classi, "\""))
            next
        }
        if(!complete)
            next
        ## complete = TRUE: recursively validate the slot's own contents
        errori <- anyStrings(Recall(sloti, TRUE, TRUE))
        if(length(errori)) {
            errori <- paste0("In slot \"", slotNames[[i]],
                             "\" of class \"", class(sloti), "\": ", errori)
            errors <- c(errors, errori)
        }
    }
    ## run validity methods of superclasses, most basic first
    extends <- rev(classDef@contains)
    for(i in seq_along(extends)) {
        exti <- extends[[i]]
        superClass <- exti@superClass
        if(!exti@simple && !is(object, superClass))
            next ## skip conditional relations that don't hold for this object
        superDef <- getClassDef(superClass, where = where)
        if(is.null(superDef)) {
            errors <- c(errors,
                        paste0("superclass \"", superClass,
                               "\" not defined in the environment of the object's class"))
            break
        }
        validityMethod <- superDef@validity
        if(is(validityMethod, "function")) {
            errors <- c(errors, anyStrings(validityMethod(as(object, superClass))))
            if(length(errors))
                break
        }
    }
    ## the class's own validity method runs only if nothing failed so far
    validityMethod <- classDef@validity
    if(length(errors) == 0L && is(validityMethod, "function")) {
        errors <- c(errors, anyStrings(validityMethod(object)))
    }
    if(length(errors)) {
        if(test)
            errors
        else {
            msg <- gettextf("invalid class %s object", dQuote(Class))
            if(length(errors) > 1L)
                stop(paste(paste0(msg, ":"),
                           paste(seq_along(errors), errors, sep=": "),
                           collapse = "\n"), domain = NA)
            else stop(msg, ": ", errors, domain = NA)
        }
    }
    else
        TRUE
}
setValidity <- function(Class, method, where = topenv(parent.frame())) {
    ## Install `method` (a function of exactly one argument, or NULL to
    ## remove) as the validity method of `Class`, then reset the class so
    ## that completed definitions are recomputed.
    if(isClassDef(Class)) {
        ClassDef <- Class
        Class <- ClassDef@className
    }
    else {
        ClassDef <- getClassDef(Class, where)
    }
    method <- .makeValidityMethod(Class, method)
    if(is.null(method) ||
       (is(method, "function") && length(formalArgs(method)) == 1L))
        ClassDef@validity <- method
    else
        stop("validity method must be NULL or a function of one argument")
    ## TO DO: check the where argument against the package of the class def.
    assignClassDef(Class, ClassDef, where = where)
    resetClass(Class, ClassDef, where = where)
}
getValidity <- function (ClassDef) {
    ## "needed" according to ../man/validObject.Rd
    ## simple accessor: the validity function stored in a class definition
    ClassDef@validity
}
resetClass <- function(Class, classDef, where) {
    ## Recompute the completed definition of `Class` (e.g. after its
    ## validity or inheritance information changed) and re-assign it.
    ## Sealed classes are left untouched (with a warning).
    if(is(Class, "classRepresentation")) {
        classDef <- Class
        Class <- Class@className
        if(missing(where))
            where <- .classDefEnv(classDef)
    }
    else {
        if(missing(where)) {
            if(missing(classDef))
                where <- findClass(Class, unique = "resetting the definition")[[1L]]
            else
                where <- .classDefEnv(classDef)
        }
        if(missing(classDef)) {
            classDef <- getClassDef(Class, where)
            if(is.null(classDef)) {
                warning(gettextf("class %s not found on %s; 'resetClass' will have no effect",
                                 dQuote(Class),
                                 sQuote(getPackageName(where))),
                        domain = NA)
                return(classDef)
            }
        }
        else if(!is(classDef, "classRepresentation"))
            stop(gettextf("argument 'classDef' must be a string or a class representation; got an object of class %s",
                          dQuote(class(classDef))),
                 domain = NA)
        # package <- getPackageName(where)
    }
    if(classDef@sealed)
        warning(gettextf("class %s is sealed; 'resetClass' will have no effect",
                         dQuote(Class)),
                domain = NA)
    else {
        ## strip the completed (derived) information, then complete & store again
        classDef <- .uncompleteClassDefinition(classDef)
        classDef <- completeClassDefinition(Class, classDef, where)
        assignClassDef(Class, classDef, where)
    }
    classDef
}
## the (default) initialization: becomes the default method when the function
## is made a generic by .InitMethodDefinitions
initialize <- function(.Object, ...) {
    ## Default initialize method: unnamed arguments must be objects from
    ## superclasses of the target class (their contents are copied into
    ## .Object); named arguments must be slot names (assigned with
    ## non-strict coercion).  The result is checked with validObject().
    args <- list(...)
    if(length(args)) {
        Class <- class(.Object)
        ## the basic classes have fixed definitions
        if(!is.na(match(Class, .BasicClasses)))
            return(newBasic(Class, ...))
        ClassDef <- getClass(Class)
        ## separate the slots, superclass objects
        snames <- allNames(args)
        which <- nzchar(snames)
        elements <- args[which]
        supers <- args[!which]
        thisExtends <- names(ClassDef@contains)
        slotDefs <- ClassDef@slots
        dataPart <- elNamed(slotDefs, ".Data")
        if(is.null(dataPart)) dataPart <- "missing"
        if(length(supers)) {
            ## process superclass objects last-to-first so that earlier
            ## arguments take precedence
            for(i in rev(seq_along(supers))) {
                obj <- el(supers, i)
                Classi <- class(obj)
                if(length(Classi) > 1L)
                    Classi <- Classi[[1L]] #possible S3 inheritance
                ## test some cases that let information be copied into the
                ## object, ordered from more to less: all the slots in the
                ## first two cases, some in the 3rd, just the data part in 4th
                if(.identC(Classi, Class))
                    .Object <- obj
                else if(extends(Classi, Class))
                    .Object <- as(obj, Class, strict=FALSE)
                else if(extends(Class, Classi))
                    as(.Object, Classi) <- obj
                else if(extends(Classi, dataPart))
                    .Object@.Data <- obj
                else {
                    ## is there a class to which we can coerce obj
                    ## that is then among the superclasses of Class?
                    extendsi <- extends(Classi)[-1L]
                    ## look for the common extensions, choose the first
                    ## one in the extensions of Class
                    which <- match(thisExtends, extendsi)
                    which <- seq_along(which)[!is.na(which)]
                    if(length(which)) {
                        Classi <- thisExtends[which[1L]]
                        ### was: as(.Object, Classi) <- as(obj, Classi, strict = FALSE)
                        ## but as<- does an as(....) to its value argument
                        as(.Object, Classi) <- obj
                    }
                    else
                        stop(gettextf("cannot use object of class %s in new(): class %s does not extend that class",
                                      dQuote(Classi),
                                      dQuote(Class)),
                             domain = NA)
                }
            }
        }
        if(length(elements)) {
            snames <- names(elements)
            if(anyDuplicated(snames))
                stop(gettextf("duplicated slot names: %s",
                              paste(sQuote(snames[duplicated(snames)]),
                                    collapse = ", ")), domain = NA)
            which <- match(snames, names(slotDefs))
            if(anyNA(which))
                stop(sprintf(ngettext(sum(is.na(which)),
                                      "invalid name for slot of class %s: %s",
                                      "invalid names for slots of class %s: %s"),
                             dQuote(Class),
                             paste(snames[is.na(which)], collapse=", ")),
                     domain = NA)
            firstTime <- TRUE
            for(i in seq_along(snames)) {
                slotName <- el(snames, i)
                slotClass <- elNamed(slotDefs, slotName)
                slotClassDef <- getClassDef(slotClass, package = ClassDef@package)
                slotVal <- el(elements, i)
                ## perform non-strict coercion, but leave the error messages for
                ## values not conforming to the slot definitions to validObject(),
                ## hence the check = FALSE argument in the slot assignment
                if(!.identC(class(slotVal), slotClass)
                   && !is.null(slotClassDef) ) {
                    valClass <- class(slotVal)
                    valClassDef <- getClassDef(valClass, package = ClassDef@package)
                    if(!identical(possibleExtends(valClass, slotClass,
                                                  valClassDef, slotClassDef), FALSE))
                        slotVal <- as(slotVal, slotClass, strict = FALSE)
                }
                if (firstTime) {
                    ## force a copy of .Object
                    slot(.Object, slotName, check = FALSE) <- slotVal
                    firstTime <- FALSE
                } else {
                    ## XXX: do the assignment in-place
                    "slot<-"(.Object, slotName, check = FALSE, slotVal)
                }
            }
        }
        validObject(.Object)
    }
    .Object
}
findClass <- function(Class, where = topenv(parent.frame()), unique = "") {
    ## Find the environment(s) in which a definition of `Class` is stored;
    ## returns a list of environments.  When `unique` is a nonempty string
    ## (describing the purpose), exactly one location is required: zero
    ## locations is an error, several produce a warning and the first is used.
    if(is(Class, "classRepresentation")) {
        pkg <- Class@package
        classDef <- Class
        Class <- Class@className
    }
    else {
        pkg <- packageSlot(Class)
        if(is.null(pkg))
            pkg <- ""
        classDef <- getClassDef(Class, where, pkg)
    }
    where <- if(missing(where) && nzchar(pkg)) .requirePackage(pkg) else as.environment(where)
    what <- classMetaName(Class)
    where <- .findAll(what, where)
    if(length(where) > 1L && nzchar(pkg)) {
        ## a specific package was implied: restrict to its definition
        pkgs <- sapply(where, function(db)get(what, db)@package)
        where <- where[match(pkg, pkgs, 0L)]
    }
    else
        pkgs <- pkg
    if(length(where) == 0L) {
        if(is.null(classDef))
            classDef <- getClassDef(Class) # but won't likely succeed over previous
        if(nzchar(unique)) {
            if(is(classDef, "classRepresentation"))
                stop(gettextf("class %s is defined, with package %s, but no corresponding metadata object was found (not exported?)",
                              dQuote(Class),
                              sQuote(classDef@package)),
                     domain = NA)
            else
                stop(gettextf("no definition of %s to use for %s",
                              dQuote(Class),
                              unique),
                     domain = NA)
        }
    }
    else if(length(where) > 1L) {
        pkgs <- sapply(where, getPackageName, create = FALSE)
        ## not all environments need be packages (e.g., imports)
        ## We only try to eliminate duplicate package namespaces
        where <- where[!(nzchar(pkgs) & duplicated(pkgs))]
        if(length(where) > 1L)
            if(nzchar(unique)) {
                pkgs <- base::unique(pkgs)
                where <- where[1L]
                ## problem: 'unique' is text passed in, so do not translate
                warning(sprintf(ngettext(length(pkgs),
                                         "multiple definition of class %s visible (%s); using the definition\n in package %s for %s",
                                         "multiple definitions of class %s visible (%s); using the definition\n in package %s for %s"),
                                dQuote(Class),
                                paste(sQuote(pkgs), collapse = ", "),
                                sQuote(pkgs[[1L]]),
                                unique),
                        domain = NA)
            }
        ## else returns a list of >1 places, for the caller to sort out (e.g., .findOrCopyClass)
    }
    where
}
isSealedClass <- function(Class, where = topenv(parent.frame())) {
    ## Is the class definition sealed (i.e. protected against redefinition)?
    ## `Class` may be a class name or a class definition; anything that does
    ## not resolve to a classRepresentation yields FALSE.
    if(is.character(Class))
        Class <- getClass(Class, TRUE, where) # .Force: undefined names get a stand-in
    if(is(Class, "classRepresentation"))
        Class@sealed
    else
        FALSE
}
sealClass <- function(Class, where = topenv(parent.frame())) {
    ## Mark the definition of `Class` as sealed so that it can no longer be
    ## redefined or reset; returns the (possibly updated) definition, invisibly.
    if(missing(where))
        where <- findClass(Class, unique = "sealing the class", where = where)
    classDef <- getClassDef(Class, where)
    if(!classDef@sealed) {
        classDef@sealed <- TRUE
        assignClassDef(Class, classDef, where)
    }
    invisible(classDef)
}
## see $RHOME/src/main/duplicate.c for the corresponding datatypes
## not copied by duplicate1
.AbnormalTypes <- c("environment", "name", "externalptr", "NULL")
## wrapper classes representing the abnormal types indirectly
## (".environment", ".name", ...), indexed by the type name
.indirectAbnormalClasses <- paste0(".", .AbnormalTypes)
names(.indirectAbnormalClasses) <- .AbnormalTypes
## the types not supported by indirect classes (yet)
.AbnormalTypes <- c(.AbnormalTypes,
                    "special","builtin", "weakref", "bytecode")
.addAbnormalDataType <- function(classes) {
    ## Replace an "abnormal" basic type (environment, name, externalptr,
    ## NULL, ...) appearing in a vector of superclass names by the
    ## corresponding indirect wrapper class (e.g. ".environment").
    ## At most one such type may appear; the wrapper class is moved to the
    ## front so it becomes the data part.
    ## (idiom fixes: `<-` instead of `=` for assignment, `%in%` instead of
    ## match(..., 0) > 0; behavior unchanged)
    types <- classes %in% .AbnormalTypes
    type <- classes[types]
    if(length(type) == 0L)
        return(classes)
    if(length(type) > 1L)
        stop(gettextf("class definition cannot extend more than one of these data types: %s",
                      paste0('"', type, '"', collapse = ", ")),
             domain = NA)
    class <- .indirectAbnormalClasses[type]
    if(is.na(class))
        stop(gettextf("abnormal type %s is not supported as a superclass of a class definition",
                      dQuote(type)),
             domain = NA)
    ## this message USED TO BE PRINTED: reminds programmers that
    ## they will see an unexpected superclass
    ## message(gettextf('Defining type "%s" as a superclass via class "%s"',
    ##                  type, class), domain = NA)
    c(class, classes[!types])
}
## Intentionally a no-op: the check is currently disabled.  The real
## implementation (kept for possible re-enabling) is ..checkRequiredGenerics.
.checkRequiredGenerics <- function(Class, classDef, where) {}

..checkRequiredGenerics <- function(Class, classDef, where) {
    ## If any of the superclasses are in the .NeedPrimitiveMethods
    ## list, cache the corresponding generics now and also save their names in
    ## .requireCachedGenerics to be used when the environment
    ## where= is loaded.
    supers <- names(classDef@contains)
    allNeeded <- get(".NeedPrimitiveMethods", envir = .methodsNamespace)
    specials <- names(allNeeded)
    needed <- match(specials, supers, 0L) > 0L
    if(any(needed)) {
        generics <- unique(allNeeded[needed])
        packages <- vapply(generics, function(g) {
            def <- getGeneric(g)
            pkg <- def@package # must be "methods" ?
            cacheGenericsMetaData(g, def, TRUE, where, pkg)
            pkg
        }, character(1))
        ## append to any generics previously recorded for this environment
        previous <- if(exists(".requireCachedGenerics", where, inherits = FALSE))
            get(".requireCachedGenerics", where) else character()
        packages <- c(attr(previous, "package"), packages)
        gg <- c(previous, generics)
        attr(gg, "package") <- packages
        assign(".requireCachedGenerics", gg, where)
    }
}
.setS3MethodsOn <- function(classDef) {
    ## Mark a class definition as supporting S3 method dispatch: ensure it
    ## has a .S3Class slot and record the full extends() chain as the
    ## prototype's S3 class.  Returns the modified definition.
    ext <- extends(classDef)
    slots <- classDef@slots
    if(is.na(match(".S3Class", names(slots)))) {
        ## add the slot if it's not there
        slots$.S3Class <- getClass("oldClass")@slots$.S3Class
        classDef@slots <- slots
    }
    ## in any case give the prototype the full extends as .S3Class
    proto <- classDef@prototype
    if(is.null(proto)) # simple virtual class--unlikely but valid
        proto <- defaultPrototype()
    attr(proto, ".S3Class") <- ext
    classDef@prototype <- proto
    classDef
}
multipleClasses <- function(details = FALSE) {
    ## Classes whose entry in the class cache is a *list* of definitions
    ## were defined by more than one package.  Return their names, or the
    ## definition lists themselves when details = TRUE.
    cached <- as.list(.classTable, all.names = TRUE)
    clashes <- cached[vapply(cached, is.list, NA)]
    if(details)
        clashes
    else
        names(clashes)
}
className <- function(class, package) {
    ## Construct a "className" object: a character string carrying a
    ## package attribute.  `class` may be a character name or a class
    ## definition; when `package` is missing it is inferred from the
    ## class's (unique) visible definition.
    if(is(class, "character")) {
        className <- as.character(class)
        if(missing(package))
            package <- packageSlot(class)
        if(is.null(package)) {
            ## no package supplied: look the class up in the cache, then search
            if(exists(className, envir = .classTable, inherits = FALSE))
                classDef <- get(className, envir = .classTable)
            else {
                classDef <- findClass(className, topenv(parent.frame()))
                if(length(classDef) == 1)
                    classDef <- classDef[[1]]
            }
            ## at this point, classDef is the definition if
            ## unique, otherwise a list of 0 or >1 definitions
            if(is(classDef, "classRepresentation"))
                package <- classDef@package
            else if(length(classDef) > 1L) {
                pkgs <- sapply(classDef, function(cl) cl@package)
                warning(gettextf("multiple class definitions for %s from packages: %s; picking the first",
                                 dQuote(className),
                                 paste(sQuote(pkgs), collapse = ", ")),
                        domain = NA)
                package <- pkgs[[1L]]
            }
            else
                stop(gettextf("no package name supplied and no class definition found for %s",
                              dQuote(className)),
                     domain = NA)
        }
    }
    else if(is(class, "classRepresentation")) {
        ## FIX: was is(class, classDef) -- `classDef` is never defined in
        ## this branch, so passing a class definition always failed with
        ## "object 'classDef' not found".
        className <- class@className
        if(missing(package))
            package <- class@package
    }
    new("className", .Data = className, package = package)
}
## bootstrap version before the class is defined
classGeneratorFunction <- function(classDef, env = topenv(parent.frame())) {
    ## Build a generator closure whose body is `new(<className>, ...)`,
    ## with the class name (carrying its package attribute) baked into
    ## the call; the closure's environment is set to `env`.
    generator <- function(...) NULL
    environment(generator) <- env
    ## put the class name with package attribute into new()
    body(generator) <- substitute(new(CLASS, ...),
                                  list(CLASS = classDef@className))
    generator
}
.classGeneratorFunction <- function(classDef, env = topenv(parent.frame())) {
    ## Full version of classGeneratorFunction(): accepts a class name or a
    ## class definition and returns a "classGeneratorFunction" object that
    ## carries the class name and package in addition to the closure.
    if(is(classDef, "classRepresentation")) {}
    else if(is(classDef, "character")) {
        if(is.null(packageSlot(classDef)))
            classDef <- getClass(classDef, where = env)
        else
            classDef <- getClass(classDef)
    }
    else
        stop("argument 'classDef' must be a class definition or the name of a class")
    fun <- function(...)NULL
    ## put the class name with package attribute into new()
    body(fun) <- substitute(new(CLASS, ...),
                            list(CLASS = classDef@className))
    environment(fun) <- env
    fun <- as(fun, "classGeneratorFunction")
    fun@className <- classDef@className
    fun@package <- classDef@package
    fun
}
## grammar: 'what' is an adjective, so not plural ....
inferProperties <- function(props, what) {
    ## Normalize a slot/field specification into a named list of class names.
    ## `props` may be a named character vector (name -> class), an unnamed
    ## character vector of names (classes default to "ANY"), or an
    ## already-formed named list.  `what` ("slot", "field", ...) is used
    ## only in error messages.
    .validPropNames <- function(propNames) {
        n <- length(props)
        if(!n)
            return(character())
        else if(is.null(propNames))
            stop(gettextf("No %s names supplied", what),
                 domain = NA, call. = FALSE)
        else if(!all(nzchar(propNames)))
            stop(gettextf("All %s names must be nonempty in:\n(%s)", what,
                          paste(sQuote(propNames), collapse = ", ")),
                 domain = NA, call. = FALSE)
        else if(any(duplicated(propNames))) # NB: not translatable because of plurals
            stop(gettextf("All %s names must be distinct in:\n(%s)", what,
                          paste(sQuote(propNames), collapse = ", ")),
                 domain = NA, call. = FALSE)
        propNames
    }
    if(is.character(props)) {
        propNames <- names(props)
        if(is.null(propNames)) {
            propNames <- .validPropNames(props) # the text is the names
            ## treat as "ANY"
            props <- as.list(rep("ANY", length(props)))
            names(props) <- propNames
        }
        else {
            .validPropNames(propNames)
            props <- as.list(props)
        }
    }
    else if(is.list(props)) {
        if(length(props) > 0) # just validate them
            .validPropNames(names(props))
    }
    else
        ## FIX: was dQuote(class(fields)) -- `fields` is an undefined name,
        ## so this error path itself failed with "object 'fields' not found"
        stop(gettextf("argument %s must be a list or a character vector; got an object of class %s",
                      dQuote(what), dQuote(class(props))),
             domain = NA)
    props
}
| 40,618 | gpl-2.0 |
8d89013e960aa53ac59641028c621fb93e87f75c | fmakari/systemml | system-ml/src/test/scripts/functions/binary/matrix/IQM.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
# Reference implementation of the interquartile mean (IQM): the mean of the
# values between the 25% and 75% quantile boundaries, with fractional
# weights for the two boundary order statistics.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

# Input vector A, read from Matrix Market format under the first argument.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep = "")))
n <- nrow(A)
sortedA <- sort(A)

# Fractional and (ceiling) integer positions of the quartile boundaries.
q1Frac <- n * 0.25
q3Frac <- n * 0.75
q1Idx <- ceiling(q1Frac)
q3Idx <- ceiling(q3Frac)

# Sum over the interior of the interquartile range, then add the partial
# contributions of the two boundary elements.
total <- sum(sortedA[(q1Idx + 1):q3Idx])
total <- total + (q1Idx - q1Frac) * sortedA[q1Idx] - (q3Idx - q3Frac) * sortedA[q3Idx]
iqm <- total / (n * 0.5)

# Write the scalar result as a sparse 1x1 matrix to "<arg3>R".
writeMM(as(as.matrix(iqm), "CsparseMatrix"), paste(args[3], "R", sep = ""))
| 1,156 | apache-2.0 |
8d89013e960aa53ac59641028c621fb93e87f75c | wjuncdl/systemml | system-ml/src/test/scripts/functions/binary/matrix/IQM.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
# Reference implementation of the interquartile mean (IQM): the mean of the
# values between the 25% and 75% quantile boundaries, with fractional
# weights for the two boundary order statistics.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

# Input vector A, read from Matrix Market format under the first argument.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep = "")))
n <- nrow(A)
sortedA <- sort(A)

# Fractional and (ceiling) integer positions of the quartile boundaries.
q1Frac <- n * 0.25
q3Frac <- n * 0.75
q1Idx <- ceiling(q1Frac)
q3Idx <- ceiling(q3Frac)

# Sum over the interior of the interquartile range, then add the partial
# contributions of the two boundary elements.
total <- sum(sortedA[(q1Idx + 1):q3Idx])
total <- total + (q1Idx - q1Frac) * sortedA[q1Idx] - (q3Idx - q3Frac) * sortedA[q3Idx]
iqm <- total / (n * 0.5)

# Write the scalar result as a sparse 1x1 matrix to "<arg3>R".
writeMM(as(as.matrix(iqm), "CsparseMatrix"), paste(args[3], "R", sep = ""))
| 1,156 | apache-2.0 |
8d89013e960aa53ac59641028c621fb93e87f75c | Myasuka/systemml | system-ml/src/test/scripts/functions/binary/matrix/IQM.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
# Reference implementation of the interquartile mean (IQM): the mean of the
# values between the 25% and 75% quantile boundaries, with fractional
# weights for the two boundary order statistics.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

# Input vector A, read from Matrix Market format under the first argument.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep = "")))
n <- nrow(A)
sortedA <- sort(A)

# Fractional and (ceiling) integer positions of the quartile boundaries.
q1Frac <- n * 0.25
q3Frac <- n * 0.75
q1Idx <- ceiling(q1Frac)
q3Idx <- ceiling(q3Frac)

# Sum over the interior of the interquartile range, then add the partial
# contributions of the two boundary elements.
total <- sum(sortedA[(q1Idx + 1):q3Idx])
total <- total + (q1Idx - q1Frac) * sortedA[q1Idx] - (q3Idx - q3Frac) * sortedA[q3Idx]
iqm <- total / (n * 0.5)

# Write the scalar result as a sparse 1x1 matrix to "<arg3>R".
writeMM(as(as.matrix(iqm), "CsparseMatrix"), paste(args[3], "R", sep = ""))
| 1,156 | apache-2.0 |
8d89013e960aa53ac59641028c621fb93e87f75c | dusenberrymw/systemml_old | system-ml/src/test/scripts/functions/binary/matrix/IQM.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
# Reference implementation of the interquartile mean (IQM): the mean of the
# values between the 25% and 75% quantile boundaries, with fractional
# weights for the two boundary order statistics.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

# Input vector A, read from Matrix Market format under the first argument.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep = "")))
n <- nrow(A)
sortedA <- sort(A)

# Fractional and (ceiling) integer positions of the quartile boundaries.
q1Frac <- n * 0.25
q3Frac <- n * 0.75
q1Idx <- ceiling(q1Frac)
q3Idx <- ceiling(q3Frac)

# Sum over the interior of the interquartile range, then add the partial
# contributions of the two boundary elements.
total <- sum(sortedA[(q1Idx + 1):q3Idx])
total <- total + (q1Idx - q1Frac) * sortedA[q1Idx] - (q3Idx - q3Frac) * sortedA[q3Idx]
iqm <- total / (n * 0.5)

# Write the scalar result as a sparse 1x1 matrix to "<arg3>R".
writeMM(as(as.matrix(iqm), "CsparseMatrix"), paste(args[3], "R", sep = ""))
| 1,156 | apache-2.0 |
8d89013e960aa53ac59641028c621fb93e87f75c | aloknsingh/systemml | system-ml/src/test/scripts/functions/binary/matrix/IQM.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
# Reference implementation of the interquartile mean (IQM): the mean of the
# values between the 25% and 75% quantile boundaries, with fractional
# weights for the two boundary order statistics.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

# Input vector A, read from Matrix Market format under the first argument.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep = "")))
n <- nrow(A)
sortedA <- sort(A)

# Fractional and (ceiling) integer positions of the quartile boundaries.
q1Frac <- n * 0.25
q3Frac <- n * 0.75
q1Idx <- ceiling(q1Frac)
q3Idx <- ceiling(q3Frac)

# Sum over the interior of the interquartile range, then add the partial
# contributions of the two boundary elements.
total <- sum(sortedA[(q1Idx + 1):q3Idx])
total <- total + (q1Idx - q1Frac) * sortedA[q1Idx] - (q3Idx - q3Frac) * sortedA[q3Idx]
iqm <- total / (n * 0.5)

# Write the scalar result as a sparse 1x1 matrix to "<arg3>R".
writeMM(as(as.matrix(iqm), "CsparseMatrix"), paste(args[3], "R", sep = ""))
| 1,156 | apache-2.0 |
8d89013e960aa53ac59641028c621fb93e87f75c | ckadner/systemml | system-ml/src/test/scripts/functions/binary/matrix/IQM.R | #-------------------------------------------------------------
#
# (C) Copyright IBM Corp. 2010, 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------
# Reference implementation of the interquartile mean (IQM): the mean of the
# values between the 25% and 75% quantile boundaries, with fractional
# weights for the two boundary order statistics.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

# Input vector A, read from Matrix Market format under the first argument.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep = "")))
n <- nrow(A)
sortedA <- sort(A)

# Fractional and (ceiling) integer positions of the quartile boundaries.
q1Frac <- n * 0.25
q3Frac <- n * 0.75
q1Idx <- ceiling(q1Frac)
q3Idx <- ceiling(q3Frac)

# Sum over the interior of the interquartile range, then add the partial
# contributions of the two boundary elements.
total <- sum(sortedA[(q1Idx + 1):q3Idx])
total <- total + (q1Idx - q1Frac) * sortedA[q1Idx] - (q3Idx - q3Frac) * sortedA[q3Idx]
iqm <- total / (n * 0.5)

# Write the scalar result as a sparse 1x1 matrix to "<arg3>R".
writeMM(as(as.matrix(iqm), "CsparseMatrix"), paste(args[3], "R", sep = ""))
| 1,156 | apache-2.0 |
88b039d7e4a7451e6c2da71a51936a2a26c7da4d | michalkurka/h2o-3 | h2o-r/tests/testdir_misc/runit_saveMojo.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")

test.saveMojo <- function() {
    ## Train a small GBM on iris, save it through the deprecated
    ## h2o.saveMojo() wrapper, and check the resulting MOJO can be
    ## imported back into H2O.
    data <- as.h2o(iris)
    ## fix: the original vector listed "Sepal.Length" twice; use the
    ## four distinct iris predictors
    features <- c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")
    model <- h2o.gbm(x = features, y = "Species", training_frame = data)
    mojo_path <- h2o.saveMojo(model, path = tempdir()) # saveMojo should delegate to save_mojo
    mojo_model <- h2o.import_mojo(mojo_path)
    expect_true(!is.null(mojo_model)) # mojo should be importable back into H2O
}

doTest("Test delegation of deprecated saveMojo method call to save_mojo", test.saveMojo)
| 643 | apache-2.0 |
18517e96c3280d2406c3d26395fa4699fd052005 | inventionate/TimeSpaceAnalysis | R/get-time-pattern.R | #' Reshape time pattern data.
#'
#' @param data Data frame which contains time pattern data.
#' @param id Vector of questionnaire ids to keep, or "all" (default) for every case.
#' @param reshape_data Whether to reshape the data from wide to long format.
#'   Use this option if your data is column wise concentration (e. g. "mo_seminar").
#'
#' @return Reshaped data frame for further visualization.
#' @export
get_time_pattern <- function(data, id = "all", reshape_data = TRUE) {
  # NAs cannot be used downstream; warn before silently dropping them.
  if (nrow(data) > nrow(na.omit(data))) {
    warning("There are NAs. They will be omitted!")
  }
  tp <- na.omit(data)
  # Optionally restrict to the requested questionnaire ids.
  if (id[[1]] != "all") {
    tp <- filter(tp, questionnaire_id %in% id)
  }
  if (reshape_data) {
    out <- tp %>%
      gather(activity, duration, 3:ncol(tp), -questionnaire_id) %>%
      mutate(activity = str_replace(activity, "_\\d+", "")) %>%
      separate(activity, c("day", "activity"), sep = "_") %>%
      # Abbreviated German weekday prefix -> day number (Monday = 1, ...,
      # anything not matched falls through to 7 = Sunday).
      mutate(day = ifelse(day == "mo", 1,
                   ifelse(day == "di", 2,
                   ifelse(day == "mi", 3,
                   ifelse(day == "do", 4,
                   ifelse(day == "fr", 5,
                   ifelse(day == "sa", 6, 7))))))) %>%
      mutate(activity = fct_recode(
        str_to_title(activity),
        "Lehrveranstaltungen" = "Veranstaltungen")) %>%
      mutate(activity = fct_relevel(activity, "Lehrveranstaltungen", "Zwischenzeit", "Selbststudium", "Arbeitszeit", "Fahrzeit", "Freizeit", "Schlafen")) %>%
      # Per person and day: share of each activity in the day's total time.
      group_by(questionnaire_id, day) %>%
      mutate(prop_duration = duration / sum(duration)) %>%
      arrange(questionnaire_id, day, desc(activity)) %>%
      ungroup()
  } else {
    out <- tp %>%
      ungroup() %>%
      # Full German weekday name -> day number.
      mutate(day = if_else(day == "Montag", 1,
                   if_else(day == "Dienstag", 2,
                   if_else(day == "Mittwoch", 3,
                   if_else(day == "Donnerstag", 4,
                   if_else(day == "Freitag", 5,
                   if_else(day == "Samstag", 6, 7)))))))
  }
  out
}
| 1,871 | gpl-3.0 |
50215969fafa492e95df5dd076dbc223273a65a1 | paulhendricks/plumber | tests/testthat/test-preempt.R | test_that("preempts work", {
# Each endpoint parsed from the plumber file should carry its own @preempt.
router <- PlumberRouter$new("files/preempt.R")
expect_equal(length(router$endpoints), 3)
for (fun_name in c("testFun", "testFun2", "testFun3")) {
  endpoint <- router$endpoints[[fun_name]][[1]]
  expect_equal(endpoint$preempt, fun_name)
}
})

# Malformed @preempt annotations must abort router construction.
test_that("Redundant preempts fail", {
  expect_error(PlumberRouter$new("files/preempt-redundant.R"), regexp = "Multiple @preempts")
})

test_that("Empty preempts fail", {
  expect_error(PlumberRouter$new("files/preempt-empty.R"), regexp = "No @preempt specified")
})

test_that("Non-existant preempts fail", {
  expect_error(PlumberRouter$new("files/preempt-nonexistent.R"), regexp = "The given @preempt")
})
| 749 | mit |
88b039d7e4a7451e6c2da71a51936a2a26c7da4d | h2oai/h2o-3 | h2o-r/tests/testdir_misc/runit_saveMojo.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")

# Regression test: the deprecated h2o.saveMojo() must delegate to
# h2o.save_mojo(), producing a MOJO file that can be re-imported into H2O.
test.saveMojo <- function() {
  data <- as.h2o(iris)
  # Fix: "Sepal.Length" was accidentally listed twice in the original vector.
  features <- c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")
  model <- h2o.gbm(x = features, y = "Species", training_frame = data)
  mojo_path <- h2o.saveMojo(model, path = tempdir()) # saveMojo should delegate to save_mojo
  mojo_model <- h2o.import_mojo(mojo_path)
  expect_true(!is.null(mojo_model)) # mojo should be importable back into H2O
}

doTest("Test delegation of deprecated saveMojo method call to save_mojo", test.saveMojo)
| 643 | apache-2.0 |
50215969fafa492e95df5dd076dbc223273a65a1 | gustavobio/plumber | tests/testthat/test-preempt.R | test_that("preempts work", {
# Each endpoint parsed from the plumber file should carry its own @preempt.
router <- PlumberRouter$new("files/preempt.R")
expect_equal(length(router$endpoints), 3)
for (fun_name in c("testFun", "testFun2", "testFun3")) {
  endpoint <- router$endpoints[[fun_name]][[1]]
  expect_equal(endpoint$preempt, fun_name)
}
})

# Malformed @preempt annotations must abort router construction.
test_that("Redundant preempts fail", {
  expect_error(PlumberRouter$new("files/preempt-redundant.R"), regexp = "Multiple @preempts")
})

test_that("Empty preempts fail", {
  expect_error(PlumberRouter$new("files/preempt-empty.R"), regexp = "No @preempt specified")
})

test_that("Non-existant preempts fail", {
  expect_error(PlumberRouter$new("files/preempt-nonexistent.R"), regexp = "The given @preempt")
})
| 749 | mit |
e61390f372e59386c1bc3c832b75d67288027da3 | kevinykuo/sparklyr | R/ml_classification_random_forest_classifier.R | #' @rdname ml_random_forest
#' @template roxlate-ml-probabilistic-classifier-params
#' @export
ml_random_forest_classifier <- function(
  x,
  formula = NULL,
  num_trees = 20L,
  subsampling_rate = 1,
  max_depth = 5L,
  min_instances_per_node = 1L,
  feature_subset_strategy = "auto",
  impurity = "gini",
  min_info_gain = 0,
  max_bins = 32L,
  seed = NULL,
  thresholds = NULL,
  checkpoint_interval = 10L,
  cache_node_ids = FALSE,
  max_memory_in_mb = 256L,
  features_col = "features",
  label_col = "label",
  prediction_col = "prediction",
  probability_col = "probability",
  raw_prediction_col = "rawPrediction",
  uid = random_string("random_forest_classifier_"), ...
) {
  # S3 generic for Spark ML's RandomForestClassifier. Dispatches on class(x):
  # a spark_connection yields an estimator object, an ml_pipeline gets the
  # estimator appended as a stage, and a tbl_spark is fitted directly (see
  # the methods below). Hyperparameter defaults mirror Spark's own defaults.
  UseMethod("ml_random_forest_classifier")
}
#' @export
ml_random_forest_classifier.spark_connection <- function(
  x,
  formula = NULL,
  num_trees = 20L,
  subsampling_rate = 1,
  max_depth = 5L,
  min_instances_per_node = 1L,
  feature_subset_strategy = "auto",
  impurity = "gini",
  min_info_gain = 0,
  max_bins = 32L,
  seed = NULL,
  thresholds = NULL,
  checkpoint_interval = 10L,
  cache_node_ids = FALSE,
  max_memory_in_mb = 256L,
  features_col = "features",
  label_col = "label",
  prediction_col = "prediction",
  probability_col = "probability",
  raw_prediction_col = "rawPrediction",
  uid = random_string("random_forest_classifier_"), ...) {
  # Validate/coerce the caller's arguments and translate deprecated names
  # (rules are supplied by ml_validator_random_forest_classifier below).
  ml_ratify_args()
  class <- "org.apache.spark.ml.classification.RandomForestClassifier"
  # Instantiate the JVM-side estimator, then copy each hyperparameter over
  # via the corresponding Java setter.
  jobj <- ml_new_classifier(
    x, class, uid, features_col, label_col,
    prediction_col, probability_col, raw_prediction_col
  ) %>%
    invoke("setCheckpointInterval", checkpoint_interval) %>%
    invoke("setMaxBins", max_bins) %>%
    invoke("setMaxDepth", max_depth) %>%
    invoke("setMinInfoGain", min_info_gain) %>%
    invoke("setMinInstancesPerNode", min_instances_per_node) %>%
    invoke("setCacheNodeIds", cache_node_ids) %>%
    invoke("setMaxMemoryInMB", max_memory_in_mb) %>%
    invoke("setNumTrees", num_trees) %>%
    invoke("setSubsamplingRate", subsampling_rate) %>%
    invoke("setFeatureSubsetStrategy", feature_subset_strategy) %>%
    invoke("setImpurity", impurity)
  # thresholds and seed default to NULL (= keep Spark's behavior), so they
  # are only set on the JVM object when explicitly supplied.
  if(!rlang::is_null(thresholds))
    jobj <- invoke(jobj, "setThresholds", thresholds)
  if (!rlang::is_null(seed))
    jobj <- invoke(jobj, "setSeed", seed)
  # Wrap the JVM reference in the R-side estimator class.
  new_ml_random_forest_classifier(jobj)
}
#' @export
ml_random_forest_classifier.ml_pipeline <- function(
  x,
  formula = NULL,
  num_trees = 20L,
  subsampling_rate = 1,
  max_depth = 5L,
  min_instances_per_node = 1L,
  feature_subset_strategy = "auto",
  impurity = "gini",
  min_info_gain = 0,
  max_bins = 32L,
  seed = NULL,
  thresholds = NULL,
  checkpoint_interval = 10L,
  cache_node_ids = FALSE,
  max_memory_in_mb = 256L,
  features_col = "features",
  label_col = "label",
  prediction_col = "prediction",
  probability_col = "probability",
  raw_prediction_col = "rawPrediction",
  uid = random_string("random_forest_classifier_"), ...) {
  # Build the estimator from this call's (possibly modified) arguments and
  # append it as a stage of the pipeline, returning the updated ml_pipeline.
  transformer <- ml_new_stage_modified_args()
  ml_add_stage(x, transformer)
}
#' @export
ml_random_forest_classifier.tbl_spark <- function(
  x,
  formula = NULL,
  num_trees = 20L,
  subsampling_rate = 1,
  max_depth = 5L,
  min_instances_per_node = 1L,
  feature_subset_strategy = "auto",
  impurity = "gini",
  min_info_gain = 0,
  max_bins = 32L,
  seed = NULL,
  thresholds = NULL,
  checkpoint_interval = 10L,
  cache_node_ids = FALSE,
  max_memory_in_mb = 256L,
  features_col = "features",
  label_col = "label",
  prediction_col = "prediction",
  probability_col = "probability",
  raw_prediction_col = "rawPrediction",
  uid = random_string("random_forest_classifier_"),
  response = NULL,
  features = NULL,
  predicted_label_col = "predicted_label", ...) {
  # Build the estimator from this call's arguments.
  predictor <- ml_new_stage_modified_args()
  # NOTE(review): appears to derive/normalize `formula` from `response` and
  # `features` in this frame -- confirm against sparklyr internals.
  ml_formula_transformation()
  if (is.null(formula)) {
    # No formula: fit the estimator directly on the supplied tbl_spark.
    predictor %>%
      ml_fit(x)
  } else {
    # Formula interface: delegate to ml_generate_ml_model() to produce a
    # high-level ml_model wrapper (constructor passed as an argument).
    ml_generate_ml_model(
      x, predictor, formula, features_col, label_col,
      "classification", new_ml_model_random_forest_classification,
      predicted_label_col
    )
  }
}
# Validator
# Coerces/validates each hyperparameter and translates legacy (dot-named)
# arguments to their current names before the estimator is constructed.
ml_validator_random_forest_classifier <- function(args, nms) {
  # Legacy argument name -> current name mapping (shared tree params plus
  # the random-forest specific ones).
  old_new_mapping <- c(
    ml_tree_param_mapping(),
    list(
      sample.rate = "subsampling_rate",
      num.trees = "num_trees",
      col.sample.rate = "feature_subset_strategy"
    ))
  args %>%
    ml_validate_decision_tree_args() %>%
    # Scalar coercions; impurity is restricted to the values Spark accepts
    # for classification.
    ml_validate_args({
      if (!rlang::is_null(thresholds))
        thresholds <- lapply(thresholds, ensure_scalar_double)
      num_trees <- ensure_scalar_integer(num_trees)
      subsampling_rate <- ensure_scalar_double(subsampling_rate)
      feature_subset_strategy <- ensure_scalar_character(feature_subset_strategy)
      impurity <- rlang::arg_match(impurity, c("gini", "entropy"))
    }, old_new_mapping) %>%
    ml_extract_args(nms, old_new_mapping)
}
# Constructors

# Wrap a JVM RandomForestClassifier reference as an R-side predictor object.
new_ml_random_forest_classifier <- function(jobj) {
  new_ml_predictor(jobj, subclass = "ml_random_forest_classifier")
}
# Wrap a fitted JVM RandomForestClassificationModel, materializing its summary
# fields on the R side. try_null() tolerates accessors that may be unavailable
# on some Spark versions.
new_ml_random_forest_classification_model <- function(jobj) {
  new_ml_prediction_model(
    jobj,
    feature_importances = try_null(read_spark_vector(jobj, "featureImportances")),
    num_trees = invoke(jobj, "numTrees"),
    num_classes = try_null(invoke(jobj, "numClasses")),
    num_features = invoke(jobj, "numFeatures"),
    total_num_nodes = invoke(jobj, "totalNumNodes"),
    tree_weights = invoke(jobj, "treeWeights"),
    # Fix: Spark's RandomForestClassificationModel.trees() returns
    # DecisionTreeClassificationModel instances, so wrap them with the
    # classification constructor (was new_ml_decision_tree_regression_model,
    # which mislabeled the component trees as regression models).
    trees = invoke(jobj, "trees") %>%
      lapply(new_ml_decision_tree_classification_model),
    features_col = invoke(jobj, "getFeaturesCol"),
    prediction_col = invoke(jobj, "getPredictionCol"),
    probability_col = try_null(invoke(jobj, "getProbabilityCol")),
    raw_prediction_col = try_null(invoke(jobj, "getRawPredictionCol")),
    thresholds = try_null(invoke(jobj, "getThresholds")),
    subclass = "ml_random_forest_classification_model")
}
# Build the high-level ml_model wrapper returned by the formula interface of
# ml_random_forest_classifier.tbl_spark().
new_ml_model_random_forest_classification <- function(
  pipeline, pipeline_model, model, dataset, formula, feature_names,
  index_labels, call) {
  new_ml_model_classification(
    pipeline, pipeline_model, model, dataset, formula,
    subclass = "ml_model_random_forest_classification",
    .features = feature_names,
    .index_labels = index_labels,
    .call = call
  )
}
| 6,242 | apache-2.0 |
1d91beb773d6060100fd53ed4e66f6a9aa002e78 | R-Lum/Luminescence | R/install_DevelopmentVersion.R | #' Attempts to install the development version of the 'Luminescence' package
#'
#' This function is a convenient method for installing the development
#' version of the R package 'Luminescence' directly from GitHub.
#'
#' This function uses [Luminescence::github_branches][Luminescence::GitHub-API] to check
#' which development branches of the R package 'Luminescence' are currently
#' available on GitHub. The user is then prompted to choose one of the branches
#' to be installed. It further checks whether the R package 'devtools' is
#' currently installed and available on the system. Finally, it prints R code
#' to the console that the user can copy and paste to the R console in order
#' to install the desired development version of the package.
#'
#'
#' If `force_install=TRUE` the functions checks if 'devtools' is available
#' and then attempts to install the chosen development branch via
#' [devtools::remote-reexports].
#'
#' @param force_install [logical] (*optional*):
#' If `FALSE` (the default) the function produces and prints the required
#' code to the console for the user to run manually afterwards. When `TRUE`
#' and all requirements are fulfilled (see details) this function attempts to install
#' the package itself.
#'
#' @return
#' This function requires user input at the command prompt to choose the
#' desired development branch to be installed. The required R code to install
#' the package is then printed to the console.
#'
#' @examples
#'
#' \dontrun{
#' install_DevelopmentVersion()
#' }
#'
#' @md
#' @export
install_DevelopmentVersion <- function(force_install = FALSE) {
message("\n[install_DevelopmentVersion]\n")
# check which branches are currently available
# see ?github_branches for GitHub API implementation
branches <- github_branches()
index <- NULL
# let user pick which branch he wants to install
while(is.null(index)) {
message(paste0("Which development branch do you want to install? \n",
paste0(" [", 1:length(branches$BRANCH), "]: ", branches$BRANCH, collapse = "\n")))
message("\n [0]: <Exit>")
index <- readline()
if (index == 0)
return(NULL)
if (!index %in% seq_len(length(branches$BRANCH)))
index <- NULL
cat("\n")
}
# select the correct branch
branch <- branches$BRANCH[as.numeric(index)]
if (!force_install) {
message("----\n",
"Are all prerequisites installed? Make sure to have read\n",
"https://github.com/R-Lum/Luminescence/blob/master/README.md\n",
"----\n")
message("Please copy and run the following code in your R command-line:\n")
if (!requireNamespace("devtools", quietly = TRUE))
message("install.packages('devtools')")
message(branches$INSTALL[as.numeric(index)], "\n")
} else {
reply <- NULL
while(is.null(reply)) {
message("Are all prerequisites installed?",
" (https://github.com/R-Lum/Luminescence/blob/master/README.md)\n",
" [n/N]: No\n",
" [y/Y]: Yes\n")
reply <- readline()
if (reply == "n" || reply == "N")
return(NULL)
if (reply != "y" && reply != "Y")
reply <- NULL
}
# check if 'devtools' is available and install if not
if (!requireNamespace("devtools", quietly = TRUE)) {
message("Please install the 'devtools' package first by running the following command:\n",
"install.packages('devtools')")
return(NULL)
}
# detach the 'Luminescence' package
try(detach(name = "package:Luminescence", unload = TRUE, force = TRUE),
silent = TRUE)
# try to unload the dynamic library
dynLibs <- sapply(.dynLibs(), function(x) x[["path"]] )
try(dyn.unload(dynLibs[grep("Luminescence", dynLibs)]), silent = TRUE)
# install the development version
devtools::install_github(paste0("r-lum/luminescence@", branch))
}
}
| 3,914 | gpl-3.0 |
6561bc1238922d8196fc8686e8a9d5e851b1405f | cxxr-devel/cxxr-svn-mirror | src/library/Recommended/survival/R/attrassign.R | # $Id$
# When X is a model matrix, Splus and R have a different format
# for the "assign" attribute
# For instance
# survreg(Surv(time, status) ~ age + sex + factor(ph.ecog), lung)
# R gives the compact form, a vector (0, 1, 2, 3, 3, 3); which can be
# read as "the first column of the X matrix (intercept) goes with none of
# the terms', 'the second column goes with term 1', etc.
# Splus gives a list
# $(Intercept) 1
# $age 2
# $sex 3
# $factor(ph.ecog) 4 5 6
#
# This function creates the Splus style of output from the R style. Several
# of the routines in the package use this, as it is somewhat easier (more
# transparent) to work with.
#
# S3 generic: return the S-PLUS style "assign" list mapping each model term
# to the model-matrix columns it generated (see the explanation above).
attrassign<-function (object, ...) UseMethod("attrassign")
attrassign.lm <- function(object, ...) {
  # Recover the assign list from a fitted lm by delegating to the
  # model-matrix method with the fit's own design matrix and terms.
  design <- model.matrix(object)
  tt <- terms(object)
  attrassign(design, tt)
}
attrassign.default <- function(object, tt, ...) {
  # Convert R's compact "assign" attribute of a model matrix (one integer
  # term code per column) into the S-PLUS style list: one entry per term
  # label holding the indices of the columns that term generated.
  if (!inherits(tt, "terms"))
    stop("need terms object")
  assign_codes <- attr(object, "assign")
  if (is.null(assign_codes))
    stop("argument is not really a model matrix")
  term_labels <- attr(tt, "term.labels")
  # Code 0 marks the intercept column(s) when the model has one.
  if (attr(tt, "intercept") > 0)
    term_labels <- c("(Intercept)", term_labels)
  term_factor <- factor(assign_codes, labels = term_labels)
  split(order(assign_codes), term_factor)
}
| 1,293 | gpl-2.0 |
c567963c6f0ef88d660c35f49ea6dd1b8e571eb2 | EccRiley/Riley | R/Rqstring.R | ## SIMPLE UTIL FOR QUERY BUILDING ##
#' Build an SQL predicate of the form " AND LTRIM(RTRIM(field)) IN ('v1','v2')".
#'
#' When both `substring_side` (e.g. "LEFT"/"RIGHT") and `substring_n` are
#' given, the trimmed field is wrapped as `substring_side(LTRIM(RTRIM(field)), n)`.
#' `operand` is the leading logical connector ("AND" by default); extra `...`
#' strings are appended verbatim by paste0().
#'
#' WARNING: values from `x` are interpolated directly into the SQL string;
#' this is not injection-safe, so only pass trusted/sanitized values.
Rqstring <- function(x, field, substring_side = NULL, substring_n = NULL, operand = "AND", ...) {
    # Deduplicated, quoted value list: 'v1','v2',...
    values <- paste0(unique(as.character(x)), collapse = "','")
    # && (not &): scalar short-circuit test on the two optional arguments.
    if (!is.null(substring_n) && !is.null(substring_side)) {
        qstring <- paste0(" ", operand, " ", substring_side, "(LTRIM(RTRIM(", field, ")), ",
                          substring_n, ") IN ('", values, "')",
                          ...)
    } else {
        qstring <- paste0(" ", operand, " LTRIM(RTRIM(", field, ")) IN ('", values, "')",
                          ...)
    }
    qstring
}
| 637 | agpl-3.0 |
115aa8e422746cce4a9221c3a986ec93e9f95c52 | joseflaviojr/transcriptograma | GeradorDeExperimentos.R |
#
# Copyright (C) 2015-2016 José Flávio de Souza Dias Júnior
#
# This file is part of Transcriptograma - <http://www.joseflavio.com/transcriptograma/>.
#
# Transcriptograma is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Transcriptograma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Transcriptograma. If not, see <http://www.gnu.org/licenses/>.
#
#
# Direitos Autorais Reservados (C) 2015-2016 José Flávio de Souza Dias Júnior
#
# Este arquivo é parte de Transcriptograma - <http://www.joseflavio.com/transcriptograma/>.
#
# Transcriptograma é software livre: você pode redistribuí-lo e/ou modificá-lo
# sob os termos da Licença Pública Menos Geral GNU conforme publicada pela
# Free Software Foundation, tanto a versão 3 da Licença, como
# (a seu critério) qualquer versão posterior.
#
# Transcriptograma é distribuído na expectativa de que seja útil,
# porém, SEM NENHUMA GARANTIA; nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Consulte a
# Licença Pública Menos Geral do GNU para mais detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Menos Geral do GNU
# junto com Transcriptograma. Se não, veja <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------#
# Legenda:
# RAL-O = Rede Aleatória Orientada
# RAL-N = Rede Aleatória Não Orientada
# RLE-O = Rede Livre de Escala (Barabasi) Orientada
# RLE-N = Rede Livre de Escala (Barabasi) Não Orientada
# RPM-O = Rede Pequeno Mundo Orientada
# RPM-N = Rede Pequeno Mundo Não Orientada
# ARQ-O = Rede orientada real, especificada em arquivo externo
# ARQ-N = Rede não orientada real, especificada em arquivo externo
# [III] XXX-X(NNNN)(GGG)(PI-PE) T = III: identificação; NNNN: vértices; XXX-X: tipo da rede;
# GGG: grupos de mesmo tamanho; xGG: grupos de diferentes tamanhos;
# PI: probabilidade de ligação interna de grupo; PE: probabilidade externa
# T: comentário opcional
#------------------------------------------------------------------#
library(igraph)
#------------------------------------------------------------------#
# Build a simple random graph whose vertices are split into `grupos` equally
# sized communities: every cell first receives an edge with probability
# probExterna, then cells inside the same community are overwritten with
# probability probInterna. Self-loops are removed; optionally the vertex
# order is shuffled. (RNG call order matches the historical implementation.)
RAL <- function(vertices, orientado = FALSE, grupos = 3, probInterna = 0.3, probExterna = 0.02, embaralhar = TRUE) {
  adj <- matrix(0L, vertices, vertices)
  # Background (inter-community) edges.
  adj <- apply(adj, c(1, 2), function(cell) as.integer(runif(1) <= probExterna))
  group_size <- trunc(vertices / grupos)
  for (g in 1:grupos) {
    first <- (g - 1) * group_size + 1
    # The last community absorbs any remainder vertices.
    last <- if (g == grupos) vertices else g * group_size
    for (i in first:last) {
      for (j in first:last) {
        adj[i, j] <- as.integer(runif(1) <= probInterna)
      }
    }
  }
  # No self-loops.
  for (i in 1:vertices) adj[i, i] <- 0L
  if (embaralhar) {
    perm <- sample.int(vertices)
    adj <- adj[perm, perm]
  }
  graph.adjacency(adj, mode = if (orientado) "directed" else "undirected")
}
#------------------------------------------------------------------#
# Build a simple random graph whose vertices are split into communities of
# the explicit sizes given in `grupos` (a vector): probInterna applies inside
# a community, probExterna everywhere else. Self-loops are removed; the
# vertex order is optionally shuffled. (RNG call order is preserved.)
RAX <- function(vertices, orientado = FALSE, grupos = c(10, 10, 10), probInterna = 0.3, probExterna = 0.02, embaralhar = TRUE) {
  adj <- matrix(0L, vertices, vertices)
  # Background (inter-community) edges.
  adj <- apply(adj, c(1, 2), function(cell) as.integer(runif(1) <= probExterna))
  start <- 1
  for (g in 1:length(grupos)) {
    end <- start + grupos[g] - 1
    for (i in start:end) {
      for (j in start:end) {
        adj[i, j] <- as.integer(runif(1) <= probInterna)
      }
    }
    start <- end + 1
  }
  # No self-loops.
  for (i in 1:vertices) adj[i, i] <- 0L
  if (embaralhar) {
    perm <- sample.int(vertices)
    adj <- adj[perm, perm]
  }
  graph.adjacency(adj, mode = if (orientado) "directed" else "undirected")
}
#------------------------------------------------------------------#
# Build a scale-free graph following the Barabasi-Albert model, optionally
# shuffling the vertex order afterwards.
RLE <- function(vertices, orientado = FALSE, poder = 1, embaralhar = TRUE) {
  g <- barabasi.game(vertices, directed = orientado, power = poder)
  if (embaralhar) {
    perm <- sample.int(vertices)
    shuffled <- as.matrix(g[])[perm, perm]
    g <- graph.adjacency(shuffled, mode = if (orientado) "directed" else "undirected")
  }
  g
}
#------------------------------------------------------------------#
# Build a small-world style graph: start from a ring lattice and rewire each
# edge with probability 0.5; optionally shuffle the vertex order afterwards.
RPM <- function(vertices, orientado = FALSE, embaralhar = TRUE) {
  g <- graph.ring(vertices, directed = orientado, mutual = FALSE, circular = TRUE)
  g <- rewire.edges(g, prob = 0.5, loops = FALSE, multiple = FALSE)
  if (embaralhar) {
    perm <- sample.int(vertices)
    shuffled <- as.matrix(g[])[perm, perm]
    g <- graph.adjacency(shuffled, mode = if (orientado) "directed" else "undirected")
  }
  g
}
#------------------------------------------------------------------#
# Load a simple graph whose edge list (origin, destination) is stored in an
# external whitespace-separated file.
ARQ <- function(arquivo, orientado = FALSE) {
  edges <- read.table(arquivo)
  colnames(edges) <- c("Origem", "Destino")
  graph.data.frame(edges, directed = orientado)
}
#------------------------------------------------------------------#
# Persist a graph's adjacency matrix to an external file as headerless CSV.
salvar <- function(grafo, arquivo) {
  adj <- as.matrix(grafo[])
  write.table(adj, file = arquivo, sep = ",", col.names = FALSE, row.names = FALSE)
}
#------------------------------------------------------------------#
# Generate the fixed batch of benchmark networks nos. 001-045 and write each
# one to a CSV adjacency matrix via salvar(). Every experiment calls
# set.seed() first, so each file is reproducible independently of the others.
# File-name pattern (see the legend at the top of this file):
#   [id] TYPE-{N|O}(vertices)(groups)(p_internal-p_external).csv
# NOTE(review): experiments [026]/[027] and [036]/[037] use identical seeds
# and parameters, so the generated graphs are identical -- possibly they were
# intended to use different seeds.
gerarExperimentos01 <- function(){
  # Random networks (RAL: equal-size groups; RAX: explicit group sizes)
  set.seed(1)
  grafo <- RAL( 100, grupos=2, probInterna=0.4, probExterna=0.02, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[001] RAL-N(0100)(002)(0,400-0,020).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=2, probInterna=0.4, probExterna=0.04, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[002] RAL-N(0100)(002)(0,400-0,040).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=2, probInterna=0.4, probExterna=0.08, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[003] RAL-N(0100)(002)(0,400-0,080).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=2, probInterna=0.6, probExterna=0.02, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[004] RAL-N(0100)(002)(0,600-0,020).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=2, probInterna=0.4, probExterna=0.02, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[005] RAL-O(0100)(002)(0,400-0,020).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=2, probInterna=0.4, probExterna=0.08, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[006] RAL-O(0100)(002)(0,400-0,080).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=3, probInterna=0.4, probExterna=0.02, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[007] RAL-N(0100)(003)(0,400-0,020).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=3, probInterna=0.4, probExterna=0.04, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[008] RAL-N(0100)(003)(0,400-0,040).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=3, probInterna=0.4, probExterna=0.08, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[009] RAL-N(0100)(003)(0,400-0,080).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=3, probInterna=0.6, probExterna=0.02, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[010] RAL-N(0100)(003)(0,600-0,020).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=3, probInterna=0.4, probExterna=0.02, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[011] RAL-O(0100)(003)(0,400-0,020).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=3, probInterna=0.4, probExterna=0.08, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[012] RAL-O(0100)(003)(0,400-0,080).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=10, probInterna=0.5, probExterna=0.02, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[013] RAL-N(0100)(010)(0,500-0,020).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=10, probInterna=0.5, probExterna=0.04, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[014] RAL-N(0100)(010)(0,500-0,040).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=10, probInterna=0.5, probExterna=0.08, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[015] RAL-N(0100)(010)(0,500-0,080).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=10, probInterna=0.5, probExterna=0.02, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[016] RAL-O(0100)(010)(0,500-0,020).csv" )
  set.seed(1)
  grafo <- RAL( 100, grupos=10, probInterna=0.5, probExterna=0.08, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[017] RAL-O(0100)(010)(0,500-0,080).csv" )
  set.seed(1)
  grafo <- RAX( 100, grupos=c(10,20,35,5,10,20), probInterna=0.5, probExterna=0.01, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[018] RAL-N(0100)(x06)(0,500-0,010).csv" )
  set.seed(1)
  grafo <- RAX( 100, grupos=c(10,20,7,10,20,8,5,20), probInterna=0.4, probExterna=0.01, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[019] RAL-N(0100)(x08)(0,400-0,010).csv" )
  set.seed(1)
  grafo <- RAX( 100, grupos=c(10,20,7,10,20,8,5,20), probInterna=0.6, probExterna=0.01, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[020] RAL-O(0100)(x08)(0,600-0,010).csv" )
  set.seed(1)
  grafo <- RAL( 200, grupos=25, probInterna=0.5, probExterna=0.01, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[021] RAL-N(0200)(025)(0,500-0,010).csv" )
  set.seed(1)
  grafo <- RAL( 300, grupos=25, probInterna=0.5, probExterna=0.01, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[022] RAL-N(0300)(025)(0,500-0,010).csv" )
  set.seed(1)
  grafo <- RAL( 400, grupos=25, probInterna=0.5, probExterna=0.01, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[023] RAL-N(0400)(025)(0,500-0,010).csv" )
  set.seed(1)
  grafo <- RAL( 500, grupos=25, probInterna=0.5, probExterna=0.01, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[024] RAL-N(0500)(025)(0,500-0,010).csv" )
  set.seed(1)
  grafo <- RAL( 1000, grupos=25, probInterna=0.5, probExterna=0.01, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[025] RAL-N(1000)(025)(0,500-0,010).csv" )
  # Scale-free (Barabasi) networks
  set.seed(1)
  grafo <- RLE( 100, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[026] RLE-N(0100).csv" )
  set.seed(1)
  grafo <- RLE( 100, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[027] RLE-N(0100).csv" )
  set.seed(1)
  grafo <- RLE( 100, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[028] RLE-O(0100).csv" )
  set.seed(1)
  grafo <- RLE( 200, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[029] RLE-N(0200).csv" )
  set.seed(1)
  grafo <- RLE( 300, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[030] RLE-N(0300).csv" )
  set.seed(1)
  grafo <- RLE( 400, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[031] RLE-N(0400).csv" )
  set.seed(1)
  grafo <- RLE( 400, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[032] RLE-O(0400).csv" )
  set.seed(1)
  grafo <- RLE( 500, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[033] RLE-N(0500).csv" )
  set.seed(1)
  grafo <- RLE( 500, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[034] RLE-O(0500).csv" )
  set.seed(1)
  grafo <- RLE( 1000, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[035] RLE-N(1000).csv" )
  # Small-world networks
  set.seed(1)
  grafo <- RPM( 100, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[036] RPM-N(0100).csv" )
  set.seed(1)
  grafo <- RPM( 100, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[037] RPM-N(0100).csv" )
  set.seed(1)
  grafo <- RPM( 100, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[038] RPM-O(0100).csv" )
  set.seed(1)
  grafo <- RPM( 200, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[039] RPM-N(0200).csv" )
  set.seed(1)
  grafo <- RPM( 300, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[040] RPM-N(0300).csv" )
  set.seed(1)
  grafo <- RPM( 400, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[041] RPM-N(0400).csv" )
  set.seed(1)
  grafo <- RPM( 400, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[042] RPM-O(0400).csv" )
  set.seed(1)
  grafo <- RPM( 500, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[043] RPM-N(0500).csv" )
  set.seed(1)
  grafo <- RPM( 500, orientado=TRUE, embaralhar=TRUE )
  salvar( grafo, "[044] RPM-O(0500).csv" )
  set.seed(1)
  grafo <- RPM( 1000, orientado=FALSE, embaralhar=TRUE )
  salvar( grafo, "[045] RPM-N(1000).csv" )
}
#------------------------------------------------------------------#
# Generate two scale-free (Barabasi) benchmark networks for every combination
# of size (100..1000 in steps of 100) and preferential-attachment power
# (0.5, 1.0), reseeding the RNG with a fresh seed before each generation so
# the two replicates per combination differ.
gerarExperimentos02 <- function() {
  exp_id <- 1
  seed <- 1
  for (tamanho in seq(100, 1000, by = 100)) {
    for (poder in seq(0.5, 1, by = 0.5)) {
      for (rep in 1:2) {
        set.seed(seed)
        grafo <- RLE(tamanho, orientado = FALSE, poder = poder, embaralhar = TRUE)
        salvar(grafo, paste("[", formatC(exp_id, width = 3, flag = "0"),
                            "] RLE-N(", formatC(tamanho, width = 4, flag = "0"),
                            ").csv", sep = ""))
        exp_id <- exp_id + 1
        seed <- seed + 1
      }
    }
  }
}
#------------------------------------------------------------------# | 13,538 | lgpl-3.0 |
970841604c2899785e6e38fefc3af939bce5b744 | ericwol/mia | R/Case_B_ROI.R | #' Sarcoma tumor region of interest in the pelvis.
#' This scan was acquired at the University of Washington in Seattle, USA.
#' Uptake values are expressed on the SUV scale for this test dataset
#' (clinical SUVmax = 7.8). The ellipsoidal ROI was hand-drawn by an expert
#' at the UW School of Medicine.
#' This is frame 0, gate 0, i.e. one static FDG-PET scan.
#' @format An array with 5 columns as follows:
#' \describe{
#'   \item{Value}{Uptake value}
#'   \item{Weight}{Voxel weight (dummy values set to 1)}
#'   \item{X (mm)}{x-coordinate in the scanner referential}
#'   \item{Y (mm)}{y-coordinate in the scanner referential}
#'   \item{Z (mm)}{z-coordinate in the scanner referential}
#' }
"Case_B_ROI"
| 708 | gpl-3.0 |
5edeae531cd9f370e0bab20e5d5c5061a8dd6fc4 | lawinslow/streamMetabolizer | R/calc_is_daytime.R | #' Determines if specified datetime is during the daytime
#' Returns T/F indicating whether a datetime occurs during the daytime (sunlight hours)
#'
#' @param datetimes Vector of dates as \code{POSIXct} or \code{POSIXlt} (see \code{\link{DateTimeClasses}}) format, but in SOLAR time
#' @param lat Single latitude value of site. South should be negative, north positive
#'
#' @return a boolean vector of same length as \code{datetimes}
#'
#' @author
#' Luke A. Winslow
#' @seealso
#' \link{calc_sun_rise_set}
#' @importFrom LakeMetabolizer is.day
#' @examples
#' calc_is_daytime(datetimes=as.POSIXlt('2013-03-31'), lat=40.75)
#' @export
calc_is_daytime <- function(datetimes, lat) {
  # Thin wrapper: the actual daylight computation lives in
  # LakeMetabolizer::is.day(). Per the roxygen docs above, `datetimes`
  # must already be expressed in solar time.
  LakeMetabolizer::is.day(datetimes, lat)
}
fe07a1f7382dd831de7b8d4d5835b9490cfad429 | LARAsuite/lara-R | laraEvalVis/R/barplot3d.R | #'_____________________________________________________________________________
#
# PROJECT: LARA
# CLASS:
# FILENAME: barplot3d.R
#
# CATEGORY:
#
# AUTHOR: mark doerr
# EMAIL: mark@ismeralda.org
#
# VERSION: 0.1.0
#
# CREATION_DATE: 2015/09/08
# LASTMODIFICATION_DATE: 2015/09/08
#
# BRIEF_DESCRIPTION: Library for plotting microtiter plate data in 3D representation
# DETAILED_DESCRIPTION:
# HISTORY:
#
# ____________________________________________________________________________
#
# Copyright:
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This file is provided "AS IS" with NO WARRANTY OF ANY KIND,
# INCLUDING THE WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE.
#
# For further Information see COPYING file that comes with this distribution.
#_______________________________________________________________________________
#' barplot3d
#'
#' Plotting 3D Bars
#' @description plots data from a (heights x rows x columns ) array
#' @param heights_arr Array of bar heights (heights x rows x columns).
#' @keywords plate readers
#' @export
#' @examples
#' barplot3d()
#'
#' @note todo:
#'
barplot3d <- function(heights_arr=NULL, barcode = "0000", filename = "_3Dbarplot",
column_colours = 'bluered', numCol=20, zmax=1.0, nticks=4, moreLight=FALSE, axisBack=TRUE,
theta=-60, phi=15, scale=1.0, char_scale=1.1, width=700, height=900, transp=1.0,
col_lab=NULL, row_lab=NULL, z_lab=NULL,
barWidth=0.5, barDistance=0.8,
plotBg='white',
outputFormat='screen')
{
require(rgl)
# **** RGL settings
#rgl.open()
rgl.init()
open3d()
par3d(windowRect=c(0, 0, width, height))
save <- par3d(skipRedraw=TRUE)
on.exit(par3d(save))
# Choosing a background
rgl.bg(color=plotBg) #rgl.bg(color="gray") #rgl.bg(col="#cccccc") lightgrey
if (isTRUE(moreLight)) rgl.light() else {
# removing reflections
rgl.pop("lights")
light3d(specular="black")
}
title3d('','', ylab=z_lab, outher=TRUE) #title3d('','',xlab='xlab',ylab='Abs/AU',zlab='ylab')
# alpha is transparency
# **** scaling hights array
heights_arr <- heights_arr * scale
# print(heights_arr)
# **** selecting color scheme
# factor to spread colours over the whole range and +1 to prevent indexing by 0
num_col <- ( max(heights_arr, na.rm=TRUE) * numCol ) + 1
switch(column_colours,
bluered={column_colours <-colorRampPalette(c("blue","green","yellow","red"))(num_col)},
rainbow={column_colours <-rainbow(num_col)},
green={column_colours <-colorRampPalette(c("yellow","green","darkgreen"))(num_col)},
lightgreen={column_colours <-colorRampPalette(c("yellow","lightgreen","green"))(num_col)},
red={column_colours <-colorRampPalette(c("yellow","red","darkred"))(num_col)},
lightred={column_colours <-colorRampPalette(c("yellow","orange","red"))(num_col)},
orange={column_colours <-colorRampPalette(c("yellow","red","darkred"))(num_col)},
blue={column_colours <-colorRampPalette(c("lightblue","blue","darkblue"))(num_col)},
grey={column_colours <-colorRampPalette(c("white", "lightgrey","grey"))(num_col)},
lightgrey={column_colours <-colorRampPalette(c("white", "lightgrey"))(num_col)},
violet={column_colours <-colorRampPalette(c("lightgrey","grey", "violet"))(num_col)},
heat={column_colours <- rev(heat.colors(num_col))}
)
nrows <- as.integer(dim(heights_arr)[1])
ncols <- as.integer(dim(heights_arr)[2])
nheights <- as.integer(dim(heights_arr)[3])
printDebug("barplolt3D(DEBUG): n nheights: %s - cols :%s - rows: %s", nheights, ncols, nrows )
# spawning a 3D matrix for corner coordinates of the cuboids (default:12x8x2+n*z)
ncubes = nrows * ncols
x_distance <- barWidth + barDistance
y_distance <- barWidth + barDistance
# lowering dimension to matrix
##cube_matr <- matrix(heights_arr, c(nheights, ncubes))
## adding y-coordinates
##cube_matr <- rbind(rep(seq(0,(ncols-1)*y_distance, y_distance), nrows), cube_matr )
## adding x-coordinates
##cube_matr <- rbind(rep(seq((nrows-1)*x_distance,0, -x_distance), each=ncols) , cube_matr )
# generating y-coordinates
y_vec <- rep(seq(0,(ncols-1)*y_distance, y_distance), each=nrows)
# generating x-coordinates
x_vec <- rep(seq((nrows-1)*x_distance,0, -x_distance), ncols)
# combining everything to one array (=stacking the arrays)
cube_arr <- array(c(x_vec, y_vec, heights_arr), dim=c(nrows, ncols , nheights+2))
#print(cube_arr)
cubeFactory <-function(cube_vec, cube_width, curr_colour)
{
# geometry of the cube
xw <- cube_width
yw <- cube_width
# lower left corner of cube and height differences
x1 <- cube_vec[1]
y1 <- cube_vec[2]
z0 <- cube_vec[3]
z1 <- cube_vec[4]
# cube face color
curr_colour <- column_colours[(z1* numCol )+1]
right_side <- matrix(c(x1,x1,x1,x1,z0,z0,z1,z1,y1,y1+yw,y1+yw,y1), 4,3)
front_side <- matrix(c(x1,x1+xw,x1+xw,x1,z0,z0,z1,z1,y1,y1,y1,y1), 4,3)
# transformation matrices
y_tr <- matrix(c(0,0,0,0,0,0,0,0,yw,yw,yw,yw), 4,3)
x_tr <- matrix(c(xw,xw,xw,xw,0,0,0,0,0,0,0,0), 4,3)
# transformation of two cube plains (front side, rigth side) in both directions
left_side <- right_side + x_tr
back_side <- front_side + y_tr
rgl.quads(rbind(right_side,left_side,front_side,back_side), col=curr_colour)
}
# helper function for preparation of the cuboid colums
axis <- TRUE
plotCuboidColumn <-function(cube_vec=NULL, cube_width=0, column_colours=NULL, numCol=20)
{
#print(cube_vec)
# cube geometry
xw <- cube_width
yw <- cube_width
# lower left corner and height differences
x1 <- cube_vec[1]
y1 <- cube_vec[2]
z0 <- cube_vec[3] # cuboid bottom height
z1 <- cube_vec[length(cube_vec)] # length(cube_vec) = index of cuboid lid hight
# checking Z values and melting the coordinates to one matrix containing all cube coordinates
checknStackZ <- function(z_idx)
{
if(cube_vec[z_idx] < cube_vec[z_idx+1]) return( c(x1, y1, cube_vec[z_idx], cube_vec[z_idx+1]) ) else {
printError("WARNING(barplot3d): z cube coordinate is bigger then z+1 coordinate, please check cube stacking order for z=%s!", z_idx )
return( c(x1, y1, cube_vec[z_idx], cube_vec[z_idx]) )
}
}
num_heights <- length(cube_vec) - 1
curr_colour <- column_colours[(z1*numCol)+1]
# plotting stack of cubes
invisible(apply( sapply((3:num_heights), checknStackZ ), 2, cubeFactory, cube_width, curr_colour ))
# plotting bottom and lid
bottom <- matrix(c(x1,x1+xw,x1+xw,x1,0,0,0,0,y1,y1,y1+yw,y1+yw), 4,3)
z_tr <- matrix(c(0,0,0,0,z1,z1,z1,z1,0,0,0,0), 4,3)
#curr_colour <- column_colours[(z1*numCol)+1]
rgl.quads(bottom, col=curr_colour)
# and now the lid
rgl.quads(bottom+z_tr, col=curr_colour)
#plot only axis for first plot
if (axis == TRUE) {axis <<- FALSE }
}
# plotting the cubes
apply(cube_arr, c(1,2), plotCuboidColumn, barWidth, column_colours )
rgl.points(0,zmax,0)
# adding the axes and grid
#axes3d(c('x','y','z'), expand=1.00, colour="black")
#pos=c(0, -0.8, -0.8), eax=0.3
if(isTRUE(axisBack)){
grid3d(side=c('x+-'), col="gray")
axis3d(edge=c("y+-"), color="black", cex=char_scale, nticks=nticks) # axis in back
} else {
grid3d(side=c('x','z'), col="gray")
axes3d(edge=c('y'), expand=1.00, color="black", cex=char_scale, nticks=nticks) # axis in front
}
# grid3d(col="gray")
# well coordinates
row.names <- LETTERS[1:nrows]
col.names <- as.character(1:ncols)
text3d(x=seq((nrows-1)*x_distance,0, -x_distance)+0.2, y=-0.2, z=-0.5, texts=row.names ,col="black", cex=char_scale)
text3d(x=-0.6, y=-0.2, z=seq(0,(ncols-1)*y_distance, y_distance), texts=col.names ,col="black", cex=char_scale)
##par3d(userMatrix=um, FOV=19.28572, c(0,0,1200,800)) # projection !
par3d( FOV=19.28572)
view3d(theta = -115, phi=25)
switch(outputFormat,
screen={},
webGL={ out_filename_3D <- paste(barcode, "_", filename, ".html", sep="" )
cat("webGL file: ", out_filename_3D,"\n")
cat("webGL file path: ", system.file(file.path("WebGL", "template.html"), package = "rgl") ,"\n")
writeWebGL( dir="webGL", filename=file.path("webGL", out_filename_3D),
template = system.file(file.path("WebGL", "template.html"), package = "rgl"),
width=width, height=height)
},
png={ out_filename_3D <- paste(barcode,"_", filename, ".png", sep="" )
rgl.snapshot(out_filename_3D) },
svg={ out_filename_3D <- paste(barcode, "_", filename, ".svg", sep="" )
rgl.postscript( filename=out_filename_3D, fmt="svg", drawText=TRUE ) },
{ printError("ERROR(LA_Plot.wells): unknown output format (%s) specified ! Supported formats: 'screen', 'webGL', 'png', 'svg' !", outputFormat) }
)
if(outputFormat %in% c('webGL', 'svg', 'png') ) rgl.close()
return(TRUE)
}
#' LA_Plot.barplot3d
#'
#' @title Plotting 3D bars of Measurements in Microtiter Format
#' @description
#' Wrapper around barplot3d(): converts a reader data frame into a
#' (rows x columns x heights) array and renders/exports the 3D plot.
#' NOTE(review): this function reads many variables that are not
#' parameters (plot_width, plot_height, plot_var, current_filename,
#' current_barcode, current_wavelength, fill_colours, plot_bg,
#' num_colours, char_scale, plot_preview, plot_theta, plot_phi,
#' plot_scale, plot_transparence, plot_xlab/ylab/zlab, plot_bar_width,
#' plot_bar_distance, plot_output_format) -- presumably set in the
#' calling/global environment; confirm against the callers.
#' @param plot_data_df data frame of measurements; required -- the
#'   function returns FALSE if it is NULL
#' @param num index passed through to readerDF2Array()
#' @param zmax upper limit of the value axis, forwarded to barplot3d()
#' @return TRUE on success, FALSE when plot_data_df is NULL
#' @keywords plate readers
#' @export
#' @examples
#' platePlot3d()
#'
#' @note todo:
#'
LA_Plot.barplot3d <- function( plot_data_df=NULL, num=0, zmax=1.0, ...)
{
    require(rgl)
    print("barplot3d 0.9i")
    printDebug("barplolt3D(DEBUG): plot width %s, height: %s", plot_width, plot_height )
    # plot_zlab = 'Absorption / AU'
    # default columns to plot; local assignment -- does not persist outside
    if (is.null(plot_var) ) plot_var = c("Value", "RefTime")
    # NOTE(review): compares against 'LA_PLot' (capital L) -- possibly a
    # typo for 'LA_Plot'; confirm against where current_filename is set.
    if(current_filename == 'LA_PLot') current_filename <- "3Dbarplot"
    # reverting original classes
    if ( ! is.null(plot_data_df)) class(plot_data_df) <- "data.frame" else return(FALSE)
    # removing neg. values
    # if( min(plot_data_df$Slope, na.rm=T) < 0 ) {
    #    plot_data_df[plot_data_df$Slope <0, ] <- 0
    #    plot_data_df[plot_data_df$Value <0, ] <- 0
    # }
    if(current_wavelength == 0) printError("WARNING(plotePlot3D): No wavelength specified !")
    # fixed camera orientation (transposed before use as rgl userMatrix)
    umatrix = matrix(c( -0.4596869, 0.01776623, 0.8879033, 0,
                        0.3495693, 0.92270774, 0.1625170 , 0,
                        -0.8163879 ,0.38509068 ,-0.4303671 , 0,
                        0.0000000, 0.00000000 , 0.0000000 , 1), c(4,4))
    if(isTRUE(plot_preview)) axisBack <- FALSE else axisBack <- TRUE
    # converting data frame to array for fast plotting
    heights_arr <- readerDF2Array(source_data_df=plot_data_df, wavelength=current_wavelength,num=num, plotVar=plot_var)
    # NOTE(review): 'char_scale' is passed positionally here; by R's
    # argument matching it fills the first unmatched formal of barplot3d(),
    # which is 'nticks', not 'char_scale' -- confirm and consider naming it.
    barplot3d(heights_arr=heights_arr, barcode=current_barcode, filename=current_filename,
              zmax=zmax, column_colours=fill_colours, plotBg=plot_bg, numCol=num_colours, char_scale, moreLight=plot_preview, axisBack=axisBack,
              theta=plot_theta, phi=plot_phi, scale=plot_scale, width=plot_width, height=plot_height, transp=plot_transparence,
              col_lab=plot_xlab, row_lab=plot_ylab, z_lab=plot_zlab,
              barWidth=plot_bar_width, barDistance=plot_bar_distance,
              outputFormat="screen")
    # ! to save a plot to file, first plot to screen and finally save it
    switch(plot_output_format,
           screen={ par3d(windowRect=c(0,0,1024,900))
                    par3d(userMatrix=t(umatrix), zoom= 0.80) },
           webGL={ out_filename_3D <- paste(current_barcode,"_", current_filename, ".html", sep="" )
                   writeWebGL( dir="webGL", filename=file.path("webGL", out_filename_3D),
                               template = system.file(file.path("WebGL", "template.html"), package = "rgl"),
                               width=plot_width, height=plot_height)
           },
           png={ out_filename_3D <- paste(current_barcode,"_", current_filename, ".png", sep="" )
                 par3d(windowRect=c(0,0,plot_width,plot_height))
                 par3d(userMatrix=t(umatrix), zoom= 0.80)
                 rgl.snapshot(out_filename_3D) },
           svg={ out_filename_3D <- paste(current_barcode, "_", current_filename, ".svg", sep="" )
                 rgl.postscript( filename=out_filename_3D, fmt="svg", drawText=TRUE ) },
           { printError("ERROR(LA_Plot.wells): unknown output format (%s) specified - supported formats: 'screen', 'webGL', 'png', 'svg' !", outputFormat) }
           )
    # close the device after writing a file; keep it open for 'screen'
    if (plot_output_format != "screen" ) rgl.close()
    return(TRUE)
}
| 12,841 | gpl-3.0 |
9f07afff40add4f3d28d4638cb4235da99e10d90 | ge11232002/latticeExtra | R/ecdfplot.R |
## Prepanel function for ecdfplot(): delegate limit computation to the
## qqmath prepanel against a uniform distribution, then swap the axes so
## the data run along x and the cumulative probability [0, 1] along y.
prepanel.ecdfplot <- function(x, f.value = NULL, ...)
{
    qq <- prepanel.default.qqmath(x,
                                  f.value = f.value,
                                  distribution = qunif)
    list(xlim = qq$ylim,
         ylim = c(0, 1),
         dx = qq$dy,
         dy = qq$dx)
}
## Panel function drawing an empirical CDF.  With 'groups', recurses once
## per group via panel.superpose; otherwise it plots either the classical
## step ECDF (f.value = NULL) or quantiles at the probabilities given by
## 'f.value' (a numeric vector or a function of the observation count).
panel.ecdfplot <- function(x, f.value = NULL, type = "s",
                           groups = NULL, qtype = 7,
                           ref = TRUE,
                           ...)
{
    ## Optional horizontal reference lines at probabilities 0 and 1.
    if (ref) {
        ref.pars <- trellis.par.get("reference.line")
        do.call(panel.abline, c(list(h = c(0, 1)), ref.pars))
    }
    x <- as.numeric(x)
    distribution <- qunif
    nobs <- sum(!is.na(x))
    if (!is.null(groups)) {
        ## One recursive call per group; this function is its own
        ## panel.groups.
        panel.superpose(x, y = NULL,
                        f.value = f.value, type = type,
                        distribution = distribution,
                        qtype = qtype,
                        groups = groups,
                        panel.groups = panel.ecdfplot,
                        ...)
    } else if (nobs) {
        if (is.null(f.value)) {
            ## Classical ECDF: step through the sorted data.
            xs <- sort(x)
            ys <- seq_len(nobs) / nobs
        } else {
            ## User-supplied probability points.
            p <- if (is.numeric(f.value)) f.value else f.value(nobs)
            xs <- quantile(x, p, names = FALSE, type = qtype, na.rm = TRUE)
            ys <- distribution(p)
        }
        panel.xyplot(x = xs, y = ys, type = type, ...)
    }
}
## Generic entry point; dispatches on the class of 'x'.
ecdfplot <- function(x, data, ...) {
    UseMethod("ecdfplot")
}
## Formula method: piggyback on lattice::densityplot() with the ECDF
## prepanel/panel functions substituted in.
ecdfplot.formula <- function (x, data = NULL,
                              prepanel = "prepanel.ecdfplot",
                              panel = "panel.ecdfplot",
                              ylab = gettext("Empirical CDF"),
                              ...)
{
    ## Remember the user's invocation so the result prints a nice $call.
    shown.call <- sys.call(sys.parent())
    shown.call[[1]] <- quote(ecdfplot)
    ## Rewrite the current call into a lattice::densityplot() call.
    real.call <- match.call()
    real.call[[1]] <- quote(lattice::densityplot)
    real.call$data <- data
    real.call$prepanel <- prepanel
    real.call$panel <- panel
    real.call$ylab <- ylab
    res <- eval.parent(real.call)
    res$call <- shown.call
    res
}
## Numeric method: wrap the vector into a one-element data list and
## re-dispatch through the formula method via the formula ~x.
ecdfplot.numeric <- function (x, data = NULL, xlab = deparse(substitute(x)), ...)
{
    shown.call <- sys.call(sys.parent())
    shown.call[[1]] <- quote(ecdfplot)
    real.call <- match.call()
    ## 'data' makes no sense for a bare numeric vector.
    if (!is.null(real.call$data))
        warning("explicit 'data' specification ignored")
    real.call$data <- list(x = x)
    real.call$xlab <- xlab
    real.call$x <- ~x
    real.call[[1]] <- quote(latticeExtra::ecdfplot)
    res <- eval.parent(real.call)
    res$call <- shown.call
    res
}
| 2,645 | gpl-3.0 |
a26c6091d967303a9de58da068dda9bb1a3773c1 | radfordneal/pqR | tests/demos.R | #### Run all demos that do not depend on tcl and other specials.
.ptime <- proc.time()
set.seed(123)
## keep.source so demo code is echoed verbatim; plain quotes keep the
## output reproducible across locales
options(keep.source=TRUE, useFancyQuotes=FALSE)
## Drop these for strict testing {and add them to demos2.R)
## lm.glm is in ../src/library/utils/man/demo.Rd }:
dont <- list(graphics = c("Hershey", "Japanese", "plotmath"),
             stats = c("lm.glm", "nlm")
             )
## don't take tcltk here
for(pkg in c("base", "graphics", "stats")) {
    ## all demo scripts shipped with the installed package ...
    demos <- list.files(file.path(system.file(package = pkg), "demo"),
                        pattern = "\\.R$")
    ## ... minus the exclusions listed in 'dont' above
    demos <- demos[is.na(match(demos, paste(dont[[pkg]], "R",sep=".")))]
    if(length(demos)) {
        ## attach the package only if it is not already on the search
        ## path; remember whether we did, so it can be detached afterwards
        if(need <- pkg != "base" &&
           !any((fpkg <- paste("package", pkg, sep=":")) == search()))
            library(pkg, character.only = TRUE)
        for(nam in sub("\\.R$", "", demos))
            demo(nam, character.only = TRUE)
        if(need) detach(pos = which(fpkg == search()))
    }
}
cat("Time elapsed: ", proc.time() - .ptime, "\n")
| 1,022 | gpl-2.0 |
5eba3eeb33f67f45455162ef3cec2b23a3ab892e | jeffreyhorner/R-Array-Hash | src/library/tools/R/admin.R | # File src/library/tools/R/admin.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### * .install_package_description
## called from basepkg.mk and .install_packages
.install_package_description <-
function(dir, outDir, builtStamp=character())
{
    ## Function for taking the DESCRIPTION package meta-information,
    ## checking/validating it, and installing it with the 'Built:'
    ## field added.  Note that from 1.7.0 on, packages without
    ## compiled code are not marked as being from any platform.
    ## Stops with the full check output if the DESCRIPTION is invalid;
    ## also serializes the split metadata to <outDir>/Meta/package.rds.
    ## 'builtStamp', if non-empty, overrides the build timestamp (for
    ## reproducible builds).

    ## Check first.  Note that this also calls .read_description(), but
    ## .check_package_description() currently really needs to know the
    ## path to the DESCRIPTION file, and returns an object with check
    ## results and not the package metadata ...
    ok <- .check_package_description(file.path(dir, "DESCRIPTION"))
    if(any(as.integer(sapply(ok, length)) > 0L)) {
        stop(paste(gettext("Invalid DESCRIPTION file") ,
                   paste(.eval_with_capture(print(ok))$output,
                         collapse = "\n"),
                   sep = "\n\n"),
             domain = NA,
             call. = FALSE)
    }
    ## This reads (in C locale) byte-by-byte, declares latin1 or UTF-8
    ## Maybe it would be better to re-encode others (there are none at
    ## present, at least in a UTF-8 locale?
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    ## should not have a Built: field, so ignore it if it is there
    nm <- names(db)
    if("Built" %in% nm) {
        db <- db[-match("Built", nm)]
        warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                         db["Package"]),
                domain = NA,
                call. = FALSE)
    }
    ## Platform entry is only recorded for packages with a src/
    ## directory (i.e. with compiled code); macOS multi-arch builds are
    ## normalized to a 'universal' platform string.
    OStype <- R.version$platform
    if (grepl("-apple-darwin", OStype) && nzchar(Sys.getenv("R_ARCH")))
        OStype <- sub(".*-apple-darwin", "universal-apple-darwin", OStype)
    ## Built: "R <version>; <platform-or-empty>; <timestamp>; <OStype>"
    Built <-
        paste0("R ",
               paste(R.version[c("major", "minor")], collapse = "."),
               "; ",
               if(dir.exists(file.path(dir, "src"))) OStype else "",
               "; ",
               ## Some build systems want to supply a package-build timestamp for reproducibility
               ## Prefer date in ISO 8601 format, UTC.
               if (length(builtStamp)==0) format(Sys.time(), tz = "UTC", usetz = TRUE) else builtStamp,
               ## Sys.time(),
               "; ",
               .OStype())
    ## At some point of time, we had:
    ##   We must not split the Built: field across lines.
    ## Not sure if this is still true.  If not, the following could be
    ## simplified to
    ##   db["Built"] <- Built
    ##   write.dcf(rbind(db), file.path(outDir, "DESCRIPTION"))
    ## But in any case, it is true for fields obtained from expanding R
    ## fields (Authors@R): these should not be reformatted.
    db <- c(db,
            .expand_package_description_db_R_fields(db),
            Built = Built)
    ## This cannot be done in a MBCS: write.dcf fails
    ## (LC_CTYPE is forced to "C" for the write and restored on exit)
    ctype <- Sys.getlocale("LC_CTYPE")
    Sys.setlocale("LC_CTYPE", "C")
    on.exit(Sys.setlocale("LC_CTYPE", ctype))
    .write_description(db, file.path(outDir, "DESCRIPTION"))
    outMetaDir <- file.path(outDir, "Meta")
    if(!dir.exists(outMetaDir) && !dir.create(outMetaDir))
        stop(gettextf("cannot open directory '%s'",
                      outMetaDir),
             domain = NA)
    ## serialized form used later by library() et al.
    saveInfo <- .split_description(db)
    saveRDS(saveInfo, file.path(outMetaDir, "package.rds"))
    invisible()
}
### * .split_description
## also used in .getRequiredPackages
.split_description <-
function(db, verbose = FALSE)
{
    ## Split a DESCRIPTION metadata vector 'db' into a structured
    ## "packageDescription2" object: parsed Built info plus the
    ## Depends/Suggests/Imports/LinkingTo dependency lists, with the R
    ## version requirement separated out (Rdepends/Rdepends2).
    ##
    ## Built: must have the four "; "-separated parts written by
    ## .install_package_description(); otherwise warn and drop it.
    if(!is.na(Built <- db["Built"])) {
        Built <- as.list(strsplit(Built, "; ")[[1L]])
        if(length(Built) != 4L) {
            warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                             db["Package"]),
                    domain = NA,
                    call. = FALSE)
            Built <- NULL
        } else {
            names(Built) <- c("R", "Platform", "Date", "OStype")
            Built[["R"]] <- R_system_version(sub("^R ([0-9.]+)", "\\1",
                                                Built[["R"]]))
        }
    } else Built <- NULL
    ## might perhaps have multiple entries
    Depends <- .split_dependencies(db[names(db) %in% "Depends"])
    ## several packages 'Depends' on base!
    ind <- match("base", names(Depends), 0L)
    if(ind) Depends <- Depends[-ind]
    ## We only need Rdepends for R < 2.7.0, but we still need to be
    ## able to check that someone is not trying to load this into a
    ## very old version of R.
    if("R" %in% names(Depends)) {
        Rdeps2 <- Depends["R" == names(Depends)]
        names(Rdeps2) <- NULL
        Rdeps <- Depends[["R", exact = TRUE]] # the first one
        Depends <- Depends[names(Depends) != "R"]
        ## several packages have 'Depends: R', which is a noop.
        if(verbose && length(Rdeps) == 1L)
            message("WARNING: omitting pointless dependence on 'R' without a version requirement")
        if(length(Rdeps) <= 1L) Rdeps <- NULL
    } else Rdeps2 <- Rdeps <- NULL
    Rdeps <- as.vector(Rdeps)
    Suggests <- .split_dependencies(db[names(db) %in% "Suggests"])
    Imports <- .split_dependencies(db[names(db) %in% "Imports"])
    LinkingTo <- .split_dependencies(db[names(db) %in% "LinkingTo"])
    structure(list(DESCRIPTION = db, Built = Built,
                   Rdepends = Rdeps, Rdepends2 = Rdeps2,
                   Depends = Depends, Suggests = Suggests,
                   Imports = Imports, LinkingTo = LinkingTo),
              class = "packageDescription2")
}
### * .vinstall_package_descriptions_as_RDS
## called from src/library/Makefile
.vinstall_package_descriptions_as_RDS <-
function(dir, packages)
{
    ## For each given package installed under 'dir', serialize its
    ## DESCRIPTION metadata to Meta/package.rds, skipping any package
    ## whose RDS file is already newer than its DESCRIPTION.
    ## Really only useful for base packages under Unix.
    ## See src/library/Makefile.in.
    pkgs <- unlist(strsplit(packages, "[[:space:]]+"))
    for(pkg in pkgs) {
        meta_dir <- file.path(dir, pkg, "Meta")
        if(!dir.exists(meta_dir) && !dir.create(meta_dir))
            stop(gettextf("cannot open directory '%s'", meta_dir))
        dcf_file <- file.path(dir, pkg, "DESCRIPTION")
        rds_file <- file.path(meta_dir, "package.rds")
        ## "-nt": RDS newer than the DESCRIPTION => already up to date.
        if(!file_test("-nt", rds_file, dcf_file)) {
            meta <- .split_description(.read_description(dcf_file))
            saveRDS(meta, rds_file)
        }
    }
    invisible()
}
### * .update_package_rds
## not used
.update_package_rds <-
function(lib.loc = NULL)
{
    ## Rebuild the dumped package descriptions (Meta/package.rds) for all
    ## packages found in 'lib.loc' (default: all library paths).
    if (is.null(lib.loc))
        lib.loc <- .libPaths()
    for (lib in lib.loc[file.exists(lib.loc)]) {
        candidates <- list.files(lib, all.files = FALSE, full.names = TRUE)
        for (pkg_dir in candidates) {
            ## A directory containing a DESCRIPTION file is taken to be
            ## an installed package.
            if (file.exists(file.path(pkg_dir, "DESCRIPTION"))) {
                print(pkg_dir)
                .install_package_description(pkg_dir, pkg_dir)
            }
        }
    }
}
### * .install_package_code_files
.install_package_code_files <-
function(dir, outDir)
{
    ## Concatenate the package's R source files (in Collate order if a
    ## Collate field is given, otherwise in C-locale sort order) into a
    ## single file <outDir>/R/<pkgname>, prefixed with a .packageName
    ## assignment, re-encoding from the declared Encoding if necessary,
    ## and finally syntax-check the result.
    if(!dir.exists(dir))
        stop(gettextf("directory '%s' does not exist", dir),
             domain = NA)
    dir <- file_path_as_absolute(dir)
    ## Attempt to set the LC_COLLATE locale to 'C' to turn off locale
    ## specific sorting.
    curLocale <- Sys.getlocale("LC_COLLATE")
    on.exit(Sys.setlocale("LC_COLLATE", curLocale), add = TRUE)
    ## (Guaranteed to work as per the Sys.setlocale() docs.)
    lccollate <- "C"
    if(Sys.setlocale("LC_COLLATE", lccollate) != lccollate) {
        ## <NOTE>
        ## I don't think we can give an error here.
        ## It may be the case that Sys.setlocale() fails because the "OS
        ## reports request cannot be honored" (src/main/platform.c), in
        ## which case we should still proceed ...
        warning("cannot turn off locale-specific sorting via LC_COLLATE")
        ## </NOTE>
    }
    ## We definitely need a valid DESCRIPTION file.
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    codeDir <- file.path(dir, "R")
    if(!dir.exists(codeDir)) return(invisible())
    codeFiles <- list_files_with_type(codeDir, "code", full.names = FALSE)
    ## An OS-specific Collate field (e.g. Collate.unix) takes precedence
    ## over plain Collate.
    collationField <-
        c(paste("Collate", .OStype(), sep = "."), "Collate")
    if(any(i <- collationField %in% names(db))) {
        collationField <- collationField[i][1L]
        codeFilesInCspec <- .read_collate_field(db[collationField])
        ## Validate the collation spec against the files on disk: it must
        ## list every code file exactly once, no more, no less.
        ## Duplicated entries in the collation spec?
        badFiles <-
            unique(codeFilesInCspec[duplicated(codeFilesInCspec)])
        if(length(badFiles)) {
            out <- gettextf("\nduplicated files in '%s' field:",
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files are listed in the collation spec but don't
        ## exist.
        badFiles <- setdiff(codeFilesInCspec, codeFiles)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' field missing from '%s':",
                            collationField,
                            codeDir)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files exist but are missing from the collation
        ## spec.  Note that we do not want the collation spec to use
        ## only a subset of the available code files.
        badFiles <- setdiff(codeFiles, codeFilesInCspec)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' missing from '%s' field:",
                            codeDir,
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## Everything's groovy ...
        codeFiles <- codeFilesInCspec
    }
    codeFiles <- file.path(codeDir, codeFiles)
    if(!dir.exists(outDir) && !dir.create(outDir))
        stop(gettextf("cannot open directory '%s'", outDir),
             domain = NA)
    outCodeDir <- file.path(outDir, "R")
    if(!dir.exists(outCodeDir) && !dir.create(outCodeDir))
        stop(gettextf("cannot open directory '%s'", outCodeDir),
             domain = NA)
    outFile <- file.path(outCodeDir, db["Package"])
    if(!file.create(outFile))
        stop(gettextf("unable to create '%s'", outFile), domain = NA)
    ## every installed code file starts with the .packageName marker
    writeLines(paste0(".packageName <- \"", db["Package"], "\""),
               outFile)
    enc <- as.vector(db["Encoding"])
    need_enc <- !is.na(enc) # Encoding was specified
    ## assume that if locale is 'C' we can used 8-bit encodings unchanged.
    if(need_enc && !(Sys.getlocale("LC_CTYPE") %in% c("C", "POSIX"))) {
        ## re-encode each file from the declared encoding to the current
        ## locale, keeping #line directives so parse errors point at the
        ## original sources; unconvertible lines fall back to "byte"
        ## escapes after a warning
        con <- file(outFile, "a")
        on.exit(close(con)) # Windows does not like files left open
        for(f in codeFiles) {
            tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "")
            if(length(bad <- which(is.na(tmp)))) {
                warning(sprintf(ngettext(length(bad),
                                         "unable to re-encode %s line %s",
                                         "unable to re-encode %s lines %s"),
                                sQuote(basename(f)),
                                paste(bad, collapse = ", ")),
                        domain = NA, call. = FALSE)
                tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "",
                             sub = "byte")
            }
            writeLines(paste0("#line 1 \"", f, "\""), con)
            writeLines(tmp, con)
        }
        close(con); on.exit()
    } else {
        ## <NOTE>
        ## It may be safer to do
        ##   writeLines(sapply(codeFiles, readLines), outFile)
        ## instead, but this would be much slower ...
        ## use fast version of file.append that ensures LF between files
        if(!all(.file_append_ensuring_LFs(outFile, codeFiles)))
            stop("unable to write code files")
        ## </NOTE>
    }
    ## A syntax check here, so that we do not install a broken package.
    ## FIXME: this is only needed if we don't lazy load, as the lazy loader
    ## would detect the error.
    op <- options(showErrorCalls=FALSE)
    on.exit(options(op))
    parse(outFile)
    invisible()
}
### * .install_package_indices
## called from R CMD INSTALL
.install_package_indices <-
function(dir, outDir)
{
    ## Install the package indices: copy a source INDEX verbatim if one
    ## is shipped, ensure <outDir>/Meta exists, then delegate to the Rd
    ## and demo index builders.
    options(warn = 1) # to ensure warnings get seen
    for (d in c(dir, outDir))
        if(!dir.exists(d))
            stop(gettextf("directory '%s' does not exist", d),
                 domain = NA)
    ## An INDEX file in the package sources is installed as-is and never
    ## rebuilt.
    srcIndex <- file.path(dir, "INDEX")
    if(file_test("-f", srcIndex)) {
        destIndex <- file.path(outDir, "INDEX")
        if(!file.copy(srcIndex, destIndex, overwrite = TRUE))
            stop(gettextf("unable to copy INDEX to '%s'", destIndex),
                 domain = NA)
    }
    metaDir <- file.path(outDir, "Meta")
    if(!dir.exists(metaDir) && !dir.create(metaDir))
        stop(gettextf("cannot open directory '%s'", metaDir),
             domain = NA)
    .install_package_Rd_indices(dir, outDir)
    .install_package_demo_index(dir, outDir)
    invisible()
}
### * .install_package_Rd_indices
.install_package_Rd_indices <-
function(dir, outDir)
{
    ## Build and install the Rd-derived indices for a package: the Rd
    ## contents (Meta/Rd.rds), help-search index (Meta/hsearch.rds), link
    ## index (Meta/links.rds), a plain-text INDEX (unless the sources ship
    ## one) and, when a data directory exists, a data index
    ## (Meta/data.rds).  Rebuilding is skipped when every installed index
    ## is newer than the newest Rd/data file and records the same set of
    ## Rd files.
    dir <- file_path_as_absolute(dir)
    docsDir <- file.path(dir, "man")
    dataDir <- file.path(outDir, "data")
    outDir <- file_path_as_absolute(outDir)
    ## <FIXME>
    ## Not clear whether we should use the basename of the directory we
    ## install to, or the package name as obtained from the DESCRIPTION
    ## file in the directory we install from (different for versioned
    ## installs).  We definitely do not want the basename of the dir we
    ## install from.
    packageName <- basename(outDir)
    ## </FIXME>
    ## Default help-page encoding from the installed package metadata
    ## (NULL when DESCRIPTION has no Encoding field).  Computed up front
    ## because both branches below need it: previously it was only
    ## defined in the Rd branch, so the no-Rd branch failed with an
    ## "object 'defaultEncoding' not found" error.
    defaultEncoding <- local({
        desc <- readRDS(file.path(outDir, "Meta", "package.rds"))$DESCRIPTION
        enc <- as.vector(desc["Encoding"])
        if(is.na(enc)) NULL else enc
    })
    allRd <- if(dir.exists(docsDir))
        list_files_with_type(docsDir, "docs") else character()
    ## some people have man dirs without any valid .Rd files
    if(length(allRd)) {
        ## we want the date of the newest .Rd file we will install
        newestRd <- max(file.mtime(allRd))
        ## these files need not exist, which gives NA.
        indices <- c(file.path("Meta", "Rd.rds"),
                     file.path("Meta", "hsearch.rds"),
                     file.path("Meta", "links.rds"),
                     "INDEX")
        upToDate <- file.mtime(file.path(outDir, indices)) >= newestRd
        ## BUGFIX: full.names = TRUE so file.mtime() gets real paths; the
        ## bare names previously yielded NA and forced a rebuild each time.
        if(dir.exists(dataDir)
           && length(dataFiles <- list.files(dataDir, full.names = TRUE))) {
            ## Note that the data index is computed from both the package's
            ## Rd files and the data sets actually available.
            newestData <- max(file.mtime(dataFiles))
            upToDate <- c(upToDate,
                          file.mtime(file.path(outDir, "Meta", "data.rds")) >=
                          max(newestRd, newestData))
        }
        ## An Rd file might have been removed since the indices were
        ## made, so also compare the recorded list of Rd files.
        ## BUGFIX: anchor the path at outDir -- with the previous relative
        ## path this check silently never ran unless the current working
        ## directory happened to be outDir.
        RdsFile <- file.path(outDir, "Meta", "Rd.rds")
        if(file.exists(RdsFile)) {
            ## this has file names without path
            files <- readRDS(RdsFile)$File
            if(!identical(basename(allRd), files)) upToDate <- FALSE
        }
        ## we want to proceed if any is NA.
        if(all(upToDate %in% TRUE)) return(invisible())
        ## Rd objects should already have been installed.
        db <- tryCatch(Rd_db(basename(outDir), lib.loc = dirname(outDir)),
                       error = function(e) NULL)
        ## If not, we build the Rd db from the sources:
        if(is.null(db)) db <- .build_Rd_db(dir, allRd)
        contents <- Rd_contents(db)
        .write_Rd_contents_as_RDS(contents,
                                  file.path(outDir, "Meta", "Rd.rds"))
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
        ## If there is no INDEX file in the package sources, we build one.
        ## <NOTE>
        ## We currently do not also save this in RDS format, as we can
        ## always do
        ##   .build_Rd_index(readRDS(file.path(outDir, "Meta", "Rd.rds"))
        if(!file_test("-f", file.path(dir, "INDEX")))
            writeLines(formatDL(.build_Rd_index(contents)),
                       file.path(outDir, "INDEX"))
        ## </NOTE>
    } else {
        ## No Rd files: still install (empty) hsearch and links indices.
        contents <- NULL
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
    }
    if(dir.exists(dataDir))
        saveRDS(.build_data_index(dataDir, contents),
                file.path(outDir, "Meta", "data.rds"))
    invisible()
}
### * .install_package_vignettes2
## called from R CMD INSTALL for pre 3.0.2-built tarballs, and for base packages
.install_package_vignettes2 <-
function(dir, outDir, encoding = "")
{
    ## Build the vignette index for packages built before R 3.0.2 (and
    ## for base packages): locate vignette sources in vignettes/ or
    ## inst/doc, copy them into the installed doc/ directory, tangle R
    ## code from Sweave-style vignettes where it is missing, write an
    ## HTML index (unless the package ships its own) and serialize the
    ## index to Meta/vignette.rds.
    ## NOTE(review): 'encoding' is accepted but never used -- confirm.
    dir <- file_path_as_absolute(dir)
    subdirs <- c("vignettes", file.path("inst", "doc"))
    ok <- dir.exists(file.path(dir, subdirs))
    ## Create a vignette index only if the vignette dir exists.
    if (!any(ok))
        return(invisible())
    ## vignettes/ wins over inst/doc when both exist
    subdir <- subdirs[ok][1L]
    vignetteDir <- file.path(dir, subdir)
    outDir <- file_path_as_absolute(outDir)
    packageName <- basename(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    ## --fake and --no-inst installs do not have a outVignetteDir.
    if(!dir.exists(outVignetteDir)) return(invisible())
    ## If there is an HTML index in the @file{inst/doc} subdirectory of
    ## the package source directory (@code{dir}), we do not overwrite it
    ## (similar to top-level @file{INDEX} files).  Installation already
    ## copied this over.
    hasHtmlIndex <- file_test("-f", file.path(vignetteDir, "index.html"))
    htmlIndex <- file.path(outDir, "doc", "index.html")
    vigns <- pkgVignettes(dir = dir, subdirs = subdir, check = TRUE)
    ## Write dummy HTML index if no vignettes are found and exit.
    if(length(vigns$docs) == 0L) {
        ## we don't want to write an index if the directory is in fact empty
        files <- list.files(vignetteDir, all.files = TRUE, no.. = TRUE)
        if((length(files) > 0L) && !hasHtmlIndex)
            .writeVignetteHtmlIndex(packageName, htmlIndex)
        return(invisible())
    }
    if (subdir == "vignettes") {
        ## copy vignette sources over.
        file.copy(vigns$docs, outVignetteDir)
    }
    ## re-scan from the installed doc/ directory; fall back to a scan
    ## without outputs/sources if that fails
    vigns <- tryCatch({
        pkgVignettes(dir=outDir, subdirs="doc", output=TRUE, source=TRUE)
    }, error = function(ex) {
        pkgVignettes(dir=outDir, subdirs="doc")
    })
    vignetteIndex <- .build_vignette_index(vigns)
    if(NROW(vignetteIndex) > 0L) {
        ## NOTE(review): the working directory is changed here and
        ## restored only via setwd(cwd) below -- not via on.exit(), so an
        ## error while tangling leaves the cwd changed; confirm.
        cwd <- getwd()
        if (is.null(cwd))
            stop("current working directory cannot be ascertained")
        setwd(outVignetteDir)
        loadVignetteBuilder(dir, mustwork = FALSE)
        ## install tangled versions of Sweave vignettes.  FIXME: Vignette
        ## *.R files should have been included when the package was built,
        ## but in the interim before they are all built with the new code,
        ## this is needed.
        for(i in seq_along(vigns$docs)) {
            file <- vigns$docs[i]
            ## skip vignettes that already have a tangled source
            if (!is.null(vigns$sources) && !is.null(vigns$sources[file][[1]]))
                next
            file <- basename(file)
            enc <- vigns$encodings[i]
            cat("  ", sQuote(basename(file)),
                if(nzchar(enc)) paste("using", sQuote(enc)), "\n")
            engine <- try(vignetteEngine(vigns$engines[i]), silent = TRUE)
            if (!inherits(engine, "try-error"))
                engine$tangle(file, quiet = TRUE, encoding = enc)
            setwd(outVignetteDir) # just in case some strange tangle function changed it
        }
        setwd(cwd)
        # Update - now from the output directory
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        ## remove any files with no R code (they will have header comments).
        ## if not correctly declared they might not be in the current encoding
        sources <- unlist(vigns$sources)
        for(i in seq_along(sources)) {
            file <- sources[i]
            if (!file_test("-f", file)) next
            bfr <- readLines(file, warn = FALSE)
            if(all(grepl("(^###|^[[:space:]]*$)", bfr, useBytes = TRUE)))
                unlink(file)
        }
        # Update
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        # Add tangle source files (*.R) to the vignette index
        # Only the "main" R file, because tangle may also split
        # output into multiple files
        ## NOTE(review): 'engine' here is whatever the last loop iteration
        ## left behind (and is undefined if that loop body never ran) --
        ## confirm this is intended for find_vignette_product().
        sources <- character(length(vigns$docs))
        for (i in seq_along(vigns$docs)) {
            name <- vigns$names[i]
            source <- find_vignette_product(name, by = "tangle", main = TRUE, dir = vigns$dir, engine = engine)
            if (length(source) > 0L)
                sources[i] <- basename(source)
        }
        vignetteIndex$R <- sources
    }
    if(!hasHtmlIndex)
        .writeVignetteHtmlIndex(packageName, htmlIndex, vignetteIndex)
    saveRDS(vignetteIndex,
            file = file.path(outDir, "Meta", "vignette.rds"))
    invisible()
}
### * .install_package_vignettes3
## called from R CMD INSTALL for 3.0.2 or later tarballs
.install_package_vignettes3 <-
function(dir, outDir, encoding = "")
{
    ## Install the prebuilt vignette index: R >= 3.0.2 tarballs ship it
    ## as build/vignette.rds.  Copies the index into Meta/ and writes an
    ## HTML index unless the package provides its own doc/index.html.
    ## 'encoding' is accepted for interface compatibility but unused.
    pkg <- basename(outDir)
    srcIndex <- file.path(file_path_as_absolute(dir),
                          "build", "vignette.rds")
    ## Nothing to do for tarballs built before 3.0.2.
    if (!file_test("-f", srcIndex))
        return(invisible())
    file.copy(srcIndex, file.path(outDir, "Meta"))
    ## A hand-written HTML index shipped by the package wins (same rule
    ## as for top-level INDEX files); installation already copied it.
    docIndex <- file.path(outDir, "doc", "index.html")
    if (!file_test("-f", docIndex))
        .writeVignetteHtmlIndex(pkg, docIndex, readRDS(srcIndex))
    invisible()
}
### * .install_package_demo_index
.install_package_demo_index <-
function(dir, outDir)
{
    ## Build the demo index for a package's demo/ directory (if any)
    ## and store it as <outDir>/Meta/demo.rds.
    src <- file.path(dir, "demo")
    if (dir.exists(src)) {
        saveRDS(.build_demo_index(src),
                file = file.path(outDir, "Meta", "demo.rds"))
    }
    invisible()
}
### * .vinstall_package_indices
## called from src/library/Makefile
.vinstall_package_indices <-
function(src_dir, out_dir, packages)
{
    ## For the given packages with sources rooted at @file{src_dir} and
    ## installations rooted at @file{out_dir}, install the package
    ## indices.
    ## Really only useful for base packages under Unix.
    ## See @file{src/library/Makefile.in}.
    ## 'packages' is a single whitespace-separated string of package
    ## names (as passed in from Make).
    for(p in unlist(strsplit(packages, "[[:space:]]+")))
        .install_package_indices(file.path(src_dir, p), file.path(out_dir, p))
    ## Refresh the HTML package listing for the standard library tree.
    utils::make.packages.html(.Library, verbose = FALSE)
    invisible()
}
### * .install_package_vignettes
## called from src/library/Makefile[.win]
## this is only used when building R
.install_package_vignettes <-
function(dir, outDir, keep.source = TRUE)
{
    ## Build (weave + texi2pdf where needed) the vignettes of a package
    ## and install the resulting PDFs into <outDir>/doc.  Only used when
    ## building R itself (base packages); regular packages go through
    ## .install_package_vignettes2/3.
    ##
    ## dir:         package source directory.
    ## outDir:      installation directory.
    ## keep.source: passed on to the weave engine.
    dir <- file_path_as_absolute(dir)
    vigns <- pkgVignettes(dir = dir)
    if(is.null(vigns) || !length(vigns$docs)) return(invisible())
    outDir <- file_path_as_absolute(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    if(!dir.exists(outVignetteDir) && !dir.create(outVignetteDir))
        stop(gettextf("cannot open directory '%s'", outVignetteDir),
             domain = NA)
    ## We have to be careful to avoid repeated rebuilding.
    vignettePDFs <-
        file.path(outVignetteDir,
                  sub("$", ".pdf",
                      basename(file_path_sans_ext(vigns$docs))))
    ## "-nt": an installed PDF newer than its source is up to date.
    upToDate <- file_test("-nt", vignettePDFs, vigns$docs)
    ## The primary use of this function is to build and install PDF
    ## vignettes in base packages.
    ## Hence, we build in a subdir of the current directory rather
    ## than a temp dir: this allows inspection of problems and
    ## automatic cleanup via Make.
    cwd <- getwd()
    if (is.null(cwd))
        stop("current working directory cannot be ascertained")
    buildDir <- file.path(cwd, ".vignettes")
    if(!dir.exists(buildDir) && !dir.create(buildDir))
        stop(gettextf("cannot create directory '%s'", buildDir), domain = NA)
    on.exit(setwd(cwd))
    setwd(buildDir)
    loadVignetteBuilder(vigns$pkgdir)
    ## Only rebuild the vignettes that are out of date.
    for(i in seq_along(vigns$docs)[!upToDate]) {
        file <- vigns$docs[i]
        name <- vigns$names[i]
        engine <- vignetteEngine(vigns$engines[i])
        message(gettextf("processing %s", sQuote(basename(file))),
                domain = NA)
        ## Note that contrary to all other weave/tangle calls, here
        ## 'file' is not a file in the current directory [hence no
        ## file <- basename(file) above].  However, weave should/must
        ## always create a file ('output') in the current directory.
        output <- tryCatch({
            engine$weave(file, pdf = TRUE, eps = FALSE, quiet = TRUE,
                         keep.source = keep.source, stylepath = FALSE)
            setwd(buildDir)
            find_vignette_product(name, by = "weave", engine = engine)
        }, error = function(e) {
            stop(gettextf("running %s on vignette '%s' failed with message:\n%s",
                          engine[["name"]], file, conditionMessage(e)),
                 domain = NA, call. = FALSE)
        })
        ## In case of an error, do not clean up: should we point to
        ## buildDir for possible inspection of results/problems?
        ## We need to ensure that vignetteDir is in TEXINPUTS and BIBINPUTS.
        if (vignette_is_tex(output)) {
            ## <FIXME>
            ## What if this fails?
            ## Now gives a more informative error texi2pdf fails
            ## or if it does not produce a <name>.pdf.
            tryCatch({
                texi2pdf(file = output, quiet = TRUE, texinputs = vigns$dir)
                output <- find_vignette_product(name, by = "texi2pdf", engine = engine)
            }, error = function(e) {
                stop(gettextf("compiling TeX file %s failed with message:\n%s",
                              sQuote(output), conditionMessage(e)),
                     domain = NA, call. = FALSE)
            })
            ## </FIXME>
        }
        if(!file.copy(output, outVignetteDir, overwrite = TRUE))
            stop(gettextf("cannot copy '%s' to '%s'",
                          output,
                          outVignetteDir),
                 domain = NA)
    }
    ## Need to change out of this dir before we delete it,
    ## at least on Windows.
    setwd(cwd)
    unlink(buildDir, recursive = TRUE)
    ## Now you need to update the HTML index!
    ## This also creates the .R files
    .install_package_vignettes2(dir, outDir)
    invisible()
}
### * .install_package_namespace_info
.install_package_namespace_info <-
function(dir, outDir)
{
    ## Parse a package's NAMESPACE file and cache the result as
    ## <outDir>/Meta/nsInfo.rds, skipping the work when the cached copy
    ## is already newer than the NAMESPACE file.
    dir <- file_path_as_absolute(dir)
    nsFile <- file.path(dir, "NAMESPACE")
    if(!file_test("-f", nsFile)) return(invisible())
    nsInfoFilePath <- file.path(outDir, "Meta", "nsInfo.rds")
    ## "-nt": only rebuild when NAMESPACE is newer than the cache.
    if(file_test("-nt", nsInfoFilePath, nsFile)) return(invisible())
    nsInfo <- parseNamespaceFile(basename(dir), dirname(dir))
    outMetaDir <- file.path(outDir, "Meta")
    if(!dir.exists(outMetaDir) && !dir.create(outMetaDir))
        stop(gettextf("cannot open directory '%s'", outMetaDir),
             domain = NA)
    saveRDS(nsInfo, nsInfoFilePath)
    invisible()
}
### * .vinstall_package_namespaces_as_RDS
## called from src/library/Makefile
.vinstall_package_namespaces_as_RDS <-
function(dir, packages)
{
    ## For each of the given packages (whitespace-separated names,
    ## installed under 'dir') that has a NAMESPACE file, cache the
    ## parsed namespace information as R metadata.
    ## Really only useful for base packages under Unix.
    ## See @file{src/library/Makefile.in}.
    pkgs <- unlist(strsplit(packages, "[[:space:]]+"))
    for (pkg in pkgs) {
        path <- file.path(dir, pkg)
        .install_package_namespace_info(path, path)
    }
    invisible()
}
### * .install_package_Rd_objects
## called from src/library/Makefile
.install_package_Rd_objects <-
function(dir, outDir, encoding = "unknown")
{
    ## Build the package's Rd database (parsed help pages) and install
    ## it as a lazy-load DB under <outDir>/help, together with a
    ## paths.rds index of the source Rd files.  Also installs any
    ## user-defined Rd macros found in man/macros.
    dir <- file_path_as_absolute(dir)
    mandir <- file.path(dir, "man")
    manfiles <- if(!dir.exists(mandir)) character()
    else list_files_with_type(mandir, "docs")
    manOutDir <- file.path(outDir, "help")
    dir.create(manOutDir, FALSE)
    db_file <- file.path(manOutDir,
                         paste0(basename(outDir), ".rdx"))
    ## Partially built Rd pages may have been shipped in the tarball.
    built_file <- file.path(dir, "build", "partial.rdb")
    macro_files <- list.files(file.path(dir, "man", "macros"), pattern = "\\.Rd$", full.names = TRUE)
    if (length(macro_files)) {
        macroDir <- file.path(manOutDir, "macros")
        dir.create(macroDir, FALSE)
        file.copy(macro_files, macroDir, overwrite = TRUE)
    }
    ## Avoid (costly) rebuilding if not needed.
    ## Actually, it seems no more costly than these tests, which it also does
    pathsFile <- file.path(manOutDir, "paths.rds")
    ## Rebuild when the DB or its path index is missing, the set of Rd
    ## files changed, or any Rd file is newer than the DB.
    if(!file_test("-f", db_file) || !file.exists(pathsFile) ||
       !identical(sort(manfiles), sort(readRDS(pathsFile))) ||
       !all(file_test("-nt", db_file, manfiles))) {
        db <- .build_Rd_db(dir, manfiles, db_file = db_file,
                           encoding = encoding, built_file = built_file)
        nm <- as.character(names(db)) # Might be NULL
        ## 'first' marks where the path relative to man/ starts.
        saveRDS(structure(nm,
                          first = nchar(file.path(mandir)) + 2L),
                pathsFile)
        names(db) <- sub("\\.[Rr]d$", "", basename(nm))
        makeLazyLoadDB(db, file.path(manOutDir, basename(outDir)))
    }
    invisible()
}
### * .install_package_demos
## called from basepkg.mk and .install_packages
.install_package_demos <-
function(dir, outDir)
{
    ## Copy a package's demo scripts from <dir>/demo to <outDir>/demo.
    ## NB: we no longer install 00Index.
    srcDir <- file.path(dir, "demo")
    if (!dir.exists(srcDir)) return()
    files <- list_files_with_type(srcDir, "demo", full.names = FALSE)
    if (!length(files)) return()
    destDir <- file.path(outDir, "demo")
    if (!dir.exists(destDir)) dir.create(destDir)
    file.copy(file.path(srcDir, files), destDir, overwrite = TRUE)
}
### * .find_cinclude_paths
.find_cinclude_paths <-
function(pkgs, lib.loc = NULL, file = NULL)
{
    ## Given a comma-separated string of package names (or a DESCRIPTION
    ## file with a LinkingTo field), locate the installed packages and
    ## print the corresponding -I"/path/to/package/include" compiler
    ## flags on stdout.  Returns invisible(NULL).
    if (!is.null(file)) {
        linking_to <- read.dcf(file, "LinkingTo")[1L, 1L]
        if (is.na(linking_to))
            return(invisible())
        pkgs <- linking_to
    }
    names_vec <- strsplit(pkgs[1L], ",[[:blank:]]*")[[1L]]
    found <- find.package(names_vec, lib.loc, quiet = TRUE)
    if (length(found)) {
        flags <- paste0('-I"', found, '/include"')
        cat(paste(flags, collapse = " "))
    }
    invisible()
}
### * .Rtest_package_depends_R_version
.Rtest_package_depends_R_version <-
function(dir)
{
    ## Check whether this running version of R satisfies the R version
    ## requirement(s) in the 'Depends' field of the DESCRIPTION file in
    ## 'dir' (default: current directory).  Returns 0/FALSE when
    ## satisfied and a non-zero/TRUE value otherwise, after printing a
    ## diagnostic message.
    if(missing(dir)) dir <- "."
    meta <- .read_description(file.path(dir, "DESCRIPTION"))
    deps <- .split_description(meta, verbose = TRUE)$Rdepends2
    status <- 0
    current <- getRversion()
    for(depends in deps) {
        ## .split_description will have ensured that this is NULL or
        ## of length 3.
        if(length(depends) > 1L) {
            ## .check_package_description will insist on these operators
            if(!depends$op %in% c("<=", ">=", "<", ">", "==", "!="))
                message("WARNING: malformed 'Depends' field in 'DESCRIPTION'")
            else {
                ## A plain version requirement is compared against
                ## getRversion(); an "r<NNNNN>" requirement is compared
                ## against the svn revision of non-released builds.
                status <- if(inherits(depends$version, "numeric_version"))
                    !do.call(depends$op, list(current, depends$version))
                else {
                    ver <- R.version
                    if (ver$status %in% c("", "Patched")) FALSE
                    else !do.call(depends$op,
                                  list(ver[["svn rev"]],
                                       as.numeric(sub("^r", "", depends$version))))
                }
            }
            if(status != 0) {
                ## Prefer the name supplied by the install machinery,
                ## falling back to the DESCRIPTION metadata.
                package <- Sys.getenv("R_PACKAGE_NAME")
                if(!nzchar(package))
                    package <- meta["Package"]
                msg <- if(nzchar(package))
                    gettextf("ERROR: this R is version %s, package '%s' requires R %s %s",
                             current, package,
                             depends$op, depends$version)
                else
                    gettextf("ERROR: this R is version %s, required is R %s %s",
                             current, depends$op, depends$version)
                message(strwrap(msg, exdent = 2L))
                break
            }
        }
    }
    status
}
## no longer used
.test_package_depends_R_version <-
function(dir)
    ## Legacy entry point: quit R, using the dependency-check result as
    ## the process exit status.
    q(status = .Rtest_package_depends_R_version(dir))
### * .test_load_package
.test_load_package <- function(pkg_name, lib)
{
    ## Try to attach an installed package from library tree 'lib'; on
    ## any failure signal a plain "loading failed" error.  warn = 1 so
    ## that warnings are shown immediately, next to their cause.
    options(warn = 1)
    loaded <- try(
        suppressPackageStartupMessages(
            library(pkg_name, lib.loc = lib, character.only = TRUE,
                    logical.return = TRUE)
        )
    )
    ok <- !inherits(loaded, "try-error") && isTRUE(loaded)
    if (!ok)
        stop("loading failed", call. = FALSE)
}
### * checkRdaFiles
checkRdaFiles <- function(paths)
{
    ## Report, for each .rda/.RData file in 'paths' (or in a single
    ## directory given as 'paths'), its size, whether it uses the ASCII
    ## save format, its compression type and its save-format version.
    ##
    ## paths: character vector of file paths, or a single directory.
    ## Returns a data frame with one row per file (row names = paths)
    ## and columns size, ASCII, compress, version; NA where a file is
    ## missing or its format is unrecognized.
    if(length(paths) == 1L && dir.exists(paths)) {
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
        ## Exclude .RData, which this may or may not match
        paths <- grep("/[.]RData$", paths, value = TRUE, invert = TRUE)
    }
    res <- data.frame(size = NA_real_, ASCII = NA,
                      compress = NA_character_, version = NA_integer_,
                      stringsAsFactors = FALSE)
    res <- res[rep_len(1L, length(paths)), ]
    row.names(res) <- paths
    keep <- file.exists(paths)
    res$size[keep] <- file.size(paths)[keep]
    for(p in paths[keep]) {
        ## The first bytes of the file itself identify the compression.
        magic <- readBin(p, "raw", n = 5)
        res[p, "compress"] <- if(all(magic[1:2] == c(0x1f, 0x8b))) "gzip"
        else if(rawToChar(magic[1:3]) == "BZh") "bzip2"
        else if(magic[1L] == 0xFD && rawToChar(magic[2:5]) == "7zXZ") "xz"
        ## Generalized to also recognize version-3 (R >= 3.5.0) files.
        else if(grepl("RD[ABX][123]", rawToChar(magic), useBytes = TRUE)) "none"
        else "unknown"
        ## The first bytes of the decompressed stream give the format
        ## and version, e.g. "RDX2" (binary, v2) or "RDA3" (ASCII, v3).
        con <- gzfile(p)
        magic <- readChar(con, 5L, useBytes = TRUE)
        close(con)
        res[p, "ASCII"] <- if (grepl("RD[ABX][123]", magic, useBytes = TRUE))
            substr(magic, 3, 3) == "A" else NA
        ver <- sub("(RD[ABX])([123]*)", "\\2", magic, useBytes = TRUE)
        ## Fix: assign per file, not to the whole column -- the original
        ## 'res$version <- as.integer(ver)' clobbered every row with the
        ## last file's version on each iteration.
        res[p, "version"] <- as.integer(ver)
    }
    res
}
### * resaveRdaFiles
resaveRdaFiles <- function(paths,
                           compress = c("auto", "gzip", "bzip2", "xz"),
                           compression_level)
{
    ## Re-save each .rda/.RData file in 'paths' (or in a directory
    ## given as 'paths') with the requested compression.  With "auto"
    ## the smallest of gzip/bzip2 (plus xz for larger files) is kept,
    ## with a slight bias towards gzip.
    if (length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
    compress <- match.arg(compress)
    if (missing(compression_level))
        compression_level <- if (compress == "gzip") 6 else 9
    for (path in paths) {
        store <- new.env(hash = TRUE) # probably small, need not be
        suppressPackageStartupMessages(load(path, envir = store))
        objs <- ls(store, all.names = TRUE)
        if (compress != "auto") {
            save(file = path, list = objs, envir = store,
                 compress = compress, compression_level = compression_level)
            next
        }
        ## "auto": write candidate files and keep the smallest one.
        cand_gz <- tempfile()
        save(file = cand_gz, list = objs, envir = store)
        cand_bz <- tempfile()
        save(file = cand_bz, list = objs, envir = store, compress = "bzip2")
        ## Weight gzip's size by 0.9 so it wins near-ties (it is the
        ## cheapest to decompress).
        sizes <- file.size(c(cand_gz, cand_bz)) * c(0.9, 1.0)
        names(sizes) <- c(cand_gz, cand_bz)
        if (sizes[1L] > 10240) {
            ## Only consider xz for files over ~10Kb.
            cand_xz <- tempfile()
            save(file = cand_xz, list = objs, envir = store,
                 compress = "xz")
            sizes <- c(sizes, file.size(cand_xz))
            names(sizes) <- c(cand_gz, cand_bz, cand_xz)
        }
        best <- names(sizes)[which.min(sizes)]
        file.copy(best, path, overwrite = TRUE)
        unlink(names(sizes))
    }
}
### * compactPDF
compactPDF <-
function(paths, qpdf = Sys.which(Sys.getenv("R_QPDF", "qpdf")),
         gs_cmd = Sys.getenv("R_GSCMD", ""),
         gs_quality = Sys.getenv("GS_QUALITY", "none"),
         gs_extras = character())
{
    ## Losslessly (qpdf) and/or lossily (GhostScript, when gs_quality
    ## != "none") re-compress the given PDF files in place, keeping a
    ## new version only when it is at least 10% and 10000 bytes
    ## smaller.  Returns a "compactPDF" data frame of old/new sizes for
    ## the files actually replaced.
    use_qpdf <- nzchar(qpdf)
    gs_quality <- match.arg(gs_quality, c("none", "printer", "ebook", "screen"))
    use_gs <- if(gs_quality != "none") nzchar(gs_cmd <- find_gs_cmd(gs_cmd)) else FALSE
    ## Nothing we can do without at least one of the external tools.
    if (!use_gs && !use_qpdf) return()
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(file.path(paths, "*.pdf"))
    dummy <- rep.int(NA_real_, length(paths))
    ans <- data.frame(old = dummy, new = dummy, row.names = paths)
    tf <- tempfile("pdf"); tf2 <- tempfile("pdf")
    for (p in paths) {
        res <- 0
        if (use_gs) {
            res <- system2(gs_cmd,
                           c("-q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite",
                             sprintf("-dPDFSETTINGS=/%s", gs_quality),
                             "-dCompatibilityLevel=1.5",
                             "-dAutoRotatePages=/None",
                             sprintf("-sOutputFile=%s", tf),
                             gs_extras, p), FALSE, FALSE)
            ## Follow GhostScript with qpdf to re-optimize the streams.
            if(!res && use_qpdf) {
                unlink(tf2) # precaution
                file.rename(tf, tf2)
                res <- system2(qpdf, c("--stream-data=compress",
                                       "--object-streams=generate",
                                       tf2, tf), FALSE, FALSE)
                unlink(tf2)
            }
        } else if(use_qpdf) {
            res <- system2(qpdf, c("--stream-data=compress",
                                   "--object-streams=generate",
                                   p, tf), FALSE, FALSE)
        }
        ## res is the tool's exit status: 0 means success.
        if(!res && file.exists(tf)) {
            old <- file.size(p); new <- file.size(tf)
            ## Keep the result only for a worthwhile reduction: at
            ## least 10% and more than 10000 bytes smaller.
            if(new/old < 0.9 && new < old - 1e4) {
                file.copy(tf, p, overwrite = TRUE)
                ans[p, ] <- c(old, new)
            }
        }
        unlink(tf)
    }
    structure(na.omit(ans), class = c("compactPDF", "data.frame"))
}
find_gs_cmd <- function(gs_cmd = "")
{
    ## Locate a GhostScript executable: use 'gs_cmd' if supplied,
    ## otherwise consult the R_GSCMD (and, on Windows, GSC) environment
    ## variables, falling back on Windows to the standard console
    ## binary names.  Returns "" when nothing is found.
    if (nzchar(gs_cmd))
        return(Sys.which(gs_cmd))
    if (.Platform$OS.type != "windows")
        return(Sys.which(Sys.getenv("R_GSCMD", "gs")))
    name <- Sys.getenv("R_GSCMD")
    if (!nzchar(name))
        name <- Sys.getenv("GSC")
    found <- Sys.which(name)
    for (alt in c("gswin64c", "gswin32c")) {
        if (nzchar(found))
            break
        found <- Sys.which(alt)
    }
    found
}
format.compactPDF <- function(x, ratio = 0.9, diff = 1e4, ...)
{
    ## Render one message line per PDF whose compaction was worthwhile,
    ## i.e. whose new size is below ratio * old and smaller by more
    ## than 'diff' bytes.  Sizes are shown in Kb, or Mb when the new
    ## file is at least 1Mb.
    if (!nrow(x))
        return(character())
    worthwhile <- x$new / x$old < ratio & x$new < x$old - diff
    kept <- x[worthwhile, ]
    if (!nrow(kept))
        return(character())
    shown <- kept
    shown[] <- lapply(kept, function(v) sprintf("%.0fKb", v / 1024))
    in_mb <- kept$new >= 1024^2
    shown[in_mb, ] <- lapply(kept[in_mb, ],
                             function(v) sprintf("%.1fMb", v / 1024^2))
    paste(' compacted', sQuote(basename(row.names(kept))),
          'from', shown[, 1L], 'to', shown[, 2L])
}
### * add_datalist
add_datalist <- function(pkgpath, force = FALSE)
{
    ## Create a data/datalist index for a package source tree whose
    ## data directory exceeds 1Mb, listing the objects each data file
    ## provides so they can be enumerated without loading everything.
    ## An existing datalist file is kept unless 'force' is TRUE.
    dlist <- file.path(pkgpath, "data", "datalist")
    if (!force && file.exists(dlist)) return()
    size <- sum(file.size(Sys.glob(file.path(pkgpath, "data", "*"))))
    ## Not worth the overhead for small data directories.
    if(size <= 1024^2) return()
    z <- suppressPackageStartupMessages(list_data_in_pkg(dataDir = file.path(pkgpath, "data"))) # for BARD
    if(!length(z)) return()
    con <- file(dlist, "w")
    ## One line per data file: just the name when it provides a single
    ## object of the same name, otherwise "name: obj1 obj2 ...".
    for (nm in names(z)) {
        zz <- z[[nm]]
        if (length(zz) == 1L && zz == nm) writeLines(nm, con)
        else cat(nm, ": ", paste(zz, collapse = " "), "\n",
                 sep = "", file = con)
    }
    close(con)
    invisible()
}
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
# ---- dataset-join artifact (not R code): size 42,256 | license gpl-2.0 ----
# ---- dataset-join artifact: 5eba3eeb33f67f45455162ef3cec2b23a3ab892e | patperry/r-source | src/library/tools/R/admin.R ----
# File src/library/tools/R/admin.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### * .install_package_description
## called from basepkg.mk and .install_packages
.install_package_description <-
function(dir, outDir, builtStamp=character())
{
    ## Function for taking the DESCRIPTION package meta-information,
    ## checking/validating it, and installing it with the 'Built:'
    ## field added.  Note that from 1.7.0 on, packages without
    ## compiled code are not marked as being from any platform.
    ##
    ## dir:        package source directory containing DESCRIPTION.
    ## outDir:     installation directory; receives the amended
    ##             DESCRIPTION and the cached Meta/package.rds.
    ## builtStamp: optional fixed timestamp for reproducible builds.
    ## Check first.  Note that this also calls .read_description(), but
    ## .check_package_description() currently really needs to know the
    ## path to the DESCRIPTION file, and returns an object with check
    ## results and not the package metadata ...
    ok <- .check_package_description(file.path(dir, "DESCRIPTION"))
    if(any(as.integer(sapply(ok, length)) > 0L)) {
        stop(paste(gettext("Invalid DESCRIPTION file") ,
                   paste(.eval_with_capture(print(ok))$output,
                         collapse = "\n"),
                   sep = "\n\n"),
             domain = NA,
             call. = FALSE)
    }
    ## This reads (in C locale) byte-by-byte, declares latin1 or UTF-8
    ## Maybe it would be better to re-encode others (there are none at
    ## present, at least in a UTF-8 locale?
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    ## should not have a Built: field, so ignore it if it is there
    nm <- names(db)
    if("Built" %in% nm) {
        db <- db[-match("Built", nm)]
        warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                         db["Package"]),
                domain = NA,
                call. = FALSE)
    }
    ## Record the platform; for multi-arch OS X builds generalize the
    ## CPU part of the triplet.
    OStype <- R.version$platform
    if (grepl("-apple-darwin", OStype) && nzchar(Sys.getenv("R_ARCH")))
        OStype <- sub(".*-apple-darwin", "universal-apple-darwin", OStype)
    ## Built: "R <version>; <platform or empty>; <timestamp>; <OS type>"
    ## (the platform is only recorded for packages with compiled code).
    Built <-
        paste0("R ",
               paste(R.version[c("major", "minor")], collapse = "."),
               "; ",
               if(dir.exists(file.path(dir, "src"))) OStype else "",
               "; ",
               ## Some build systems want to supply a package-build timestamp for reproducibility
               ## Prefer date in ISO 8601 format, UTC.
               if (length(builtStamp)==0) format(Sys.time(), tz = "UTC", usetz = TRUE) else builtStamp,
               ## Sys.time(),
               "; ",
               .OStype())
    ## At some point of time, we had:
    ##   We must not split the Built: field across lines.
    ## Not sure if this is still true.  If not, the following could be
    ## simplified to
    ##   db["Built"] <- Built
    ##   write.dcf(rbind(db), file.path(outDir, "DESCRIPTION"))
    ## But in any case, it is true for fields obtained from expanding R
    ## fields (Authors@R): these should not be reformatted.
    db <- c(db,
            .expand_package_description_db_R_fields(db),
            Built = Built)
    ## This cannot be done in a MBCS: write.dcf fails
    ctype <- Sys.getlocale("LC_CTYPE")
    Sys.setlocale("LC_CTYPE", "C")
    on.exit(Sys.setlocale("LC_CTYPE", ctype))
    .write_description(db, file.path(outDir, "DESCRIPTION"))
    outMetaDir <- file.path(outDir, "Meta")
    if(!dir.exists(outMetaDir) && !dir.create(outMetaDir))
        stop(gettextf("cannot open directory '%s'",
                      outMetaDir),
             domain = NA)
    ## Also cache the parsed metadata for fast access at load time.
    saveInfo <- .split_description(db)
    saveRDS(saveInfo, file.path(outMetaDir, "package.rds"))
    invisible()
}
### * .split_description
## also used in .getRequiredPackages
.split_description <-
function(db, verbose = FALSE)
{
    ## Split the named character vector of DESCRIPTION metadata 'db'
    ## (as returned by .read_description()) into a structured
    ## "packageDescription2" list: parsed Built info plus the
    ## Depends/Suggests/Imports/LinkingTo dependency lists, with the
    ## requirements on R itself separated out (Rdepends/Rdepends2).
    if(!is.na(Built <- db["Built"])) {
        Built <- as.list(strsplit(Built, "; ")[[1L]])
        ## The Built field must have exactly the four components
        ## written by .install_package_description().
        if(length(Built) != 4L) {
            warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                             db["Package"]),
                    domain = NA,
                    call. = FALSE)
            Built <- NULL
        } else {
            names(Built) <- c("R", "Platform", "Date", "OStype")
            Built[["R"]] <- R_system_version(sub("^R ([0-9.]+)", "\\1",
                                                 Built[["R"]]))
        }
    } else Built <- NULL
    ## might perhaps have multiple entries
    Depends <- .split_dependencies(db[names(db) %in% "Depends"])
    ## several packages 'Depends' on base!
    ind <- match("base", names(Depends), 0L)
    if(ind) Depends <- Depends[-ind]
    ## We only need Rdepends for R < 2.7.0, but we still need to be
    ## able to check that someone is not trying to load this into a
    ## very old version of R.
    if("R" %in% names(Depends)) {
        Rdeps2 <- Depends["R" == names(Depends)]
        names(Rdeps2) <- NULL
        Rdeps <- Depends[["R", exact = TRUE]] # the first one
        Depends <- Depends[names(Depends) != "R"]
        ## several packages have 'Depends: R', which is a noop.
        if(verbose && length(Rdeps) == 1L)
            message("WARNING: omitting pointless dependence on 'R' without a version requirement")
        if(length(Rdeps) <= 1L) Rdeps <- NULL
    } else Rdeps2 <- Rdeps <- NULL
    Rdeps <- as.vector(Rdeps)
    Suggests <- .split_dependencies(db[names(db) %in% "Suggests"])
    Imports <- .split_dependencies(db[names(db) %in% "Imports"])
    LinkingTo <- .split_dependencies(db[names(db) %in% "LinkingTo"])
    structure(list(DESCRIPTION = db, Built = Built,
                   Rdepends = Rdeps, Rdepends2 = Rdeps2,
                   Depends = Depends, Suggests = Suggests,
                   Imports = Imports, LinkingTo = LinkingTo),
              class = "packageDescription2")
}
### * .vinstall_package_descriptions_as_RDS
## called from src/library/Makefile
.vinstall_package_descriptions_as_RDS <-
function(dir, packages)
{
    ## For the given packages installed in @file{dir}, install their
    ## DESCRIPTION package metadata as R metadata (Meta/package.rds).
    ## Really only useful for base packages under Unix.
    ## See @file{src/library/Makefile.in}.
    ## 'packages' is a whitespace-separated string of package names.
    for(p in unlist(strsplit(packages, "[[:space:]]+"))) {
        meta_dir <- file.path(dir, p, "Meta")
        if(!dir.exists(meta_dir) && !dir.create(meta_dir))
            stop(gettextf("cannot open directory '%s'", meta_dir))
        package_info_dcf_file <- file.path(dir, p, "DESCRIPTION")
        package_info_rds_file <- file.path(meta_dir, "package.rds")
        ## Skip packages whose cached metadata is already newer than
        ## their DESCRIPTION file.
        if(file_test("-nt",
                     package_info_rds_file,
                     package_info_dcf_file))
            next
        saveRDS(.split_description(.read_description(package_info_dcf_file)),
                package_info_rds_file)
    }
    invisible()
}
### * .update_package_rds
## not used
.update_package_rds <-
function(lib.loc = NULL)
{
    ## Rebuild the dumped package metadata (Meta/package.rds) for every
    ## installed package found under the given library trees
    ## (default: all of .libPaths()), printing each package path as it
    ## is processed.
    if (is.null(lib.loc))
        lib.loc <- .libPaths()
    for (lib in lib.loc[file.exists(lib.loc)]) {
        pkg_dirs <- list.files(lib, all.files = FALSE, full.names = TRUE)
        has_desc <- file.exists(file.path(pkg_dirs, "DESCRIPTION"))
        for (pkg in pkg_dirs[has_desc]) {
            print(pkg)
            .install_package_description(pkg, pkg)
        }
    }
}
### * .install_package_code_files
.install_package_code_files <-
function(dir, outDir)
{
    ## Concatenate a package's R source files (in Collate order when
    ## given) into a single file <outDir>/R/<pkgname>, re-encoding them
    ## if the DESCRIPTION declares an Encoding, prepending a
    ## .packageName definition, and syntax-checking the result.
    if(!dir.exists(dir))
        stop(gettextf("directory '%s' does not exist", dir),
             domain = NA)
    dir <- file_path_as_absolute(dir)
    ## Attempt to set the LC_COLLATE locale to 'C' to turn off locale
    ## specific sorting.
    curLocale <- Sys.getlocale("LC_COLLATE")
    on.exit(Sys.setlocale("LC_COLLATE", curLocale), add = TRUE)
    ## (Guaranteed to work as per the Sys.setlocale() docs.)
    lccollate <- "C"
    if(Sys.setlocale("LC_COLLATE", lccollate) != lccollate) {
        ## <NOTE>
        ## I don't think we can give an error here.
        ## It may be the case that Sys.setlocale() fails because the "OS
        ## reports request cannot be honored" (src/main/platform.c), in
        ## which case we should still proceed ...
        warning("cannot turn off locale-specific sorting via LC_COLLATE")
        ## </NOTE>
    }
    ## We definitely need a valid DESCRIPTION file.
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    codeDir <- file.path(dir, "R")
    if(!dir.exists(codeDir)) return(invisible())
    codeFiles <- list_files_with_type(codeDir, "code", full.names = FALSE)
    ## An OS-specific collate field (e.g. Collate.unix) takes
    ## precedence over the plain one.
    collationField <-
        c(paste("Collate", .OStype(), sep = "."), "Collate")
    if(any(i <- collationField %in% names(db))) {
        collationField <- collationField[i][1L]
        codeFilesInCspec <- .read_collate_field(db[collationField])
        ## The collation spec must be an exact permutation of the code
        ## files present: no duplicates, nothing missing on either side.
        badFiles <-
            unique(codeFilesInCspec[duplicated(codeFilesInCspec)])
        if(length(badFiles)) {
            out <- gettextf("\nduplicated files in '%s' field:",
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files are listed in the collation spec but don't
        ## exist.
        badFiles <- setdiff(codeFilesInCspec, codeFiles)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' field missing from '%s':",
                            collationField,
                            codeDir)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files exist but are missing from the collation
        ## spec.  Note that we do not want the collation spec to use
        ## only a subset of the available code files.
        badFiles <- setdiff(codeFiles, codeFilesInCspec)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' missing from '%s' field:",
                            codeDir,
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## Everything's groovy ...
        codeFiles <- codeFilesInCspec
    }
    codeFiles <- file.path(codeDir, codeFiles)
    if(!dir.exists(outDir) && !dir.create(outDir))
        stop(gettextf("cannot open directory '%s'", outDir),
             domain = NA)
    outCodeDir <- file.path(outDir, "R")
    if(!dir.exists(outCodeDir) && !dir.create(outCodeDir))
        stop(gettextf("cannot open directory '%s'", outCodeDir),
             domain = NA)
    outFile <- file.path(outCodeDir, db["Package"])
    if(!file.create(outFile))
        stop(gettextf("unable to create '%s'", outFile), domain = NA)
    writeLines(paste0(".packageName <- \"", db["Package"], "\""),
               outFile)
    enc <- as.vector(db["Encoding"])
    need_enc <- !is.na(enc) # Encoding was specified
    ## assume that if locale is 'C' we can use 8-bit encodings unchanged.
    if(need_enc && !(Sys.getlocale("LC_CTYPE") %in% c("C", "POSIX"))) {
        ## Re-encode each file into the current locale, falling back to
        ## <xx> byte escapes for lines that cannot be converted.
        con <- file(outFile, "a")
        ## Fix: the previous on.exit(close(con)) here (without
        ## add = TRUE) silently discarded the LC_COLLATE restoration
        ## registered above, and the later bare on.exit() cancelled all
        ## handlers.  tryCatch(finally=) closes the connection on both
        ## the success and error paths without touching on.exit().
        tryCatch({
            for(f in codeFiles) {
                tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "")
                if(length(bad <- which(is.na(tmp)))) {
                    warning(sprintf(ngettext(length(bad),
                                             "unable to re-encode %s line %s",
                                             "unable to re-encode %s lines %s"),
                                    sQuote(basename(f)),
                                    paste(bad, collapse = ", ")),
                            domain = NA, call. = FALSE)
                    tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "",
                                 sub = "byte")
                }
                ## #line directives keep parse()/debugging locations
                ## pointing at the original source files.
                writeLines(paste0("#line 1 \"", f, "\""), con)
                writeLines(tmp, con)
            }
        }, finally = close(con))
    } else {
        ## <NOTE>
        ## It may be safer to do
        ##   writeLines(sapply(codeFiles, readLines), outFile)
        ## instead, but this would be much slower ...
        ## use fast version of file.append that ensures LF between files
        if(!all(.file_append_ensuring_LFs(outFile, codeFiles)))
            stop("unable to write code files")
        ## </NOTE>
    }
    ## A syntax check here, so that we do not install a broken package.
    ## FIXME: this is only needed if we don't lazy load, as the lazy loader
    ## would detect the error.
    op <- options(showErrorCalls = FALSE)
    ## Fix: add = TRUE so restoring the options does not wipe out the
    ## LC_COLLATE restoration registered at the top of the function.
    on.exit(options(op), add = TRUE)
    parse(outFile)
    invisible()
}
### * .install_package_indices
## called from R CMD INSTALL
.install_package_indices <-
function(dir, outDir)
{
    ## Install the package indices: copy a user-supplied INDEX file
    ## verbatim if present, make sure <outDir>/Meta exists, then
    ## build/install the Rd and demo indices.
    options(warn = 1) # to ensure warnings get seen
    if(!dir.exists(dir))
        stop(gettextf("directory '%s' does not exist", dir),
             domain = NA)
    if(!dir.exists(outDir))
        stop(gettextf("directory '%s' does not exist", outDir),
             domain = NA)
    ## If there is an @file{INDEX} file in the package sources, we
    ## install this, and do not build it.
    if(file_test("-f", file.path(dir, "INDEX")))
        if(!file.copy(file.path(dir, "INDEX"),
                      file.path(outDir, "INDEX"),
                      overwrite = TRUE))
            stop(gettextf("unable to copy INDEX to '%s'",
                          file.path(outDir, "INDEX")),
                 domain = NA)
    outMetaDir <- file.path(outDir, "Meta")
    if(!dir.exists(outMetaDir) && !dir.create(outMetaDir))
        stop(gettextf("cannot open directory '%s'", outMetaDir),
             domain = NA)
    .install_package_Rd_indices(dir, outDir)
    .install_package_demo_index(dir, outDir)
    invisible()
}
### * .install_package_Rd_indices
.install_package_Rd_indices <-
function(dir, outDir)
{
    ## Build and install the help-related metadata of a package:
    ## Meta/Rd.rds (contents of all Rd files), Meta/hsearch.rds
    ## (help.search index), Meta/links.rds (link targets),
    ## Meta/data.rds (data index) and, when the sources do not ship
    ## one, a top-level INDEX file.  Rebuilding is skipped when all
    ## installed indices are newer than the Rd and data files.
    dir <- file_path_as_absolute(dir)
    docsDir <- file.path(dir, "man")
    outDir <- file_path_as_absolute(outDir)
    ## (Moved after the absolutization of outDir so that dataDir does
    ## not depend on the current working directory.)
    dataDir <- file.path(outDir, "data")
    ## <FIXME>
    ## Not clear whether we should use the basename of the directory we
    ## install to, or the package name as obtained from the DESCRIPTION
    ## file in the directory we install from (different for versioned
    ## installs).  We definitely do not want the basename of the dir we
    ## install from.
    packageName <- basename(outDir)
    ## </FIXME>
    ## The package's default encoding, needed for the help.search index
    ## in both branches below.
    ## (Fix: this used to be computed only in the with-Rd branch,
    ## leaving 'defaultEncoding' undefined in the no-Rd branch.)
    pkgInfoFile <- file.path(outDir, "Meta", "package.rds")
    defaultEncoding <- if(file_test("-f", pkgInfoFile))
        as.vector(readRDS(pkgInfoFile)$DESCRIPTION["Encoding"])
    else NA_character_
    if(is.na(defaultEncoding)) defaultEncoding <- NULL
    allRd <- if(dir.exists(docsDir))
        list_files_with_type(docsDir, "docs") else character()
    ## some people have man dirs without any valid .Rd files
    if(length(allRd)) {
        ## we want the date of the newest .Rd file we will install
        newestRd <- max(file.mtime(allRd))
        ## these files need not exist, which gives NA.
        indices <- c(file.path("Meta", "Rd.rds"),
                     file.path("Meta", "hsearch.rds"),
                     file.path("Meta", "links.rds"),
                     "INDEX")
        upToDate <- file.mtime(file.path(outDir, indices)) >= newestRd
        if(dir.exists(dataDir)
           && length(dataFiles <- list.files(dataDir))) {
            ## Note that the data index is computed from both the package's
            ## Rd files and the data sets actually available.
            ## (Fix: mtimes must be taken on the full paths; the bare
            ## file names always gave NA.)
            newestData <- max(file.mtime(file.path(dataDir, dataFiles)))
            upToDate <- c(upToDate,
                          file.mtime(file.path(outDir, "Meta", "data.rds")) >=
                          max(newestRd, newestData))
        }
        ## Note that this is not quite good enough: an Rd file or data file
        ## might have been removed since the indices were made.
        ## (Fix: look at the installed index under outDir, not at a
        ## path relative to the current working directory.)
        RdsFile <- file.path(outDir, "Meta", "Rd.rds")
        if(file.exists(RdsFile)) { ## for Rd files
            ## this has file names without path
            files <- readRDS(RdsFile)$File
            if(!identical(basename(allRd), files)) upToDate <- FALSE
        }
        ## we want to proceed if any is NA.
        if(all(upToDate %in% TRUE)) return(invisible())
        ## Rd objects should already have been installed.
        db <- tryCatch(Rd_db(basename(outDir), lib.loc = dirname(outDir)),
                       error = function(e) NULL)
        ## If not, we build the Rd db from the sources:
        if(is.null(db)) db <- .build_Rd_db(dir, allRd)
        contents <- Rd_contents(db)
        .write_Rd_contents_as_RDS(contents,
                                  file.path(outDir, "Meta", "Rd.rds"))
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
        ## If there is no @file{INDEX} file in the package sources, we
        ## build one.
        ## <NOTE>
        ## We currently do not also save this in RDS format, as we can
        ## always do
        ##   .build_Rd_index(readRDS(file.path(outDir, "Meta", "Rd.rds"))
        if(!file_test("-f", file.path(dir, "INDEX")))
            writeLines(formatDL(.build_Rd_index(contents)),
                       file.path(outDir, "INDEX"))
        ## </NOTE>
    } else {
        ## No Rd files: still install (empty) hsearch and links indices.
        contents <- NULL
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
    }
    if(dir.exists(dataDir))
        saveRDS(.build_data_index(dataDir, contents),
                file.path(outDir, "Meta", "data.rds"))
    invisible()
}
### * .install_package_vignettes2
## called from R CMD INSTALL for pre 3.0.2-built tarballs, and for base packages
.install_package_vignettes2 <-
function(dir, outDir, encoding = "")
{
    ## Install vignettes for tarballs built before R 3.0.2 and for base
    ## packages: copy vignette sources into <outDir>/doc, tangle out
    ## their R code, and write Meta/vignette.rds plus an HTML index.
    ##
    ## dir: source package directory; outDir: installation directory.
    ## encoding: accepted for interface compatibility; not used below.
    dir <- file_path_as_absolute(dir)
    subdirs <- c("vignettes", file.path("inst", "doc"))
    ok <- dir.exists(file.path(dir, subdirs))
    ## Create a vignette index only if the vignette dir exists.
    if (!any(ok))
        return(invisible())
    ## 'vignettes' takes precedence over the legacy 'inst/doc' location.
    subdir <- subdirs[ok][1L]
    vignetteDir <- file.path(dir, subdir)
    outDir <- file_path_as_absolute(outDir)
    packageName <- basename(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    ## --fake and --no-inst installs do not have a outVignetteDir.
    if(!dir.exists(outVignetteDir)) return(invisible())
    ## If there is an HTML index in the @file{inst/doc} subdirectory of
    ## the package source directory (@code{dir}), we do not overwrite it
    ## (similar to top-level @file{INDEX} files). Installation already
    ## copied this over.
    hasHtmlIndex <- file_test("-f", file.path(vignetteDir, "index.html"))
    htmlIndex <- file.path(outDir, "doc", "index.html")
    vigns <- pkgVignettes(dir = dir, subdirs = subdir, check = TRUE)
    ## Write dummy HTML index if no vignettes are found and exit.
    if(length(vigns$docs) == 0L) {
        ## we don't want to write an index if the directory is in fact empty
        files <- list.files(vignetteDir, all.files = TRUE, no.. = TRUE)
        if((length(files) > 0L) && !hasHtmlIndex)
            .writeVignetteHtmlIndex(packageName, htmlIndex)
        return(invisible())
    }
    if (subdir == "vignettes") {
        ## copy vignette sources over.
        file.copy(vigns$docs, outVignetteDir)
    }
    ## Re-scan the installed copy; if asking for outputs/sources fails,
    ## fall back to a plain scan of the doc directory.
    vigns <- tryCatch({
        pkgVignettes(dir=outDir, subdirs="doc", output=TRUE, source=TRUE)
    }, error = function(ex) {
        pkgVignettes(dir=outDir, subdirs="doc")
    })
    vignetteIndex <- .build_vignette_index(vigns)
    if(NROW(vignetteIndex) > 0L) {
        cwd <- getwd()
        if (is.null(cwd))
            stop("current working directory cannot be ascertained")
        setwd(outVignetteDir)
        loadVignetteBuilder(dir, mustwork = FALSE)
        ## install tangled versions of Sweave vignettes. FIXME: Vignette
        ## *.R files should have been included when the package was built,
        ## but in the interim before they are all built with the new code,
        ## this is needed.
        for(i in seq_along(vigns$docs)) {
            file <- vigns$docs[i]
            ## Skip vignettes whose tangled source was already shipped.
            if (!is.null(vigns$sources) && !is.null(vigns$sources[file][[1]]))
                next
            file <- basename(file)
            enc <- vigns$encodings[i]
            cat(" ", sQuote(basename(file)),
                if(nzchar(enc)) paste("using", sQuote(enc)), "\n")
            engine <- try(vignetteEngine(vigns$engines[i]), silent = TRUE)
            if (!inherits(engine, "try-error"))
                engine$tangle(file, quiet = TRUE, encoding = enc)
            setwd(outVignetteDir) # just in case some strange tangle function changed it
        }
        setwd(cwd)
        # Update - now from the output directory
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        ## remove any files with no R code (they will have header comments).
        ## if not correctly declared they might not be in the current encoding
        sources <- unlist(vigns$sources)
        for(i in seq_along(sources)) {
            file <- sources[i]
            if (!file_test("-f", file)) next
            bfr <- readLines(file, warn = FALSE)
            if(all(grepl("(^###|^[[:space:]]*$)", bfr, useBytes = TRUE)))
                unlink(file)
        }
        # Update
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        # Add tangle source files (*.R) to the vignette index
        # Only the "main" R file, because tangle may also split
        # output into multiple files
        sources <- character(length(vigns$docs))
        for (i in seq_along(vigns$docs)) {
            name <- vigns$names[i]
            ## NOTE(review): 'engine' here is whatever value was left
            ## over from the tangle loop above (possibly a try-error, or
            ## unset if that loop only hit 'next') -- confirm that
            ## find_vignette_product() tolerates this.
            source <- find_vignette_product(name, by = "tangle", main = TRUE, dir = vigns$dir, engine = engine)
            if (length(source) > 0L)
                sources[i] <- basename(source)
        }
        vignetteIndex$R <- sources
    }
    if(!hasHtmlIndex)
        .writeVignetteHtmlIndex(packageName, htmlIndex, vignetteIndex)
    saveRDS(vignetteIndex,
            file = file.path(outDir, "Meta", "vignette.rds"))
    invisible()
}
### * .install_package_vignettes3
## called from R CMD INSTALL for 3.0.2 or later tarballs
.install_package_vignettes3 <-
function(dir, outDir, encoding = "")
{
    ## Install the vignette index shipped in build/vignette.rds of a
    ## source tarball built with R >= 3.0.2: copy it to <outDir>/Meta
    ## and write an HTML index unless the package supplies its own.
    ##
    ## dir: source package directory; outDir: installation directory.
    ## encoding: accepted for interface compatibility; unused here.
    pkg_name <- basename(outDir)
    src_dir <- file_path_as_absolute(dir)
    index_file <- file.path(src_dir, "build", "vignette.rds")
    ## Packages without a prebuilt vignette index need no work.
    if(!file_test("-f", index_file))
        return(invisible())
    ## Copy the index into the installed package's Meta directory.
    file.copy(index_file, file.path(outDir, "Meta"))
    ## A doc/index.html shipped by the package (already copied over by
    ## installation) is never overwritten, similar to top-level INDEX.
    html_index <- file.path(outDir, "doc", "index.html")
    if(!file_test("-f", html_index))
        .writeVignetteHtmlIndex(pkg_name, html_index, readRDS(index_file))
    invisible()
}
### * .install_package_demo_index
.install_package_demo_index <-
function(dir, outDir)
{
    ## Build the demo index for a package and store it as Meta/demo.rds
    ## in the installed package.  Packages without demos need no index.
    demo_dir <- file.path(dir, "demo")
    if(!dir.exists(demo_dir))
        return(invisible())
    index <- .build_demo_index(demo_dir)
    saveRDS(index, file = file.path(outDir, "Meta", "demo.rds"))
    invisible()
}
### * .vinstall_package_indices
## called from src/library/Makefile
.vinstall_package_indices <-
function(src_dir, out_dir, packages)
{
    ## For the given packages with sources rooted at 'src_dir' and
    ## installations rooted at 'out_dir', install the package indices
    ## and refresh the HTML package listing.
    ## Really only useful for base packages under Unix; see
    ## src/library/Makefile.in.
    pkg_names <- unlist(strsplit(packages, "[[:space:]]+"))
    for(pkg in pkg_names)
        .install_package_indices(file.path(src_dir, pkg),
                                 file.path(out_dir, pkg))
    utils::make.packages.html(.Library, verbose = FALSE)
    invisible()
}
### * .install_package_vignettes
## called from src/library/Makefile[.win]
## this is only used when building R
.install_package_vignettes <-
function(dir, outDir, keep.source = TRUE)
{
    ## Build (weave + texi2pdf) and install PDF vignettes.  Only used
    ## when building R itself, called from src/library/Makefile[.win].
    ##
    ## dir: source package directory; outDir: installation directory.
    ## keep.source: forwarded to the vignette engine's weave().
    dir <- file_path_as_absolute(dir)
    vigns <- pkgVignettes(dir = dir)
    if(is.null(vigns) || !length(vigns$docs)) return(invisible())
    outDir <- file_path_as_absolute(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    if(!dir.exists(outVignetteDir) && !dir.create(outVignetteDir))
        stop(gettextf("cannot open directory '%s'", outVignetteDir),
             domain = NA)
    ## We have to be careful to avoid repeated rebuilding.
    ## A vignette is considered up to date when its installed PDF is
    ## newer than the vignette source.
    vignettePDFs <-
        file.path(outVignetteDir,
                  sub("$", ".pdf",
                      basename(file_path_sans_ext(vigns$docs))))
    upToDate <- file_test("-nt", vignettePDFs, vigns$docs)
    ## The primary use of this function is to build and install PDF
    ## vignettes in base packages.
    ## Hence, we build in a subdir of the current directory rather
    ## than a temp dir: this allows inspection of problems and
    ## automatic cleanup via Make.
    cwd <- getwd()
    if (is.null(cwd))
        stop("current working directory cannot be ascertained")
    buildDir <- file.path(cwd, ".vignettes")
    if(!dir.exists(buildDir) && !dir.create(buildDir))
        stop(gettextf("cannot create directory '%s'", buildDir), domain = NA)
    on.exit(setwd(cwd))
    setwd(buildDir)
    loadVignetteBuilder(vigns$pkgdir)
    ## Only rebuild the vignettes whose PDFs are stale.
    for(i in seq_along(vigns$docs)[!upToDate]) {
        file <- vigns$docs[i]
        name <- vigns$names[i]
        engine <- vignetteEngine(vigns$engines[i])
        message(gettextf("processing %s", sQuote(basename(file))),
                domain = NA)
        ## Note that contrary to all other weave/tangle calls, here
        ## 'file' is not a file in the current directory [hence no
        ## file <- basename(file) above]. However, weave should/must
        ## always create a file ('output') in the current directory.
        output <- tryCatch({
            engine$weave(file, pdf = TRUE, eps = FALSE, quiet = TRUE,
                         keep.source = keep.source, stylepath = FALSE)
            setwd(buildDir)
            find_vignette_product(name, by = "weave", engine = engine)
        }, error = function(e) {
            stop(gettextf("running %s on vignette '%s' failed with message:\n%s",
                 engine[["name"]], file, conditionMessage(e)),
                 domain = NA, call. = FALSE)
        })
        ## In case of an error, do not clean up: should we point to
        ## buildDir for possible inspection of results/problems?
        ## We need to ensure that vignetteDir is in TEXINPUTS and BIBINPUTS.
        if (vignette_is_tex(output)) {
            ## <FIXME>
            ## What if this fails?
            ## Now gives a more informative error texi2pdf fails
            ## or if it does not produce a <name>.pdf.
            tryCatch({
                texi2pdf(file = output, quiet = TRUE, texinputs = vigns$dir)
                output <- find_vignette_product(name, by = "texi2pdf", engine = engine)
            }, error = function(e) {
                stop(gettextf("compiling TeX file %s failed with message:\n%s",
                              sQuote(output), conditionMessage(e)),
                     domain = NA, call. = FALSE)
            })
            ## </FIXME>
        }
        if(!file.copy(output, outVignetteDir, overwrite = TRUE))
            stop(gettextf("cannot copy '%s' to '%s'",
                          output,
                          outVignetteDir),
                 domain = NA)
    }
    ## Need to change out of this dir before we delete it,
    ## at least on Windows.
    setwd(cwd)
    unlink(buildDir, recursive = TRUE)
    ## Now you need to update the HTML index!
    ## This also creates the .R files
    .install_package_vignettes2(dir, outDir)
    invisible()
}
### * .install_package_namespace_info
.install_package_namespace_info <-
function(dir, outDir)
{
    ## Parse a package's NAMESPACE file and serialize the result as
    ## Meta/nsInfo.rds in the installed package, skipping the work when
    ## the stored copy is already newer than the NAMESPACE file.
    src_dir <- file_path_as_absolute(dir)
    ns_file <- file.path(src_dir, "NAMESPACE")
    ## No NAMESPACE, nothing to install.
    if(!file_test("-f", ns_file))
        return(invisible())
    rds_path <- file.path(outDir, "Meta", "nsInfo.rds")
    ## Up-to-date serialized info: nothing to do.
    if(file_test("-nt", rds_path, ns_file))
        return(invisible())
    ns_info <- parseNamespaceFile(basename(src_dir), dirname(src_dir))
    meta_dir <- file.path(outDir, "Meta")
    if(!dir.exists(meta_dir) && !dir.create(meta_dir))
        stop(gettextf("cannot open directory '%s'", meta_dir),
             domain = NA)
    saveRDS(ns_info, rds_path)
    invisible()
}
### * .vinstall_package_namespaces_as_RDS
## called from src/library/Makefile
.vinstall_package_namespaces_as_RDS <-
function(dir, packages)
{
    ## For each of the given packages installed in 'dir' which has a
    ## NAMESPACE file, serialize the namespace info as R metadata.
    ## Really only useful for base packages under Unix; see
    ## src/library/Makefile.in.
    for(pkg in unlist(strsplit(packages, "[[:space:]]+"))) {
        pkg_dir <- file.path(dir, pkg)
        .install_package_namespace_info(pkg_dir, pkg_dir)
    }
    invisible()
}
### * .install_package_Rd_objects
## called from src/library/Makefile
.install_package_Rd_objects <-
function(dir, outDir, encoding = "unknown")
{
    ## Parse a package's Rd files and install them as a lazy-load
    ## database under <outDir>/help, together with a paths.rds used to
    ## decide whether a later install can skip the rebuild.
    dir <- file_path_as_absolute(dir)
    mandir <- file.path(dir, "man")
    manfiles <- if(!dir.exists(mandir)) character()
    else list_files_with_type(mandir, "docs")
    manOutDir <- file.path(outDir, "help")
    dir.create(manOutDir, FALSE)
    db_file <- file.path(manOutDir,
                         paste0(basename(outDir), ".rdx"))
    ## Partial Rd database shipped in pre-built tarballs, if present.
    built_file <- file.path(dir, "build", "partial.rdb")
    ## User-defined Rd macros are installed verbatim under help/macros.
    macro_files <- list.files(file.path(dir, "man", "macros"), pattern = "\\.Rd$", full.names = TRUE)
    if (length(macro_files)) {
        macroDir <- file.path(manOutDir, "macros")
        dir.create(macroDir, FALSE)
        file.copy(macro_files, macroDir, overwrite = TRUE)
    }
    ## Avoid (costly) rebuilding if not needed.
    ## Actually, it seems no more costly than these tests, which it also does
    pathsFile <- file.path(manOutDir, "paths.rds")
    ## Rebuild when the db is missing, the set of Rd files changed, or
    ## any Rd file is newer than the installed database.
    if(!file_test("-f", db_file) || !file.exists(pathsFile) ||
       !identical(sort(manfiles), sort(readRDS(pathsFile))) ||
       !all(file_test("-nt", db_file, manfiles))) {
        db <- .build_Rd_db(dir, manfiles, db_file = db_file,
                           encoding = encoding, built_file = built_file)
        nm <- as.character(names(db)) # Might be NULL
        ## 'first' records where the man/-relative part of each stored
        ## path begins (prefix length + path separator + 1).
        saveRDS(structure(nm,
                          first = nchar(file.path(mandir)) + 2L),
                pathsFile)
        names(db) <- sub("\\.[Rr]d$", "", basename(nm))
        makeLazyLoadDB(db, file.path(manOutDir, basename(outDir)))
    }
    invisible()
}
### * .install_package_demos
## called from basepkg.mk and .install_packages
.install_package_demos <-
function(dir, outDir)
{
    ## Copy a package's demo scripts into the installation directory.
    ## NB: we no longer install 00Index.
    src_demos <- file.path(dir, "demo")
    if(!dir.exists(src_demos)) return()
    demo_files <- list_files_with_type(src_demos, "demo",
                                       full.names = FALSE)
    if(!length(demo_files)) return()
    dest <- file.path(outDir, "demo")
    if(!dir.exists(dest)) dir.create(dest)
    file.copy(file.path(src_demos, demo_files), dest, overwrite = TRUE)
}
### * .find_cinclude_paths
.find_cinclude_paths <-
function(pkgs, lib.loc = NULL, file = NULL)
{
    ## Given a character string of comma-separated package names (or a
    ## DESCRIPTION file whose LinkingTo field supplies them), locate the
    ## installed packages and cat() -I"/path/to/package/include" flags.
    if(!is.null(file)) {
        linking_to <- read.dcf(file, "LinkingTo")[1L, 1L]
        if(is.na(linking_to)) return(invisible())
        pkgs <- linking_to
    }
    pkg_names <- strsplit(pkgs[1L], ",[[:blank:]]*")[[1L]]
    pkg_paths <- find.package(pkg_names, lib.loc, quiet=TRUE)
    if(length(pkg_paths)) {
        flags <- paste0('-I"', pkg_paths, '/include"')
        cat(paste(flags, collapse=" "))
    }
    return(invisible())
}
### * .Rtest_package_depends_R_version
.Rtest_package_depends_R_version <-
function(dir)
{
    ## Check whether the running R satisfies the R version requirements
    ## in a package's 'Depends' field.  Returns 0 when satisfied (or no
    ## versioned requirement exists), non-zero otherwise, after
    ## message()-ing a human-readable explanation.
    if(missing(dir)) dir <- "."
    meta <- .read_description(file.path(dir, "DESCRIPTION"))
    deps <- .split_description(meta, verbose = TRUE)$Rdepends2
    status <- 0
    current <- getRversion()
    for(depends in deps) {
        ## .split_description will have ensured that this is NULL or
        ## of length 3.
        if(length(depends) > 1L) {
            ## .check_package_description will insist on these operators
            if(!depends$op %in% c("<=", ">=", "<", ">", "==", "!="))
                message("WARNING: malformed 'Depends' field in 'DESCRIPTION'")
            else {
                ## Numeric-version requirements are compared against
                ## getRversion(); otherwise the requirement looks like
                ## r<NNNNN> and is compared against the svn revision,
                ## but only for non-release ("", "Patched") builds.
                status <- if(inherits(depends$version, "numeric_version"))
                    !do.call(depends$op, list(current, depends$version))
                else {
                    ver <- R.version
                    if (ver$status %in% c("", "Patched")) FALSE
                    else !do.call(depends$op,
                                  list(ver[["svn rev"]],
                                       as.numeric(sub("^r", "", depends$version))))
                }
            }
            if(status != 0) {
                ## Prefer the package name the installer exported.
                package <- Sys.getenv("R_PACKAGE_NAME")
                if(!nzchar(package))
                    package <- meta["Package"]
                msg <- if(nzchar(package))
                    gettextf("ERROR: this R is version %s, package '%s' requires R %s %s",
                             current, package,
                             depends$op, depends$version)
                else
                    gettextf("ERROR: this R is version %s, required is R %s %s",
                             current, depends$op, depends$version)
                message(strwrap(msg, exdent = 2L))
                break
            }
        }
    }
    status
}
## no longer used
.test_package_depends_R_version <-
function(dir)
    ## Quit R, using the dependency-check result as the exit status.
    q(status = .Rtest_package_depends_R_version(dir))
### * .test_load_package
.test_load_package <- function(pkg_name, lib)
{
    ## Try to attach an installed package from 'lib'; abort with a
    ## simple error when library() fails or reports failure.
    options(warn = 1)  # surface warnings as they occur
    loaded <- try(suppressPackageStartupMessages(
        library(pkg_name, lib.loc = lib, character.only = TRUE,
                logical.return = TRUE)))
    if (inherits(loaded, "try-error") || !loaded)
        stop("loading failed", call. = FALSE)
}
### * checkRdaFiles
checkRdaFiles <- function(paths)
{
    ## Report size, ASCII flag, compression type and serialization
    ## version for a set of .rda/.RData files.
    ##
    ## paths: a character vector of file paths, or a single directory,
    ##   in which case all *.rda / *.RData files therein (excluding a
    ##   bare '.RData') are examined.
    ## Returns a data frame (row names = paths) with columns size,
    ## ASCII, compress, version; NA entries for missing files or
    ## unrecognized formats.
    if(length(paths) == 1L && dir.exists(paths)) {
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
        ## Exclude .RData, which this may or may not match
        paths <- grep("/[.]RData$", paths, value = TRUE, invert = TRUE)
    }
    res <- data.frame(size = NA_real_, ASCII = NA,
                      compress = NA_character_, version = NA_integer_,
                      stringsAsFactors = FALSE)
    res <- res[rep_len(1L, length(paths)), ]
    row.names(res) <- paths
    keep <- file.exists(paths)
    res$size[keep] <- file.size(paths)[keep]
    for(p in paths[keep]) {
        ## Raw magic bytes identify the outer compression wrapper.
        magic <- readBin(p, "raw", n = 5)
        ## Accept any workspace format digit, not just 1/2: version-3
        ## files (default since R 3.6.0) have an 'RDX3' header.
        res[p, "compress"] <- if(all(magic[1:2] == c(0x1f, 0x8b))) "gzip"
        else if(rawToChar(magic[1:3]) == "BZh") "bzip2"
        else if(magic[1L] == 0xFD && rawToChar(magic[2:5]) == "7zXZ") "xz"
        else if(grepl("RD[ABX][0-9]", rawToChar(magic), useBytes = TRUE)) "none"
        else "unknown"
        ## Decompressed header, e.g. "RDX2\n" or "RDX3\n".
        con <- gzfile(p)
        magic <- readChar(con, 5L, useBytes = TRUE)
        close(con)
        res[p, "ASCII"] <- if (grepl("RD[ABX][0-9]", magic, useBytes = TRUE))
            substr(magic, 3, 3) == "A" else NA
        ver <- sub("(RD[ABX])([0-9]*)", "\\2", magic, useBytes = TRUE)
        ## BUG FIX: this used to be 'res$version <- as.integer(ver)',
        ## which overwrote the whole column on every iteration, so all
        ## rows ended up with the version of the last file processed.
        res[p, "version"] <- as.integer(ver)
    }
    res
}
### * resaveRdaFiles
resaveRdaFiles <- function(paths,
                           compress = c("auto", "gzip", "bzip2", "xz"),
                           compression_level)
{
    ## Re-save .rda/.RData files with the requested compression.  With
    ## compress = "auto" the objects are written with gzip, bzip2 and
    ## (for gzip results above 10Kb) xz, and the smallest file wins,
    ## with a 10% scoring bias in favour of gzip.
    ##
    ## paths: file paths, or a single directory whose *.rda / *.RData
    ##   files are all re-saved.
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
    compress <- match.arg(compress)
    if (missing(compression_level))
        compression_level <- switch(compress, "gzip" = 6, 9)
    for(p in paths) {
        env <- new.env(hash = TRUE) # probably small, need not be
        suppressPackageStartupMessages(load(p, envir = env))
        objs <- ls(env, all.names = TRUE)
        if(compress != "auto") {
            save(file = p, list = objs, envir = env,
                 compress = compress, compression_level = compression_level)
        } else {
            ## Write gzip and bzip2 candidates; score gzip at 90% of
            ## its size so it wins near-ties.
            cand <- c(tempfile(), tempfile())
            save(file = cand[1L], list = objs, envir = env)
            save(file = cand[2L], list = objs, envir = env,
                 compress = "bzip2")
            score <- file.size(cand) * c(0.9, 1.0)
            names(score) <- cand
            ## xz only pays off for larger objects: try it when the
            ## weighted gzip size exceeds 10Kb (unweighted score).
            if(score[1L] > 10240) {
                xz_file <- tempfile()
                save(file = xz_file, list = objs, envir = env,
                     compress = "xz")
                cand <- c(cand, xz_file)
                score <- c(score, file.size(xz_file))
                names(score) <- cand
            }
            best <- names(score)[which.min(score)]
            file.copy(best, p, overwrite = TRUE)
            unlink(cand)
        }
    }
}
### * compactPDF
compactPDF <-
function(paths, qpdf = Sys.which(Sys.getenv("R_QPDF", "qpdf")),
         gs_cmd = Sys.getenv("R_GSCMD", ""),
         gs_quality = Sys.getenv("GS_QUALITY", "none"),
         gs_extras = character())
{
    ## Re-compress PDF files in place using qpdf and/or GhostScript,
    ## keeping a rewrite only when it shrinks the file by more than 10%
    ## AND more than 10000 bytes.
    ##
    ## paths: PDF files, or a single directory of *.pdf files.
    ## qpdf / gs_cmd: paths to the external tools ("" disables).
    ## gs_quality: "none" disables GhostScript; otherwise a
    ##   -dPDFSETTINGS quality preset.
    ## gs_extras: extra command-line arguments for GhostScript.
    ## Returns a "compactPDF" data frame of old/new sizes for the files
    ## actually replaced.
    use_qpdf <- nzchar(qpdf)
    gs_quality <- match.arg(gs_quality, c("none", "printer", "ebook", "screen"))
    use_gs <- if(gs_quality != "none") nzchar(gs_cmd <- find_gs_cmd(gs_cmd)) else FALSE
    ## Neither tool available: nothing we can do.
    if (!use_gs && !use_qpdf) return()
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(file.path(paths, "*.pdf"))
    dummy <- rep.int(NA_real_, length(paths))
    ans <- data.frame(old = dummy, new = dummy, row.names = paths)
    tf <- tempfile("pdf"); tf2 <- tempfile("pdf")
    for (p in paths) {
        res <- 0
        if (use_gs) {
            ## GhostScript rewrite first; optionally post-process the
            ## result with qpdf for stream/object compression.
            res <- system2(gs_cmd,
                           c("-q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite",
                             sprintf("-dPDFSETTINGS=/%s", gs_quality),
                             "-dCompatibilityLevel=1.5",
                             "-dAutoRotatePages=/None",
                             sprintf("-sOutputFile=%s", tf),
                             gs_extras, p), FALSE, FALSE)
            if(!res && use_qpdf) {
                unlink(tf2) # precaution
                file.rename(tf, tf2)
                res <- system2(qpdf, c("--stream-data=compress",
                                       "--object-streams=generate",
                                       tf2, tf), FALSE, FALSE)
                unlink(tf2)
            }
        } else if(use_qpdf) {
            res <- system2(qpdf, c("--stream-data=compress",
                                   "--object-streams=generate",
                                   p, tf), FALSE, FALSE)
        }
        ## Keep the rewrite only when it is a worthwhile improvement.
        if(!res && file.exists(tf)) {
            old <- file.size(p); new <- file.size(tf)
            if(new/old < 0.9 && new < old - 1e4) {
                file.copy(tf, p, overwrite = TRUE)
                ans[p, ] <- c(old, new)
            }
        }
        unlink(tf)
    }
    ## Rows that stayed NA (not replaced) are dropped by na.omit().
    structure(na.omit(ans), class = c("compactPDF", "data.frame"))
}
find_gs_cmd <- function(gs_cmd = "")
{
    ## Locate a GhostScript executable via Sys.which().  An explicitly
    ## supplied gs_cmd wins; otherwise consult the conventional
    ## environment variables and platform-specific binary names.
    ## Returns "" (as Sys.which does) when nothing is found.
    if(nzchar(gs_cmd))
        return(Sys.which(gs_cmd))
    if(.Platform$OS.type != "windows")
        return(Sys.which(Sys.getenv("R_GSCMD", "gs")))
    ## Windows: R_GSCMD, then GSC, then the 64/32-bit console builds.
    exe <- Sys.getenv("R_GSCMD")
    if (!nzchar(exe)) exe <- Sys.getenv("GSC")
    found <- Sys.which(exe)
    if (!nzchar(found)) found <- Sys.which("gswin64c")
    if (!nzchar(found)) found <- Sys.which("gswin32c")
    found
}
format.compactPDF <- function(x, ratio = 0.9, diff = 1e4, ...)
{
    ## Render one message line per file whose compaction was worthwhile:
    ## size ratio below 'ratio' AND at least 'diff' bytes saved.
    if(!nrow(x)) return(character())
    worthwhile <- with(x, new/old < ratio & new < old - diff)
    kept <- x[worthwhile, ]
    if(!nrow(kept)) return(character())
    ## Pretty-print sizes in Kb, switching to Mb from 1Mb upwards.
    pretty <- kept
    pretty[] <- lapply(kept, function(v) sprintf("%.0fKb", v/1024))
    in_mb <- kept$new >= 1024^2
    pretty[in_mb, ] <- lapply(kept[in_mb, ],
                              function(v) sprintf("%.1fMb", v/1024^2))
    paste(' compacted', sQuote(basename(row.names(kept))),
          'from', pretty[, 1L], 'to', pretty[, 2L])
}
### * add_datalist
add_datalist <- function(pkgpath, force = FALSE)
{
    ## Write a data/datalist file for a source package whose data
    ## directory exceeds 1Mb, so R can enumerate the data sets without
    ## loading them.  An existing datalist is kept unless 'force'.
    dlist <- file.path(pkgpath, "data", "datalist")
    if (!force && file.exists(dlist)) return()
    total <- sum(file.size(Sys.glob(file.path(pkgpath, "data", "*"))))
    if(total <= 1024^2) return()
    datasets <- suppressPackageStartupMessages(
        list_data_in_pkg(dataDir = file.path(pkgpath, "data"))) # for BARD
    if(!length(datasets)) return()
    con <- file(dlist, "w")
    for (nm in names(datasets)) {
        objs <- datasets[[nm]]
        ## A file providing exactly one identically-named object is
        ## listed bare; otherwise "file: obj1 obj2 ...".
        if (length(objs) == 1L && objs == nm) writeLines(nm, con)
        else cat(nm, ": ", paste(objs, collapse = " "), "\n",
                 sep = "", file = con)
    }
    close(con)
    invisible()
}
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
| 42,256 | gpl-2.0 |
c7590628fcdf4e67541a78f6926dc0ebd0f49c57 | pchmieli/h2o-3 | h2o-r/tests/testdir_algos/glrm/runit_glrm_missing_arrests.R |
test.glrm.arrests_miss <- function() {
  # Sweep a range of missing-data fractions, fit an H2O GLRM at each
  # one (validating against the complete data), and collect per-run
  # model and error statistics.
  missing_frac <- seq(from = 0.1, to = 0.9, by = 0.1)
  stats_names <- c("Fraction", "Objective", "AvgChangeObj", "Iterations",
                   "StepSize", "TrainSSE", "ValidSSE", "MissingASE")
  model_stats <- data.frame(matrix(0, nrow = length(missing_frac),
                                   ncol = length(stats_names)))
  colnames(model_stats) <- stats_names

  Log.info("Importing USArrests.csv data and saving for validation...\n")
  arrests.full <- h2o.uploadFile(locate("smalldata/pca_test/USArrests.csv"))
  totobs <- nrow(arrests.full) * ncol(arrests.full)

  for (idx in seq_along(missing_frac)) {
    frac <- missing_frac[idx]
    Log.info(paste("Copying data and inserting ", 100 * frac, "% missing entries:\n", sep = ""))
    arrests.miss <- h2o.assign(arrests.full, "arrests.miss")
    h2o.insertMissingValues(data = arrests.miss, fraction = frac, seed = SEED)
    print(summary(arrests.miss))

    Log.info(paste("H2O GLRM with ", 100 * frac, "% missing entries:\n", sep = ""))
    arrests.glrm <- h2o.glrm(training_frame = arrests.miss, validation_frame = arrests.full,
                             ignore_const_cols = FALSE, k = 4, loss = "Quadratic",
                             regularization_x = "None", regularization_y = "None",
                             init = "PlusPlus", max_iterations = 10,
                             min_step_size = 1e-6, seed = SEED)

    # Check imputed data and error metrics
    trainmm <- arrests.glrm@model$training_metrics@metrics
    validmm <- arrests.glrm@model$validation_metrics@metrics
    checkGLRMPredErr(arrests.glrm, arrests.miss, arrests.full, tolerance = 1e-6)
    expect_equal(trainmm$numerr, arrests.glrm@model$objective, tolerance = 1e-6)
    expect_equal(trainmm$caterr, 0)
    expect_equal(validmm$caterr, 0)
    expect_true(validmm$numcnt > trainmm$numcnt)
    expect_equal(validmm$numcnt, totobs)
    h2o.rm(arrests.glrm@model$representation_name) # Remove X matrix to free memory

    # Average squared error over missing entries only
    miss_ase <- (validmm$numerr - trainmm$numerr) / (validmm$numcnt - trainmm$numcnt)
    model_stats[idx, ] <- c(frac, arrests.glrm@model$objective,
                            arrests.glrm@model$avg_change_obj,
                            arrests.glrm@model$iterations,
                            arrests.glrm@model$step_size,
                            trainmm$numerr, validmm$numerr, miss_ase)
  }
  print(model_stats)
}
## Register and run the test via the h2o runit test harness.
doTest("GLRM Test: USArrests Data with Missing Entries Inserted", test.glrm.arrests_miss)
| 2,436 | apache-2.0 |
5eba3eeb33f67f45455162ef3cec2b23a3ab892e | jeffreyhorner/R-Judy-Arrays | src/library/tools/R/admin.R | # File src/library/tools/R/admin.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### * .install_package_description
## called from basepkg.mk and .install_packages
.install_package_description <-
function(dir, outDir, builtStamp=character())
{
    ## Function for taking the DESCRIPTION package meta-information,
    ## checking/validating it, and installing it with the 'Built:'
    ## field added. Note that from 1.7.0 on, packages without
    ## compiled code are not marked as being from any platform.
    ##
    ## dir: source package directory; outDir: installation directory.
    ## builtStamp: optional reproducible build timestamp; when empty
    ##   the current time (UTC) is used.
    ## Check first. Note that this also calls .read_description(), but
    ## .check_package_description() currently really needs to know the
    ## path to the DESCRIPTION file, and returns an object with check
    ## results and not the package metadata ...
    ok <- .check_package_description(file.path(dir, "DESCRIPTION"))
    if(any(as.integer(sapply(ok, length)) > 0L)) {
        stop(paste(gettext("Invalid DESCRIPTION file") ,
                   paste(.eval_with_capture(print(ok))$output,
                         collapse = "\n"),
                   sep = "\n\n"),
             domain = NA,
             call. = FALSE)
    }
    ## This reads (in C locale) byte-by-byte, declares latin1 or UTF-8
    ## Maybe it would be better to re-encode others (there are none at
    ## present, at least in a UTF-8 locale?
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    ## should not have a Built: field, so ignore it if it is there
    nm <- names(db)
    if("Built" %in% nm) {
        db <- db[-match("Built", nm)]
        warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                         db["Package"]),
                domain = NA,
                call. = FALSE)
    }
    OStype <- R.version$platform
    ## Multi-arch macOS builds are recorded as 'universal'.
    if (grepl("-apple-darwin", OStype) && nzchar(Sys.getenv("R_ARCH")))
        OStype <- sub(".*-apple-darwin", "universal-apple-darwin", OStype)
    ## Built: "R <ver>; <platform-if-compiled>; <timestamp>; <OStype>"
    ## (the platform slot is left empty for packages without src/).
    Built <-
        paste0("R ",
               paste(R.version[c("major", "minor")], collapse = "."),
               "; ",
               if(dir.exists(file.path(dir, "src"))) OStype else "",
               "; ",
               ## Some build systems want to supply a package-build timestamp for reproducibility
               ## Prefer date in ISO 8601 format, UTC.
               if (length(builtStamp)==0) format(Sys.time(), tz = "UTC", usetz = TRUE) else builtStamp,
               ## Sys.time(),
               "; ",
               .OStype())
    ## At some point of time, we had:
    ## We must not split the Built: field across lines.
    ## Not sure if this is still true. If not, the following could be
    ## simplified to
    ##   db["Built"] <- Built
    ##   write.dcf(rbind(db), file.path(outDir, "DESCRIPTION"))
    ## But in any case, it is true for fields obtained from expanding R
    ## fields (Authors@R): these should not be reformatted.
    db <- c(db,
            .expand_package_description_db_R_fields(db),
            Built = Built)
    ## This cannot be done in a MBCS: write.dcf fails
    ctype <- Sys.getlocale("LC_CTYPE")
    Sys.setlocale("LC_CTYPE", "C")
    on.exit(Sys.setlocale("LC_CTYPE", ctype))
    .write_description(db, file.path(outDir, "DESCRIPTION"))
    outMetaDir <- file.path(outDir, "Meta")
    if(!dir.exists(outMetaDir) && !dir.create(outMetaDir))
        stop(gettextf("cannot open directory '%s'",
                      outMetaDir),
             domain = NA)
    ## Serialized metadata consumed by packageDescription() and friends.
    saveInfo <- .split_description(db)
    saveRDS(saveInfo, file.path(outMetaDir, "package.rds"))
    invisible()
}
### * .split_description
## also used in .getRequiredPackages
.split_description <-
function(db, verbose = FALSE)
{
    ## Split a named DESCRIPTION metadata vector into a structured
    ## "packageDescription2" list: the raw fields plus parsed Built,
    ## Depends/Suggests/Imports/LinkingTo dependency lists, and the R
    ## version requirement(s) (Rdepends for old R, Rdepends2 complete).
    if(!is.na(Built <- db["Built"])) {
        ## Built has four "; "-separated parts; anything else means the
        ## field was corrupted, so drop it with a warning.
        Built <- as.list(strsplit(Built, "; ")[[1L]])
        if(length(Built) != 4L) {
            warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                             db["Package"]),
                    domain = NA,
                    call. = FALSE)
            Built <- NULL
        } else {
            names(Built) <- c("R", "Platform", "Date", "OStype")
            Built[["R"]] <- R_system_version(sub("^R ([0-9.]+)", "\\1",
                                                Built[["R"]]))
        }
    } else Built <- NULL
    ## might perhaps have multiple entries
    Depends <- .split_dependencies(db[names(db) %in% "Depends"])
    ## several packages 'Depends' on base!
    ind <- match("base", names(Depends), 0L)
    if(ind) Depends <- Depends[-ind]
    ## We only need Rdepends for R < 2.7.0, but we still need to be
    ## able to check that someone is not trying to load this into a
    ## very old version of R.
    if("R" %in% names(Depends)) {
        Rdeps2 <- Depends["R" == names(Depends)]
        names(Rdeps2) <- NULL
        Rdeps <- Depends[["R", exact = TRUE]] # the first one
        Depends <- Depends[names(Depends) != "R"]
        ## several packages have 'Depends: R', which is a noop.
        if(verbose && length(Rdeps) == 1L)
            message("WARNING: omitting pointless dependence on 'R' without a version requirement")
        if(length(Rdeps) <= 1L) Rdeps <- NULL
    } else Rdeps2 <- Rdeps <- NULL
    Rdeps <- as.vector(Rdeps)
    Suggests <- .split_dependencies(db[names(db) %in% "Suggests"])
    Imports <- .split_dependencies(db[names(db) %in% "Imports"])
    LinkingTo <- .split_dependencies(db[names(db) %in% "LinkingTo"])
    structure(list(DESCRIPTION = db, Built = Built,
                   Rdepends = Rdeps, Rdepends2 = Rdeps2,
                   Depends = Depends, Suggests = Suggests,
                   Imports = Imports, LinkingTo = LinkingTo),
              class = "packageDescription2")
}
### * .vinstall_package_descriptions_as_RDS
## called from src/library/Makefile
.vinstall_package_descriptions_as_RDS <-
function(dir, packages)
{
    ## For the given packages installed in 'dir', serialize their
    ## DESCRIPTION metadata to Meta/package.rds, skipping packages
    ## whose serialized copy is already newer than the DESCRIPTION.
    ## Really only useful for base packages under Unix; see
    ## src/library/Makefile.in.
    for(pkg in unlist(strsplit(packages, "[[:space:]]+"))) {
        meta_dir <- file.path(dir, pkg, "Meta")
        if(!dir.exists(meta_dir) && !dir.create(meta_dir))
            stop(gettextf("cannot open directory '%s'", meta_dir))
        dcf_file <- file.path(dir, pkg, "DESCRIPTION")
        rds_file <- file.path(meta_dir, "package.rds")
        if(file_test("-nt", rds_file, dcf_file))
            next
        saveRDS(.split_description(.read_description(dcf_file)),
                rds_file)
    }
    invisible()
}
### * .update_package_rds
## not used
.update_package_rds <-
function(lib.loc = NULL)
{
    ## Rebuild the serialized DESCRIPTION metadata (Meta/package.rds)
    ## for every package found under the given library paths
    ## (defaulting to .libPaths()), printing each package directory as
    ## it is processed.
    if (is.null(lib.loc)) lib.loc <- .libPaths()
    for (lib in lib.loc[file.exists(lib.loc)]) {
        pkg_dirs <- list.files(lib, all.files = FALSE, full.names = TRUE)
        for (pkg_dir in pkg_dirs) {
            if (file.exists(file.path(pkg_dir, "DESCRIPTION"))) {
                print(pkg_dir)
                .install_package_description(pkg_dir, pkg_dir)
            }
        }
    }
}
### * .install_package_code_files
.install_package_code_files <-
function(dir, outDir)
{
    ## Concatenate a package's R source files (in Collate order if a
    ## Collate field is present) into a single file
    ## <outDir>/R/<package>, prefixed with a .packageName assignment,
    ## re-encoding when the DESCRIPTION declares an Encoding.  The
    ## result is syntax-checked with parse() before returning.
    if(!dir.exists(dir))
        stop(gettextf("directory '%s' does not exist", dir),
             domain = NA)
    dir <- file_path_as_absolute(dir)
    ## Attempt to set the LC_COLLATE locale to 'C' to turn off locale
    ## specific sorting.
    curLocale <- Sys.getlocale("LC_COLLATE")
    on.exit(Sys.setlocale("LC_COLLATE", curLocale), add = TRUE)
    ## (Guaranteed to work as per the Sys.setlocale() docs.)
    lccollate <- "C"
    if(Sys.setlocale("LC_COLLATE", lccollate) != lccollate) {
        ## <NOTE>
        ## I don't think we can give an error here.
        ## It may be the case that Sys.setlocale() fails because the "OS
        ## reports request cannot be honored" (src/main/platform.c), in
        ## which case we should still proceed ...
        warning("cannot turn off locale-specific sorting via LC_COLLATE")
        ## </NOTE>
    }
    ## We definitely need a valid DESCRIPTION file.
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    codeDir <- file.path(dir, "R")
    if(!dir.exists(codeDir)) return(invisible())
    codeFiles <- list_files_with_type(codeDir, "code", full.names = FALSE)
    ## An OS-specific Collate.<OStype> field takes precedence over a
    ## plain Collate field.
    collationField <-
        c(paste("Collate", .OStype(), sep = "."), "Collate")
    if(any(i <- collationField %in% names(db))) {
        collationField <- collationField[i][1L]
        codeFilesInCspec <- .read_collate_field(db[collationField])
        ## Duplicated entries in the collation spec?
        badFiles <-
            unique(codeFilesInCspec[duplicated(codeFilesInCspec)])
        if(length(badFiles)) {
            out <- gettextf("\nduplicated files in '%s' field:",
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files are listed in the collation spec but don't
        ## exist.
        badFiles <- setdiff(codeFilesInCspec, codeFiles)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' field missing from '%s':",
                            collationField,
                            codeDir)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files exist but are missing from the collation
        ## spec. Note that we do not want the collation spec to use
        ## only a subset of the available code files.
        badFiles <- setdiff(codeFiles, codeFilesInCspec)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' missing from '%s' field:",
                            codeDir,
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## Everything's groovy ...
        codeFiles <- codeFilesInCspec
    }
    codeFiles <- file.path(codeDir, codeFiles)
    if(!dir.exists(outDir) && !dir.create(outDir))
        stop(gettextf("cannot open directory '%s'", outDir),
             domain = NA)
    outCodeDir <- file.path(outDir, "R")
    if(!dir.exists(outCodeDir) && !dir.create(outCodeDir))
        stop(gettextf("cannot open directory '%s'", outCodeDir),
             domain = NA)
    outFile <- file.path(outCodeDir, db["Package"])
    if(!file.create(outFile))
        stop(gettextf("unable to create '%s'", outFile), domain = NA)
    writeLines(paste0(".packageName <- \"", db["Package"], "\""),
               outFile)
    enc <- as.vector(db["Encoding"])
    need_enc <- !is.na(enc) # Encoding was specified
    ## assume that if locale is 'C' we can used 8-bit encodings unchanged.
    if(need_enc && !(Sys.getlocale("LC_CTYPE") %in% c("C", "POSIX"))) {
        ## Re-encode each file into the current locale, falling back to
        ## <xx> byte escapes for characters that cannot be represented.
        con <- file(outFile, "a")
        on.exit(close(con)) # Windows does not like files left open
        for(f in codeFiles) {
            tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "")
            if(length(bad <- which(is.na(tmp)))) {
                warning(sprintf(ngettext(length(bad),
                                         "unable to re-encode %s line %s",
                                         "unable to re-encode %s lines %s"),
                                sQuote(basename(f)),
                                paste(bad, collapse = ", ")),
                        domain = NA, call. = FALSE)
                tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "",
                             sub = "byte")
            }
            writeLines(paste0("#line 1 \"", f, "\""), con)
            writeLines(tmp, con)
        }
        close(con); on.exit()
        ## NOTE(review): the bare on.exit() above clears ALL exit
        ## handlers, including the LC_COLLATE restore registered at the
        ## top -- confirm this is intentional.
    } else {
        ## <NOTE>
        ## It may be safer to do
        ##   writeLines(sapply(codeFiles, readLines), outFile)
        ## instead, but this would be much slower ...
        ## use fast version of file.append that ensures LF between files
        if(!all(.file_append_ensuring_LFs(outFile, codeFiles)))
            stop("unable to write code files")
        ## </NOTE>
    }
    ## A syntax check here, so that we do not install a broken package.
    ## FIXME: this is only needed if we don't lazy load, as the lazy loader
    ## would detect the error.
    op <- options(showErrorCalls=FALSE)
    on.exit(options(op))
    ## NOTE(review): this on.exit() (no add = TRUE) also replaces any
    ## previously registered handler -- confirm the locale restore is
    ## not needed past this point.
    parse(outFile)
    invisible()
}
### * .install_package_indices
## called from R CMD INSTALL
.install_package_indices <-
function(dir, outDir)
{
    ## Install the package INDEX (copied as-is from the sources when
    ## present, never rebuilt) and build the Rd and demo indices.
    options(warn = 1) # to ensure warnings get seen
    if(!dir.exists(dir))
        stop(gettextf("directory '%s' does not exist", dir),
             domain = NA)
    if(!dir.exists(outDir))
        stop(gettextf("directory '%s' does not exist", outDir),
             domain = NA)
    srcIndex <- file.path(dir, "INDEX")
    if(file_test("-f", srcIndex)) {
        destIndex <- file.path(outDir, "INDEX")
        if(!file.copy(srcIndex, destIndex, overwrite = TRUE))
            stop(gettextf("unable to copy INDEX to '%s'", destIndex),
                 domain = NA)
    }
    metaDir <- file.path(outDir, "Meta")
    if(!dir.exists(metaDir) && !dir.create(metaDir))
        stop(gettextf("cannot open directory '%s'", metaDir),
             domain = NA)
    .install_package_Rd_indices(dir, outDir)
    .install_package_demo_index(dir, outDir)
    invisible()
}
### * .install_package_Rd_indices

## Build and install the Rd-derived indices for a package:
## Meta/Rd.rds (contents of all Rd files), Meta/hsearch.rds (help.search
## index), Meta/links.rds (link-resolution index), Meta/data.rds (data
## index) and, when the sources do not supply one, a top-level INDEX.
## 'dir' is the package source directory, 'outDir' the install directory.
.install_package_Rd_indices <-
function(dir, outDir)
{
    dir <- file_path_as_absolute(dir)
    docsDir <- file.path(dir, "man")
    dataDir <- file.path(outDir, "data")
    outDir <- file_path_as_absolute(outDir)
    ## <FIXME>
    ## Not clear whether we should use the basename of the directory we
    ## install to, or the package name as obtained from the DESCRIPTION
    ## file in the directory we install from (different for versioned
    ## installs).  We definitely do not want the basename of the dir we
    ## install from.
    packageName <- basename(outDir)
    ## </FIXME>
    ## The package's declared 'Encoding' is needed for the help.search
    ## index in *both* branches below.
    ## FIX: previously this was only computed in the branch with Rd
    ## files, so the no-Rd-files branch failed with "object
    ## 'defaultEncoding' not found".
    defaultEncoding <-
        as.vector(readRDS(file.path(outDir, "Meta",
                                    "package.rds"))$DESCRIPTION["Encoding"])
    if(is.na(defaultEncoding)) defaultEncoding <- NULL
    allRd <- if(dir.exists(docsDir))
        list_files_with_type(docsDir, "docs") else character()
    ## some people have man dirs without any valid .Rd files
    if(length(allRd)) {
        ## we want the date of the newest .Rd file we will install
        newestRd <- max(file.mtime(allRd))
        ## these files need not exist, which gives NA.
        indices <- c(file.path("Meta", "Rd.rds"),
                     file.path("Meta", "hsearch.rds"),
                     file.path("Meta", "links.rds"),
                     "INDEX")
        upToDate <- file.mtime(file.path(outDir, indices)) >= newestRd
        ## FIX: list.files() returns base names; without full.names the
        ## mtimes were all NA and the data index was always rebuilt.
        if(dir.exists(dataDir)
           && length(dataFiles <- list.files(dataDir, full.names = TRUE))) {
            ## Note that the data index is computed from both the package's
            ## Rd files and the data sets actually available.
            newestData <- max(file.mtime(dataFiles))
            upToDate <- c(upToDate,
                          file.mtime(file.path(outDir, "Meta", "data.rds")) >=
                          max(newestRd, newestData))
        }
        ## Note that this is not quite good enough: an Rd file or data file
        ## might have been removed since the indices were made.
        ## FIX: the path must be anchored at outDir; a bare "Meta/Rd.rds"
        ## was resolved against the current working directory, so this
        ## consistency check was effectively always skipped.
        RdsFile <- file.path(outDir, "Meta", "Rd.rds")
        if(file.exists(RdsFile)) { ## for Rd files
            ## this has file names without path
            files <- readRDS(RdsFile)$File
            if(!identical(basename(allRd), files)) upToDate <- FALSE
        }
        ## we want to proceed if any is NA.
        if(all(upToDate %in% TRUE)) return(invisible())
        ## Rd objects should already have been installed.
        db <- tryCatch(Rd_db(basename(outDir), lib.loc = dirname(outDir)),
                       error = function(e) NULL)
        ## If not, we build the Rd db from the sources:
        if(is.null(db)) db <- .build_Rd_db(dir, allRd)
        contents <- Rd_contents(db)
        .write_Rd_contents_as_RDS(contents,
                                  file.path(outDir, "Meta", "Rd.rds"))
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
        ## If there is no @file{INDEX} file in the package sources, we
        ## build one.
        ## <NOTE>
        ## We currently do not also save this in RDS format, as we can
        ## always do
        ##   .build_Rd_index(readRDS(file.path(outDir, "Meta", "Rd.rds"))
        if(!file_test("-f", file.path(dir, "INDEX")))
            writeLines(formatDL(.build_Rd_index(contents)),
                       file.path(outDir, "INDEX"))
        ## </NOTE>
    } else {
        ## No Rd files: still write (empty) hsearch and links indices.
        contents <- NULL
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
    }
    if(dir.exists(dataDir))
        saveRDS(.build_data_index(dataDir, contents),
                file.path(outDir, "Meta", "data.rds"))
    invisible()
}
### * .install_package_vignettes2

## called from R CMD INSTALL for pre 3.0.2-built tarballs, and for base packages

## Install vignette support for a package that does not ship a pre-built
## build/vignette.rds: copies vignette sources into <outDir>/doc, tangles
## them to *.R files, writes Meta/vignette.rds and (unless the package
## supplies its own) doc/index.html.
## NOTE(review): 'encoding' is not referenced in the body; presumably
## kept for call compatibility -- confirm before removing.
.install_package_vignettes2 <-
function(dir, outDir, encoding = "")
{
    dir <- file_path_as_absolute(dir)
    ## vignette sources may live in vignettes/ (preferred) or inst/doc
    subdirs <- c("vignettes", file.path("inst", "doc"))
    ok <- dir.exists(file.path(dir, subdirs))
    ## Create a vignette index only if the vignette dir exists.
    if (!any(ok))
        return(invisible())
    subdir <- subdirs[ok][1L]          # first existing directory wins
    vignetteDir <- file.path(dir, subdir)
    outDir <- file_path_as_absolute(outDir)
    packageName <- basename(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    ## --fake and --no-inst installs do not have a outVignetteDir.
    if(!dir.exists(outVignetteDir)) return(invisible())
    ## If there is an HTML index in the @file{inst/doc} subdirectory of
    ## the package source directory (@code{dir}), we do not overwrite it
    ## (similar to top-level @file{INDEX} files).  Installation already
    ## copied this over.
    hasHtmlIndex <- file_test("-f", file.path(vignetteDir, "index.html"))
    htmlIndex <- file.path(outDir, "doc", "index.html")
    vigns <- pkgVignettes(dir = dir, subdirs = subdir, check = TRUE)
    ## Write dummy HTML index if no vignettes are found and exit.
    if(length(vigns$docs) == 0L) {
        ## we don't want to write an index if the directory is in fact empty
        files <- list.files(vignetteDir, all.files = TRUE, no.. = TRUE)
        if((length(files) > 0L) && !hasHtmlIndex)
            .writeVignetteHtmlIndex(packageName, htmlIndex)
        return(invisible())
    }
    if (subdir == "vignettes") {
        ## copy vignette sources over.
        file.copy(vigns$docs, outVignetteDir)
    }
    ## Re-scan from the installed location, with outputs/sources; fall
    ## back to a plain scan if that fails.
    vigns <- tryCatch({
        pkgVignettes(dir=outDir, subdirs="doc", output=TRUE, source=TRUE)
    }, error = function(ex) {
        pkgVignettes(dir=outDir, subdirs="doc")
    })
    vignetteIndex <- .build_vignette_index(vigns)
    if(NROW(vignetteIndex) > 0L) {
        cwd <- getwd()
        if (is.null(cwd))
            stop("current working directory cannot be ascertained")
        ## tangling happens inside the installed doc directory
        setwd(outVignetteDir)
        loadVignetteBuilder(dir, mustwork = FALSE)
        ## install tangled versions of Sweave vignettes. FIXME: Vignette
        ## *.R files should have been included when the package was built,
        ## but in the interim before they are all built with the new code,
        ## this is needed.
        for(i in seq_along(vigns$docs)) {
            file <- vigns$docs[i]
            ## skip vignettes which already have a tangled source
            if (!is.null(vigns$sources) && !is.null(vigns$sources[file][[1]]))
                next
            file <- basename(file)
            enc <- vigns$encodings[i]
            cat(" ", sQuote(basename(file)),
                if(nzchar(enc)) paste("using", sQuote(enc)), "\n")
            ## a broken/unknown engine is skipped rather than an error
            engine <- try(vignetteEngine(vigns$engines[i]), silent = TRUE)
            if (!inherits(engine, "try-error"))
                engine$tangle(file, quiet = TRUE, encoding = enc)
            setwd(outVignetteDir) # just in case some strange tangle function changed it
        }
        setwd(cwd)
        # Update - now from the output directory
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        ## remove any files with no R code (they will have header comments).
        ## if not correctly declared they might not be in the current encoding
        sources <- unlist(vigns$sources)
        for(i in seq_along(sources)) {
            file <- sources[i]
            if (!file_test("-f", file)) next
            bfr <- readLines(file, warn = FALSE)
            ## only '###' comment lines / blank lines => drop the file
            if(all(grepl("(^###|^[[:space:]]*$)", bfr, useBytes = TRUE)))
                unlink(file)
        }
        # Update
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        # Add tangle source files (*.R) to the vignette index
        # Only the "main" R file, because tangle may also split
        # output into multiple files
        sources <- character(length(vigns$docs))
        for (i in seq_along(vigns$docs)) {
            name <- vigns$names[i]
            ## NOTE(review): 'engine' here is whatever the last iteration
            ## of the tangle loop left behind (possibly a try-error), not
            ## necessarily this vignette's engine -- confirm intended.
            source <- find_vignette_product(name, by = "tangle", main = TRUE, dir = vigns$dir, engine = engine)
            if (length(source) > 0L)
                sources[i] <- basename(source)
        }
        vignetteIndex$R <- sources
    }
    if(!hasHtmlIndex)
        .writeVignetteHtmlIndex(packageName, htmlIndex, vignetteIndex)
    saveRDS(vignetteIndex,
            file = file.path(outDir, "Meta", "vignette.rds"))
    invisible()
}
### * .install_package_vignettes3

## called from R CMD INSTALL for 3.0.2 or later tarballs

## Install the pre-built vignette index shipped as build/vignette.rds:
## copy it to <outDir>/Meta and write doc/index.html unless the package
## supplies its own HTML index.
.install_package_vignettes3 <-
function(dir, outDir, encoding = "")
{
    packageName <- basename(outDir)
    dir <- file_path_as_absolute(dir)
    indexname <- file.path(dir, "build", "vignette.rds")
    ## nothing to do for packages built without vignettes
    if (!file_test("-f", indexname))
        return(invisible())
    ## Copy the index to Meta
    file.copy(indexname, file.path(outDir, "Meta"))
    ## A user-supplied index.html (installed earlier from inst/doc) is
    ## respected, similar to top-level INDEX files.
    vignetteDir <- file.path(outDir, "doc")
    hasHtmlIndex <- file_test("-f", file.path(vignetteDir, "index.html"))
    vignetteIndex <- readRDS(indexname)
    if (!hasHtmlIndex) {
        htmlIndex <- file.path(outDir, "doc", "index.html")
        .writeVignetteHtmlIndex(packageName, htmlIndex, vignetteIndex)
    }
    invisible()
}
### * .install_package_demo_index

## Build the demo index for the package sources in 'dir' and save it as
## Meta/demo.rds in the installed package 'outDir'.
.install_package_demo_index <-
function(dir, outDir)
{
    demoDir <- file.path(dir, "demo")
    if (dir.exists(demoDir))
        saveRDS(.build_demo_index(demoDir),
                file = file.path(outDir, "Meta", "demo.rds"))
    invisible()
}
### * .vinstall_package_indices

## called from src/library/Makefile

## For the given (whitespace-separated) packages with sources rooted at
## 'src_dir' and installations rooted at 'out_dir', install the package
## indices and refresh the HTML package listing.
## Really only useful for base packages under Unix; see
## src/library/Makefile.in.
.vinstall_package_indices <-
function(src_dir, out_dir, packages)
{
    pkgs <- unlist(strsplit(packages, "[[:space:]]+"))
    for (pkg in pkgs)
        .install_package_indices(file.path(src_dir, pkg),
                                 file.path(out_dir, pkg))
    utils::make.packages.html(.Library, verbose = FALSE)
    invisible()
}
### * .install_package_vignettes

## called from src/library/Makefile[.win]
## this is only used when building R

## Weave the vignettes of a (base) package to PDF and install them into
## <outDir>/doc.  Building happens in a ./.vignettes subdirectory of the
## current directory (not a tempdir) so failures can be inspected; the
## HTML index and tangled *.R files are then produced by
## .install_package_vignettes2().
.install_package_vignettes <-
function(dir, outDir, keep.source = TRUE)
{
    dir <- file_path_as_absolute(dir)
    vigns <- pkgVignettes(dir = dir)
    ## nothing to do without vignette sources
    if(is.null(vigns) || !length(vigns$docs)) return(invisible())
    outDir <- file_path_as_absolute(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    if(!dir.exists(outVignetteDir) && !dir.create(outVignetteDir))
        stop(gettextf("cannot open directory '%s'", outVignetteDir),
             domain = NA)
    ## We have to be careful to avoid repeated rebuilding.
    vignettePDFs <-
        file.path(outVignetteDir,
                  sub("$", ".pdf",
                      basename(file_path_sans_ext(vigns$docs))))
    ## an installed PDF newer than its source needs no rebuild
    upToDate <- file_test("-nt", vignettePDFs, vigns$docs)
    ## The primary use of this function is to build and install PDF
    ## vignettes in base packages.
    ## Hence, we build in a subdir of the current directory rather
    ## than a temp dir: this allows inspection of problems and
    ## automatic cleanup via Make.
    cwd <- getwd()
    if (is.null(cwd))
        stop("current working directory cannot be ascertained")
    buildDir <- file.path(cwd, ".vignettes")
    if(!dir.exists(buildDir) && !dir.create(buildDir))
        stop(gettextf("cannot create directory '%s'", buildDir), domain = NA)
    on.exit(setwd(cwd))
    setwd(buildDir)
    loadVignetteBuilder(vigns$pkgdir)
    ## weave (and if needed run texi2pdf on) each out-of-date vignette
    for(i in seq_along(vigns$docs)[!upToDate]) {
        file <- vigns$docs[i]
        name <- vigns$names[i]
        engine <- vignetteEngine(vigns$engines[i])
        message(gettextf("processing %s", sQuote(basename(file))),
                domain = NA)
        ## Note that contrary to all other weave/tangle calls, here
        ## 'file' is not a file in the current directory [hence no
        ## file <- basename(file) above].  However, weave should/must
        ## always create a file ('output') in the current directory.
        output <- tryCatch({
            engine$weave(file, pdf = TRUE, eps = FALSE, quiet = TRUE,
                         keep.source = keep.source, stylepath = FALSE)
            setwd(buildDir)
            find_vignette_product(name, by = "weave", engine = engine)
        }, error = function(e) {
            stop(gettextf("running %s on vignette '%s' failed with message:\n%s",
                          engine[["name"]], file, conditionMessage(e)),
                 domain = NA, call. = FALSE)
        })
        ## In case of an error, do not clean up: should we point to
        ## buildDir for possible inspection of results/problems?
        ## We need to ensure that vignetteDir is in TEXINPUTS and BIBINPUTS.
        if (vignette_is_tex(output)) {
            ## <FIXME>
            ## What if this fails?
            ## Now gives a more informative error texi2pdf fails
            ## or if it does not produce a <name>.pdf.
            tryCatch({
                texi2pdf(file = output, quiet = TRUE, texinputs = vigns$dir)
                output <- find_vignette_product(name, by = "texi2pdf", engine = engine)
            }, error = function(e) {
                stop(gettextf("compiling TeX file %s failed with message:\n%s",
                              sQuote(output), conditionMessage(e)),
                     domain = NA, call. = FALSE)
            })
            ## </FIXME>
        }
        if(!file.copy(output, outVignetteDir, overwrite = TRUE))
            stop(gettextf("cannot copy '%s' to '%s'",
                          output,
                          outVignetteDir),
                 domain = NA)
    }
    ## Need to change out of this dir before we delete it,
    ## at least on Windows.
    setwd(cwd)
    unlink(buildDir, recursive = TRUE)
    ## Now you need to update the HTML index!
    ## This also creates the .R files
    .install_package_vignettes2(dir, outDir)
    invisible()
}
### * .install_package_namespace_info

## Parse a package's NAMESPACE file and cache the parsed result as
## Meta/nsInfo.rds in the installed package.  Skipped when there is no
## NAMESPACE or the cached copy is already newer than it.
.install_package_namespace_info <-
function(dir, outDir)
{
    dir <- file_path_as_absolute(dir)
    nsFile <- file.path(dir, "NAMESPACE")
    nsRds <- file.path(outDir, "Meta", "nsInfo.rds")
    if (!file_test("-f", nsFile) || file_test("-nt", nsRds, nsFile))
        return(invisible())
    info <- parseNamespaceFile(basename(dir), dirname(dir))
    metaDir <- file.path(outDir, "Meta")
    if (!dir.exists(metaDir) && !dir.create(metaDir))
        stop(gettextf("cannot open directory '%s'", metaDir),
             domain = NA)
    saveRDS(info, nsRds)
    invisible()
}
### * .vinstall_package_namespaces_as_RDS

## called from src/library/Makefile

## For each (whitespace-separated) package installed under 'dir' which
## has a NAMESPACE file, install the namespace info as R metadata.
## Really only useful for base packages under Unix; see
## src/library/Makefile.in.
.vinstall_package_namespaces_as_RDS <-
function(dir, packages)
{
    for (pkg in unlist(strsplit(packages, "[[:space:]]+"))) {
        pkgDir <- file.path(dir, pkg)
        .install_package_namespace_info(pkgDir, pkgDir)
    }
    invisible()
}
### * .install_package_Rd_objects

## called from src/library/Makefile

## Build the Rd database for a package and install it as a lazy-load DB
## (help/<pkg>.rdx/.rdb), plus help/paths.rds recording the Rd source
## paths and copies of any Rd macros from man/macros.
.install_package_Rd_objects <-
function(dir, outDir, encoding = "unknown")
{
    dir <- file_path_as_absolute(dir)
    mandir <- file.path(dir, "man")
    manfiles <- if(!dir.exists(mandir)) character()
    else list_files_with_type(mandir, "docs")
    manOutDir <- file.path(outDir, "help")
    dir.create(manOutDir, FALSE)
    db_file <- file.path(manOutDir,
                         paste0(basename(outDir), ".rdx"))
    ## partially processed Rd sources saved at build time, if any
    built_file <- file.path(dir, "build", "partial.rdb")
    macro_files <- list.files(file.path(dir, "man", "macros"), pattern = "\\.Rd$", full.names = TRUE)
    if (length(macro_files)) {
        macroDir <- file.path(manOutDir, "macros")
        dir.create(macroDir, FALSE)
        file.copy(macro_files, macroDir, overwrite = TRUE)
    }
    ## Avoid (costly) rebuilding if not needed.
    ## Actually, it seems no more costly than these tests, which it also does
    pathsFile <- file.path(manOutDir, "paths.rds")
    ## rebuild when the DB or paths file is missing, the set of Rd files
    ## changed, or any Rd file is newer than the installed DB
    if(!file_test("-f", db_file) || !file.exists(pathsFile) ||
       !identical(sort(manfiles), sort(readRDS(pathsFile))) ||
       !all(file_test("-nt", db_file, manfiles))) {
        db <- .build_Rd_db(dir, manfiles, db_file = db_file,
                           encoding = encoding, built_file = built_file)
        nm <- as.character(names(db)) # Might be NULL
        ## 'first' marks where the base file name starts within each path
        saveRDS(structure(nm,
                          first = nchar(file.path(mandir)) + 2L),
                pathsFile)
        ## DB keys are the Rd file names without their extension
        names(db) <- sub("\\.[Rr]d$", "", basename(nm))
        makeLazyLoadDB(db, file.path(manOutDir, basename(outDir)))
    }
    invisible()
}
### * .install_package_demos

## called from basepkg.mk and .install_packages

## Copy the demo scripts of the package sources in 'dir' into the
## installed package 'outDir'.
## NB: we no longer install 00Index.
.install_package_demos <-
function(dir, outDir)
{
    srcDemoDir <- file.path(dir, "demo")
    if (!dir.exists(srcDemoDir)) return()
    demoFiles <- list_files_with_type(srcDemoDir, "demo", full.names = FALSE)
    if (!length(demoFiles)) return()
    destDemoDir <- file.path(outDir, "demo")
    if (!dir.exists(destDemoDir)) dir.create(destDemoDir)
    file.copy(file.path(srcDemoDir, demoFiles), destDemoDir,
              overwrite = TRUE)
}
### * .find_cinclude_paths

## Given a comma-separated string of package names (either in 'pkgs' or
## read from the LinkingTo field of the DESCRIPTION file 'file'), locate
## the installed packages and cat() a -I"/path/to/package/include" flag
## for each one found.
.find_cinclude_paths <-
function(pkgs, lib.loc = NULL, file = NULL)
{
    if (!is.null(file)) {
        linkingTo <- read.dcf(file, "LinkingTo")[1L, 1L]
        if (is.na(linkingTo)) return(invisible())
        pkgs <- linkingTo
    }
    pkgNames <- strsplit(pkgs[1L], ",[[:blank:]]*")[[1L]]
    paths <- find.package(pkgNames, lib.loc, quiet = TRUE)
    if (length(paths))
        cat(paste(sprintf('-I"%s/include"', paths), collapse = " "))
    return(invisible())
}
### * .Rtest_package_depends_R_version

## Check whether the running R satisfies the R version requirements in
## the 'Depends' field of the DESCRIPTION in 'dir' (default: ".").
## Returns 0 if all requirements hold, otherwise prints an ERROR
## message and returns a non-zero value (suitable as an exit status).
.Rtest_package_depends_R_version <-
function(dir)
{
    if(missing(dir)) dir <- "."
    meta <- .read_description(file.path(dir, "DESCRIPTION"))
    deps <- .split_description(meta, verbose = TRUE)$Rdepends2
    status <- 0
    current <- getRversion()
    for(depends in deps) {
        ## .split_description will have ensured that this is NULL or
        ## of length 3.
        if(length(depends) > 1L) {
            ## .check_package_description will insist on these operators
            if(!depends$op %in% c("<=", ">=", "<", ">", "==", "!="))
                message("WARNING: malformed 'Depends' field in 'DESCRIPTION'")
            else {
                ## a requirement is either a version number or an svn
                ## revision like 'r12345'
                status <- if(inherits(depends$version, "numeric_version"))
                    !do.call(depends$op, list(current, depends$version))
                else {
                    ver <- R.version
                    ## release / patched builds are taken to satisfy
                    ## any svn-revision requirement
                    if (ver$status %in% c("", "Patched")) FALSE
                    else !do.call(depends$op,
                                  list(ver[["svn rev"]],
                                       as.numeric(sub("^r", "", depends$version))))
                }
            }
            if(status != 0) {
                ## name the package in the message if we can
                package <- Sys.getenv("R_PACKAGE_NAME")
                if(!nzchar(package))
                    package <- meta["Package"]
                msg <- if(nzchar(package))
                    gettextf("ERROR: this R is version %s, package '%s' requires R %s %s",
                             current, package,
                             depends$op, depends$version)
                else
                    gettextf("ERROR: this R is version %s, required is R %s %s",
                             current, depends$op, depends$version)
                message(strwrap(msg, exdent = 2L))
                break
            }
        }
    }
    status
}
## no longer used

## Quit R using the result of the R version dependence check as the
## process exit status (non-zero when the requirement is not met).
.test_package_depends_R_version <-
function(dir)
    q(status = .Rtest_package_depends_R_version(dir))
### * .test_load_package

## Try to attach the installed package 'pkg_name' from library 'lib';
## stop with an error if it cannot be loaded (used to verify that a
## freshly installed package actually works).
.test_load_package <- function(pkg_name, lib)
{
    options(warn = 1)
    loaded <- try(suppressPackageStartupMessages(
        library(pkg_name, lib.loc = lib, character.only = TRUE,
                logical.return = TRUE)))
    if (inherits(loaded, "try-error") || !loaded)
        stop("loading failed", call. = FALSE)
}
### * checkRdaFiles

## Report on R save files: for each path (or each *.rda / *.RData file
## when 'paths' is a single directory) give the file size, whether it is
## an ASCII save, the compression used ("gzip", "bzip2", "xz", "none" or
## "unknown") and the workspace format version.  Rows for files that do
## not exist are left as NA.
checkRdaFiles <- function(paths)
{
    if(length(paths) == 1L && dir.exists(paths)) {
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
        ## Exclude .RData, which this may or may not match
        paths <- grep("/[.]RData$", paths, value = TRUE, invert = TRUE)
    }
    ## one row per path, all NA until the file is examined
    res <- data.frame(size = NA_real_, ASCII = NA,
                      compress = NA_character_, version = NA_integer_,
                      stringsAsFactors = FALSE)
    res <- res[rep_len(1L, length(paths)), ]
    row.names(res) <- paths
    keep <- file.exists(paths)
    res$size[keep] <- file.size(paths)[keep]
    for(p in paths[keep]) {
        ## the leading raw bytes identify the compression wrapper
        magic <- readBin(p, "raw", n = 5)
        ## FIX: accept workspace format version 3 (R >= 3.5.0) and
        ## beyond -- the old pattern "RD[ABX][12]" reported "none"
        ## saves from current R as "unknown".
        res[p, "compress"] <- if(all(magic[1:2] == c(0x1f, 0x8b))) "gzip"
        else if(rawToChar(magic[1:3]) == "BZh") "bzip2"
        else if(magic[1L] == 0xFD && rawToChar(magic[2:5]) == "7zXZ") "xz"
        else if(grepl("RD[ABX][1-9]", rawToChar(magic), useBytes = TRUE)) "none"
        else "unknown"
        ## gzfile() also transparently reads bzip2/xz/uncompressed, so
        ## this sees the save header regardless of the wrapper
        con <- gzfile(p)
        magic <- readChar(con, 5L, useBytes = TRUE)
        close(con)
        res[p, "ASCII"] <- if (grepl("RD[ABX][1-9]", magic, useBytes = TRUE))
            substr(magic, 3, 3) == "A" else NA
        ver <- sub("(RD[ABX])([1-9]*)", "\\2", magic, useBytes = TRUE)
        ## FIX: assign only this file's row; the previous
        ## 'res$version <- as.integer(ver)' overwrote the whole column
        ## with the current file's version on every loop iteration.
        res[p, "version"] <- as.integer(ver)
    }
    res
}
### * resaveRdaFiles

## Re-save each rda/RData file in 'paths' (or all such files when
## 'paths' is a single directory) with the requested compression.
## compress = "auto" keeps the smallest of gzip and bzip2 (plus xz for
## files whose weighted gzip size exceeds 10Kb), giving gzip a 10%
## size bonus for its wider compatibility.
resaveRdaFiles <- function(paths,
                           compress = c("auto", "gzip", "bzip2", "xz"),
                           compression_level)
{
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
    compress <- match.arg(compress)
    if (missing(compression_level))
        compression_level <- if (compress == "gzip") 6 else 9
    for (p in paths) {
        env <- new.env(hash = TRUE) # probably small, need not be
        suppressPackageStartupMessages(load(p, envir = env))
        objs <- ls(env, all.names = TRUE)
        if (compress != "auto") {
            save(file = p, list = objs, envir = env,
                 compress = compress, compression_level = compression_level)
            next
        }
        ## write one candidate per compression method and keep the best
        f_gz <- tempfile()
        save(file = f_gz, list = objs, envir = env)
        f_bz <- tempfile()
        save(file = f_bz, list = objs, envir = env, compress = "bzip2")
        cand <- c(f_gz, f_bz)
        sizes <- file.size(cand) * c(0.9, 1.0)  # 10% bonus for gzip
        if (sizes[1L] > 10240) {
            f_xz <- tempfile()
            save(file = f_xz, list = objs, envir = env, compress = "xz")
            cand <- c(cand, f_xz)
            sizes <- c(sizes, file.size(f_xz))
        }
        file.copy(cand[which.min(sizes)], p, overwrite = TRUE)
        unlink(cand)
    }
}
### * compactPDF

## Try to shrink the PDF files in 'paths' (or all *.pdf files when
## 'paths' is a single directory) using the external tools qpdf and/or
## GhostScript.  A file is replaced only when the compacted copy is both
## >=10% and >=10Kb smaller.  gs_quality "none" disables GhostScript;
## otherwise one of the /printer, /ebook, /screen PDFSETTINGS presets
## is used.  Returns a "compactPDF" data frame of old/new sizes for the
## files that were replaced.
compactPDF <-
    function(paths, qpdf = Sys.which(Sys.getenv("R_QPDF", "qpdf")),
             gs_cmd = Sys.getenv("R_GSCMD", ""),
             gs_quality = Sys.getenv("GS_QUALITY", "none"),
             gs_extras = character())
{
    use_qpdf <- nzchar(qpdf)
    gs_quality <- match.arg(gs_quality, c("none", "printer", "ebook", "screen"))
    use_gs <- if(gs_quality != "none") nzchar(gs_cmd <- find_gs_cmd(gs_cmd)) else FALSE
    ## nothing we can do without at least one external tool
    if (!use_gs && !use_qpdf) return()
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(file.path(paths, "*.pdf"))
    dummy <- rep.int(NA_real_, length(paths))
    ans <- data.frame(old = dummy, new = dummy, row.names = paths)
    tf <- tempfile("pdf"); tf2 <- tempfile("pdf")
    for (p in paths) {
        res <- 0
        if (use_gs) {
            ## GhostScript pass, optionally followed by qpdf on its output
            res <- system2(gs_cmd,
                           c("-q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite",
                             sprintf("-dPDFSETTINGS=/%s", gs_quality),
                             "-dCompatibilityLevel=1.5",
                             "-dAutoRotatePages=/None",
                             sprintf("-sOutputFile=%s", tf),
                             gs_extras, p), FALSE, FALSE)
            if(!res && use_qpdf) {
                unlink(tf2) # precaution
                file.rename(tf, tf2)
                res <- system2(qpdf, c("--stream-data=compress",
                                       "--object-streams=generate",
                                       tf2, tf), FALSE, FALSE)
                unlink(tf2)
            }
        } else if(use_qpdf) {
            ## qpdf-only pass
            res <- system2(qpdf, c("--stream-data=compress",
                                   "--object-streams=generate",
                                   p, tf), FALSE, FALSE)
        }
        ## keep the result only if it is >=10% and >=10Kb smaller
        if(!res && file.exists(tf)) {
            old <- file.size(p); new <- file.size(tf)
            if(new/old < 0.9 && new < old - 1e4) {
                file.copy(tf, p, overwrite = TRUE)
                ans[p, ] <- c(old, new)
            }
        }
        unlink(tf)
    }
    structure(na.omit(ans), class = c("compactPDF", "data.frame"))
}
## Locate a GhostScript executable.  A non-empty 'gs_cmd' is looked up
## directly; otherwise fall back to the R_GSCMD / (Windows-only) GSC
## environment variables and the standard executable names.  Returns ""
## when nothing suitable is found (the Sys.which() convention).
find_gs_cmd <- function(gs_cmd = "")
{
    if (nzchar(gs_cmd)) return(Sys.which(gs_cmd))
    if (.Platform$OS.type != "windows")
        return(Sys.which(Sys.getenv("R_GSCMD", "gs")))
    ## Windows: R_GSCMD, then GSC, then the console executables
    gsexe <- Sys.getenv("R_GSCMD")
    if (!nzchar(gsexe)) gsexe <- Sys.getenv("GSC")
    found <- Sys.which(gsexe)
    if (!nzchar(found)) found <- Sys.which("gswin64c")
    if (!nzchar(found)) found <- Sys.which("gswin32c")
    found
}
## Format method for the data frame returned by compactPDF(): one
## " compacted ..." line per file whose new size is below ratio*old and
## at least 'diff' bytes smaller.  Sizes print in Kb, switching the whole
## row to Mb once the new size reaches 1Mb.
format.compactPDF <- function(x, ratio = 0.9, diff = 1e4, ...)
{
    if (!nrow(x)) return(character())
    shrunk <- x[x$new / x$old < ratio & x$new < x$old - diff, ]
    if (!nrow(shrunk)) return(character())
    pretty <- shrunk
    pretty[] <- lapply(shrunk, function(col) sprintf("%.0fKb", col / 1024))
    big <- shrunk$new >= 1024^2
    pretty[big, ] <- lapply(shrunk[big, ],
                            function(col) sprintf("%.1fMb", col / 1024^2))
    paste(" compacted", sQuote(basename(row.names(shrunk))),
          "from", pretty[, 1L], "to", pretty[, 2L])
}
### * add_datalist

## Write a data/datalist file for the package at 'pkgpath' when its data
## directory exceeds 1Mb (skipped if one exists, unless force = TRUE).
## Each line is either a bare name or "name: obj1 obj2 ..." when a data
## file provides objects under other names.
add_datalist <- function(pkgpath, force = FALSE)
{
    dlist <- file.path(pkgpath, "data", "datalist")
    if (!force && file.exists(dlist)) return()
    dataDir <- file.path(pkgpath, "data")
    totalSize <- sum(file.size(Sys.glob(file.path(dataDir, "*"))))
    ## small data dirs do not need an index
    if (totalSize <= 1024^2) return()
    z <- suppressPackageStartupMessages(list_data_in_pkg(dataDir = dataDir)) # for BARD
    if (!length(z)) return()
    con <- file(dlist, "w")
    for (nm in names(z)) {
        objs <- z[[nm]]
        if (length(objs) == 1L && objs == nm)
            writeLines(nm, con)
        else
            cat(nm, ": ", paste(objs, collapse = " "), "\n",
                sep = "", file = con)
    }
    close(con)
    invisible()
}
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
| 42,256 | gpl-2.0 |
5eba3eeb33f67f45455162ef3cec2b23a3ab892e | cmosetick/RRO | R-src/src/library/tools/R/admin.R | # File src/library/tools/R/admin.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### * .install_package_description

## called from basepkg.mk and .install_packages

## Function for taking the DESCRIPTION package meta-information,
## checking/validating it, and installing it with the 'Built:'
## field added.  Note that from 1.7.0 on, packages without
## compiled code are not marked as being from any platform.
## 'builtStamp' optionally supplies a reproducible build timestamp.
## Also caches the parsed metadata as Meta/package.rds.
.install_package_description <-
function(dir, outDir, builtStamp=character())
{
    ## Check first.  Note that this also calls .read_description(), but
    ## .check_package_description() currently really needs to know the
    ## path to the DESCRIPTION file, and returns an object with check
    ## results and not the package metadata ...
    ok <- .check_package_description(file.path(dir, "DESCRIPTION"))
    if(any(as.integer(sapply(ok, length)) > 0L)) {
        ## embed the printed check results in the error message
        stop(paste(gettext("Invalid DESCRIPTION file") ,
                   paste(.eval_with_capture(print(ok))$output,
                         collapse = "\n"),
                   sep = "\n\n"),
             domain = NA,
             call. = FALSE)
    }
    ## This reads (in C locale) byte-by-byte, declares latin1 or UTF-8
    ## Maybe it would be better to re-encode others (there are none at
    ## present, at least in a UTF-8 locale?
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    ## should not have a Built: field, so ignore it if it is there
    nm <- names(db)
    if("Built" %in% nm) {
        db <- db[-match("Built", nm)]
        warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                         db["Package"]),
                domain = NA,
                call. = FALSE)
    }
    OStype <- R.version$platform
    ## multi-arch macOS builds get a 'universal' platform tag
    if (grepl("-apple-darwin", OStype) && nzchar(Sys.getenv("R_ARCH")))
        OStype <- sub(".*-apple-darwin", "universal-apple-darwin", OStype)
    ## Built has the form "R x.y; platform; date; OStype"; the platform
    ## is recorded only for packages with compiled code (a src/ dir).
    Built <-
        paste0("R ",
               paste(R.version[c("major", "minor")], collapse = "."),
               "; ",
               if(dir.exists(file.path(dir, "src"))) OStype else "",
               "; ",
               ## Some build systems want to supply a package-build timestamp for reproducibility
               ## Prefer date in ISO 8601 format, UTC.
               if (length(builtStamp)==0) format(Sys.time(), tz = "UTC", usetz = TRUE) else builtStamp,
               ## Sys.time(),
               "; ",
               .OStype())
    ## At some point of time, we had:
    ##   We must not split the Built: field across lines.
    ## Not sure if this is still true.  If not, the following could be
    ## simplified to
    ##   db["Built"] <- Built
    ##   write.dcf(rbind(db), file.path(outDir, "DESCRIPTION"))
    ## But in any case, it is true for fields obtained from expanding R
    ## fields (Authors@R): these should not be reformatted.
    db <- c(db,
            .expand_package_description_db_R_fields(db),
            Built = Built)
    ## This cannot be done in a MBCS: write.dcf fails
    ctype <- Sys.getlocale("LC_CTYPE")
    Sys.setlocale("LC_CTYPE", "C")
    on.exit(Sys.setlocale("LC_CTYPE", ctype))
    .write_description(db, file.path(outDir, "DESCRIPTION"))
    outMetaDir <- file.path(outDir, "Meta")
    if(!dir.exists(outMetaDir) && !dir.create(outMetaDir))
        stop(gettextf("cannot open directory '%s'",
                      outMetaDir),
             domain = NA)
    ## cache the parsed metadata for fast loading by library() etc.
    saveInfo <- .split_description(db)
    saveRDS(saveInfo, file.path(outMetaDir, "package.rds"))
    invisible()
}
### * .split_description

## also used in .getRequiredPackages

## Split raw DESCRIPTION metadata 'db' (a named character vector from
## .read_description()) into a "packageDescription2" object: the parsed
## Built field, parsed dependency lists (Depends, Suggests, Imports,
## LinkingTo) and the R version requirements (Rdepends / Rdepends2).
.split_description <-
function(db, verbose = FALSE)
{
    if(!is.na(Built <- db["Built"])) {
        ## Built has the form "R x.y.z; platform; date; OStype"
        Built <- as.list(strsplit(Built, "; ")[[1L]])
        if(length(Built) != 4L) {
            warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                             db["Package"]),
                    domain = NA,
                    call. = FALSE)
            Built <- NULL
        } else {
            names(Built) <- c("R", "Platform", "Date", "OStype")
            Built[["R"]] <- R_system_version(sub("^R ([0-9.]+)", "\\1",
                                                 Built[["R"]]))
        }
    } else Built <- NULL
    ## might perhaps have multiple entries
    Depends <- .split_dependencies(db[names(db) %in% "Depends"])
    ## several packages 'Depends' on base!
    ind <- match("base", names(Depends), 0L)
    if(ind) Depends <- Depends[-ind]
    ## We only need Rdepends for R < 2.7.0, but we still need to be
    ## able to check that someone is not trying to load this into a
    ## very old version of R.
    if("R" %in% names(Depends)) {
        ## Rdepends2 keeps every R requirement; Rdepends only the first
        Rdeps2 <- Depends["R" == names(Depends)]
        names(Rdeps2) <- NULL
        Rdeps <- Depends[["R", exact = TRUE]] # the first one
        Depends <- Depends[names(Depends) != "R"]
        ## several packages have 'Depends: R', which is a noop.
        if(verbose && length(Rdeps) == 1L)
            message("WARNING: omitting pointless dependence on 'R' without a version requirement")
        if(length(Rdeps) <= 1L) Rdeps <- NULL
    } else Rdeps2 <- Rdeps <- NULL
    Rdeps <- as.vector(Rdeps)
    Suggests <- .split_dependencies(db[names(db) %in% "Suggests"])
    Imports <- .split_dependencies(db[names(db) %in% "Imports"])
    LinkingTo <- .split_dependencies(db[names(db) %in% "LinkingTo"])
    structure(list(DESCRIPTION = db, Built = Built,
                   Rdepends = Rdeps, Rdepends2 = Rdeps2,
                   Depends = Depends, Suggests = Suggests,
                   Imports = Imports, LinkingTo = LinkingTo),
              class = "packageDescription2")
}
### * .vinstall_package_descriptions_as_RDS
## called from src/library/Makefile
.vinstall_package_descriptions_as_RDS <-
function(dir, packages)
{
    ## Serialize the DESCRIPTION metadata of the given installed
    ## packages as Meta/package.rds.  Really only useful for base
    ## packages under Unix; called from src/library/Makefile.
    pkg_names <- unlist(strsplit(packages, "[[:space:]]+"))
    for(pkg in pkg_names) {
        pkg_dir <- file.path(dir, pkg)
        meta_dir <- file.path(pkg_dir, "Meta")
        if(!dir.exists(meta_dir) && !dir.create(meta_dir))
            stop(gettextf("cannot open directory '%s'", meta_dir))
        dcf_file <- file.path(pkg_dir, "DESCRIPTION")
        rds_file <- file.path(meta_dir, "package.rds")
        ## Skip packages whose serialized metadata is already newer
        ## than the DESCRIPTION file.
        if(!file_test("-nt", rds_file, dcf_file))
            saveRDS(.split_description(.read_description(dcf_file)),
                    rds_file)
    }
    invisible()
}
### * .update_package_rds
## not used
.update_package_rds <-
function(lib.loc = NULL)
{
    ## Rebuild the serialized package descriptions for every package
    ## found under the given library trees (default: .libPaths()).
    ## Currently unused.
    if (is.null(lib.loc)) lib.loc <- .libPaths()
    for (lib in lib.loc[file.exists(lib.loc)]) {
        pkg_dirs <- list.files(lib, all.files = FALSE, full.names = TRUE)
        for (pkg_dir in pkg_dirs) {
            ## Only directories that carry a DESCRIPTION are packages.
            if (file.exists(file.path(pkg_dir, "DESCRIPTION"))) {
                print(pkg_dir)
                .install_package_description(pkg_dir, pkg_dir)
            }
        }
    }
}
### * .install_package_code_files
## Concatenate a package's R source files -- honouring any Collate
## field in the DESCRIPTION -- into the single file <outDir>/R/<pkg>,
## re-encoding them when the DESCRIPTION declares an Encoding, and
## finally syntax-check the result with parse().
.install_package_code_files <-
function(dir, outDir)
{
    if(!dir.exists(dir))
        stop(gettextf("directory '%s' does not exist", dir),
             domain = NA)
    dir <- file_path_as_absolute(dir)
    ## Attempt to set the LC_COLLATE locale to 'C' to turn off locale
    ## specific sorting.
    curLocale <- Sys.getlocale("LC_COLLATE")
    on.exit(Sys.setlocale("LC_COLLATE", curLocale), add = TRUE)
    ## (Guaranteed to work as per the Sys.setlocale() docs.)
    lccollate <- "C"
    if(Sys.setlocale("LC_COLLATE", lccollate) != lccollate) {
        ## <NOTE>
        ## I don't think we can give an error here.
        ## It may be the case that Sys.setlocale() fails because the "OS
        ## reports request cannot be honored" (src/main/platform.c), in
        ## which case we should still proceed ...
        warning("cannot turn off locale-specific sorting via LC_COLLATE")
        ## </NOTE>
    }
    ## We definitely need a valid DESCRIPTION file.
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    codeDir <- file.path(dir, "R")
    if(!dir.exists(codeDir)) return(invisible())
    codeFiles <- list_files_with_type(codeDir, "code", full.names = FALSE)
    ## An OS-specific Collate field (e.g. Collate.unix) wins over the
    ## plain Collate field.
    collationField <-
        c(paste("Collate", .OStype(), sep = "."), "Collate")
    if(any(i <- collationField %in% names(db))) {
        collationField <- collationField[i][1L]
        codeFilesInCspec <- .read_collate_field(db[collationField])
        ## Duplicated entries in the collation spec?
        badFiles <-
            unique(codeFilesInCspec[duplicated(codeFilesInCspec)])
        if(length(badFiles)) {
            out <- gettextf("\nduplicated files in '%s' field:",
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files are listed in the collation spec but don't
        ## exist.
        badFiles <- setdiff(codeFilesInCspec, codeFiles)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' field missing from '%s':",
                            collationField,
                            codeDir)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files exist but are missing from the collation
        ## spec. Note that we do not want the collation spec to use
        ## only a subset of the available code files.
        badFiles <- setdiff(codeFiles, codeFilesInCspec)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' missing from '%s' field:",
                            codeDir,
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## Everything's groovy ...
        codeFiles <- codeFilesInCspec
    }
    codeFiles <- file.path(codeDir, codeFiles)
    if(!dir.exists(outDir) && !dir.create(outDir))
        stop(gettextf("cannot open directory '%s'", outDir),
             domain = NA)
    outCodeDir <- file.path(outDir, "R")
    if(!dir.exists(outCodeDir) && !dir.create(outCodeDir))
        stop(gettextf("cannot open directory '%s'", outCodeDir),
             domain = NA)
    outFile <- file.path(outCodeDir, db["Package"])
    if(!file.create(outFile))
        stop(gettextf("unable to create '%s'", outFile), domain = NA)
    ## The concatenated file starts by recording the package name.
    writeLines(paste0(".packageName <- \"", db["Package"], "\""),
               outFile)
    enc <- as.vector(db["Encoding"])
    need_enc <- !is.na(enc) # Encoding was specified
    ## assume that if locale is 'C' we can used 8-bit encodings unchanged.
    if(need_enc && !(Sys.getlocale("LC_CTYPE") %in% c("C", "POSIX"))) {
        ## Re-encode each source file into the current locale,
        ## falling back to byte-escapes for unconvertible lines.
        con <- file(outFile, "a")
        ## NOTE(review): this on.exit() call has no add = TRUE and so
        ## replaces the LC_COLLATE restore handler registered above --
        ## looks unintended; confirm.
        on.exit(close(con)) # Windows does not like files left open
        for(f in codeFiles) {
            tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "")
            if(length(bad <- which(is.na(tmp)))) {
                warning(sprintf(ngettext(length(bad),
                                         "unable to re-encode %s line %s",
                                         "unable to re-encode %s lines %s"),
                                sQuote(basename(f)),
                                paste(bad, collapse = ", ")),
                        domain = NA, call. = FALSE)
                tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "",
                             sub = "byte")
            }
            ## #line directives keep error locations pointing at the
            ## original source files.
            writeLines(paste0("#line 1 \"", f, "\""), con)
            writeLines(tmp, con)
        }
        close(con); on.exit()
    } else {
        ## <NOTE>
        ## It may be safer to do
        ##   writeLines(sapply(codeFiles, readLines), outFile)
        ## instead, but this would be much slower ...
        ## use fast version of file.append that ensures LF between files
        if(!all(.file_append_ensuring_LFs(outFile, codeFiles)))
            stop("unable to write code files")
        ## </NOTE>
    }
    ## A syntax check here, so that we do not install a broken package.
    ## FIXME: this is only needed if we don't lazy load, as the lazy loader
    ## would detect the error.
    op <- options(showErrorCalls=FALSE)
    on.exit(options(op))
    parse(outFile)
    invisible()
}
### * .install_package_indices
## called from R CMD INSTALL
.install_package_indices <-
function(dir, outDir)
{
    ## Install (or build) a package's Rd and demo indices.
    ## Called from R CMD INSTALL.
    options(warn = 1) # make sure warnings are reported as they occur
    for(d in c(dir, outDir))
        if(!dir.exists(d))
            stop(gettextf("directory '%s' does not exist", d),
                 domain = NA)
    ## A hand-written INDEX in the package sources is installed as-is
    ## rather than rebuilt.
    srcIndex <- file.path(dir, "INDEX")
    if(file_test("-f", srcIndex)) {
        dstIndex <- file.path(outDir, "INDEX")
        if(!file.copy(srcIndex, dstIndex, overwrite = TRUE))
            stop(gettextf("unable to copy INDEX to '%s'", dstIndex),
                 domain = NA)
    }
    metaDir <- file.path(outDir, "Meta")
    if(!dir.exists(metaDir) && !dir.create(metaDir))
        stop(gettextf("cannot open directory '%s'", metaDir),
             domain = NA)
    .install_package_Rd_indices(dir, outDir)
    .install_package_demo_index(dir, outDir)
    invisible()
}
### * .install_package_Rd_indices
.install_package_Rd_indices <-
function(dir, outDir)
{
    ## Build and install the Rd-derived metadata of a package:
    ## Meta/Rd.rds, Meta/hsearch.rds, Meta/links.rds, Meta/data.rds
    ## and (unless the sources supply one) the plain-text INDEX.
    ## Rebuilding is skipped when all installed indices are newer than
    ## the newest Rd/data file.
    dir <- file_path_as_absolute(dir)
    docsDir <- file.path(dir, "man")
    dataDir <- file.path(outDir, "data")
    outDir <- file_path_as_absolute(outDir)
    ## <FIXME>
    ## Not clear whether we should use the basename of the directory we
    ## install to, or the package name as obtained from the DESCRIPTION
    ## file in the directory we install from (different for versioned
    ## installs). We definitely do not want the basename of the dir we
    ## install from.
    packageName <- basename(outDir)
    ## </FIXME>
    ## The package's declared Encoding (from Meta/package.rds, written
    ## earlier by .install_package_description) is needed for the
    ## help-search index in *both* branches below, so compute it up
    ## front.  (It used to be computed only in the branch with Rd
    ## files, leaving it undefined -- and hence an error -- in the
    ## other branch.)
    defaultEncoding <-
        as.vector(readRDS(file.path(outDir, "Meta",
                                    "package.rds"))$DESCRIPTION["Encoding"])
    if(is.na(defaultEncoding)) defaultEncoding <- NULL
    allRd <- if(dir.exists(docsDir))
        list_files_with_type(docsDir, "docs") else character()
    ## some people have man dirs without any valid .Rd files
    if(length(allRd)) {
        ## we want the date of the newest .Rd file we will install
        newestRd <- max(file.mtime(allRd))
        ## these files need not exist, which gives NA.
        indices <- c(file.path("Meta", "Rd.rds"),
                     file.path("Meta", "hsearch.rds"),
                     file.path("Meta", "links.rds"),
                     "INDEX")
        upToDate <- file.mtime(file.path(outDir, indices)) >= newestRd
        ## Fix: list.files() without full.names returned basenames, so
        ## file.mtime() on them was NA and the data index was always
        ## considered out of date.
        if(dir.exists(dataDir)
           && length(dataFiles <- list.files(dataDir, full.names = TRUE))) {
            ## Note that the data index is computed from both the package's
            ## Rd files and the data sets actually available.
            newestData <- max(file.mtime(dataFiles))
            upToDate <- c(upToDate,
                          file.mtime(file.path(outDir, "Meta", "data.rds")) >=
                          max(newestRd, newestData))
        }
        ## Note that this is not quite good enough: an Rd file or data file
        ## might have been removed since the indices were made.
        ## Fix: this must be the installed copy under outDir, not a
        ## path relative to the current working directory.
        RdsFile <- file.path(outDir, "Meta", "Rd.rds")
        if(file.exists(RdsFile)) { ## for Rd files
            ## this has file names without path
            files <- readRDS(RdsFile)$File
            if(!identical(basename(allRd), files)) upToDate <- FALSE
        }
        ## we want to proceed if any is NA.
        if(all(upToDate %in% TRUE)) return(invisible())
        ## Rd objects should already have been installed.
        db <- tryCatch(Rd_db(basename(outDir), lib.loc = dirname(outDir)),
                       error = function(e) NULL)
        ## If not, we build the Rd db from the sources:
        if(is.null(db)) db <- .build_Rd_db(dir, allRd)
        contents <- Rd_contents(db)
        .write_Rd_contents_as_RDS(contents,
                                  file.path(outDir, "Meta", "Rd.rds"))
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
        ## If there is no @file{INDEX} file in the package sources, we
        ## build one.
        ## <NOTE>
        ## We currently do not also save this in RDS format, as we can
        ## always do
        ##   .build_Rd_index(readRDS(file.path(outDir, "Meta", "Rd.rds"))
        if(!file_test("-f", file.path(dir, "INDEX")))
            writeLines(formatDL(.build_Rd_index(contents)),
                       file.path(outDir, "INDEX"))
        ## </NOTE>
    } else {
        ## No Rd files: still install (empty) hsearch and links
        ## indices.
        contents <- NULL
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
    }
    if(dir.exists(dataDir))
        saveRDS(.build_data_index(dataDir, contents),
                file.path(outDir, "Meta", "data.rds"))
    invisible()
}
### * .install_package_vignettes2
## called from R CMD INSTALL for pre 3.0.2-built tarballs, and for base packages
## Build and install vignette metadata for tarballs built before
## R 3.0.2 and for base packages: copies vignette sources into
## <outDir>/doc, tangles them to *.R files, writes the HTML index
## (unless the package supplies one) and saves the vignette index as
## Meta/vignette.rds.
.install_package_vignettes2 <-
function(dir, outDir, encoding = "")
{
    dir <- file_path_as_absolute(dir)
    subdirs <- c("vignettes", file.path("inst", "doc"))
    ok <- dir.exists(file.path(dir, subdirs))
    ## Create a vignette index only if the vignette dir exists.
    if (!any(ok))
        return(invisible())
    ## A 'vignettes' directory takes precedence over legacy inst/doc.
    subdir <- subdirs[ok][1L]
    vignetteDir <- file.path(dir, subdir)
    outDir <- file_path_as_absolute(outDir)
    packageName <- basename(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    ## --fake and --no-inst installs do not have a outVignetteDir.
    if(!dir.exists(outVignetteDir)) return(invisible())
    ## If there is an HTML index in the @file{inst/doc} subdirectory of
    ## the package source directory (@code{dir}), we do not overwrite it
    ## (similar to top-level @file{INDEX} files). Installation already
    ## copied this over.
    hasHtmlIndex <- file_test("-f", file.path(vignetteDir, "index.html"))
    htmlIndex <- file.path(outDir, "doc", "index.html")
    vigns <- pkgVignettes(dir = dir, subdirs = subdir, check = TRUE)
    ## Write dummy HTML index if no vignettes are found and exit.
    if(length(vigns$docs) == 0L) {
        ## we don't want to write an index if the directory is in fact empty
        files <- list.files(vignetteDir, all.files = TRUE, no.. = TRUE)
        if((length(files) > 0L) && !hasHtmlIndex)
            .writeVignetteHtmlIndex(packageName, htmlIndex)
        return(invisible())
    }
    if (subdir == "vignettes") {
        ## copy vignette sources over.
        file.copy(vigns$docs, outVignetteDir)
    }
    ## Prefer metadata that includes outputs/sources; fall back to the
    ## plain form when that fails.
    vigns <- tryCatch({
        pkgVignettes(dir=outDir, subdirs="doc", output=TRUE, source=TRUE)
    }, error = function(ex) {
        pkgVignettes(dir=outDir, subdirs="doc")
    })
    vignetteIndex <- .build_vignette_index(vigns)
    if(NROW(vignetteIndex) > 0L) {
        cwd <- getwd()
        if (is.null(cwd))
            stop("current working directory cannot be ascertained")
        setwd(outVignetteDir)
        loadVignetteBuilder(dir, mustwork = FALSE)
        ## install tangled versions of Sweave vignettes. FIXME: Vignette
        ## *.R files should have been included when the package was built,
        ## but in the interim before they are all built with the new code,
        ## this is needed.
        for(i in seq_along(vigns$docs)) {
            file <- vigns$docs[i]
            ## Skip vignettes whose tangled source already exists.
            if (!is.null(vigns$sources) && !is.null(vigns$sources[file][[1]]))
                next
            file <- basename(file)
            enc <- vigns$encodings[i]
            cat(" ", sQuote(basename(file)),
                if(nzchar(enc)) paste("using", sQuote(enc)), "\n")
            engine <- try(vignetteEngine(vigns$engines[i]), silent = TRUE)
            if (!inherits(engine, "try-error"))
                engine$tangle(file, quiet = TRUE, encoding = enc)
            setwd(outVignetteDir) # just in case some strange tangle function changed it
        }
        setwd(cwd)
        # Update - now from the output directory
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        ## remove any files with no R code (they will have header comments).
        ## if not correctly declared they might not be in the current encoding
        sources <- unlist(vigns$sources)
        for(i in seq_along(sources)) {
            file <- sources[i]
            if (!file_test("-f", file)) next
            bfr <- readLines(file, warn = FALSE)
            if(all(grepl("(^###|^[[:space:]]*$)", bfr, useBytes = TRUE)))
                unlink(file)
        }
        # Update
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        # Add tangle source files (*.R) to the vignette index
        # Only the "main" R file, because tangle may also split
        # output into multiple files
        sources <- character(length(vigns$docs))
        for (i in seq_along(vigns$docs)) {
            name <- vigns$names[i]
            ## NOTE(review): 'engine' here is whatever the tangle loop
            ## above last assigned (and is undefined if that loop ran
            ## zero iterations) -- looks fragile; confirm intended.
            source <- find_vignette_product(name, by = "tangle", main = TRUE, dir = vigns$dir, engine = engine)
            if (length(source) > 0L)
                sources[i] <- basename(source)
        }
        vignetteIndex$R <- sources
    }
    if(!hasHtmlIndex)
        .writeVignetteHtmlIndex(packageName, htmlIndex, vignetteIndex)
    saveRDS(vignetteIndex,
            file = file.path(outDir, "Meta", "vignette.rds"))
    invisible()
}
### * .install_package_vignettes3
## called from R CMD INSTALL for 3.0.2 or later tarballs
.install_package_vignettes3 <-
function(dir, outDir, encoding = "")
{
    ## Install the pre-built vignette index (build/vignette.rds) of a
    ## tarball built with R >= 3.0.2, and generate an HTML index for
    ## the installed doc directory unless the package ships its own.
    pkg <- basename(outDir)
    srcIndex <- file.path(file_path_as_absolute(dir), "build",
                          "vignette.rds")
    ## Nothing to do for packages without a built vignette index.
    if(!file_test("-f", srcIndex))
        return(invisible())
    ## Keep a copy of the index in Meta.
    file.copy(srcIndex, file.path(outDir, "Meta"))
    ## A hand-written inst/doc/index.html (already installed) wins
    ## over the generated one.
    docIndex <- file.path(outDir, "doc", "index.html")
    if(!file_test("-f", docIndex))
        .writeVignetteHtmlIndex(pkg, docIndex, readRDS(srcIndex))
    invisible()
}
### * .install_package_demo_index
.install_package_demo_index <-
function(dir, outDir)
{
    ## Build the demo index for a package (when it has a demo
    ## directory) and save it as Meta/demo.rds.
    srcDemoDir <- file.path(dir, "demo")
    if(dir.exists(srcDemoDir))
        saveRDS(.build_demo_index(srcDemoDir),
                file = file.path(outDir, "Meta", "demo.rds"))
    invisible()
}
### * .vinstall_package_indices
## called from src/library/Makefile
.vinstall_package_indices <-
function(src_dir, out_dir, packages)
{
    ## Install the package indices for the given packages, whose
    ## sources are rooted at src_dir and whose installations are
    ## rooted at out_dir, then refresh the HTML package listing.
    ## Really only useful for base packages under Unix
    ## (see src/library/Makefile.in).
    for(pkg in unlist(strsplit(packages, "[[:space:]]+")))
        .install_package_indices(file.path(src_dir, pkg),
                                 file.path(out_dir, pkg))
    utils::make.packages.html(.Library, verbose = FALSE)
    invisible()
}
### * .install_package_vignettes
## called from src/library/Makefile[.win]
## this is only used when building R
## Build PDF vignettes for a (base) package and install them into
## <outDir>/doc.  Only used when building R itself, from
## src/library/Makefile[.win].  Building happens in a '.vignettes'
## subdirectory of the current directory so that failures can be
## inspected and Make can clean up.
.install_package_vignettes <-
function(dir, outDir, keep.source = TRUE)
{
    dir <- file_path_as_absolute(dir)
    vigns <- pkgVignettes(dir = dir)
    if(is.null(vigns) || !length(vigns$docs)) return(invisible())
    outDir <- file_path_as_absolute(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    if(!dir.exists(outVignetteDir) && !dir.create(outVignetteDir))
        stop(gettextf("cannot open directory '%s'", outVignetteDir),
             domain = NA)
    ## We have to be careful to avoid repeated rebuilding.
    ## Only rebuild vignettes whose installed PDF is older than the
    ## source document.
    vignettePDFs <-
        file.path(outVignetteDir,
                  sub("$", ".pdf",
                      basename(file_path_sans_ext(vigns$docs))))
    upToDate <- file_test("-nt", vignettePDFs, vigns$docs)
    ## The primary use of this function is to build and install PDF
    ## vignettes in base packages.
    ## Hence, we build in a subdir of the current directory rather
    ## than a temp dir: this allows inspection of problems and
    ## automatic cleanup via Make.
    cwd <- getwd()
    if (is.null(cwd))
        stop("current working directory cannot be ascertained")
    buildDir <- file.path(cwd, ".vignettes")
    if(!dir.exists(buildDir) && !dir.create(buildDir))
        stop(gettextf("cannot create directory '%s'", buildDir), domain = NA)
    on.exit(setwd(cwd))
    setwd(buildDir)
    loadVignetteBuilder(vigns$pkgdir)
    for(i in seq_along(vigns$docs)[!upToDate]) {
        file <- vigns$docs[i]
        name <- vigns$names[i]
        engine <- vignetteEngine(vigns$engines[i])
        message(gettextf("processing %s", sQuote(basename(file))),
                domain = NA)
        ## Note that contrary to all other weave/tangle calls, here
        ## 'file' is not a file in the current directory [hence no
        ## file <- basename(file) above]. However, weave should/must
        ## always create a file ('output') in the current directory.
        output <- tryCatch({
            engine$weave(file, pdf = TRUE, eps = FALSE, quiet = TRUE,
                         keep.source = keep.source, stylepath = FALSE)
            setwd(buildDir)
            find_vignette_product(name, by = "weave", engine = engine)
        }, error = function(e) {
            stop(gettextf("running %s on vignette '%s' failed with message:\n%s",
                          engine[["name"]], file, conditionMessage(e)),
                 domain = NA, call. = FALSE)
        })
        ## In case of an error, do not clean up: should we point to
        ## buildDir for possible inspection of results/problems?
        ## We need to ensure that vignetteDir is in TEXINPUTS and BIBINPUTS.
        if (vignette_is_tex(output)) {
            ## <FIXME>
            ## What if this fails?
            ## Now gives a more informative error texi2pdf fails
            ## or if it does not produce a <name>.pdf.
            tryCatch({
                texi2pdf(file = output, quiet = TRUE, texinputs = vigns$dir)
                output <- find_vignette_product(name, by = "texi2pdf", engine = engine)
            }, error = function(e) {
                stop(gettextf("compiling TeX file %s failed with message:\n%s",
                              sQuote(output), conditionMessage(e)),
                     domain = NA, call. = FALSE)
            })
            ## </FIXME>
        }
        if(!file.copy(output, outVignetteDir, overwrite = TRUE))
            stop(gettextf("cannot copy '%s' to '%s'",
                          output,
                          outVignetteDir),
                 domain = NA)
    }
    ## Need to change out of this dir before we delete it,
    ## at least on Windows.
    setwd(cwd)
    unlink(buildDir, recursive = TRUE)
    ## Now you need to update the HTML index!
    ## This also creates the .R files
    .install_package_vignettes2(dir, outDir)
    invisible()
}
### * .install_package_namespace_info
.install_package_namespace_info <-
function(dir, outDir)
{
    ## Parse a package's NAMESPACE file (if any) and cache the parsed
    ## form as Meta/nsInfo.rds; a cache newer than the NAMESPACE file
    ## is left alone.
    dir <- file_path_as_absolute(dir)
    nsFile <- file.path(dir, "NAMESPACE")
    if(!file_test("-f", nsFile)) return(invisible())
    rdsPath <- file.path(outDir, "Meta", "nsInfo.rds")
    if(!file_test("-nt", rdsPath, nsFile)) {
        info <- parseNamespaceFile(basename(dir), dirname(dir))
        metaDir <- file.path(outDir, "Meta")
        if(!dir.exists(metaDir) && !dir.create(metaDir))
            stop(gettextf("cannot open directory '%s'", metaDir),
                 domain = NA)
        saveRDS(info, rdsPath)
    }
    invisible()
}
### * .vinstall_package_namespaces_as_RDS
## called from src/library/Makefile
.vinstall_package_namespaces_as_RDS <-
function(dir, packages)
{
    ## Install the parsed NAMESPACE metadata for each of the given
    ## packages installed in 'dir'.  Really only useful for base
    ## packages under Unix (see src/library/Makefile.in).
    for(pkg in unlist(strsplit(packages, "[[:space:]]+"))) {
        pkgDir <- file.path(dir, pkg)
        .install_package_namespace_info(pkgDir, pkgDir)
    }
    invisible()
}
### * .install_package_Rd_objects
## called from src/library/Makefile
## Build the Rd database of a package and install it as a lazy-load
## database under <outDir>/help, together with any user-defined Rd
## macros and a paths.rds recording the source Rd files.  The rebuild
## is skipped when the installed database is up to date.
.install_package_Rd_objects <-
function(dir, outDir, encoding = "unknown")
{
    dir <- file_path_as_absolute(dir)
    mandir <- file.path(dir, "man")
    manfiles <- if(!dir.exists(mandir)) character()
                else list_files_with_type(mandir, "docs")
    manOutDir <- file.path(outDir, "help")
    dir.create(manOutDir, FALSE)
    db_file <- file.path(manOutDir,
                         paste0(basename(outDir), ".rdx"))
    ## A partial Rd db may have been created at package build time.
    built_file <- file.path(dir, "build", "partial.rdb")
    ## User-defined Rd macros get installed alongside the help db.
    macro_files <- list.files(file.path(dir, "man", "macros"), pattern = "\\.Rd$", full.names = TRUE)
    if (length(macro_files)) {
        macroDir <- file.path(manOutDir, "macros")
        dir.create(macroDir, FALSE)
        file.copy(macro_files, macroDir, overwrite = TRUE)
    }
    ## Avoid (costly) rebuilding if not needed.
    ## Actually, it seems no more costly than these tests, which it also does
    pathsFile <- file.path(manOutDir, "paths.rds")
    ## Rebuild when the db or paths file is missing, the set of Rd
    ## files changed, or any Rd file is newer than the db.
    if(!file_test("-f", db_file) || !file.exists(pathsFile) ||
       !identical(sort(manfiles), sort(readRDS(pathsFile))) ||
       !all(file_test("-nt", db_file, manfiles))) {
        db <- .build_Rd_db(dir, manfiles, db_file = db_file,
                           encoding = encoding, built_file = built_file)
        nm <- as.character(names(db)) # Might be NULL
        ## 'first' records the offset at which the man-dir-relative
        ## part of each stored path begins.
        saveRDS(structure(nm,
                          first = nchar(file.path(mandir)) + 2L),
                pathsFile)
        names(db) <- sub("\\.[Rr]d$", "", basename(nm))
        makeLazyLoadDB(db, file.path(manOutDir, basename(outDir)))
    }
    invisible()
}
### * .install_package_demos
## called from basepkg.mk and .install_packages
.install_package_demos <-
function(dir, outDir)
{
    ## Copy a package's demo scripts into the installed package.
    ## NB: we no longer install 00Index.
    srcDir <- file.path(dir, "demo")
    if(!dir.exists(srcDir)) return()
    demos <- list_files_with_type(srcDir, "demo", full.names = FALSE)
    if(length(demos) == 0L) return()
    dstDir <- file.path(outDir, "demo")
    if(!dir.exists(dstDir)) dir.create(dstDir)
    file.copy(file.path(srcDir, demos), dstDir, overwrite = TRUE)
}
### * .find_cinclude_paths
.find_cinclude_paths <-
function(pkgs, lib.loc = NULL, file = NULL)
{
    ## Given a comma-separated string of package names (or a
    ## DESCRIPTION file whose LinkingTo field supplies one), locate
    ## the installed packages and print the corresponding
    ## -I"/path/to/package/include" compiler flags.
    if(!is.null(file)) {
        linking_to <- read.dcf(file, "LinkingTo")[1L, 1L]
        if(is.na(linking_to)) return(invisible())
        pkgs <- linking_to
    }
    pkg_names <- strsplit(pkgs[1L], ",[[:blank:]]*")[[1L]]
    pkg_paths <- find.package(pkg_names, lib.loc, quiet = TRUE)
    if(length(pkg_paths) > 0L) {
        flags <- paste0('-I"', pkg_paths, '/include"')
        cat(paste(flags, collapse = " "))
    }
    return(invisible())
}
### * .Rtest_package_depends_R_version
## Check whether the running R satisfies the R-version requirement(s)
## in a package's DESCRIPTION 'Depends' field.  Returns 0 when all
## requirements are met (or none are stated) and non-zero otherwise;
## an explanatory message is emitted on failure.
.Rtest_package_depends_R_version <-
function(dir)
{
    if(missing(dir)) dir <- "."
    meta <- .read_description(file.path(dir, "DESCRIPTION"))
    deps <- .split_description(meta, verbose = TRUE)$Rdepends2
    status <- 0
    current <- getRversion()
    for(depends in deps) {
        ## .split_description will have ensured that this is NULL or
        ## of length 3.
        if(length(depends) > 1L) {
            ## .check_package_description will insist on these operators
            if(!depends$op %in% c("<=", ">=", "<", ">", "==", "!="))
                message("WARNING: malformed 'Depends' field in 'DESCRIPTION'")
            else {
                ## The requirement is either a regular version number
                ## or (for development versions of R) an svn revision
                ## such as "r12345".
                status <- if(inherits(depends$version, "numeric_version"))
                    !do.call(depends$op, list(current, depends$version))
                else {
                    ver <- R.version
                    ## Release ("" status) and Patched builds are
                    ## taken to satisfy any svn-revision requirement.
                    if (ver$status %in% c("", "Patched")) FALSE
                    else !do.call(depends$op,
                                  list(ver[["svn rev"]],
                                       as.numeric(sub("^r", "", depends$version))))
                }
            }
            if(status != 0) {
                ## Prefer the package name exported by the build
                ## system, falling back to the DESCRIPTION metadata.
                package <- Sys.getenv("R_PACKAGE_NAME")
                if(!nzchar(package))
                    package <- meta["Package"]
                msg <- if(nzchar(package))
                    gettextf("ERROR: this R is version %s, package '%s' requires R %s %s",
                             current, package,
                             depends$op, depends$version)
                else
                    gettextf("ERROR: this R is version %s, required is R %s %s",
                             current, depends$op, depends$version)
                message(strwrap(msg, exdent = 2L))
                break
            }
        }
    }
    status
}
## no longer used
## Quit R, returning the status of the R-version dependency check as
## the process exit status; kept for old build scripts that call it
## directly.
.test_package_depends_R_version <-
function(dir)
    q(status = .Rtest_package_depends_R_version(dir))
### * .test_load_package
## Check that an installed package can be loaded (used by R CMD
## INSTALL's test-load phase); stops with an error when loading fails.
.test_load_package <- function(pkg_name, lib)
{
    options(warn = 1) # report warnings as they occur
    loaded <- try(suppressPackageStartupMessages(
        library(pkg_name, lib.loc = lib, character.only = TRUE,
                logical.return = TRUE)))
    if (inherits(loaded, "try-error") || !loaded)
        stop("loading failed", call. = FALSE)
}
### * checkRdaFiles
## Report, for each .rda/.RData file, its size, whether it uses the
## ASCII save format, its compression type and its serialization
## format version.  'paths' may also be a single directory, in which
## case all *.rda and *.RData files therein are examined.  Returns a
## data frame with one row per path (NA rows for missing files).
checkRdaFiles <- function(paths)
{
    if(length(paths) == 1L && dir.exists(paths)) {
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
        ## Exclude .RData, which this may or may not match
        paths <- grep("/[.]RData$", paths, value = TRUE, invert = TRUE)
    }
    res <- data.frame(size = NA_real_, ASCII = NA,
                      compress = NA_character_, version = NA_integer_,
                      stringsAsFactors = FALSE)
    res <- res[rep_len(1L, length(paths)), ]
    row.names(res) <- paths
    keep <- file.exists(paths)
    res$size[keep] <- file.size(paths)[keep]
    for(p in paths[keep]) {
        ## Identify the compression from the file's first raw bytes.
        magic <- readBin(p, "raw", n = 5)
        res[p, "compress"] <- if(all(magic[1:2] == c(0x1f, 0x8b))) "gzip"
        else if(rawToChar(magic[1:3]) == "BZh") "bzip2"
        else if(magic[1L] == 0xFD && rawToChar(magic[2:5]) == "7zXZ") "xz"
        else if(grepl("RD[ABX][12]", rawToChar(magic), useBytes = TRUE)) "none"
        else "unknown"
        ## Decompress the header to read the serialization magic,
        ## e.g. "RDX2" (binary) or "RDA2" (ASCII).
        con <- gzfile(p)
        magic <- readChar(con, 5L, useBytes = TRUE)
        close(con)
        res[p, "ASCII"] <- if (grepl("RD[ABX][12]", magic, useBytes = TRUE))
            substr(magic, 3, 3) == "A" else NA
        ver <- sub("(RD[ABX])([12]*)", "\\2", magic, useBytes = TRUE)
        ## Fix: record the version for this file only.  The previous
        ## 'res$version <- as.integer(ver)' overwrote the whole column
        ## on every iteration, so all rows ended up with the version
        ## of the last file processed.
        res[p, "version"] <- as.integer(ver)
    }
    res
}
### * resaveRdaFiles
## Re-save .rda/.RData files with the requested compression.  With
## compress = "auto", each file is saved with gzip and bzip2 (and,
## for larger files, also xz) and the smallest result replaces the
## original.  'paths' may be a single directory, in which case all
## *.rda and *.RData files therein are processed.
resaveRdaFiles <- function(paths,
                           compress = c("auto", "gzip", "bzip2", "xz"),
                           compression_level)
{
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
    compress <- match.arg(compress)
    ## Default level: 6 for gzip, the maximum (9) otherwise.
    if (missing(compression_level))
        compression_level <- switch(compress, "gzip" = 6, 9)
    for(p in paths) {
        ## Round-trip each file through a fresh environment so only
        ## its own objects are re-saved.
        env <- new.env(hash = TRUE) # probably small, need not be
        suppressPackageStartupMessages(load(p, envir = env))
        if(compress == "auto") {
            ## Save with gzip and bzip2; the 0.9 factor gives gzip a
            ## 10% advantage in the size comparison below.
            f1 <- tempfile()
            save(file = f1, list = ls(env, all.names = TRUE), envir = env)
            f2 <- tempfile()
            save(file = f2, list = ls(env, all.names = TRUE), envir = env,
                 compress = "bzip2")
            ss <- file.size(c(f1, f2)) * c(0.9, 1.0)
            names(ss) <- c(f1, f2)
            ## Only try the (slow) xz compression when the (weighted)
            ## gzip size exceeds 10Kb.
            if(ss[1L] > 10240) {
                f3 <- tempfile()
                save(file = f3, list = ls(env, all.names = TRUE), envir = env,
                     compress = "xz")
                ss <- c(ss, file.size(f3))
                names(ss) <- c(f1, f2, f3)
            }
            ## Keep whichever candidate came out smallest and discard
            ## the temporaries.
            nm <- names(ss)
            ind <- which.min(ss)
            file.copy(nm[ind], p, overwrite = TRUE)
            unlink(nm)
        } else
            save(file = p, list = ls(env, all.names = TRUE), envir = env,
                 compress = compress, compression_level = compression_level)
    }
}
### * compactPDF
## Compact PDF files using qpdf and/or GhostScript.  'paths' may be a
## single directory, in which case all *.pdf files therein are
## processed.  A compacted version only replaces the original when it
## is at least 10% and more than 10Kb smaller.  Returns a
## "compactPDF" data frame of old/new sizes for the replaced files.
compactPDF <-
function(paths, qpdf = Sys.which(Sys.getenv("R_QPDF", "qpdf")),
         gs_cmd = Sys.getenv("R_GSCMD", ""),
         gs_quality = Sys.getenv("GS_QUALITY", "none"),
         gs_extras = character())
{
    use_qpdf <- nzchar(qpdf)
    gs_quality <- match.arg(gs_quality, c("none", "printer", "ebook", "screen"))
    use_gs <- if(gs_quality != "none") nzchar(gs_cmd <- find_gs_cmd(gs_cmd)) else FALSE
    ## Nothing can be done without at least one of the external tools.
    if (!use_gs && !use_qpdf) return()
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(file.path(paths, "*.pdf"))
    dummy <- rep.int(NA_real_, length(paths))
    ans <- data.frame(old = dummy, new = dummy, row.names = paths)
    tf <- tempfile("pdf"); tf2 <- tempfile("pdf")
    for (p in paths) {
        res <- 0
        if (use_gs) {
            ## Re-distill with GhostScript at the requested quality ...
            res <- system2(gs_cmd,
                           c("-q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite",
                             sprintf("-dPDFSETTINGS=/%s", gs_quality),
                             "-dCompatibilityLevel=1.5",
                             "-dAutoRotatePages=/None",
                             sprintf("-sOutputFile=%s", tf),
                             gs_extras, p), FALSE, FALSE)
            ## ... then optionally let qpdf compress the result.
            if(!res && use_qpdf) {
                unlink(tf2) # precaution
                file.rename(tf, tf2)
                res <- system2(qpdf, c("--stream-data=compress",
                                       "--object-streams=generate",
                                       tf2, tf), FALSE, FALSE)
                unlink(tf2)
            }
        } else if(use_qpdf) {
            res <- system2(qpdf, c("--stream-data=compress",
                                   "--object-streams=generate",
                                   p, tf), FALSE, FALSE)
        }
        if(!res && file.exists(tf)) {
            ## Only replace the original for a worthwhile reduction:
            ## at least 10% and more than 10Kb smaller.
            old <- file.size(p); new <- file.size(tf)
            if(new/old < 0.9 && new < old - 1e4) {
                file.copy(tf, p, overwrite = TRUE)
                ans[p, ] <- c(old, new)
            }
        }
        unlink(tf)
    }
    ## Files that were not replaced keep NA rows and are dropped here.
    structure(na.omit(ans), class = c("compactPDF", "data.frame"))
}
## Locate a GhostScript executable.  An explicit 'gs_cmd' wins;
## otherwise the R_GSCMD (and, on Windows, GSC) environment variables
## and the platform's conventional binary names are tried.  Returns
## the full path, or "" when nothing suitable is found.
find_gs_cmd <- function(gs_cmd = "")
{
    if(nzchar(gs_cmd)) return(Sys.which(gs_cmd))
    if(.Platform$OS.type != "windows")
        return(Sys.which(Sys.getenv("R_GSCMD", "gs")))
    ## Windows: R_GSCMD, then GSC, then the standard console binaries
    ## for 64- and 32-bit GhostScript.
    candidate <- Sys.getenv("R_GSCMD")
    if (!nzchar(candidate)) candidate <- Sys.getenv("GSC")
    found <- Sys.which(candidate)
    if (!nzchar(found)) found <- Sys.which("gswin64c")
    if (!nzchar(found)) found <- Sys.which("gswin32c")
    found
}
## Human-readable summary of a compactPDF() result: one line per file
## whose compaction met the given size-reduction thresholds.
format.compactPDF <- function(x, ratio = 0.9, diff = 1e4, ...)
{
    if(!nrow(x)) return(character())
    shrunk <- x[with(x, new/old < ratio & new < old - diff), ]
    if(!nrow(shrunk)) return(character())
    ## Format sizes in Kb, switching to Mb for new sizes of 1Mb+.
    pretty <- shrunk
    pretty[] <- lapply(shrunk, function(sz) sprintf("%.0fKb", sz/1024))
    big <- shrunk$new >= 1024^2
    pretty[big, ] <- lapply(shrunk[big, ],
                            function(sz) sprintf("%.1fMb", sz/1024^2))
    paste(' compacted', sQuote(basename(row.names(shrunk))),
          'from', pretty[, 1L], 'to', pretty[, 2L])
}
### * add_datalist
## Create a data/datalist file for packages whose data directory
## exceeds 1Mb, listing each data set and (when different) the
## objects it provides.  An existing datalist is kept unless
## force = TRUE.
add_datalist <- function(pkgpath, force = FALSE)
{
    dataDir <- file.path(pkgpath, "data")
    dlist <- file.path(dataDir, "datalist")
    if (!force && file.exists(dlist)) return()
    ## Only bother for data directories larger than 1Mb.
    totalSize <- sum(file.size(Sys.glob(file.path(dataDir, "*"))))
    if (totalSize <= 1024^2) return()
    datasets <- suppressPackageStartupMessages(list_data_in_pkg(dataDir = dataDir)) # for BARD
    if (!length(datasets)) return()
    con <- file(dlist, "w")
    for (nm in names(datasets)) {
        objs <- datasets[[nm]]
        ## A data set providing exactly one object of its own name is
        ## written as a bare name; otherwise "name: obj obj ...".
        if (length(objs) == 1L && objs == nm)
            writeLines(nm, con)
        else
            cat(nm, ": ", paste(objs, collapse = " "), "\n",
                sep = "", file = con)
    }
    close(con)
    invisible()
}
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
| 42,256 | gpl-2.0 |
5eba3eeb33f67f45455162ef3cec2b23a3ab892e | o-/Rexperiments | src/library/tools/R/admin.R | # File src/library/tools/R/admin.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### * .install_package_description
## called from basepkg.mk and .install_packages
## Validate a package's DESCRIPTION file and install it into outDir
## with a 'Built:' field appended; the parsed metadata is also saved
## as Meta/package.rds.  'builtStamp' optionally supplies a fixed
## build timestamp (for reproducible builds); when empty, the current
## time in UTC is used.
.install_package_description <-
function(dir, outDir, builtStamp=character())
{
    ## Function for taking the DESCRIPTION package meta-information,
    ## checking/validating it, and installing it with the 'Built:'
    ## field added. Note that from 1.7.0 on, packages without
    ## compiled code are not marked as being from any platform.
    ## Check first. Note that this also calls .read_description(), but
    ## .check_package_description() currently really needs to know the
    ## path to the DESCRIPTION file, and returns an object with check
    ## results and not the package metadata ...
    ok <- .check_package_description(file.path(dir, "DESCRIPTION"))
    if(any(as.integer(sapply(ok, length)) > 0L)) {
        stop(paste(gettext("Invalid DESCRIPTION file") ,
                   paste(.eval_with_capture(print(ok))$output,
                         collapse = "\n"),
                   sep = "\n\n"),
             domain = NA,
             call. = FALSE)
    }
    ## This reads (in C locale) byte-by-byte, declares latin1 or UTF-8
    ## Maybe it would be better to re-encode others (there are none at
    ## present, at least in a UTF-8 locale?
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    ## should not have a Built: field, so ignore it if it is there
    nm <- names(db)
    if("Built" %in% nm) {
        db <- db[-match("Built", nm)]
        warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                         db["Package"]),
                domain = NA,
                call. = FALSE)
    }
    ## On macOS with a sub-architecture set, record a 'universal'
    ## darwin platform instead of the specific one.
    OStype <- R.version$platform
    if (grepl("-apple-darwin", OStype) && nzchar(Sys.getenv("R_ARCH")))
        OStype <- sub(".*-apple-darwin", "universal-apple-darwin", OStype)
    ## The platform part is only recorded for packages with compiled
    ## code (i.e. with a src directory).
    Built <-
        paste0("R ",
               paste(R.version[c("major", "minor")], collapse = "."),
               "; ",
               if(dir.exists(file.path(dir, "src"))) OStype else "",
               "; ",
               ## Some build systems want to supply a package-build timestamp for reproducibility
               ## Prefer date in ISO 8601 format, UTC.
               if (length(builtStamp)==0) format(Sys.time(), tz = "UTC", usetz = TRUE) else builtStamp,
               ## Sys.time(),
               "; ",
               .OStype())
    ## At some point of time, we had:
    ## We must not split the Built: field across lines.
    ## Not sure if this is still true. If not, the following could be
    ## simplified to
    ##   db["Built"] <- Built
    ##   write.dcf(rbind(db), file.path(outDir, "DESCRIPTION"))
    ## But in any case, it is true for fields obtained from expanding R
    ## fields (Authors@R): these should not be reformatted.
    db <- c(db,
            .expand_package_description_db_R_fields(db),
            Built = Built)
    ## This cannot be done in a MBCS: write.dcf fails
    ctype <- Sys.getlocale("LC_CTYPE")
    Sys.setlocale("LC_CTYPE", "C")
    on.exit(Sys.setlocale("LC_CTYPE", ctype))
    .write_description(db, file.path(outDir, "DESCRIPTION"))
    outMetaDir <- file.path(outDir, "Meta")
    if(!dir.exists(outMetaDir) && !dir.create(outMetaDir))
        stop(gettextf("cannot open directory '%s'",
                      outMetaDir),
             domain = NA)
    saveInfo <- .split_description(db)
    saveRDS(saveInfo, file.path(outMetaDir, "package.rds"))
    invisible()
}
### * .split_description
## also used in .getRequiredPackages
## Split a DESCRIPTION metadata vector (as produced by
## .read_description) into structured parts: the parsed 'Built'
## field, the R version requirement(s), and the parsed dependency
## fields.  Returns a "packageDescription2" object.  Also used by
## .getRequiredPackages.
.split_description <-
function(db, verbose = FALSE)
{
    ## 'Built' has the fixed form "R <ver>; <platform>; <date>;
    ## <OStype>" (see .install_package_description), so splitting on
    ## "; " must yield exactly 4 parts; anything else is corruption.
    if(!is.na(Built <- db["Built"])) {
        Built <- as.list(strsplit(Built, "; ")[[1L]])
        if(length(Built) != 4L) {
            warning(gettextf("*** someone has corrupted the Built field in package '%s' ***",
                             db["Package"]),
                    domain = NA,
                    call. = FALSE)
            Built <- NULL
        } else {
            names(Built) <- c("R", "Platform", "Date", "OStype")
            ## Strip the leading "R " so the version can be handled as
            ## a version object.
            Built[["R"]] <- R_system_version(sub("^R ([0-9.]+)", "\\1",
                                                 Built[["R"]]))
        }
    } else Built <- NULL
    ## might perhaps have multiple entries
    Depends <- .split_dependencies(db[names(db) %in% "Depends"])
    ## several packages 'Depends' on base!
    ind <- match("base", names(Depends), 0L)
    if(ind) Depends <- Depends[-ind]
    ## We only need Rdepends for R < 2.7.0, but we still need to be
    ## able to check that someone is not trying to load this into a
    ## very old version of R.
    if("R" %in% names(Depends)) {
        ## Rdepends2 keeps *all* R requirements; Rdepends keeps only
        ## the first one (historical format).
        Rdeps2 <- Depends["R" == names(Depends)]
        names(Rdeps2) <- NULL
        Rdeps <- Depends[["R", exact = TRUE]] # the first one
        Depends <- Depends[names(Depends) != "R"]
        ## several packages have 'Depends: R', which is a noop.
        if(verbose && length(Rdeps) == 1L)
            message("WARNING: omitting pointless dependence on 'R' without a version requirement")
        if(length(Rdeps) <= 1L) Rdeps <- NULL
    } else Rdeps2 <- Rdeps <- NULL
    Rdeps <- as.vector(Rdeps)
    Suggests <- .split_dependencies(db[names(db) %in% "Suggests"])
    Imports <- .split_dependencies(db[names(db) %in% "Imports"])
    LinkingTo <- .split_dependencies(db[names(db) %in% "LinkingTo"])
    structure(list(DESCRIPTION = db, Built = Built,
                   Rdepends = Rdeps, Rdepends2 = Rdeps2,
                   Depends = Depends, Suggests = Suggests,
                   Imports = Imports, LinkingTo = LinkingTo),
              class = "packageDescription2")
}
### * .vinstall_package_descriptions_as_RDS
## called from src/library/Makefile
## Install DESCRIPTION package metadata as "Meta/package.rds" for each
## of the given packages installed under 'dir', skipping packages whose
## cached metadata is already newer than the DESCRIPTION file.
## Really only useful for base packages under Unix; see
## src/library/Makefile.in.
.vinstall_package_descriptions_as_RDS <-
function(dir, packages)
{
    pkgs <- unlist(strsplit(packages, "[[:space:]]+"))
    for(pkg in pkgs) {
        meta_dir <- file.path(dir, pkg, "Meta")
        if(!dir.exists(meta_dir) && !dir.create(meta_dir))
            stop(gettextf("cannot open directory '%s'", meta_dir))
        dcf_file <- file.path(dir, pkg, "DESCRIPTION")
        rds_file <- file.path(meta_dir, "package.rds")
        ## Only regenerate when the cache is not newer than the source.
        if(!file_test("-nt", rds_file, dcf_file))
            saveRDS(.split_description(.read_description(dcf_file)),
                    rds_file)
    }
    invisible()
}
### * .update_package_rds
## not used
## Rebuild the dumped package descriptions for every package found in
## 'lib.loc' (all library paths by default).  Currently unused.
.update_package_rds <-
function(lib.loc = NULL)
{
    if (is.null(lib.loc)) lib.loc <- .libPaths()
    for (lib in lib.loc[file.exists(lib.loc)]) {
        pkg_dirs <- list.files(lib, all.files = FALSE, full.names = TRUE)
        for (pkg in pkg_dirs) {
            ## Anything with a DESCRIPTION file is treated as a package.
            if (file.exists(file.path(pkg, "DESCRIPTION"))) {
                print(pkg)
                .install_package_description(pkg, pkg)
            }
        }
    }
}
### * .install_package_code_files
## Concatenate a package's R source files (in collation order,
## honouring any Collate/Collate.<OStype> field in DESCRIPTION) into a
## single installed file 'outDir/R/<pkgname>', re-encoding to the
## native encoding when DESCRIPTION declares an Encoding.  The result
## is parse()d at the end so a syntactically broken package is never
## installed.
.install_package_code_files <-
function(dir, outDir)
{
    if(!dir.exists(dir))
        stop(gettextf("directory '%s' does not exist", dir),
             domain = NA)
    dir <- file_path_as_absolute(dir)
    ## Attempt to set the LC_COLLATE locale to 'C' to turn off locale
    ## specific sorting.
    curLocale <- Sys.getlocale("LC_COLLATE")
    on.exit(Sys.setlocale("LC_COLLATE", curLocale), add = TRUE)
    ## (Guaranteed to work as per the Sys.setlocale() docs.)
    lccollate <- "C"
    if(Sys.setlocale("LC_COLLATE", lccollate) != lccollate) {
        ## <NOTE>
        ## I don't think we can give an error here.
        ## It may be the case that Sys.setlocale() fails because the "OS
        ## reports request cannot be honored" (src/main/platform.c), in
        ## which case we should still proceed ...
        warning("cannot turn off locale-specific sorting via LC_COLLATE")
        ## </NOTE>
    }
    ## We definitely need a valid DESCRIPTION file.
    db <- .read_description(file.path(dir, "DESCRIPTION"))
    codeDir <- file.path(dir, "R")
    if(!dir.exists(codeDir)) return(invisible())
    codeFiles <- list_files_with_type(codeDir, "code", full.names = FALSE)
    ## An OS-specific collate field takes precedence over the plain one.
    collationField <-
        c(paste("Collate", .OStype(), sep = "."), "Collate")
    if(any(i <- collationField %in% names(db))) {
        collationField <- collationField[i][1L]
        codeFilesInCspec <- .read_collate_field(db[collationField])
        ## Duplicated entries in the collation spec?
        badFiles <-
            unique(codeFilesInCspec[duplicated(codeFilesInCspec)])
        if(length(badFiles)) {
            out <- gettextf("\nduplicated files in '%s' field:",
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files are listed in the collation spec but don't
        ## exist.
        badFiles <- setdiff(codeFilesInCspec, codeFiles)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' field missing from '%s':",
                            collationField,
                            codeDir)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## See which files exist but are missing from the collation
        ## spec.  Note that we do not want the collation spec to use
        ## only a subset of the available code files.
        badFiles <- setdiff(codeFiles, codeFilesInCspec)
        if(length(badFiles)) {
            out <- gettextf("\nfiles in '%s' missing from '%s' field:",
                            codeDir,
                            collationField)
            out <- paste(out,
                         paste(" ", badFiles, collapse = "\n"),
                         sep = "\n")
            stop(out, domain = NA)
        }
        ## Everything's groovy ...
        codeFiles <- codeFilesInCspec
    }
    codeFiles <- file.path(codeDir, codeFiles)
    if(!dir.exists(outDir) && !dir.create(outDir))
        stop(gettextf("cannot open directory '%s'", outDir),
             domain = NA)
    outCodeDir <- file.path(outDir, "R")
    if(!dir.exists(outCodeDir) && !dir.create(outCodeDir))
        stop(gettextf("cannot open directory '%s'", outCodeDir),
             domain = NA)
    outFile <- file.path(outCodeDir, db["Package"])
    if(!file.create(outFile))
        stop(gettextf("unable to create '%s'", outFile), domain = NA)
    ## The combined file starts by recording the package name.
    writeLines(paste0(".packageName <- \"", db["Package"], "\""),
               outFile)
    enc <- as.vector(db["Encoding"])
    need_enc <- !is.na(enc) # Encoding was specified
    ## assume that if locale is 'C' we can used 8-bit encodings unchanged.
    if(need_enc && !(Sys.getlocale("LC_CTYPE") %in% c("C", "POSIX"))) {
        ## Re-encode each source file line by line, writing a #line
        ## directive first so error messages still point at the
        ## original file.
        con <- file(outFile, "a")
        on.exit(close(con)) # Windows does not like files left open
        for(f in codeFiles) {
            tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "")
            if(length(bad <- which(is.na(tmp)))) {
                ## Unconvertible lines: warn and fall back to escaping
                ## the offending bytes as <xx>.
                warning(sprintf(ngettext(length(bad),
                                         "unable to re-encode %s line %s",
                                         "unable to re-encode %s lines %s"),
                                sQuote(basename(f)),
                                paste(bad, collapse = ", ")),
                        domain = NA, call. = FALSE)
                tmp <- iconv(readLines(f, warn = FALSE), from = enc, to = "",
                             sub = "byte")
            }
            writeLines(paste0("#line 1 \"", f, "\""), con)
            writeLines(tmp, con)
        }
        close(con); on.exit()
    } else {
        ## <NOTE>
        ## It may be safer to do
        ## writeLines(sapply(codeFiles, readLines), outFile)
        ## instead, but this would be much slower ...
        ## use fast version of file.append that ensures LF between files
        if(!all(.file_append_ensuring_LFs(outFile, codeFiles)))
            stop("unable to write code files")
        ## </NOTE>
    }
    ## A syntax check here, so that we do not install a broken package.
    ## FIXME: this is only needed if we don't lazy load, as the lazy loader
    ## would detect the error.
    op <- options(showErrorCalls=FALSE)
    on.exit(options(op))
    parse(outFile)
    invisible()
}
### * .install_package_indices
## called from R CMD INSTALL
## Install the package indices (Rd contents, help-search and demo
## indices) for the sources in 'dir' into the installation 'outDir'.
## Called from R CMD INSTALL.
.install_package_indices <-
function(dir, outDir)
{
    options(warn = 1) # make warnings visible immediately
    for (d in c(dir, outDir))
        if(!dir.exists(d))
            stop(gettextf("directory '%s' does not exist", d),
                 domain = NA)
    ## A hand-written INDEX in the sources is installed verbatim,
    ## never rebuilt.
    srcIndex <- file.path(dir, "INDEX")
    if(file_test("-f", srcIndex)) {
        destIndex <- file.path(outDir, "INDEX")
        if(!file.copy(srcIndex, destIndex, overwrite = TRUE))
            stop(gettextf("unable to copy INDEX to '%s'", destIndex),
                 domain = NA)
    }
    metaDir <- file.path(outDir, "Meta")
    if(!dir.exists(metaDir) && !dir.create(metaDir))
        stop(gettextf("cannot open directory '%s'", metaDir),
             domain = NA)
    .install_package_Rd_indices(dir, outDir)
    .install_package_demo_index(dir, outDir)
    invisible()
}
### * .install_package_Rd_indices
## Build and install the Rd-derived indices for a package: Meta/Rd.rds,
## Meta/hsearch.rds, Meta/links.rds, Meta/data.rds and (unless the
## sources ship one) the top-level INDEX file.  Rebuilds only when the
## installed indices are older than the Rd/data sources.
.install_package_Rd_indices <-
function(dir, outDir)
{
    dir <- file_path_as_absolute(dir)
    docsDir <- file.path(dir, "man")
    dataDir <- file.path(outDir, "data")
    outDir <- file_path_as_absolute(outDir)
    ## <FIXME>
    ## Not clear whether we should use the basename of the directory we
    ## install to, or the package name as obtained from the DESCRIPTION
    ## file in the directory we install from (different for versioned
    ## installs).  We definitely do not want the basename of the dir we
    ## install from.
    packageName <- basename(outDir)
    ## </FIXME>
    allRd <- if(dir.exists(docsDir))
        list_files_with_type(docsDir, "docs") else character()
    ## some people have man dirs without any valid .Rd files
    if(length(allRd)) {
        ## we want the date of the newest .Rd file we will install
        newestRd <- max(file.mtime(allRd))
        ## these files need not exist, which gives NA.
        indices <- c(file.path("Meta", "Rd.rds"),
                     file.path("Meta", "hsearch.rds"),
                     file.path("Meta", "links.rds"),
                     "INDEX")
        upToDate <- file.mtime(file.path(outDir, indices)) >= newestRd
        ## BUG FIX: list.files() returns base names, so without
        ## full.names = TRUE the file.mtime() call below was always NA
        ## and the data index was rebuilt on every install.
        if(dir.exists(dataDir)
           && length(dataFiles <- list.files(dataDir, full.names = TRUE))) {
            ## Note that the data index is computed from both the package's
            ## Rd files and the data sets actually available.
            newestData <- max(file.mtime(dataFiles))
            upToDate <- c(upToDate,
                          file.mtime(file.path(outDir, "Meta", "data.rds")) >=
                          max(newestRd, newestData))
        }
        ## Note that this is not quite good enough: an Rd file or data file
        ## might have been removed since the indices were made.
        ## BUG FIX: anchor the path at outDir; the bare relative path
        ## "Meta/Rd.rds" was resolved against the current directory and
        ## so (almost) never existed, disabling this staleness check.
        RdsFile <- file.path(outDir, "Meta", "Rd.rds")
        if(file.exists(RdsFile)) { ## for Rd files
            ## this has file names without path
            files <- readRDS(RdsFile)$File
            if(!identical(basename(allRd), files)) upToDate <- FALSE
        }
        ## we want to proceed if any is NA.
        if(all(upToDate %in% TRUE)) return(invisible())
        ## Rd objects should already have been installed.
        db <- tryCatch(Rd_db(basename(outDir), lib.loc = dirname(outDir)),
                       error = function(e) NULL)
        ## If not, we build the Rd db from the sources:
        if(is.null(db)) db <- .build_Rd_db(dir, allRd)
        contents <- Rd_contents(db)
        .write_Rd_contents_as_RDS(contents,
                                  file.path(outDir, "Meta", "Rd.rds"))
        defaultEncoding <- as.vector(readRDS(file.path(outDir, "Meta", "package.rds"))$DESCRIPTION["Encoding"])
        if(is.na(defaultEncoding)) defaultEncoding <- NULL
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
        ## If there is no @file{INDEX} file in the package sources, we
        ## build one.
        ## <NOTE>
        ## We currently do not also save this in RDS format, as we can
        ## always do
        ## .build_Rd_index(readRDS(file.path(outDir, "Meta", "Rd.rds"))
        if(!file_test("-f", file.path(dir, "INDEX")))
            writeLines(formatDL(.build_Rd_index(contents)),
                       file.path(outDir, "INDEX"))
        ## </NOTE>
    } else {
        contents <- NULL
        ## BUG FIX: defaultEncoding was never defined on this branch,
        ## so packages without Rd files hit an "object not found"
        ## error; compute it here as well.
        defaultEncoding <- as.vector(readRDS(file.path(outDir, "Meta", "package.rds"))$DESCRIPTION["Encoding"])
        if(is.na(defaultEncoding)) defaultEncoding <- NULL
        saveRDS(.build_hsearch_index(contents, packageName, defaultEncoding),
                file.path(outDir, "Meta", "hsearch.rds"))
        saveRDS(.build_links_index(contents, packageName),
                file.path(outDir, "Meta", "links.rds"))
    }
    if(dir.exists(dataDir))
        saveRDS(.build_data_index(dataDir, contents),
                file.path(outDir, "Meta", "data.rds"))
    invisible()
}
### * .install_package_vignettes2
## called from R CMD INSTALL for pre 3.0.2-built tarballs, and for base packages
## Build and install the vignette index for tarballs built before
## R 3.0.2 (and for base packages): locate vignettes under vignettes/
## or inst/doc, tangle them into .R files in the installed doc
## directory, and write Meta/vignette.rds plus an HTML index (unless
## the package ships its own index.html).
.install_package_vignettes2 <-
function(dir, outDir, encoding = "")
{
    dir <- file_path_as_absolute(dir)
    subdirs <- c("vignettes", file.path("inst", "doc"))
    ok <- dir.exists(file.path(dir, subdirs))
    ## Create a vignette index only if the vignette dir exists.
    if (!any(ok))
        return(invisible())
    ## vignettes/ takes precedence over inst/doc when both exist.
    subdir <- subdirs[ok][1L]
    vignetteDir <- file.path(dir, subdir)
    outDir <- file_path_as_absolute(outDir)
    packageName <- basename(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    ## --fake and --no-inst installs do not have a outVignetteDir.
    if(!dir.exists(outVignetteDir)) return(invisible())
    ## If there is an HTML index in the @file{inst/doc} subdirectory of
    ## the package source directory (@code{dir}), we do not overwrite it
    ## (similar to top-level @file{INDEX} files). Installation already
    ## copied this over.
    hasHtmlIndex <- file_test("-f", file.path(vignetteDir, "index.html"))
    htmlIndex <- file.path(outDir, "doc", "index.html")
    vigns <- pkgVignettes(dir = dir, subdirs = subdir, check = TRUE)
    ## Write dummy HTML index if no vignettes are found and exit.
    if(length(vigns$docs) == 0L) {
        ## we don't want to write an index if the directory is in fact empty
        files <- list.files(vignetteDir, all.files = TRUE, no.. = TRUE)
        if((length(files) > 0L) && !hasHtmlIndex)
            .writeVignetteHtmlIndex(packageName, htmlIndex)
        return(invisible())
    }
    if (subdir == "vignettes") {
        ## copy vignette sources over.
        file.copy(vigns$docs, outVignetteDir)
    }
    ## Re-scan from the installed doc directory; fall back to a plain
    ## scan if asking for outputs/sources fails.
    vigns <- tryCatch({
        pkgVignettes(dir=outDir, subdirs="doc", output=TRUE, source=TRUE)
    }, error = function(ex) {
        pkgVignettes(dir=outDir, subdirs="doc")
    })
    vignetteIndex <- .build_vignette_index(vigns)
    if(NROW(vignetteIndex) > 0L) {
        cwd <- getwd()
        if (is.null(cwd))
            stop("current working directory cannot be ascertained")
        setwd(outVignetteDir)
        loadVignetteBuilder(dir, mustwork = FALSE)
        ## install tangled versions of Sweave vignettes. FIXME: Vignette
        ## *.R files should have been included when the package was built,
        ## but in the interim before they are all built with the new code,
        ## this is needed.
        for(i in seq_along(vigns$docs)) {
            file <- vigns$docs[i]
            ## Skip vignettes that already have a tangled source.
            if (!is.null(vigns$sources) && !is.null(vigns$sources[file][[1]]))
                next
            file <- basename(file)
            enc <- vigns$encodings[i]
            cat(" ", sQuote(basename(file)),
                if(nzchar(enc)) paste("using", sQuote(enc)), "\n")
            engine <- try(vignetteEngine(vigns$engines[i]), silent = TRUE)
            if (!inherits(engine, "try-error"))
                engine$tangle(file, quiet = TRUE, encoding = enc)
            setwd(outVignetteDir) # just in case some strange tangle function changed it
        }
        setwd(cwd)
        # Update - now from the output directory
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        ## remove any files with no R code (they will have header comments).
        ## if not correctly declared they might not be in the current encoding
        sources <- unlist(vigns$sources)
        for(i in seq_along(sources)) {
            file <- sources[i]
            if (!file_test("-f", file)) next
            bfr <- readLines(file, warn = FALSE)
            if(all(grepl("(^###|^[[:space:]]*$)", bfr, useBytes = TRUE)))
                unlink(file)
        }
        # Update
        vigns <- pkgVignettes(dir=outDir, subdirs="doc", source=TRUE)
        # Add tangle source files (*.R) to the vignette index
        # Only the "main" R file, because tangle may also split
        # output into multiple files
        sources <- character(length(vigns$docs))
        for (i in seq_along(vigns$docs)) {
            name <- vigns$names[i]
            ## NOTE(review): 'engine' here is whatever value the tangle
            ## loop above left behind (and is undefined if that loop
            ## always took the 'next' branch); this looks like it should
            ## be vignetteEngine(vigns$engines[i]) -- confirm upstream.
            source <- find_vignette_product(name, by = "tangle", main = TRUE, dir = vigns$dir, engine = engine)
            if (length(source) > 0L)
                sources[i] <- basename(source)
        }
        vignetteIndex$R <- sources
    }
    if(!hasHtmlIndex)
        .writeVignetteHtmlIndex(packageName, htmlIndex, vignetteIndex)
    saveRDS(vignetteIndex,
            file = file.path(outDir, "Meta", "vignette.rds"))
    invisible()
}
### * .install_package_vignettes3
## called from R CMD INSTALL for 3.0.2 or later tarballs
## Install the vignette index for tarballs built with R >= 3.0.2,
## which ship a prebuilt 'build/vignette.rds'.  Copies the index into
## Meta and writes an HTML index unless the package provides its own
## index.html.  Called from R CMD INSTALL.
.install_package_vignettes3 <-
function(dir, outDir, encoding = "")
{
    pkgname <- basename(outDir)
    srcIndex <- file.path(file_path_as_absolute(dir), "build", "vignette.rds")
    ## Nothing to do for packages without a prebuilt vignette index.
    if (!file_test("-f", srcIndex))
        return(invisible())
    ## Install the index under Meta.
    file.copy(srcIndex, file.path(outDir, "Meta"))
    ## A hand-written index.html shipped in inst/doc (already copied
    ## over by installation) is respected, like top-level INDEX files.
    docDir <- file.path(outDir, "doc")
    htmlIndex <- file.path(docDir, "index.html")
    vignetteIndex <- readRDS(srcIndex)
    if (!file_test("-f", file.path(docDir, "index.html")))
        .writeVignetteHtmlIndex(pkgname, htmlIndex, vignetteIndex)
    invisible()
}
### * .install_package_demo_index
## Build and install the demo index ("Meta/demo.rds") for packages
## that have a demo directory; a no-op otherwise.
.install_package_demo_index <-
function(dir, outDir)
{
    demoDir <- file.path(dir, "demo")
    if(dir.exists(demoDir))
        saveRDS(.build_demo_index(demoDir),
                file = file.path(outDir, "Meta", "demo.rds"))
    invisible()
}
### * .vinstall_package_indices
## called from src/library/Makefile
## Install the package indices for the given packages, with sources
## rooted at 'src_dir' and installations rooted at 'out_dir', then
## refresh the HTML package listing.  Really only useful for base
## packages under Unix; see src/library/Makefile.in.
.vinstall_package_indices <-
function(src_dir, out_dir, packages)
{
    pkgs <- unlist(strsplit(packages, "[[:space:]]+"))
    for(pkg in pkgs)
        .install_package_indices(file.path(src_dir, pkg),
                                 file.path(out_dir, pkg))
    utils::make.packages.html(.Library, verbose = FALSE)
    invisible()
}
### * .install_package_vignettes
## called from src/library/Makefile[.win]
## this is only used when building R
## Build PDF vignettes for a (base) package and install them into
## 'outDir/doc'.  Only used while building R itself, called from
## src/library/Makefile[.win].  Builds happen in ./.vignettes so that
## problems can be inspected and Make can clean up.
.install_package_vignettes <-
function(dir, outDir, keep.source = TRUE)
{
    dir <- file_path_as_absolute(dir)
    vigns <- pkgVignettes(dir = dir)
    if(is.null(vigns) || !length(vigns$docs)) return(invisible())
    outDir <- file_path_as_absolute(outDir)
    outVignetteDir <- file.path(outDir, "doc")
    if(!dir.exists(outVignetteDir) && !dir.create(outVignetteDir))
        stop(gettextf("cannot open directory '%s'", outVignetteDir),
             domain = NA)
    ## We have to be careful to avoid repeated rebuilding.
    ## A vignette is up to date when its installed PDF is newer than
    ## its source document.
    vignettePDFs <-
        file.path(outVignetteDir,
                  sub("$", ".pdf",
                      basename(file_path_sans_ext(vigns$docs))))
    upToDate <- file_test("-nt", vignettePDFs, vigns$docs)
    ## The primary use of this function is to build and install PDF
    ## vignettes in base packages.
    ## Hence, we build in a subdir of the current directory rather
    ## than a temp dir: this allows inspection of problems and
    ## automatic cleanup via Make.
    cwd <- getwd()
    if (is.null(cwd))
        stop("current working directory cannot be ascertained")
    buildDir <- file.path(cwd, ".vignettes")
    if(!dir.exists(buildDir) && !dir.create(buildDir))
        stop(gettextf("cannot create directory '%s'", buildDir), domain = NA)
    on.exit(setwd(cwd))
    setwd(buildDir)
    loadVignetteBuilder(vigns$pkgdir)
    for(i in seq_along(vigns$docs)[!upToDate]) {
        file <- vigns$docs[i]
        name <- vigns$names[i]
        engine <- vignetteEngine(vigns$engines[i])
        message(gettextf("processing %s", sQuote(basename(file))),
                domain = NA)
        ## Note that contrary to all other weave/tangle calls, here
        ## 'file' is not a file in the current directory [hence no
        ## file <- basename(file) above]. However, weave should/must
        ## always create a file ('output') in the current directory.
        output <- tryCatch({
            engine$weave(file, pdf = TRUE, eps = FALSE, quiet = TRUE,
                         keep.source = keep.source, stylepath = FALSE)
            setwd(buildDir)
            find_vignette_product(name, by = "weave", engine = engine)
        }, error = function(e) {
            stop(gettextf("running %s on vignette '%s' failed with message:\n%s",
                          engine[["name"]], file, conditionMessage(e)),
                 domain = NA, call. = FALSE)
        })
        ## In case of an error, do not clean up: should we point to
        ## buildDir for possible inspection of results/problems?
        ## We need to ensure that vignetteDir is in TEXINPUTS and BIBINPUTS.
        if (vignette_is_tex(output)) {
            ## <FIXME>
            ## What if this fails?
            ## Now gives a more informative error texi2pdf fails
            ## or if it does not produce a <name>.pdf.
            tryCatch({
                texi2pdf(file = output, quiet = TRUE, texinputs = vigns$dir)
                output <- find_vignette_product(name, by = "texi2pdf", engine = engine)
            }, error = function(e) {
                stop(gettextf("compiling TeX file %s failed with message:\n%s",
                              sQuote(output), conditionMessage(e)),
                     domain = NA, call. = FALSE)
            })
            ## </FIXME>
        }
        if(!file.copy(output, outVignetteDir, overwrite = TRUE))
            stop(gettextf("cannot copy '%s' to '%s'",
                          output,
                          outVignetteDir),
                 domain = NA)
    }
    ## Need to change out of this dir before we delete it,
    ## at least on Windows.
    setwd(cwd)
    unlink(buildDir, recursive = TRUE)
    ## Now you need to update the HTML index!
    ## This also creates the .R files
    .install_package_vignettes2(dir, outDir)
    invisible()
}
### * .install_package_namespace_info
## Parse the package NAMESPACE file and cache the result as
## "Meta/nsInfo.rds", skipping the work when the cache is already
## newer than the NAMESPACE file.  A no-op for packages without one.
.install_package_namespace_info <-
function(dir, outDir)
{
    dir <- file_path_as_absolute(dir)
    nsFile <- file.path(dir, "NAMESPACE")
    if(!file_test("-f", nsFile)) return(invisible())
    rdsPath <- file.path(outDir, "Meta", "nsInfo.rds")
    ## Cache still fresh?
    if(file_test("-nt", rdsPath, nsFile)) return(invisible())
    info <- parseNamespaceFile(basename(dir), dirname(dir))
    metaDir <- file.path(outDir, "Meta")
    if(!dir.exists(metaDir) && !dir.create(metaDir))
        stop(gettextf("cannot open directory '%s'", metaDir),
             domain = NA)
    saveRDS(info, rdsPath)
    invisible()
}
### * .vinstall_package_namespaces_as_RDS
## called from src/library/Makefile
## Install namespace metadata for each of the given packages installed
## in 'dir' that has a NAMESPACE file.  Really only useful for base
## packages under Unix; see src/library/Makefile.in.
.vinstall_package_namespaces_as_RDS <-
function(dir, packages)
{
    for(pkg in unlist(strsplit(packages, "[[:space:]]+"))) {
        pkg_dir <- file.path(dir, pkg)
        .install_package_namespace_info(pkg_dir, pkg_dir)
    }
    invisible()
}
### * .install_package_Rd_objects
## called from src/library/Makefile
## Build the Rd database for the package sources in 'dir' and install
## it as a lazy-load DB under 'outDir/help', together with a
## "paths.rds" listing the source Rd files and any Rd macro files.
## Called from src/library/Makefile.
.install_package_Rd_objects <-
function(dir, outDir, encoding = "unknown")
{
    dir <- file_path_as_absolute(dir)
    mandir <- file.path(dir, "man")
    manfiles <- if(!dir.exists(mandir)) character()
    else list_files_with_type(mandir, "docs")
    manOutDir <- file.path(outDir, "help")
    dir.create(manOutDir, FALSE)
    db_file <- file.path(manOutDir,
                         paste0(basename(outDir), ".rdx"))
    built_file <- file.path(dir, "build", "partial.rdb")
    ## User-defined Rd macros (man/macros/*.Rd) are copied alongside.
    macro_files <- list.files(file.path(dir, "man", "macros"), pattern = "\\.Rd$", full.names = TRUE)
    if (length(macro_files)) {
        macroDir <- file.path(manOutDir, "macros")
        dir.create(macroDir, FALSE)
        file.copy(macro_files, macroDir, overwrite = TRUE)
    }
    ## Avoid (costly) rebuilding if not needed.
    ## Actually, it seems no more costly than these tests, which it also does
    ## Rebuild when the DB or paths file is missing, the set of Rd
    ## files changed, or any Rd file is newer than the installed DB.
    pathsFile <- file.path(manOutDir, "paths.rds")
    if(!file_test("-f", db_file) || !file.exists(pathsFile) ||
       !identical(sort(manfiles), sort(readRDS(pathsFile))) ||
       !all(file_test("-nt", db_file, manfiles))) {
        db <- .build_Rd_db(dir, manfiles, db_file = db_file,
                           encoding = encoding, built_file = built_file)
        nm <- as.character(names(db)) # Might be NULL
        ## 'first' records the character offset just past the man/
        ## directory prefix (nchar(mandir) plus the separator).
        saveRDS(structure(nm,
                          first = nchar(file.path(mandir)) + 2L),
                pathsFile)
        names(db) <- sub("\\.[Rr]d$", "", basename(nm))
        makeLazyLoadDB(db, file.path(manOutDir, basename(outDir)))
    }
    invisible()
}
### * .install_package_demos
## called from basepkg.mk and .install_packages
## Copy a package's demo scripts into the installed package.
## Called from basepkg.mk and .install_packages.
## NB: we no longer install 00Index.
.install_package_demos <-
function(dir, outDir)
{
    srcDir <- file.path(dir, "demo")
    if(!dir.exists(srcDir)) return()
    demos <- list_files_with_type(srcDir, "demo", full.names = FALSE)
    if(!length(demos)) return()
    destDir <- file.path(outDir, "demo")
    if(!dir.exists(destDir)) dir.create(destDir)
    file.copy(file.path(srcDir, demos), destDir,
              overwrite = TRUE)
}
### * .find_cinclude_paths
## Emit C include flags (-I"/path/to/pkg/include" ...) on stdout for a
## comma-separated string of package names, or -- when 'file' is given
## -- for the LinkingTo field of that DESCRIPTION file.
.find_cinclude_paths <-
function(pkgs, lib.loc = NULL, file = NULL)
{
    if(!is.null(file)) {
        linking_to <- read.dcf(file, "LinkingTo")[1L, 1L]
        if(is.na(linking_to)) return(invisible())
        pkgs <- linking_to
    }
    pkg_names <- strsplit(pkgs[1L], ",[[:blank:]]*")[[1L]]
    found <- find.package(pkg_names, lib.loc, quiet = TRUE)
    if(length(found) > 0L) {
        flags <- paste0('-I"', found, '/include"')
        cat(paste(flags, collapse = " "))
    }
    invisible()
}
### * .Rtest_package_depends_R_version
## Check the R version requirement(s) in the Depends field of the
## package in 'dir' against the running R.  Returns 0 when satisfied;
## otherwise prints an ERROR message and returns a non-zero status.
.Rtest_package_depends_R_version <-
function(dir)
{
    if(missing(dir)) dir <- "."
    meta <- .read_description(file.path(dir, "DESCRIPTION"))
    deps <- .split_description(meta, verbose = TRUE)$Rdepends2
    status <- 0
    current <- getRversion()
    for(depends in deps) {
        ## .split_description will have ensured that this is NULL or
        ## of length 3.
        if(length(depends) > 1L) {
            ## .check_package_description will insist on these operators
            if(!depends$op %in% c("<=", ">=", "<", ">", "==", "!="))
                message("WARNING: malformed 'Depends' field in 'DESCRIPTION'")
            else {
                ## Either an ordinary version requirement, or an svn
                ## revision requirement of the form "r12345" (only
                ## checked against release/patched builds' "svn rev").
                status <- if(inherits(depends$version, "numeric_version"))
                    !do.call(depends$op, list(current, depends$version))
                else {
                    ver <- R.version
                    if (ver$status %in% c("", "Patched")) FALSE
                    else !do.call(depends$op,
                                  list(ver[["svn rev"]],
                                       as.numeric(sub("^r", "", depends$version))))
                }
            }
            if(status != 0) {
                ## Prefer the name the build system exported, falling
                ## back to DESCRIPTION's Package field.
                package <- Sys.getenv("R_PACKAGE_NAME")
                if(!nzchar(package))
                    package <- meta["Package"]
                msg <- if(nzchar(package))
                    gettextf("ERROR: this R is version %s, package '%s' requires R %s %s",
                             current, package,
                             depends$op, depends$version)
                else
                    gettextf("ERROR: this R is version %s, required is R %s %s",
                             current, depends$op, depends$version)
                message(strwrap(msg, exdent = 2L))
                break
            }
        }
    }
    status
}
## no longer used
.test_package_depends_R_version <-
function(dir)
{
    ## Quit R, reporting the dependency-check result as the exit status.
    q(status = .Rtest_package_depends_R_version(dir))
}
### * .test_load_package
## Check that package 'pkg_name' installed in library 'lib' can be
## attached; stop with an error otherwise.
.test_load_package <- function(pkg_name, lib)
{
    options(warn = 1)
    ok <- try(suppressPackageStartupMessages(
        library(pkg_name, lib.loc = lib, character.only = TRUE,
                logical.return = TRUE)))
    ## library() either errored (caught by try) or returned FALSE.
    if (inherits(ok, "try-error") || !ok)
        stop("loading failed", call. = FALSE)
}
### * checkRdaFiles
## Report size, ASCII-ness, compression type and serialization version
## for the .rda/.RData files in 'paths' (a character vector of files,
## or a single directory which is globbed).  Returns a data frame with
## one row per path; all fields are NA for files that do not exist.
checkRdaFiles <- function(paths)
{
    if(length(paths) == 1L && dir.exists(paths)) {
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
        ## Exclude .RData, which this may or may not match
        paths <- grep("/[.]RData$", paths, value = TRUE, invert = TRUE)
    }
    res <- data.frame(size = NA_real_, ASCII = NA,
                      compress = NA_character_, version = NA_integer_,
                      stringsAsFactors = FALSE)
    res <- res[rep_len(1L, length(paths)), ]
    row.names(res) <- paths
    keep <- file.exists(paths)
    res$size[keep] <- file.size(paths)[keep]
    for(p in paths[keep]) {
        ## Identify the compression from the file's magic bytes.
        magic <- readBin(p, "raw", n = 5)
        res[p, "compress"] <- if(all(magic[1:2] == c(0x1f, 0x8b))) "gzip"
        else if(rawToChar(magic[1:3]) == "BZh") "bzip2"
        else if(magic[1L] == 0xFD && rawToChar(magic[2:5]) == "7zXZ") "xz"
        else if(grepl("RD[ABX][12]", rawToChar(magic), useBytes = TRUE)) "none"
        else "unknown"
        ## gzfile() transparently decompresses, exposing the
        ## serialization header ("RDA2"/"RDB2"/"RDX2"/"RDX3" ...).
        con <- gzfile(p)
        magic <- readChar(con, 5L, useBytes = TRUE)
        close(con)
        res[p, "ASCII"] <- if (grepl("RD[ABX][12]", magic, useBytes = TRUE))
            substr(magic, 3, 3) == "A" else NA
        ver <- sub("(RD[ABX])([12]*)", "\\2", magic, useBytes = TRUE)
        ## BUG FIX: assigning to res$version here clobbered the whole
        ## column on every iteration, so all rows ended up reporting
        ## the last file's version.  Record this file's row only.
        res[p, "version"] <- as.integer(ver)
    }
    res
}
### * resaveRdaFiles
## Re-save the .rda/.RData files in 'paths' (a vector of files, or one
## directory to be globbed) with the requested compression.  With
## compress = "auto", each file is saved with gzip and bzip2 (and also
## xz once the gzip result exceeds 10Kb) and the smallest candidate is
## kept; the gzip size is weighted by 0.9 so it wins near-ties.
resaveRdaFiles <- function(paths,
                           compress = c("auto", "gzip", "bzip2", "xz"),
                           compression_level)
{
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(c(file.path(paths, "*.rda"),
                            file.path(paths, "*.RData")))
    compress <- match.arg(compress)
    ## Default level: 6 for gzip, 9 for everything else.
    if (missing(compression_level))
        compression_level <- switch(compress, "gzip" = 6, 9)
    for(p in paths) {
        ## Load each file into a scratch environment so the objects can
        ## be re-saved verbatim.
        env <- new.env(hash = TRUE) # probably small, need not be
        suppressPackageStartupMessages(load(p, envir = env))
        if(compress == "auto") {
            f1 <- tempfile()
            save(file = f1, list = ls(env, all.names = TRUE), envir = env)
            f2 <- tempfile()
            save(file = f2, list = ls(env, all.names = TRUE), envir = env,
                 compress = "bzip2")
            ss <- file.size(c(f1, f2)) * c(0.9, 1.0)
            names(ss) <- c(f1, f2)
            ## Only try (slow) xz compression for larger files.
            if(ss[1L] > 10240) {
                f3 <- tempfile()
                save(file = f3, list = ls(env, all.names = TRUE), envir = env,
                     compress = "xz")
                ss <- c(ss, file.size(f3))
                names(ss) <- c(f1, f2, f3)
            }
            ## Keep whichever candidate came out smallest.
            nm <- names(ss)
            ind <- which.min(ss)
            file.copy(nm[ind], p, overwrite = TRUE)
            unlink(nm)
        } else
            save(file = p, list = ls(env, all.names = TRUE), envir = env,
                 compress = compress, compression_level = compression_level)
    }
}
### * compactPDF
## Compact the PDF files in 'paths' (a vector of files, or a single
## directory to be globbed) using qpdf and/or GhostScript.  A file is
## replaced only when the compacted copy is both 10% and 10Kb smaller.
## Returns a "compactPDF" data frame of old/new sizes for the files
## that were replaced.
compactPDF <-
    function(paths, qpdf = Sys.which(Sys.getenv("R_QPDF", "qpdf")),
             gs_cmd = Sys.getenv("R_GSCMD", ""),
             gs_quality = Sys.getenv("GS_QUALITY", "none"),
             gs_extras = character())
{
    use_qpdf <- nzchar(qpdf)
    gs_quality <- match.arg(gs_quality, c("none", "printer", "ebook", "screen"))
    use_gs <- if(gs_quality != "none") nzchar(gs_cmd <- find_gs_cmd(gs_cmd)) else FALSE
    ## Neither tool available: nothing we can do.
    if (!use_gs && !use_qpdf) return()
    if(length(paths) == 1L && dir.exists(paths))
        paths <- Sys.glob(file.path(paths, "*.pdf"))
    dummy <- rep.int(NA_real_, length(paths))
    ans <- data.frame(old = dummy, new = dummy, row.names = paths)
    tf <- tempfile("pdf"); tf2 <- tempfile("pdf")
    for (p in paths) {
        res <- 0
        if (use_gs) {
            ## Rewrite with GhostScript first ...
            res <- system2(gs_cmd,
                           c("-q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite",
                             sprintf("-dPDFSETTINGS=/%s", gs_quality),
                             "-dCompatibilityLevel=1.5",
                             "-dAutoRotatePages=/None",
                             sprintf("-sOutputFile=%s", tf),
                             gs_extras, p), FALSE, FALSE)
            ## ... then run qpdf over the GhostScript output.
            if(!res && use_qpdf) {
                unlink(tf2) # precaution
                file.rename(tf, tf2)
                res <- system2(qpdf, c("--stream-data=compress",
                                       "--object-streams=generate",
                                       tf2, tf), FALSE, FALSE)
                unlink(tf2)
            }
        } else if(use_qpdf) {
            res <- system2(qpdf, c("--stream-data=compress",
                                   "--object-streams=generate",
                                   p, tf), FALSE, FALSE)
        }
        if(!res && file.exists(tf)) {
            old <- file.size(p); new <- file.size(tf)
            ## Only keep the result for a worthwhile saving.
            if(new/old < 0.9 && new < old - 1e4) {
                file.copy(tf, p, overwrite = TRUE)
                ans[p, ] <- c(old, new)
            }
        }
        unlink(tf)
    }
    ## Rows still NA were not replaced; drop them from the report.
    structure(na.omit(ans), class = c("compactPDF", "data.frame"))
}
find_gs_cmd <- function(gs_cmd = "")
{
if(!nzchar(gs_cmd)) {
if(.Platform$OS.type == "windows") {
gsexe <- Sys.getenv("R_GSCMD")
if (!nzchar(gsexe)) gsexe <- Sys.getenv("GSC")
gs_cmd <- Sys.which(gsexe)
if (!nzchar(gs_cmd)) gs_cmd <- Sys.which("gswin64c")
if (!nzchar(gs_cmd)) gs_cmd <- Sys.which("gswin32c")
gs_cmd
} else Sys.which(Sys.getenv("R_GSCMD", "gs"))
} else Sys.which(gs_cmd)
}
## Format a compactPDF result for printing: one line per file whose
## size dropped below 'ratio' of the original and by more than 'diff'
## bytes.  Sizes are shown in Kb, or Mb for new files of 1Mb and over.
format.compactPDF <- function(x, ratio = 0.9, diff = 1e4, ...)
{
    if (nrow(x) == 0L) return(character())
    shrunk <- x[with(x, new/old < ratio & new < old - diff), ]
    if (nrow(shrunk) == 0L) return(character())
    pretty <- shrunk
    pretty[] <- lapply(shrunk, function(v) sprintf("%.0fKb", v/1024))
    big <- shrunk$new >= 1024^2
    pretty[big, ] <- lapply(shrunk[big, ], function(v) sprintf("%.1fMb", v/1024^2))
    paste(' compacted', sQuote(basename(row.names(shrunk))),
          'from', pretty[, 1L], 'to', pretty[, 2L])
}
### * add_datalist
## Create a 'data/datalist' file for a package whose data directory
## exceeds 1Mb, mapping each dataset name to the objects it provides.
## Does nothing when the file already exists (unless 'force' is TRUE)
## or when the data directory is small enough not to need one.
add_datalist <- function(pkgpath, force = FALSE)
{
    listfile <- file.path(pkgpath, "data", "datalist")
    if (!force && file.exists(listfile)) return()
    total <- sum(file.size(Sys.glob(file.path(pkgpath, "data", "*"))))
    if (total <= 1024^2) return()
    datasets <- suppressPackageStartupMessages(list_data_in_pkg(dataDir = file.path(pkgpath, "data"))) # for BARD
    if (!length(datasets)) return()
    con <- file(listfile, "w")
    for (nm in names(datasets)) {
        objs <- datasets[[nm]]
        ## A dataset providing exactly one object of its own name needs
        ## only the bare name; otherwise list the objects explicitly.
        if (length(objs) == 1L && objs == nm) {
            writeLines(nm, con)
        } else {
            cat(nm, ": ", paste(objs, collapse = " "), "\n",
                sep = "", file = con)
        }
    }
    close(con)
    invisible()
}
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
| 42,256 | gpl-2.0 |
1a4c55f2b21f2c3a2e7d677a9330e4d82d82b3fd | ctufts/knn-dashboard-shiny-plotly | ui.R | # This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
shinyUI(navbarPage("",
tabPanel("Classifier",
# Application title
titlePanel("Classification of Heart Disease w/KNN"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("k",
"number of neighbors",
min = 1,
max = 20,
value = 5),
checkboxGroupInput("checkGroup", label = h3("Dataset Features"),
choices = feature.list, inline = F,
selected = names(feature.list))
),
# Display KNN results
mainPanel(
dataTableOutput('confusionMatrix'),
verbatimTextOutput("value"),
includeMarkdown("ShinyAppDescription.Rmd")
)
)
),
tabPanel("Visualize Features",
fluidRow(
column(4, selectInput("featureDisplay_x",
label = h3("X-Axis Feature"),
choices = feature.list,
selected = feature.list[1])),
column(4, selectInput("featureDisplay_y",
label = h3("Y-Axis Feature"),
choices = feature.list,
selected = feature.list[2]))
),
fluidRow(
column(4,
graphOutput("distPlotA")
),
column(4,
graphOutput("distPlotB")
),
column(4,
graphOutput("ScatterPlot")
)
)
),
tabPanel("Feature Descriptions",
fluidRow(
column(10,
includeMarkdown("include.Rmd")
)
)
),
tabPanel("References",
fluidRow(
column(10,
includeMarkdown("references.Rmd")
)
)
)
)) | 3,810 | mit |
e3a1668cacb79100919fc1a9d69eee57547207de | jltsiren/bwt-merge | paper/comparison.R | # Use R --slave --args name < comparison.R
## Plot memory usage (GB, x axis) against construction time (hours,
## y axis) for the tools listed in "<name>.csv", writing the figure to
## "<name>.pdf".  Invoked as: R --slave --args name < comparison.R
args = commandArgs()
name = args[4]
## Figure dimensions in inches.
x = 3
y = 3
## CSV columns (no header): tool name, seconds, memory (GB).
data <- read.csv(file = paste(name, ".csv", sep = ""), head = FALSE, sep = ",", dec = ".", check.names = FALSE)
pdf(file = paste(name, ".pdf", sep = ""), width = x, height = y, paper = "special",
  family = "Helvetica", pointsize = 11)
par(mar=c(4, 4, 1, 1))
## Fixed axis ranges and tick positions.
xrange = c(0, 256)
xscale = c(0, 64, 128, 192, 256)
xtitle = "Memory usage (GB)"
xlabs = xscale
yrange = c(0, 72)
yscale = c(0, 12, 24, 36, 48, 60, 72)
ytitle = ""
ylabs = yscale
## Empty plot; points and axes are added below.
plot(c(1),
     c(1),
     type = "n",
     axes = F,
     main = "",
     xlab = xtitle,
     ylab = ytitle,
     xlim = xrange,
     ylim = yrange)
axis(1, at = xscale, lab = xlabs, cex.axis = 0.8)
axis(2, at = yscale, lab = ylabs, cex.axis = 0.8)
box()
nr = nrow(data)
nc = ncol(data)
## One point per tool; seconds converted to hours.
points(data[1:nr, 3], data[1:nr, 2] / 3600, type = "p", pch = 20)
## Label positions ('pos') were tuned by hand per tool.
text(data[1, 3], data[1, 2] / 3600, data[1, 1], cex = 0.8, pos = 2) # RopeBWT
text(data[2, 3], data[2, 2] / 3600, data[2, 1], cex = 0.8, pos = 2) # RopeBWT2
text(data[3, 3], data[3, 2] / 3600, data[3, 1], cex = 0.8, pos = 3) # BWT-merge
text(data[4, 3], data[4, 2] / 3600, data[4, 1], cex = 0.8, pos = 1) # RopeBWT (RLO)
text(data[5, 3], data[5, 2] / 3600, data[5, 1], cex = 0.8, pos = 1) # RopeBWT2 (RLO)
text(data[6, 3], data[6, 2] / 3600, data[6, 1], cex = 0.8, pos = 1) # BWT-merge (RLO)
dev.off()
q()
| 1,377 | mit |
88ef524d2d796008aba09ef2ef47d4e501efa83d | johndharrison/webpagetestr | man-roxygen/forceSoftwareRendering.R | #' @param forceSoftwareRendering force software rendering, disable GPU
#' acceleration (Chrome only)
| 102 | mit |
23aa536a0bc37595e16361d92abf62924126afa0 | ucbrise/clipper | containers/R/tests/install_test_dependencies.R | #!/usr/bin/env RScript
print("Installing R container test dependencies...")
# The 'versions' package provides install.versions() for pinned installs.
install.packages('versions', repos='http://cran.us.r-project.org')

# Install one pinned package version.  Warnings from install.versions()
# are reported via message() and installation continues; any error aborts
# the script with exit status 11 so the container build fails fast.
# (Same warning/error policy as the original seven copy-pasted blocks.)
install_pinned_version <- function(pkg, version) {
  tryCatch(
    versions::install.versions(pkg, version = version),
    warning = function(warn) {
      message(warn)
    },
    error = function(err) {
      quit(status = 11)
    })
}

# Test dependencies, pinned to known-good versions; installed in the
# same order as before.
pinned_versions <- c(
  jsonlite     = "1.5",
  Rcpp         = "0.12.11",
  optparse     = "1.4.4",
  stringr      = "1.2.0",
  CodeDepends  = "0.5-3",
  histry       = "0.1.2",
  randomForest = "4.6-12"
)

for (pkg in names(pinned_versions)) {
  install_pinned_version(pkg, pinned_versions[[pkg]])
}
| 1,649 | apache-2.0 |
8377a589b2e4649a173b7d3f7c10d266084f148b | jeroenooms/r-source | tests/reg-tests-1c.R | ## Regression tests for R 3.[0-3].*
pdf("reg-tests-1c.pdf", encoding = "ISOLatin1.enc")
.pt <- proc.time()
## mapply with classed objects with length method
## was not documented to work in 2.x.y
setClass("A", representation(aa = "integer"))
a <- new("A", aa = 101:106)
setMethod("length", "A", function(x) length(x@aa))
setMethod("[[", "A", function(x, i, j, ...) x@aa[[i]])
(z <- mapply(function(x, y) {x * y}, a, rep(1:3, 2)))
stopifnot(z == c(101, 204, 309, 104, 210, 318))
## reported as a bug (which it was not) by H. Pages in
## https://stat.ethz.ch/pipermail/r-devel/2012-November/065229.html
## recyling in split()
## https://stat.ethz.ch/pipermail/r-devel/2013-January/065700.html
x <- 1:6
y <- split(x, 1:2)
class(x) <- "ABC" ## class(x) <- "A" creates an invalid object
yy <- split(x, 1:2)
stopifnot(identical(y, yy))
## were different in R < 3.0.0
## dates with fractional seconds after 2038 (PR#15200)
## Extremely speculative!
z <- as.POSIXct(2^31+c(0.4, 0.8), origin=ISOdatetime(1970,1,1,0,0,0,tz="GMT"))
zz <- format(z)
stopifnot(zz[1] == zz[2])
## printed form rounded not truncated in R < 3.0.0
## origin coerced in tz and not GMT by as.POSIXct.numeric()
x <- as.POSIXct(1262304000, origin="1970-01-01", tz="EST")
y <- as.POSIXct(1262304000, origin=.POSIXct(0, "GMT"), tz="EST")
stopifnot(identical(x, y))
## Handling records with quotes in names
x <- c("a b' c",
"'d e' f g",
"h i 'j",
"k l m'")
y <- data.frame(V1 = c("a", "d e", "h"), V2 = c("b'", "f", "i"), V3 = c("c", "g", "j\nk l m"))
f <- tempfile()
writeLines(x, f)
stopifnot(identical(count.fields(f), c(3L, 3L, NA_integer_, 3L)))
stopifnot(identical(read.table(f), y))
stopifnot(identical(scan(f, ""), as.character(t(as.matrix(y)))))
## docu always said 'length 1 is sorted':
stopifnot(!is.unsorted(NA))
## str(.) for large factors should be fast:
u <- as.character(runif(1e5))
dummy <- str(u); dummy <- str(u); # force compilation of str
t1 <- max(0.001, system.time(str(u))[[1]]) # get a baseline > 0
uf <- factor(u)
(t2 <- system.time(str(uf))[[1]]) / t1 # typically around 1--2
stopifnot(t2 / t1 < 30)
## was around 600--850 for R <= 3.0.1
## ftable(<array with unusual dimnames>)
(m <- matrix(1:12, 3,4, dimnames=list(ROWS=paste0("row",1:3), COLS=NULL)))
ftable(m)
## failed to format (and hence print) because of NULL 'COLS' dimnames
## regression test formerly in kmeans.Rd, but result differs by platform
## Artificial example [was "infinite loop" on x86_64; PR#15364]
rr <- c(rep(-0.4, 5), rep(-0.4- 1.11e-16, 14), -.5)
r. <- signif(rr, 12)
k3 <- kmeans(rr, 3, trace=2) ## Warning: Quick-Transfer.. steps exceed
try ( k. <- kmeans(r., 3) ) # after rounding, have only two distinct points
k. <- kmeans(r., 2) # fine
## PR#15376
stem(c(1, Inf))
## hung in 3.0.1
## PR#15377, very long variable names
x <- 1:10
y <- x + rnorm(10)
z <- y + rnorm(10)
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy <- y
fit <- lm(cbind(yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, z) ~ x)
## gave spurious error message in 3.0.1.
## PR#15341 singular complex matrix in rcond()
set.seed(11)
n <- 5
A <- matrix(runif(n*n),nrow=n)
B <- matrix(runif(n*n),nrow=n)
B[n,] <- (B[n-1,]+B[n-2,])/2
rcond(B)
B <- B + 0i
rcond(B)
## gave error message (OK) in R 3.0.1: now returns 0 as in real case.
## Misuse of formatC as in PR#15303
days <- as.Date(c("2012-02-02", "2012-03-03", "2012-05-05"))
(z <- formatC(days))
stopifnot(!is.object(z), is.null(oldClass(z)))
## used to copy over class in R < 3.0.2.
## PR15219
val <- sqrt(pi)
fun <- function(x) (-log(x))^(-1/2)
(res <- integrate(fun, 0, 1, rel.tol = 1e-4))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, 1, rel.tol = 1e-6))
stopifnot(abs(res$value - val) < res$abs.error)
res <- integrate(fun, 0, 1, rel.tol = 1e-8)
stopifnot(abs(res$value - val) < res$abs.error)
fun <- function(x) x^(-1/2)*exp(-x)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-4))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-6))
stopifnot(abs(res$value - val) < res$abs.error)
(res <- integrate(fun, 0, Inf, rel.tol = 1e-8))
stopifnot(abs(res$value - val) < res$abs.error)
## sometimes exceeded reported error in 2.12.0 - 3.0.1
## Unary + should coerce
x <- c(TRUE, FALSE, NA, TRUE)
stopifnot(is.integer(+x))
## +x was logical in R <= 3.0.1
## Attritbutes of value of unary operators
# +x, -x were ts, !x was not in 3.0.2
x <- ts(c(a=TRUE, b=FALSE, c=NA, d=TRUE), frequency = 4, start = 2000)
x; +x; -x; !x
stopifnot(is.ts(!x), !is.ts(+x), !is.ts(-x))
# +x, -x were ts, !x was not in 3.0.2
x <- ts(c(a=1, b=2, c=0, d=4), frequency = 4, start = 2010)
x; +x; -x; !x
stopifnot(!is.ts(!x), is.ts(+x), is.ts(-x))
##
## regression test incorrectly in colorRamp.Rd
bb <- colorRampPalette(2)(4)
stopifnot(bb[1] == bb)
## special case, invalid in R <= 2.15.0:
## Setting NAMED on ... arguments
f <- function(...) { x <- (...); x[1] <- 7; (...) }
stopifnot(f(1+2) == 3)
## was 7 in 3.0.1
## copying attributes from only one arg of a binary operator.
A <- array(c(1), dim = c(1L,1L), dimnames = list("a", 1))
x <- c(a = 1)
B <- A/(pi*x)
stopifnot(is.null(names(B)))
## was wrong in R-devel in Aug 2013
## needed an un-NAMED rhs.
## lgamma(x) for very small negative x
X <- 3e-308; stopifnot(identical(lgamma(-X), lgamma(X)))
## lgamma(-X) was NaN in R <= 3.0.1
## PR#15413
z <- subset(data.frame(one = numeric()), select = one)
stopifnot(nrow(z) == 0L)
## created a row prior to 3.0.2
## https://stat.ethz.ch/pipermail/r-devel/2013-September/067524.html
dbeta(0.9, 9.9e307, 10)
dbeta(0.1, 9, 9.9e307)
dbeta(0.1, 9.9e307, 10)
## first two hung in R <= 3.0.2
## PR#15465 (0-extent matrix / data frame)
provideDimnames(matrix(nrow = 0, ncol = 1))
provideDimnames(table(character()))
as.data.frame(table(character()))
## all failed in 3.0.2
## PR#15004
n <- 10
s <- 3
l <- 10000
m <- 20
x <- data.frame(x1 = 1:n, x2 = 1:n)
by <- data.frame(V1 = factor(rep(1:3, n %/% s + 1)[1:n], levels = 1:s))
for(i in 1:m) {
by[[i + 1]] <- factor(rep(l, n), levels = 1:l)
}
agg <- aggregate.data.frame(x, by, mean)
stopifnot(nrow(unique(by)) == nrow(agg))
## rounding caused groups to be falsely merged
## PR#15454
set.seed(357)
z <- matrix(c(runif(50, -1, 1), runif(50, -1e-190, 1e-190)), nrow = 10)
contour(z)
## failed because rounding made crossing tests inconsistent
## Various cases where zero length vectors were not handled properly
## by functions in base and utils, including PR#15499
y <- as.data.frame(list())
format(y)
format(I(integer()))
gl(0, 2)
z <- list(numeric(0), 1)
stopifnot(identical(relist(unlist(z), z), z))
summary(y)
## all failed in 3.0.2
## PR#15518 Parser catching errors in particular circumstance:
(ee <- tryCatch(parse(text = "_"), error= function(e)e))
stopifnot(inherits(ee, "error"))
## unexpected characters caused the parser to segfault in 3.0.2
## nonsense value of nmax
unique(1:3, nmax = 1)
## infinite-looped in 3.0.2, now ignored.
## besselI() (and others), now using sinpi() etc:
stopifnot(all.equal(besselI(2.125,-5+1/1024),
0.02679209380095711, tol= 8e-16),
all.equal(lgamma(-12+1/1024), -13.053274367453049, tol=8e-16))
## rel.error was 1.5e-13 / 7.5e-14 in R <= 3.0.x
ss <- sinpi(2*(-10:10)-2^-12)
tt <- tanpi( (-10:10)-2^-12)
stopifnot(ss == ss[1], tt == tt[1], # as internal arithmetic must be exact here
all.equal(ss[1], -0.00076699031874270453, tol=8e-16),
all.equal(tt[1], -0.00076699054434309260, tol=8e-16))
## (checked via Rmpfr) The above failed during development
## PR#15535 c() "promoted" raw vectors to bad logical values
stopifnot( c(as.raw(11), TRUE) == TRUE )
## as.raw(11) became a logical value coded as 11,
## and did not test equal to TRUE.
## PR#15564
fit <- lm(rnorm(10) ~ I(1:10))
predict(fit, interval = "confidence", scale = 1)
## failed in <= 3.0.2 with object 'w' not found
## PR#15534 deparse() did not produce reparseable complex vectors
# Regression helper (PR#15534): verify that deparse() output is valid R
# syntax and parses/evaluates back to a value identical() to the input.
# Stops with an informative error on either failure; returns NULL
# (invisibly) on success.
assert.reparsable <- function(sexp) {
    txt <- paste(deparse(sexp), collapse = " ")
    value <- tryCatch(eval(parse(text = txt)[[1]]),
                      error = function(e) NULL)
    if (is.null(value)) {
        stop(sprintf("Deparsing produced invalid syntax: %s", txt))
    }
    if (!identical(value, sexp)) {
        stop(sprintf("Deparsing produced change: value is not %s", value))
    }
}
assert.reparsable(1)
assert.reparsable("string")
assert.reparsable(2+3i)
assert.reparsable(1:10)
assert.reparsable(c(NA, 12, NA, 14))
assert.reparsable(as.complex(NA))
assert.reparsable(complex(real=Inf, i=4))
assert.reparsable(complex(real=Inf, i=Inf))
assert.reparsable(complex(real=Inf, i=-Inf))
assert.reparsable(complex(real=3, i=-Inf))
assert.reparsable(complex(real=3, i=NaN))
assert.reparsable(complex(r=NaN, i=0))
assert.reparsable(complex(real=NA, i=1))
assert.reparsable(complex(real=1, i=NA))
## last 7 all failed
## PR#15621 backticks could not be escaped
stopifnot(deparse(as.name("`"), backtick=TRUE) == "`\\``")
assign("`", TRUE)
`\``
tools::assertError(parse("```"))
##
## We document tanpi(0.5) etc to be NaN
stopifnot(is.nan(tanpi(c(0.5, 1.5, -0.5, -1.5))))
## That is not required for system implementations, and some give +/-Inf
## PR#15642 segfault when parsing overflowing reals
as.double("1e1000")
ll <- ml <- list(1,2); dim(ml) <- 2:1
ali <- all.equal(list( ), identity) # failed in R-devel for ~ 30 hours
al1 <- all.equal(list(1), identity) # failed in R < 3.1.0
stopifnot(length(ali) == 3, grepl("list", ali[1]),
grepl("length", ali[2], ignore.case=TRUE),
is.character(al1), length(al1) >= 2,
all.equal(ml, ml),
all.equal(ll, ml, check.attributes=FALSE))
## PR#15699 aggregate failed when there were no grouping variables
dat <- data.frame(Y = runif(10), X = sample(LETTERS[1:3], 10, TRUE))
aggregate(Y ~ 1, FUN = mean, data = dat)
## merge() with duplicated column names, similar to PR#15618
X <- data.frame(Date = c("1967-02-01", "1967-02-02", "1967-02-03"),
Settle.x = c(NA, NA, NA), Settle.y = c(NA, NA, NA),
Settle = c(35.4, 35.15, 34.95))
Y <- data.frame(Date = c("2013-12-10", "2013-12-11", "2013-12-12"),
Settle = c(16.44, 16.65, 16.77))
merge(X, Y, by = "Date", all = TRUE)
## failed in R < 3.1.0: now warns (correctly).
## PR#15679
# Regression helper (PR#15679): build a deeply nested list in which every
# level stores its single child list under `key`; depth 1 is the
# innermost (empty) level.  Deep structures like this used to overrun
# the deparser/printer.
badstructure <- function(depth, key)
{
    child <- if (depth == 1L) {
        list()
    } else {
        list(badstructure(depth - 1L, key))
    }
    node <- list()
    node[[key]] <- child
    node
}
badstructure(20, "children")
## overran, segfaulted for the original reporter.
## PR#15702 and PR#15703
d <- as.dendrogram(hclust(dist(sin(1:7))))
(dl <- d[[c(2,1,2)]]) # single-leaf dendrogram
stopifnot(inherits(dl, "dendrogram"), is.leaf(dl),
identical(attributes(reorder(dl, 1:7)), c(attributes(dl), value = 5L)),
identical(order.dendrogram(dl), as.vector(dl)),
identical(d, as.dendrogram(d)))
## as.dendrogram() was hidden; order.*() failed for leaf
## using *named* method
hw <- hclust(dist(sqrt(1:5)), method=c(M = "ward"))
## failed for 2 days in R-devel/-alpha
## PR#15758
my_env <- new.env(); my_env$one <- 1L
save(one, file = tempfile(), envir = my_env)
## failed in R < 3.1.1.
## Conversion to numeric in boundary case
ch <- "0x1.ffa0000000001p-1"
rr <- type.convert(ch, numerals = "allow.loss")
rX <- type.convert(ch, numerals = "no.loss")
stopifnot(is.numeric(rr), identical(rr, rX),
all.equal(rr, 0.999267578125),
all.equal(type.convert(ch, numerals = "warn"),
type.convert("0x1.ffap-1",numerals = "warn"), tol = 5e-15))
## type.convert(ch) was not numeric in R 3.1.0
##
ch <- "1234567890123456789"
rr <- type.convert(ch, numerals = "allow.loss")
rX <- type.convert(ch, numerals = "no.loss")
rx <- type.convert(ch, numerals = "no.loss", as.is = TRUE)
tools::assertWarning(r. <- type.convert(ch, numerals = "warn.loss"))
stopifnot(is.numeric(rr), identical(rr, r.), all.equal(rr, 1.234567890e18),
is.factor(rX), identical(rx, ch))
## PR#15764: integer overflow could happen without a warning or giving NA
tools::assertWarning(ii <- 1980000020L + 222000000L)
stopifnot(is.na(ii))
tools::assertWarning(ii <- (-1980000020L) + (-222000000L))
stopifnot(is.na(ii))
tools::assertWarning(ii <- (-1980000020L) - 222000000L)
stopifnot(is.na(ii))
tools::assertWarning(ii <- 1980000020L - (-222000000L))
stopifnot(is.na(ii))
## first two failed for some version of clang in R < 3.1.1
## PR#15735: formulae with exactly 32 variables
myFormula <- as.formula(paste(c("y ~ x0", paste0("x", 1:30)), collapse = "+"))
ans <- update(myFormula, . ~ . - w1)
stopifnot(identical(ans, myFormula))
updateArgument <-
as.formula(paste(c(". ~ . ", paste0("w", 1:30)), collapse = " - "))
ans2 <- update(myFormula, updateArgument)
stopifnot(identical(ans2, myFormula))
## PR#15753
0x110p-5L # (+ warning)
stopifnot(.Last.value == 8.5)
## was 272 with a garbled message in R 3.0.0 - 3.1.0.
## numericDeriv failed to duplicate variables in
## the expression before modifying them. PR#15849
x <- 10; y <- 10
d1 <- numericDeriv(quote(x+y),c("x","y"))
x <- y <- 10
d2 <- numericDeriv(quote(x+y),c("x","y"))
stopifnot(identical(d1,d2))
## The second gave the wrong answer
## prettyNum(x, zero.print = .) failed when x had NAs
pp <- sapply(list(TRUE, FALSE, ".", " "), function(.)
prettyNum(c(0:1,NA), zero.print = . ))
stopifnot(identical(pp[1,], c("0", " ", ".", " ")),
pp[2:3,] == c("1","NA"))
## all 4 prettyNum() would error out
## checking all.equal() with externalptr
library(methods) # getClass()'s versionKey is an e.ptr
cA <- getClass("ANY")
stopifnot(all.equal(cA, cA),
is.character(all.equal(cA, getClass("S4"))))
# both all.equal() failed in R <= 3.1.1
## as.hexmode(x), as.octmode(x) when x is double
x <- c(NA, 1)
stopifnot(identical(x == x,
as.hexmode(x) == as.octmode(x)))
p <- c(1, pi)
tools::assertError(as.hexmode(p))
tools::assertError(as.octmode(p))
## where all "wrong" in R <= 3.1.1
## PR#15935
y <- 1:3
drop1(lm(y ~ 1))
drop1(glm(y ~ 1))
stats:::drop1.default(glm(y ~ 1))
## gave error in R < 3.1.2
## getAnywhere() wrongly dealing with namespace hidden list object
nm <- deparse(body(pbinom)[[2]])# == "C_pbinom" currently
gg <- getAnywhere(nm)
stopifnot(length(gg$objs) == 1)
## was 4 and printed "4 differing objects matching ‘C_pbinom’ ..." in R <= 3.1.1
## 0-length consistency of options(), PR#15979
stopifnot(identical(options(list()), options(NULL)))
## options(list()) failed in R <= 3.1.1
## merge.dendrogram(), PR#15648
# Build a dendrogram for n items labelled <lab>1 .. <lab>n from a random
# dissimilarity matrix (rGen deliberately produces frequent ties).  As a
# side effect, publishes the underlying hclust fit in the global
# variable .HC. via <<- — the surrounding regression tests read it.
mkDend <- function(n, lab, method = "complete",
                   ## gives *ties* often:
                   rGen = function(n) 1+round(16*abs(rnorm(n)))) {
    stopifnot(is.numeric(n), length(n) == 1, n >= 1, is.character(lab))
    m <- matrix(rGen(n * n), n, n)
    rownames(m) <- colnames(m) <- paste0(lab, seq_len(n))
    ## symmetrize (m + t(m)) before clustering; <<- stores the fit globally
    .HC. <<- hclust(as.dist(m + t(m)), method = method)
    as.dendrogram(.HC.)
}
set.seed(7)
da <- mkDend(4, "A")
db <- mkDend(3, "B")
d.ab <- merge(da, db)
hcab <- as.hclust(d.ab)
stopifnot(hcab$order == c(2, 4, 1, 3, 7, 5, 6),
hcab$labels == c(paste0("A", 1:4), paste0("B", 1:3)))
## was wrong in R <= 3.1.1
set.seed(1) ; h1 <- as.hclust(mkDend(5, "S", method="single")); hc1 <- .HC.
set.seed(5) ; h5 <- as.hclust(mkDend(5, "S", method="single")); hc5 <- .HC.
set.seed(42); h3 <- as.hclust(mkDend(5, "A", method="single")); hc3 <- .HC.
## all failed (differently!) because of ties in R <= 3.2.3
stopifnot(all.equal(h1[1:4], hc1[1:4], tol = 1e-12),
all.equal(h5[1:4], hc5[1:4], tol = 1e-12),
all.equal(h3[1:4], hc3[1:4], tol = 1e-12))
## bw.SJ() and similar with NA,Inf values, PR#16024
try(bw.SJ (c(NA,2,3)))
try(bw.bcv(c(-Inf,2,3)))
try(bw.ucv(c(1,NaN,3,4)))
## seg.faulted in 3.0.0 <= R <= 3.1.1
## as.dendrogram() with wrong input
x <- rbind(c( -6, -9), c( 0, 13),
c(-15, 6), c(-14, 0), c(12,-10))
dx <- dist(x,"manhattan")
hx <- hclust(dx)
hx$merge <- matrix(c(-3, 1, -2, 3,
-4, -5, 2, 3), 4,2)
tools::assertError(as.dendrogram(hx))
## 8 member dendrogram and memory explosion for larger examples in R <= 3.1.2
## abs with named args failed, PR#16047
abs(x=1i)
## Complained that the arg should be named z
## Big exponents overflowed, PR#15976
x <- 0E4933
y <- 0x0p100000
stopifnot(x == 0, y == 0)
##
## drop.terms() dropped some attributes, PR#16029
test <- model.frame(Employed ~ Year + poly(GNP,3) + Population, data=longley)
mterm <- terms(test)
mterm2 <- drop.terms(mterm, 3)
predvars <- attr(mterm2, "predvars")
dataClasses <- attr(mterm2, "dataClasses")
factors <- attr(mterm2, "factors")
stopifnot(is.language(predvars), length(predvars) == length(dataClasses)+1,
all(names(dataClasses) == rownames(factors)))
## Previously dropped predvars and dataClasses
## prompt() did not escape percent signs properly
fn <- function(fmt = "%s") {}
f <- tempfile(fileext = ".Rd")
prompt(fn, filename = f)
rd <- tools::parse_Rd(f)
## Gave syntax errors because the percent sign in Usage
## was taken as the start of a comment.
## pass no arguments to 0-parameter macro
cat("\\newcommand{\\mac0}{MAC0}\\mac0", file=f)
rd <- tools::parse_Rd(f)
stopifnot(identical(as.character(rd), "MAC0\n"))
## pass empty argument to a 1-parameter macro (failed in 3.5.0 and earlier)
cat("\\newcommand{\\mac1}{MAC1:#1}\\mac1{}", file=f)
rd <- tools::parse_Rd(f)
stopifnot(identical(as.character(rd), "MAC1:\n"))
## pass empty argument to a 2-parameter macro (failed in 3.5.0 and earlier)
cat("\\newcommand{\\mac2}{MAC2:#2}\\mac2{}{XX}", file=f)
rd <- tools::parse_Rd(f)
stopifnot(identical(as.character(rd), "MAC2:XX\n"))
cat("\\newcommand{\\mac2}{MAC2:#2#1}\\mac2{YY}{}", file=f)
rd <- tools::parse_Rd(f)
stopifnot(identical(as.character(rd), "MAC2:YY\n"))
## pass multi-line argument to a user macro (failed in 3.5.0 and earlier)
cat("\\newcommand{\\mac1}{MAC1:#1}\\mac1{XXX\nYYY}", file=f)
rd <- tools::parse_Rd(f)
stopifnot(identical(as.character(rd), c("MAC1:XXX\n","YYY\n")))
## comments are removed from macro arguments (not in 3.5.0 and earlier)
cat("\\newcommand{\\mac1}{MAC1:#1}\\mac1{XXX%com\n}", file=f)
rd <- tools::parse_Rd(f)
stopifnot(identical(as.character(rd), c("MAC1:XXX\n","\n")))
cat("\\newcommand{\\mac1}{MAC1:#1}\\mac1{XXX%com\nYYY}", file=f)
rd <- tools::parse_Rd(f)
stopifnot(identical(as.character(rd), c("MAC1:XXX\n","YYY\n")))
## power.t.test() failure for very large n (etc): PR#15792
(ptt <- power.t.test(delta = 1e-4, sd = .35, power = .8))
(ppt <- power.prop.test(p1 = .5, p2 = .501, sig.level=.001, power=0.90, tol=1e-8))
stopifnot(all.equal(ptt$n, 192297000, tol = 1e-5),
all.equal(ppt$n, 10451937, tol = 1e-7))
## call to uniroot() did not allow n > 1e7
## save(*, ascii=TRUE): PR#16137
x0 <- x <- c(1, NA, NaN)
save(x, file=(sf <- tempfile()), ascii = TRUE)
load(sf)
stopifnot(identical(x0, x))
## x had 'NA' instead of 'NaN'
## PR#16205
stopifnot(length(glob2rx(character())) == 0L)
## was "^$" in R < 3.1.3
### Bugs fixed in R 3.2.0
## Bugs reported by Radford Neal
x <- pairlist(list(1, 2))
x[[c(1, 2)]] <- NULL # wrongly gave an error, referring to misuse
# of the internal SET_VECTOR_ELT procedure
stopifnot(identical(x, pairlist(list(1))))
a <- pairlist(10, 20, 30, 40, 50, 60)
dim(a) <- c(2, 3)
dimnames(a) <- list(c("a", "b"), c("x", "y", "z"))
# print(a) # doesn't print names, not fixed
a[["a", "x"]] <- 0
stopifnot(a[["a", "x"]] == 0)
## First gave a spurious error, second caused a seg.fault
## Radford (R-devel, June 24, 2014); M.Maechler
m <- matrix(1:2, 1,2); v <- 1:3
stopifnot(identical(crossprod(2, v), t(2) %*% v),
identical(crossprod(m, v), t(m) %*% v),
identical(5 %*% v, 5 %*% t(v)),
identical(tcrossprod(m, 1:2), m %*% 1:2) )
## gave error "non-conformable arguments" in R <= 3.2.0
proc.time() - .pt; .pt <- proc.time()
## list <--> environment
L0 <- list()
stopifnot(identical(L0, as.list(as.environment(L0))))
## as.env..() did not work, and as.list(..) gave non-NULL names in R 3.1.x
### all.equal() refClass()es check moved to methods package
## missing() did not propagate through '...', PR#15707
## Report which of x, y, z are missing in the innermost call; the two
## wrappers forward only through '...', so this exercises propagation of
## missingness across recursive promises (regression test for PR#15707 —
## do NOT "simplify" these definitions, the indirection is the point).
check <- function(x,y,z) c(missing(x), missing(y), missing(z))
check1 <- function(...) check(...)
check2 <- function(...) check1(...)
stopifnot(identical(check2(one, , three), c(FALSE, TRUE, FALSE)))
## missing() was unable to handle recursive promises
### envRefClass check moved to methods package
## takes too long with JIT enabled:
.jit.lev <- compiler::enableJIT(0)
Sys.getenv("_R_CHECK_LENGTH_1_CONDITION_") -> oldV
Sys.setenv("_R_CHECK_LENGTH_1_CONDITION_" = "false") # only *warn*
## while did not protect its argument, which caused an error
## under gctorture, PR#15990
gctorture()
suppressWarnings(while(c(FALSE, TRUE)) 1)
gctorture(FALSE)
## gave an error because the test got released when the warning was generated.
compiler::enableJIT(.jit.lev)# revert
Sys.setenv("_R_CHECK_LENGTH_1_CONDITION_" = oldV)
## hist(x, breaks =) with too large bins, PR#15988
set.seed(5); x <- runif(99)
Hist <- function(x, b) hist(x, breaks = b, plot = FALSE)$counts
for(k in 1:5) {
b0 <- seq_len(k-1)/k
H.ok <- Hist(x, c(-10, b0, 10))
for(In in c(1000, 1e9, Inf))
stopifnot(identical(Hist(x, c(-In, b0, In)), H.ok),
identical(Hist(x, c( 0, b0, In)), H.ok))
}
## "wrong" results for k in {2,3,4} in R 3.1.x
## eigen(*, symmetric = <default>) with asymmetric dimnames, PR#16151
m <- matrix(c(83,41), 5, 4,
dimnames=list(paste0("R",1:5), paste0("C",1:4)))[-5,] + 3*diag(4)
stopifnot( all.equal(eigen(m, only.values=TRUE) $ values,
c(251, 87, 3, 3), tol=1e-14) )
## failed, using symmetric=FALSE and complex because of the asymmetric dimnames()
## match.call() re-matching '...'
test <- function(x, ...) test2(x, 2, ...)
test2 <- function(x, ...) match.call(test2, sys.call())
stopifnot(identical(test(1, 3), quote(test2(x=x, 2, 3))))
## wrongly gave test2(x=x, 2, 2, 3) in R <= 3.1.2
## callGeneric not forwarding dots in call (PR#16141)
setGeneric("foo", function(x, ...) standardGeneric("foo"))
setMethod("foo", "character",
function(x, capitalize = FALSE) if (capitalize) toupper(x) else x)
setMethod("foo", "factor",
function(x, capitalize = FALSE) { x <- as.character(x); callGeneric() })
toto1 <- function(x, ...) foo(x, ...)
stopifnot(identical(toto1(factor("a"), capitalize = TRUE), "A"))
## wrongly did not capitalize in R <= 3.1.2
## Accessing non existing objects must be an error
tools::assertError(base :: foobar)
tools::assertError(base :::foobar)
tools::assertError(stats:::foobar)
tools::assertError(stats:: foobar)
## lazy data only via '::', not ':::' :
stopifnot( nrow(datasets:: swiss) == 47)
tools::assertError(datasets:::swiss)
## The ::: versions gave NULL in certain development versions of R
stopifnot(identical(stats4::show -> s4s,
get("show", asNamespace("stats4") -> ns4)),
s4s@package == "methods",
is.null(ns4[["show"]]) # not directly in stats4 ns
)
## stats4::show was NULL for 4 hours in R-devel
## mode<- did too much evaluation (PR#16215)
x <- y <- quote(-2^2)
x <- as.list(x)
mode(y) <- "list"
stopifnot(identical(x, y))
## y ended up containing -4, not -2^2
## besselJ()/besselY() with too large order
besselJ(1, 2^64) ## NaN with a warning
besselY(1, c(2^(60:70), Inf))
## seg.faulted in R <= 3.1.2
## besselJ()/besselY() with nu = k + 1/2; k in {-1,-2,..}
besselJ(1, -1750.5) ## Inf, with only one warning...
stopifnot(is.finite(besselY(1, .5 - (1500 + 0:10))))
## last gave NaNs; both: more warnings in R <= 3.1.x
## BIC() for arima(), also with NA's
lho <- lh; lho[c(3,7,13,17)] <- NA
alh300 <- arima(lh, order = c(3,0,0))
alh311 <- arima(lh, order = c(3,1,1))
ao300 <- arima(lho, order = c(3,0,0))
ao301 <- arima(lho, order = c(3,0,1))
## AIC/BIC for *different* data rarely makes sense ... want warning:
tools::assertWarning(AA <- AIC(alh300,alh311, ao300,ao301))
tools::assertWarning(BB <- BIC(alh300,alh311, ao300,ao301))
fmLst <- list(alh300,alh311, ao300,ao301)
## nobs() did not "work" in R < 3.2.0:
stopifnot(sapply(fmLst, nobs) == c(48,47, 44,44))
lls <- lapply(fmLst, logLik)
str(lapply(lls, unclass))# -> 'df' and 'nobs'
## 'manual BIC' via generalized AIC:
stopifnot(all.equal(BB[,"BIC"],
sapply(fmLst, function(fm) AIC(fm, k = log(nobs(fm))))))
## BIC() was NA unnecessarily in R < 3.2.0; nobs() was not available eiher
## as.integer() close and beyond maximal integer
MI <- .Machine$integer.max
stopifnot(identical( MI, as.integer( MI + 0.99)),
identical(-MI, as.integer(-MI - 0.99)),
is.na(as.integer(as.character( 100*MI))),
is.na(as.integer(as.character(-100*MI))))
## The two cases with positive numbers failed in R <= 3.2.0
## Ensure that sort() works with a numeric vector "which is an object":
stopifnot(is.object(y <- freeny$y))
stopifnot(diff(sort(y)) > 0)
## order() and hence sort() failed here badly for a while around 2015-04-16
## NAs in data frame names (but *not* in row.names; that's really wrong):
dn <- list(c("r1", "r2"), c("V", NA))
d11 <- as.data.frame(matrix(c(1, 1, 1, 1), ncol = 2, dimnames = dn))
stopifnot(identical(names(d11), dn[[2]]),
identical(row.names(d11), dn[[1]]))
## as.data.frame() failed in R-devel for a couple of hours ..
## Ensure R -e .. works on Unix
if(.Platform$OS.type == "unix" &&
file.exists(Rc <- file.path(R.home("bin"), "R")) &&
file.access(Rc, mode = 1) == 0) { # 1: executable
cmd <- paste(Rc, "-q --vanilla -e 1:3")
ans <- system(cmd, intern=TRUE)
stopifnot(length(ans) >= 3,
identical(ans[1:2], c("> 1:3",
"[1] 1 2 3")))
}
## (failed for < 1 hr, in R-devel only)
proc.time() - .pt; .pt <- proc.time()
## Parsing large exponents of floating point numbers, PR#16358
set.seed(12)
lrg <- sprintf("%.0f", round(exp(10*(2+abs(rnorm(2^10))))))
head(huge <- paste0("1e", lrg))
micro <- paste0("1e-", lrg)
stopifnot(as.numeric(huge) == Inf,
as.numeric(micro) == 0)
## Both failed in R <= 3.2.0
## vcov() failed on manova() results, PR#16380
tear <- c(6.5, 6.2, 5.8, 6.5, 6.5, 6.9, 7.2, 6.9, 6.1, 6.3, 6.7, 6.6, 7.2, 7.1, 6.8, 7.1, 7.0, 7.2, 7.5, 7.6)
gloss <- c(9.5, 9.9, 9.6, 9.6, 9.2, 9.1, 10.0, 9.9, 9.5, 9.4, 9.1, 9.3, 8.3, 8.4, 8.5, 9.2, 8.8, 9.7, 10.1, 9.2)
opacity <- c(4.4, 6.4, 3.0, 4.1, 0.8, 5.7, 2.0, 3.9, 1.9, 5.7, 2.8, 4.1, 3.8,1.6, 3.4, 8.4, 5.2, 6.9, 2.7, 1.9)
Y <- cbind(tear, gloss, opacity)
rate <- factor(gl(2,10), labels = c("Low", "High"))
fit <- manova(Y ~ rate)
vcov(fit)
## Gave error because coef.aov() turned matrix of coefficients into a vector
## Unary / Binary uses of logic operations, PR#16385
tools::assertError(`&`(FALSE))
tools::assertError(`|`(TRUE))
## Did not give errors in R <= 3.2.0
E <- tryCatch(`!`(), error = function(e)e)
stopifnot(grepl("0 argument.*\\<1", conditionMessage(E)))
## PR#17456 : ^^ a version that also matches in a --disable-nls configuration
## Gave wrong error message in R <= 3.2.0
stopifnot(identical(!matrix(TRUE), matrix(FALSE)),
identical(!matrix(FALSE), matrix(TRUE)))
## was wrong for while in R 3.2.0 patched
## cummax(<integer>)
iNA <- NA_integer_
x <- c(iNA, 1L)
stopifnot(identical(cummin(x), c(iNA, iNA)),
identical(cummax(x), c(iNA, iNA)))
## an initial NA was not propagated in R <= 3.2.0
## summaryRprof failed for very short profile, PR#16395
profile <- tempfile()
writeLines(c(
'memory profiling: sample.interval=20000',
':145341:345360:13726384:0:"stdout"',
':208272:345360:19600000:0:"stdout"'), profile)
summaryRprof(filename = profile, memory = "both")
unlink(profile)
## failed when a matrix was downgraded to a vector
## option(OutDec = *) -- now gives a warning when not 1 character
op <- options(OutDec = ".", digits = 7, # <- default
warn = 2)# <- (unexpected) warnings become errors
stopifnot(identical("3.141593", fpi <- format(pi)))
options(OutDec = ",")
stopifnot(identical("3,141593", cpi <- format(pi)))
## warnings, but it "works" (for now):
tools::assertWarning(options(OutDec = ".1."))
stopifnot(identical("3.1.141593", format(pi)))
tools::assertWarning(options(OutDec = ""))
tools::assertWarning(stopifnot(identical("3141593", format(pi))))
options(op)# back to sanity
## No warnings in R versions <= 3.2.1
## format(*, decimal.mark=".") when OutDec != "." (PR#16411)
op <- options(OutDec = ",")
stopifnot(identical(fpi, format(pi, decimal.mark=".")))
options(op)
## failed in R <= 3.2.1
## model.frame() removed ts attributes on original data (PR#16436)
orig <- class(EuStockMarkets)
mf <- model.frame(EuStockMarkets ~ 1, na.action=na.fail)
stopifnot(identical(orig, class(EuStockMarkets)))
## ts class lost in R <= 3.2.1
##
foo <- as.expression(1:3)
matrix(foo, 3, 3) # always worked
matrix(foo, 3, 3, byrow = TRUE)
## failed in R <= 3.1.2
## labels.dendrogram(), dendrapply(), etc -- see comment #15 of PR#15215 :
(D <- as.dendrogram(hclust(dist(cbind(setNames(c(0,1,4), LETTERS[1:3]))))))
stopifnot(
identical(labels(D), c("C", "A", "B")),
## has been used in "CRAN package space"
identical(suppressWarnings(dendrapply(D, labels)),
list("C", list("A", "B"), "C")))
## dendrapply(D, labels) failed in R-devel for a day or two
## poly() / polym() predict()ion
library(datasets)
alm <- lm(stack.loss ~ poly(Air.Flow, Water.Temp, degree=3), stackloss)
f20 <- fitted(alm)[1:20] # "correct" prediction values [1:20]
stopifnot(all.equal(unname(f20[1:4]), c(39.7703378, 39.7703378, 35.8251359, 21.5661761)),
all.equal(f20, predict(alm, stackloss) [1:20] , tolerance = 1e-14),
all.equal(f20, predict(alm, stackloss[1:20, ]), tolerance = 1e-14))
## the second prediction went off in R <= 3.2.1
## PR#16478
kkk <- c("a\tb", "3.14\tx")
z1 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c("numeric", "character"))
z2 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(b = "character", a = "numeric"))
stopifnot(identical(z1, z2))
z3 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(b = "character"))
stopifnot(identical(z1, z3))
z4 <- read.table(textConnection(kkk), sep = "\t", header = TRUE,
colClasses = c(c = "integer", b = "character", a = "numeric"))
stopifnot(identical(z1, z4))
## z2 and z4 used positional matching (and failed) in R < 3.3.0.
## PR#16484
z <- regexpr("(.)", NA_character_, perl = TRUE)
stopifnot(is.na(attr(z, "capture.start")), is.na(attr(z, "capture.length")))
## Result was random integers in R <= 3.2.2.
## PR#14861
if(.Platform$OS.type == "unix") { # no 'ls /' on Windows
con <- pipe("ls /", open = "rt")
data <- readLines(con)
z <- close(con)
print(z)
stopifnot(identical(z, 0L))
}
## was NULL in R <= 3.2.2
## Sam Steingold: compiler::enableJIT(3) not working in ~/.Rprofile anymore
stopifnot(identical(topenv(baseenv()),
baseenv()))
## accidentally globalenv in R 3.2.[12] only
## widths of unknown Unicode characters
stopifnot(nchar("\u200b", "w") == 0)
## was -1 in R 3.2.2
## abbreviate dropped names in some cases
x <- c("AA", "AB", "AA", "CBA") # also test handling of duplicates
for(m in 2:0) {
print(y <- abbreviate(x, m))
stopifnot(identical(names(y), x))
}
## dropped for 0 in R <= 3.2.2
## match(<NA>, <NA>)
stopifnot(
isTRUE(NA %in% c(NA, TRUE)),
isTRUE(NA_integer_ %in% c(TRUE, NA)),
isTRUE(NA_real_ %in% c(NA, FALSE)),# !
isTRUE(!(NaN %in% c(NA, FALSE))),
isTRUE(NA %in% c(3L, NA)),
isTRUE(NA_integer_ %in% c(NA, 3L)),
isTRUE(NA_real_ %in% c(3L, NA)),# !
isTRUE(!(NaN %in% c(3L, NA))),
isTRUE(NA %in% c(2., NA)),# !
isTRUE(NA_integer_ %in% c(NA, 2.)),# !
isTRUE(NA_real_ %in% c(2., NA)),# !
isTRUE(!(NaN %in% c(2., NA))))
## the "!" gave FALSE in R-devel (around 20.Sep.2015)
## oversight in within.data.frame() [R-help, Sep 20 2015 14:23 -04]
df <- data.frame(.id = 1:3 %% 3 == 2, a = 1:3)
d2 <- within(df, {d = a + 2})
stopifnot(identical(names(d2), c(".id", "a", "d")))
## lost the '.id' column in R <= 3.2.2
proc.time() - .pt; .pt <- proc.time()
## system() truncating and splitting long lines of output, PR#16544
## only works when platform has getline() in stdio.h, and Solaris does not.
known.POSIX_2008 <- .Platform$OS.type == "unix" &&
(Sys.info()[["sysname"]] != "SunOS")
## ^^^ explicitly exclude *non*-working platforms above
if(known.POSIX_2008) {
cat("testing system(\"echo\", <large>) : "); op <- options(warn = 2)# no warnings allowed
cn <- paste(1:2222, collapse=" ")
rs <- system(paste("echo", cn), intern=TRUE)
stopifnot(identical(rs, cn))
cat("[Ok]\n"); options(op)
}
## tail.matrix()
B <- 100001; op <- options(max.print = B + 99)
mat.l <- list(m0 = matrix(, 0,2),
m0n = matrix(, 0,2, dimnames = list(NULL, paste0("c",1:2))),
m2 = matrix(1:2, 2,1),
m2n = matrix(1:2, 2,3, dimnames = list(NULL, paste0("c",1:3))),
m9n = matrix(1:9, 9,1, dimnames = list(paste0("r",1:9),"CC")),
m12 = matrix(1:12, 12,1),
mBB = matrix(1:B, B, 1))
## tail() used to fail for 0-rows matrices m0*
n.s <- -3:3
hl <- lapply(mat.l, function(M) lapply(n.s, function(n) head(M, n)))
tl <- lapply(mat.l, function(M) lapply(n.s, function(n) tail(M, n)))
## Check dimensions of resulting matrices --------------
## ncol:
Mnc <- do.call(rbind, rep(list(vapply(mat.l, ncol, 1L)), length(n.s)))
stopifnot(identical(Mnc, sapply(hl, function(L) vapply(L, ncol, 1L))),
identical(Mnc, sapply(tl, function(L) vapply(L, ncol, 1L))))
## nrow:
## Row counts of every matrix in a list, returned as a named integer vector.
fNR <- function(L) {
  vapply(L, nrow, FUN.VALUE = integer(1))
}
tR <- sapply(tl, fNR)
stopifnot(identical(tR, sapply(hl, fNR)), # head() & tail both
tR[match(0, n.s),] == 0, ## tail(*,0) has always 0 rows
identical(tR, outer(n.s, fNR(mat.l), function(x,y)
ifelse(x < 0, pmax(0L, y+x), pmin(y,x)))))
for(j in c("m0", "m0n")) { ## 0-row matrices: tail() and head() look like identity
co <- capture.output(mat.l[[j]])
stopifnot(vapply(hl[[j]], function(.) identical(co, capture.output(.)), NA),
vapply(tl[[j]], function(.) identical(co, capture.output(.)), NA))
}
## Print an object and return every output line except the first,
## i.e. drop the printed column-name header line.
CO1 <- function(.) {
  out <- capture.output(.)
  out[-1]
}
## checking tail(.) rownames formatting
nP <- n.s > 0
for(nm in c("m9n", "m12", "mBB")) { ## rownames: rather [100000,] than [1e5,]
tf <- file(); capture.output(mat.l[[nm]], file=tf)
co <- readLines(tf); close(tf)
stopifnot(identical(# tail(.) of full output == output of tail(.) :
lapply(n.s[nP], function(n) tail(co, n)),
lapply(tl[[nm]][nP], CO1)))
}
## Compare two objects by their printed representation (not their structure);
## extra arguments are passed through to identical().
identCO <- function(x, y, ...) {
  identical(capture.output(x), capture.output(y), ...)
}
## Reference implementations of head()/tail() for matrices: pick the first
## (respectively last) 'n' row indices explicitly, always keeping matrix shape.
headI <- function(M, n) {
  keep <- head(seq_len(nrow(M)), n)
  M[keep, , drop = FALSE]
}
tailI <- function(M, n) {
  keep <- tail(seq_len(nrow(M)), n)
  M[keep, , drop = FALSE]
}
for(mat in mat.l) {
## do not capture.output for tail(<large>, <small negative>)
n.set <- if(nrow(mat) < 999) -3:3 else 0:3
stopifnot(
vapply(n.set, function(n) identCO (head(mat, n), headI(mat, n)), NA),
vapply(n.set, function(n) identCO (tail (mat, n, keepnums=FALSE),
tailI(mat, n)), NA),
vapply(n.set, function(n) all.equal(tail(mat, n), tailI(mat, n),
check.attributes=FALSE), NA))
}
options(op)
## end{tail.matrix check} ------------------
## format.data.frame() & as.data.frame.list() - PR#16580
myL <- list(x=1:20, y=rnorm(20), stringsAsFactors = gl(4,5))
names(myL)[1:2] <- lapply(1:2, function(i)
paste(sample(letters, 300, replace=TRUE), collapse=""))
nD <- names(myD <- as.data.frame(myL))
nD2 <- names(myD2 <- as.data.frame(myL, cut.names = 280))
nD3 <- names(myD3 <- as.data.frame(myL, cut.names = TRUE))
stopifnot(nchar(nD) == c(300,300,16), is.data.frame(myD), dim(myD) == c(20,3),
nchar(nD2)== c(278,278,16), is.data.frame(myD2), dim(myD2) == c(20,3),
nchar(nD3)== c(254,254,16), is.data.frame(myD3), dim(myD3) == c(20,3),
identical(nD[3], "stringsAsFactors"),
identical(nD[3], nD2[3]), identical(nD[3], nD3[3]))
names(myD)[1:2] <- c("Variable.1", "")# 2nd col.name is "empty"
## A data frame with a column that is an empty data frame:
d20 <- structure(list(type = c("F", "G"), properties = data.frame(i=1:2)[,-1]),
class = "data.frame", row.names = c(NA, -2L))
stopifnot(is.data.frame(d20), dim(d20) == c(2,2),
identical(colnames(d20), c("type", "properties")),
identical(capture.output(d20), c(" type", "1 F", "2 G")))
## format(d20) failed in intermediate R versions
stopifnot(identical(names(myD), names(format(head(myD)))),
identical(names(myD), c("Variable.1", "", "stringsAsFactors")),
identical(rbind.data.frame(2:1, 1:2), ## was wrong for some days
data.frame(X2.1 = 2:1, X1.2 = 1:2)))
## format.data.frame() did not show "stringsAsFactors" in R <= 3.2.2
## Follow up: the new as.data.frame.list() must be careful with 'AsIs' columns:
desc <- structure( c("a", NA, "z"), .Names = c("A", NA, "Z"))
tools::assertError( data.frame(desc = desc, stringsAsFactors = FALSE) )
## however
dd <- data.frame(desc = structure(desc, class="AsIs"),
row.names = c("A","M","Z"), stringsAsFactors = FALSE)
## is "legal" (because "AsIs" can be 'almost anything')
dd ## <- did not format nor print correctly in R-devel early Nov.2015
fdesc <- structure(c("a", "NA", "z"), .Names=names(desc), class="AsIs")
stopifnot(identical(format(dd),
data.frame(desc = fdesc, row.names = c("A", "M", "Z"))),
identical(capture.output(dd),
c(" desc", "A a",
"M <NA>", "Z z")),
identical(dd, data.frame(list(dd))))# lost row.names for a while
## var(x) and hence sd(x) with factor x, PR#16564
tools::assertError(cov(1:6, f <- gl(2,3)))# was ok already
tools::assertError(var(f))# these two give an error now (R >= 3.6.0)
tools::assertError( sd(f))
## var() "worked" in R <= 3.2.2 using the underlying integer codes
proc.time() - .pt; .pt <- proc.time()
## loess(*, .. weights) - PR#16587
d.loess <-
do.call(expand.grid,
c(formals(loess.control)[1:3],
list(iterations = c(1, 10),
KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)))
d.loess $ iterTrace <- (d.loess$ iterations > 1)
## apply(d.loes, 1L, ...) would coerce everything to atomic, i.e, "character":
loess.c.list <- lapply(1:nrow(d.loess), function(i)
do.call(loess.control, as.list(d.loess[i,])))
set.seed(123)
for(n in 1:6) { if(n %% 10 == 0) cat(n,"\n")
wt <- runif(nrow(cars))
for(ctrl in loess.c.list) {
cars.wt <- loess(dist ~ speed, data = cars, weights = wt,
family = if(ctrl$iterations > 1) "symmetric" else "gaussian",
control = ctrl)
cPr <- predict(cars.wt)
cPrN <- predict(cars.wt, newdata=cars)
stopifnot(all.equal(cPr, cPrN, check.attributes = FALSE, tol=1e-14))
}
}
## gave (typically slightly) wrong predictions in R <= 3.2.2
## aperm() for named dim()s:
na <- list(A=LETTERS[1:2], B=letters[1:3], C=LETTERS[21:25], D=letters[11:17])
da <- lengths(na)
A <- array(1:210, dim=da, dimnames=na)
aA <- aperm(A)
a2 <- aperm(A, (pp <- c(3:1,4)))
stopifnot(identical( dim(aA), rev(da)),# including names(.)
identical(dimnames(aA), rev(na)),
identical( dim(a2), da[pp]), # including names(.)
identical(dimnames(a2), na[pp]))
## dim(aperm(..)) did lose names() in R <= 3.2.2
## poly() / predict(poly()) with NAs -- PR#16597
fm <- lm(y ~ poly(x, 3), data=data.frame(x=1:7, y=sin(1:7)))
x <- c(1,NA,3:7)
stopifnot(all.equal(c(predict(fm, newdata=list(x = 1:3)), `4`=NA),
predict(fm, newdata=list(x=c(1:3,NA))), tol=1e-15),
all.equal(unclass(poly(x, degree=2, raw=TRUE)),
cbind(x, x^2), check.attributes=FALSE))
## both gave error about NA in R <= 3.2.2
## data(package = *) on some platforms
dd <- data(package="datasets")[["results"]]
if(anyDuplicated(dd[,"Item"])) stop("data(package=*) has duplications")
## sometimes returned the data sets *twice* in R <= 3.2.2
## prettyNum(*, big.mark, decimal.mark)
b.m <- c(".", ",", "'", "")
d.m <- c(".", ",", ".,", "..")
pa <- expand.grid(big.mark = b.m, decimal.mark = d.m,
x = c(1005.24, 100.22, 1000000.33), scientific=FALSE, digits=9,
stringsAsFactors=FALSE, KEEP.OUT.ATTRS=FALSE)
r <- vapply(1:nrow(pa), function(i) do.call(prettyNum, pa[i,]), "")# with 6x2 warnings
r
b.m[b.m == ""] <- "0"
## big.mark: only >= 1000; *and* because particular chosen numbers:
r.2 <- substr(r[pa[,"x"] > 1000], 2, 2)
## compute location of decimal point (which maybe more than one char)
nd <- nchar(dm.s <- rep(d.m, each=length(b.m)))
nr <- nchar(r) - 3 + (nd == 1)
nr2 <- nr + (nd > 1)
stopifnot(identical(r.2, rep_len(b.m, length(r.2))),
identical(substr(r, nr,nr2), rep_len(dm.s, length(r))))
## several cases (1, 5, 9, 10,..) were wrong in R 3.2.2
## kmeans with just one center -- PR#16623
set.seed(23)
x <- rbind(matrix(rnorm(100, sd = 0.3), ncol = 2),
matrix(rnorm(100, mean = 1, sd = 0.3), ncol = 2))
k1 <- kmeans(x, 1)
k2 <- kmeans(x, centers = k1$centers)
stopifnot(all.equal(k1, k2), k1$cluster == 1)
## the kmeans(*, centers=.) called failed in R <= 3.2.3
## invalid dimnames for array()
tools::assertError(array(1, 2:3, dimnames="foo"))
## were silently disregarded in R <= 3.2.3
## addmargins() - dimnames with (by default) "Sum"
m <- rbind(1, 2:3)
m2 <- addmargins(m, 2)
am <- addmargins(m)
stopifnot(
identical(dimnames(m2), list(NULL, c("", "", "Sum"))),
identical(am[,"Sum"], setNames(c(2, 5, 7), c("", "", "Sum"))))
## the dimnames array() bug above hid the addmargins() not adding "Sum"
## dim( x[,] ) -- should keep names(dim(.)) --
## --- ----
##_ 1 D _
A1 <- array(1:6, (d <- c(nam=6L)))
stopifnot(identical(dim(A1), d),
identical(dim(A1), dim(A1[])))
##_ 2 D _
A2 <- A[1,2,,]
stopifnot(identical(names(dim(A2)), c("C", "D")),
identical(dim(A2), dim(A)[-(1:2)]),
identical(dim(A2[ ]), dim(A2)),
identical(dim(A2[,]), dim(A2)),
identical(dim(A2[1, , drop=FALSE]), c(C = 1L, D = 7L)),
identical(dim(A2[, 1, drop=FALSE]), c(C = 5L, D = 1L)))
##_ higher D_
A3 <- A[1, ,,]
stopifnot(
identical(dim(A ), dim(A [,,,])),# was already wrong: [,,,] losing names(dim(.))
identical(dim(A[,-1,-1,-1]), dim(A) - c(0:1,1L,1L)),
identical(dim(A3), dim(A)[-1]),
identical(dim(A3), dim(A3[,, ])),
identical(dim(A3[,1,]), c(B = 3L, D = 7L)))
## all subsetting of arrays lost names(dim(.)) in R < 3.3.0
## NextMethod() dispatch for `$` and `$<-`
`$.foo` <- function(x, fun) paste("foo:", NextMethod())
x <- list(a = 1, b = 2)
class(x) <- "foo"
stopifnot(identical(x$b, "foo: 2")) # 'x$b' failed prior to R 3.3.0
`$<-.foo` <- function(x, value, fun) {
attr(x, "modified") <- "yes"
NextMethod()
}
x$y <- 10 ## failed prior to R 3.3.0
stopifnot(identical(attr(x, "modified"), "yes"))
## illegal 'row.names' for as.data.frame(): -- for now just a warning --
tools::assertWarning(
d3 <- as.data.frame(1:3, row.names = letters[1:2])
)
stopifnot(dim(d3) == c(3,1)) ## was (2, 1) in R <= 3.2.3
## 'row.names' were not checked and produced a "corrupted" data frame in R <= 3.2.3
## rbind.data.frame()'s smart row names construction
## Helper: wrap a vector into a one-column data frame with column 'x'.
mk1 <- function(x) data.frame(x=x)
d4 <- rbind(mk1(1:4)[3:4,,drop=FALSE], mk1(1:2))
stopifnot(identical(dimnames(d4),
list(c("3", "4", "1", "2"), "x")),
## the rownames were "3" "4" "31" "41" in R <= 3.3.0
identical(attr(rbind(mk1(5:8), 7, mk1(6:3)), "row.names"), 1:9)
)
## sort on integer() should drop NAs by default
stopifnot(identical(1L, sort(c(NA, 1L))))
## and other data types for method="radix"
stopifnot(identical("a", sort(c(NA, "a"), method="radix")))
stopifnot(identical(character(0L), sort(c(NA, NA_character_), method="radix")))
stopifnot(identical(1, sort(c(NA, 1), method="radix")))
## dummy.coef(.) in the case of "non-trivial terms" -- PR#16665
op <- options(contrasts = c("contr.treatment", "contr.poly"))
fm1 <- lm(Fertility ~ cut(Agriculture, breaks=4) + Infant.Mortality, data=swiss)
(dc1 <- dummy.coef(fm1)) ## failed in R <= 3.3.0
## (R-help, Alexandra Kuznetsova, 24 Oct 2013):
set.seed(56)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(rnorm(10, 4), rnorm(10, 5))
x <- rnorm(20)
lm9 <- lm(weight ~ group + x + I(x^2))
dc9 <- dummy.coef(lm9)
## failed in R <= 3.3.0
stopifnot( # depends on contrasts:
all.equal(unname(coef(fm1)), unlist(dc1, use.names=FALSE)[-2], tol= 1e-14),
all.equal(unname(coef(lm9)), unlist(dc9, use.names=FALSE)[-2], tol= 1e-14))
## a 'use.na=TRUE' example
dd <- data.frame(x1 = rep(letters[1:2], each=3),
x2 = rep(LETTERS[1:3], 2),
y = rnorm(6),
stringsAsFactors = TRUE)
dd[6,2] <- "B" # => no (b,C) combination => that coef should be NA
fm3 <- lm(y ~ x1*x2, dd)
(d3F <- dummy.coef(fm3, use.na=FALSE))
(d3T <- dummy.coef(fm3, use.na=TRUE))
stopifnot(all.equal(d3F[-4], d3T[-4]),
all.equal(d3F[[4]][-6], d3T[[4]][-6]),
all.equal(drop(d3T$`x1:x2`),
c("a:A"= 0, "b:A"= 0, "a:B"= 0,
"b:B"= 0.4204843786, "a:C"=0, "b:C"=NA)))
## in R <= 3.2.3, d3T$`x1:x2` was *all* NA
##
## dummy.coef() for "manova"
## artificial data inspired by the summary.manova example
rate <- gl(2,10, labels=c("Lo", "Hi"))
additive <- gl(4, 1, length = 20, labels = paste("d", 1:4, sep="."))
additive <- C(additive, "contr.sum")# => less trivial dummy.coef
X <- model.matrix(~ rate*additive)
E <- matrix(round(rnorm(20*3), 2), 20,3) %*% cbind(1, c(.5,-1,.5), -1:1)
bet <- outer(1:8, c(tear = 2, gloss = 5, opacity = 20))
Y <- X %*% bet + E
fit <- manova(Y ~ rate * additive)
## For consistency checking, one of the univariate models:
flm <- lm(Y[,"tear"] ~ rate * additive)
dclm <- lapply(dummy.coef(flm), drop); names(dclm[[1]]) <- "tear"
op <- options(digits = 3, width = 88)
(cf <- coef(fit))
(dcf <- dummy.coef(fit))
options(op)
stopifnot(all.equal(coef(flm), cf[,"tear"]),
all.equal(dclm,
lapply(dcf, function(cc)
if(is.matrix(cc)) cc["tear",] else cc["tear"])),
identical(lengths(dcf),
c("(Intercept)" = 3L, "rate" = 6L,
"additive" = 12L, "rate:additive" = 24L)),
identical(sapply(dcf[-1], dim),
cbind(rate = 3:2, additive = 3:4,
`rate:additive` = c(3L, 8L))))
## dummy.coef() were missing coefficients in R <= 3.2.3
proc.time() - .pt; .pt <- proc.time()
## format.POSIXlt() with modified 'zone' or length-2 format
f0 <- "2016-01-28 01:23:45"; tz0 <- "Europe/Stockholm"
d2 <- d1 <- rep(as.POSIXlt(f0, tz = tz0), 2)
(f1 <- format(d1, usetz=TRUE))
identical(f1, rep(paste(f0, "CET"), 2))# often TRUE (but too platform dependent)
d2$zone <- d1$zone[1] # length 1 instead of 2
f2 <- format(d2, usetz=TRUE)## -> segfault
f1.2 <- format(as.POSIXlt("2016-01-28 01:23:45"), format=c("%d", "%y"))# segfault
stopifnot(identical(f2, format(as.POSIXct(d2), usetz=TRUE)),# not yet in R <= 3.5.x
identical(f1.2, c("28", "16")))
tims <- seq.POSIXt(as.POSIXct("2016-01-01"),
as.POSIXct("2017-11-11"), by = as.difftime(pi, units="weeks"))
form <- c("%m/%d/%y %H:%M:%S", "", "%Y-%m-%d %H:%M:%S")
op <- options(warn = 2)# no warnings allowed
head(rf1 <- format(tims, form)) # recycling was wrong
head(rf2 <- format(tims, form[c(2,1,3)]))
stopifnot(identical(rf1[1:3], c("01/01/16 00:00:00", "2016-01-22 23:47:15",
"2016-02-13 23:34:30")),
identical(rf2[1:3], c("2016-01-01 00:00:00", "01/22/16 23:47:15",
rf1[3])),
nchar(rf1) == rep(c(17,19,19), length = length(rf1)),
nchar(rf2) == rep(c(19,17,19), length = length(rf2)))
options(op)
## Wrong-length 'zone' or short 'x' segfaulted -- PR#16685
## Default 'format' setting sometimes failed for length(format) > 1
## saveRDS(*, compress= .)
opts <- setNames(,c("bzip2", "xz", "gzip"))
fil <- tempfile(paste0("xx", 1:6, "_"), fileext = ".rds")
names(fil) <- c("default", opts, FALSE,TRUE)
xx <- 1:11
saveRDS(xx, fil["default"])
saveRDS(xx, fil[opts[1]], compress = opts[1])
saveRDS(xx, fil[opts[2]], compress = opts[2])
saveRDS(xx, fil[opts[3]], compress = opts[3])
saveRDS(xx, fil["FALSE"], compress = FALSE)
saveRDS(xx, fil["TRUE" ], compress = TRUE)
f.raw <- lapply(fil, readBin, what = "raw", n = 100)
lengths(f.raw) # 'gzip' is best in this case
for(i in 1:6) stopifnot(identical(xx, readRDS(fil[i])))
eMsg <- tryCatch(saveRDS(xx, tempfile(), compress = "Gzip"),
error = function(e) e$message)
stopifnot(
grepl("'compress'.*Gzip", eMsg), # had ".. not interpretable as logical"
identical(f.raw[["default"]], f.raw[["TRUE"]]),
identical(f.raw[["default"]], f.raw[[opts["gzip"]]]))
## compress = "gzip" failed (PR#16653), but compress = c(a = "xz") did too
## recursive dendrogram methods and deeply nested dendrograms
op <- options(expressions = 999)# , verbose = 2) # -> max. depth= 961
set.seed(11); d <- mkDend(1500, "A", method="single")
rd <- reorder(d, nobs(d):1)
## Error: evaluation nested too deeply: infinite recursion .. in R <= 3.2.3
stopifnot(is.leaf(r1 <- rd[[1]]), is.leaf(r2 <- rd[[2:1]]),
attr(r1, "label") == "A1458", attr(r2, "label") == "A1317")
options(op)# revert
## cor.test() with extremely small p values
b <- 1:10; set.seed(1)
for(n in 1:256) {
a <- round(jitter(b, f = 1/8), 3)
p1 <- cor.test(a, b)$ p.value
p2 <- cor.test(a,-b)$ p.value
stopifnot(abs(p1 - p2) < 8e-16 * (p1+p2))
## on two different Linuxen, they actually are always equal
}
## were slightly off in R <= 3.2.3. PR#16704
## smooth(*, do.ends=TRUE)
y <- c(4,2,2,3,10,5:7,7:6)
stopifnot(
identical(c(smooth(y, "3RSR" , do.ends=TRUE, endrule="copy")),
c(4, 2, 2, 3, 5, 6, 6, 7, 7, 6) -> sy.c),
identical(c(smooth(y, "3RSS" , do.ends=TRUE, endrule="copy")), sy.c),
identical(c(smooth(y, "3RS3R", do.ends=TRUE, endrule="copy")), sy.c),
identical(c(smooth(y, "3RSR" , do.ends=FALSE, endrule="copy")),
c(4, 4, 4, 4, 5, 6, 6, 6, 6, 6)),
identical(c(smooth(y, "3RSS" , do.ends=FALSE, endrule="copy")),
c(4, 4, 2, 3, 5, 6, 6, 6, 6, 6)),
identical(c(smooth(y, "3RS3R", do.ends=FALSE, endrule="copy")),
c(4, 4, 3, 3, 5, 6, 6, 6, 6, 6)))
## do.ends=TRUE was not obeyed for the "3RS*" kinds, for 3.0.0 <= R <= 3.2.3
proc.time() - .pt; .pt <- proc.time()
## prettyDate() for subsecond ranges
##' checking pretty():
##' Check the contract of pretty(x, n, min.n, ...):
##'   - at least min.n + 1 breakpoints are returned,
##'   - the breakpoints cover range(x),
##'   - the number of breakpoints is within 'max.D' of the requested n + 1,
##'   - the breakpoints are equidistant; "days"-unit difftime results are
##'     exempt (calendar irregularities) and get a "chkPr" attribute instead.
##' Returns the pretty() result invisibly; stop()s on any violated condition.
chkPretty <- function(x, n = 5, min.n = NULL, ..., max.D = 1) {
  if(is.null(min.n)) {
    ## work with both pretty.default() and grDevices:::prettyDate()
    ## *AND* these have a different default for 'min.n' we must be "extra smart":
    min.n <-
      if(inherits(x, "Date") || inherits(x, "POSIXt"))
        n %/% 2 # grDevices:::prettyDate
      else
        n %/% 3 # pretty.default
  }
  pr <- pretty(x, n=n, min.n=min.n, ...)
  ## if debugging: pr <- grDevices:::prettyDate(x, n=n, min.n=min.n, ...)
  stopifnot(length(pr) >= (min.n+1),
            ## pretty(x, *) must cover range of x:
            min(pr) <= min(x), max(x) <= max(pr))
  ## the count of breakpoints may deviate from n+1 by at most max.D
  if((D <- abs(length(pr) - (n+1))) > max.D)
    stop("| |pretty(.)| - (n+1) | = ", D, " > max.D = ", max.D)
  ## is it equidistant [may need fuzz, i.e., signif(.) ?]:
  eqD <- length(pr) == 1 || length(udp <- unique(dp <- diff(pr))) == 1
  ## may well be FALSE (differing number of days in months; leap years, leap seconds)
  if(!eqD) {
    if(inherits(dp, "difftime") && units(dp) %in% c("days")# <- more ??
       )
      attr(pr, "chkPr") <- "not equidistant"
    else
      stop("non equidistant: has ", length(udp)," unique differences")
  }
  invisible(pr)
}
sTime <- structure(1455056860.75, class = c("POSIXct", "POSIXt"))
for(n in c(1:16, 30:32, 41, 50, 60)) # (not for much larger n, (TODO ?))
chkPretty(sTime, n=n)
set.seed(7)
for(n in c(1:7, 12)) replicate(32, chkPretty(sTime + .001*rlnorm(1) * 0:9, n = n))
## failed in R <= 3.2.3
## Daily sequence of Date objects from d1 to d2, inclusive.
seqD <- function(d1, d2) {
  seq.Date(as.Date(d1), as.Date(d2), by = "1 day")
}
## Same daily sequence, additionally carrying a 'labels' attribute of
## "%b %d"-formatted dates (mimicking what prettyDate() attaches).
seqDp <- function(d1, d2) {
  s <- seqD(d1, d2)
  attr(s, "labels") <- format(s, "%b %d")
  s
}
## Two-digit, zero-padded representation of i modulo 60 (minute/second field).
time2d <- function(i) sprintf("%02d", i %% 60)
MTbd <- as.Date("1960-02-10")
(p1 <- chkPretty(MTbd))
stopifnot(
identical(p1, seqDp("1960-02-08", "1960-02-13")) ,
identical(attr(p1, "labels"), paste("Feb", time2d(8:13))),
identical(chkPretty(MTbd + rep(0,2)), p1) ,
identical(chkPretty(MTbd + 0:1), p1) ,
identical(chkPretty(MTbd + -1:1), p1) ,
identical(chkPretty(MTbd + 0:3), seqDp("1960-02-09", "1960-02-14")) )
## all pretty() above gave length >= 5 answer (with duplicated values!) in R <= 3.2.3!
## and length 1 or 2 instead of about 6 in R 3.2.4
(p2 <- chkPretty(as.POSIXct("2002-02-02 02:02", tz = "GMT-1"), n = 5, min.n = 5))
stopifnot(length(p2) >= 5+1,
identical(p2, structure(1012611717 + (0:5), class = c("POSIXct", "POSIXt"),
tzone = "GMT-1", labels = time2d(57 + (0:5)))))
## failed in R 3.2.4
(T3 <- structure(1460019857.25, class = c("POSIXct", "POSIXt")))# typical Sys.date()
chkPretty(T3, 1) # error in svn 70438
## "Data" from example(pretty.Date) :
steps <- setNames(,
c("10 secs", "1 min", "5 mins", "30 mins", "6 hours", "12 hours",
"1 DSTday", "2 weeks", "1 month", "6 months", "1 year",
"10 years", "50 years", "1000 years"))
t02 <- as.POSIXct("2002-02-02 02:02")
(at <- chkPretty(t02 + 0:1, n = 5, min.n = 3, max.D=2))
xU <- as.POSIXct("2002-02-02 02:02", tz = "UTC")
x5 <- as.POSIXct("2002-02-02 02:02", tz = "EST5EDT")
atU <- chkPretty(seq(xU, by = "30 mins", length = 2), n = 5)
at5 <- chkPretty(seq(x5, by = "30 mins", length = 2), n = 5)
stopifnot(length(at) >= 4,
identical(sort(names(aat <- attributes(at))), c("class", "labels", "tzone")),
identical(aat$labels, time2d(59+ 0:3)),
identical(x5 - xU, structure(5, units = "hours", class = "difftime")),
identical(attr(at5, "labels"), attr(atU, "labels") -> lat),
identical(lat, paste("02", time2d(10* 0:4), sep=":"))
)
nns <- c(1:9, 15:17); names(nns) <- paste0("n=",nns)
prSeq <- function(x, n, st, ...) pretty(seq(x, by = st, length = 2), n = n, ...)
pps <- lapply(nns, function(n)
lapply(steps, function(st) prSeq(x=t02, n=n, st=st)))
Ls.ok <- list(
`10 secs` = c("00", "02", "04", "06", "08", "10"),
`1 min` = sprintf("%02d", 10*((0:6) %% 6)),
`5 mins` = sprintf("02:%02d", 2:7),
`30 mins` = sprintf("02:%02d", (0:4)*10),
`6 hours` = sprintf("%02d:00", 2:9),
`12 hours` = sprintf("%02d:00", (0:5)*3),
`1 DSTday` = c("Feb 02 00:00", "Feb 02 06:00", "Feb 02 12:00",
"Feb 02 18:00", "Feb 03 00:00", "Feb 03 06:00"),
`2 weeks` = c("Jan 28", "Feb 04", "Feb 11", "Feb 18"),
`1 month` = c("Jan 28", "Feb 04", "Feb 11", "Feb 18", "Feb 25", "Mar 04"),
`6 months` = c("Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep"),
`1 year` = c("Jan", "Apr", "Jul", "Oct", "Jan", "Apr"),
`10 years` = as.character(2000 + 2*(1:7)),
`50 years` = as.character(2000 + 10*(0:6)),
`1000 years`= as.character(2000 + 200*(0:6)))
stopifnot(identical(Ls.ok,
lapply(pps[["n=5"]], attr, "label")))
##
## Build a length-2 sequence starting at 'x' with step 'st' and run
## chkPretty() on it; on failure, return the error message instead of
## signalling.  The default 'max.D' loosens as the target count 'n' grows.
chkSeq <- function(st, x, n, max.D = if (n <= 4) 1 else if (n <= 10) 2 else 3, ...) {
  s2 <- seq(x, by = st, length = 2)
  tryCatch(
    chkPretty(s2, n = n, max.D = max.D, ...),
    error = function(e) conditionMessage(e)
  )
}
## Run chkSeq() for every combination of target counts 'nset' and time
## steps 'tSteps', all starting at the single time point 'tt'.
## Returns list(ok = <logical: which combinations produced a POSIXt result>,
##              Ds = <numeric: |length(pretty(.)) - (n+1)| for each failure>).
## Any failure other than chkPretty()'s "n mismatch" error aborts via stopifnot.
prSeq.errs <- function(tt, nset, tSteps) {
  stopifnot(length(tt) == 1)
  ## outer lapply over n, inner over the time steps -> nested list of results
  c.ps <- lapply(nset, function(n) lapply(tSteps, chkSeq, x = tt, n = n))
  ## ensure that all are ok *but* some which did not match 'n' well enough:
  cc.ps <- unlist(c.ps, recursive=FALSE)
  ok <- vapply(cc.ps, inherits, NA, what = "POSIXt")
  errs <- unlist(cc.ps[!ok])
  ## every failure must be the deviation error raised by chkPretty()
  stopifnot(startsWith(errs, prefix = "| |pretty(.)| - (n+1) |"))
  list(ok = ok,
       ## extract the numeric deviation D from each error message
       Ds = as.numeric(sub(".*\\| = ([0-9]+) > max.*", "\\1", errs)))
}
r.t02 <- prSeq.errs(t02, nset = nns, tSteps = steps)
table(r.t02 $ ok)
table(r.t02 $ Ds -> Ds)
## Currently [may improve]
## 3 4 5 6 7 8
## 4 14 6 3 2 1
## ... and ensure we only improve:
stopifnot(length(Ds) <= 30, max(Ds) <= 8, sum(Ds) <= 138)
## A Daylight saving time -- halfmonth combo:
(tOz <- structure(c(1456837200, 1460728800), class = c("POSIXct", "POSIXt"),
tzone = "Australia/Sydney"))
(pz <- pretty(tOz)) # failed in R 3.3.0, PR#16923
stopifnot(length(pz) <= 6, # is 5
attr(dpz <- diff(pz), "units") == "days", sd(dpz) < 1.6)
if(FALSE) { # save 0.4 sec
print(system.time(
r.tOz <- prSeq.errs(tOz[1], nset = nns, tSteps = steps)
))
stopifnot(sum(r.tOz $ ok) >= 132,
max(r.tOz $ Ds -> DOz) <= 8, mean(DOz) < 4.5)
}
nn <- c(1:33,10*(4:9),100*(1+unique(sort(rpois(20,4)))))
pzn <- lengths(lapply(nn, pretty, x=tOz))
stopifnot(0.5 <= min(pzn/(nn+1)), max(pzn/(nn+1)) <= 1.5)
proc.time() - .pt; .pt <- proc.time()
stopifnot(c("round.Date", "round.POSIXt") %in% as.character(methods(round)))
## round.POSIXt suppressed in R <= 3.2.x
## approxfun(*, method="constant")
Fn <- ecdf(1:5)
t <- c(NaN, NA, 1:5)
stopifnot(all.equal(Fn(t), t/5))
## In R <= 3.2.3, NaN values resulted in something like (n-1)/n.
## tar() default (i.e. "no files") behaviour:
## Exercise tar()'s default behaviour of archiving *all* files in the current
## directory when no 'files' argument is given (PR#16716: this used to
## produce an empty archive).  Runs inside a fresh temporary directory;
## '...' is forwarded to tar() (e.g. tar = "tar" to use the external program).
doit <- function(...) {
  dir.create(td <- tempfile("tar-experi"))
  setwd(td)
  dfil <- "base_Desc"
  file.copy(system.file("DESCRIPTION"), dfil)
  ## tar w/o specified files
  tar("ex.tar", ... ) # all files, i.e. 'dfil'
  unlink(dfil)
  ## the archive must list the copied file ...
  stopifnot(grepl(dfil, untar("ex.tar", list = TRUE)))
  ## ... and extracting must recreate it next to the archive
  untar("ex.tar")
  myF2 <- c(dfil, "ex.tar")
  stopifnot(identical(list.files(), myF2))
  unlink(myF2)
}
doit() # produced an empty tar file in R < 3.3.0, PR#16716
if(nzchar(Sys.which("tar"))) doit(tar = "tar")
## format.POSIXlt() of Jan.1 if 1941 or '42 is involved:
## January 1st of each year n1..n2, as POSIXlt in the CET time zone.
tJan1 <- function(n1, n2) {
  jan1 <- paste0(n1:n2, "/01/01")
  strptime(jan1, "%Y/%m/%d", tz = "CET")
}
## Indices (within n1..n2) of years whose Jan 1st carries the "CEST"
## daylight-saving abbreviation -- historically only 1941 and 1942.
wDSTJan1 <- function(n1, n2) {
  tz.abbr <- sub(".* ", "", format(tJan1(n1, n2), usetz = TRUE))
  which(tz.abbr == "CEST")
}
(w8 <- wDSTJan1(1801, 2300))
(w9 <- wDSTJan1(1901, 2300))
stopifnot(identical(w8, 141:142),# exactly 1941:1942 had CEST on Jan.1
identical(w9, 41: 42))
## for R-devel Jan.2016 to Mar.14 -- *AND* for R 3.2.4 -- the above gave
## integer(0) and c(41:42, 99:100, ..., 389:390) respectively
## tsp<- did not remove mts class
z <- ts(cbind(1:5,1:5))
tsp(z) <- NULL
stopifnot(identical(c(FALSE, TRUE),
c("mts","matrix") %in% class(z)))
## kept "mts" in 3.2.4, PR#16769
## as.hclust() and str() for deeply nested dendrograms
op <- options(expressions = 300) # so problem triggers early
d500 <- mkDend(500, 'x', 'single')
sink(tempfile()); str(d500) ; sink()
hc2 <- as.hclust(d500)
options(op)
## gave .. nested too deeply / node stack overflow / "C stack usage ..."
## for R <= 3.3.z
## keep at end
rbind(last = proc.time() - .pt,
total = proc.time())
| 60,003 | gpl-2.0 |
a02d4ca3efbbd8e1d9f2683a835adc2eefab4749 | wangzongyan/network | acsEdgeLength.R | set.seed(1)
library(data.table) # so fast!
library(igraph) # all the basic graph operations.
library(zipcode)
source("loadData.R")
wi = DT[State == "WI"]
zip = wi$"Zip Code"
zip = substr(zip, start = 1, stop = 5)
data(zipcode) # this contains the locations of zip codes
zipcode = as.data.table(zipcode); setkey(zipcode, zip) # thanks data.table for making things so fast!
loc = zipcode[zip, c("latitude", "longitude"), with = F]
loc = loc[complete.cases(loc)]
loc = as.matrix(loc)
plot(loc)
plot(loc[,2], loc[,1])
library(geosphere)
# so ugly and so fast!
samp = DT$NPI[sample(dim(DT)[1], 10000)] # take a random sample of NPI's.
DTsamp = DT[samp,mult ="first"]
dim(DTsamp)
DTsamp = DTsamp[complete.cases(DTsamp$"Zip Code")]
dim(DTsamp)
setkey(DTsamp, NPI)
tmp = Et[DTsamp$NPI]
Esamp = tmp[complete.cases(tmp)] #lots of NA's. Have not inspected why.
Esamp=as.matrix(Esamp)[,1:2] #igraph needs the edgelist to be in matrix format
ed = distGeo(
zipcode[
substr(
DT[Esamp[,1], mult = "first"]$"Zip Code" ,start = 1, stop = 5
)
, c("longitude", "latitude"), with = F]
, zipcode[substr(DT[Esamp[,2], mult = "first"]$"Zip Code" ,start = 1, stop = 5), c("longitude", "latitude"), with = F]
)/1000
mean(ed ==0, na.rm = T)
# How do the distribution of referral distances vary between providers?
# Let's study it using some characteristics of the provider's zip code from the ACS.
library(acs)
source('~/dataFiles/physicianReferral/acskey.R') # this loads my acs key
edgeZip = cbind(
substr(DT[Esamp[,1], mult = "first"]$"Zip Code",1,5),
substr(DT[Esamp[,2], mult = "first"]$"Zip Code",1,5)
)
names(which.max(table(Esamp[,1])))
us.zip=geo.make(zip.code = "*")
# B01002 is the median age
acsdat = acs.fetch(geography=us.zip, table.number="B01002", col.names="pretty")
acsdat = estimate(acsdat)
acsDT = as.data.table(cbind(substr(rownames(acsdat),start=7, stop =11),acsdat))
setnames(acsDT, "V1", "zip")
setkey(acsDT, zip)
x = acsDT[edgeZip[,1]]$"Median Age by Sex: Median age -- Total: "
y = acsDT[edgeZip[,2]]$"Median Age by Sex: Median age -- Total: "
plot(x,y, pch = '.')
abline(0,1)
library(MASS)
X = cbind(as.numeric(x), as.numeric(y))
X = X[complete.cases(X),]
X = X[X[,1]!=X[,2],]
contour(kde2d(X[,1],X[,2]))
| 2,274 | epl-1.0 |
f4880effedc19ed28bf62debab4ea1cc018745b3 | variani/matlm | tests/testthat/test-sim.R | context("sim")
test_that("correlated predictors", {
rho <- 0.9
N <- 1000
M <- 6
C <- matrix(rho, M, M)
diag(C) <- 1
simdat <- matlm_sim_randpred(seed = 1, N = N, M = M, rho = rho)
C_data <- simdat$pred %>% cor %>% round(1)
expect_equal(C, C_data)
})
| 275 | gpl-3.0 |
69462481769202a12aae27afb22d3f0d2f926996 | zhangh12/ARTP2 | R/load.summary.files.R |
## Read one or more files of SNP-level summary statistics, harmonize their
## columns, and return per-file data frames ready for meta-analysis.
##
## Args:
##   summary.files: character vector of file paths; each file must provide at
##     least SNP, RefAllele, EffectAllele and BETA (column names are matched
##     case-insensitively via convert.header()).
##   lambda: numeric vector of genomic-control inflation factors, one per file.
##   sel.snps: optional character vector of SNP IDs to retain; NULL keeps all.
##   options: list of settings; uses options$print (progress message) and
##     options$ambig.by.AF (resolve ambiguous SNPs by allele frequency, which
##     additionally requires RAF/EAF columns in the files).
##
## Returns: list(stat = <list of per-file data frames>,
##               lambda = <lambda entries of files that contributed SNPs>).
## Stops if required columns are missing, if some SNP has neither SE nor P,
## or if no SNPs remain for analysis.
load.summary.files <- function(summary.files, lambda, sel.snps, options){

  msg <- paste("Loading summary files:", date())
  if(options$print) message(msg)

  ambigFlag <- options$ambig.by.AF

  header <- c('SNP', 'RefAllele', 'EffectAllele', 'BETA') # columns that must be provided by users
  opt.header <- c('P', 'SE') # at least one of these two must be present
  ## allele frequencies are only needed when resolving ambiguous SNPs by AF
  if (ambigFlag) {
    opt.header3 <- c("RAF", "EAF")
  } else {
    opt.header3 <- NULL
  }
  complete.header <- c(header, opt.header, 'Chr', 'Pos', 'Direction', opt.header3)

  nfiles <- length(summary.files)
  stat <- list()
  lam <- NULL   # lambdas of files that actually contribute SNPs
  fid <- 0      # index of the next slot in 'stat'
  for(i in seq_len(nfiles)){
    ## first pass: read a small chunk only to discover the column layout
    st <- load.file(summary.files[i], header = TRUE, nrows = 1e4)

    header.map <- colnames(st)
    colnames(st) <- convert.header(colnames(st), complete.header)
    tmp <- (header %in% colnames(st))
    if(!all(tmp)){
      msg <- paste0("Columns below were not found in ", summary.files[i], ":\n", paste(header[!tmp], collapse = " "))
      stop(msg)
    }
    names(header.map) <- colnames(st)

    ## second pass: re-read the whole file, loading only relevant columns
    ## (irrelevant ones are dropped by setting their class to "NULL")
    col.class <- sapply(st, class)
    col.id <- which(colnames(st) %in% complete.header)
    col.class[-col.id] <- "NULL"
    col.class[c('SNP', 'RefAllele', 'EffectAllele')] <- 'character'
    names(col.class) <- header.map[names(col.class)]
    try(st <- load.file(summary.files[i], header = TRUE, select = col.class), silent = TRUE)
    colnames(st) <- convert.header(colnames(st), complete.header)

    if(!is.null(sel.snps)){
      st <- st[st$SNP %in% sel.snps, ]
    }

    if(nrow(st) == 0){
      next
    }

    if(!any(opt.header %in% colnames(st))){
      msg <- paste0("Neither SE nor P is provided in ", summary.files[i])
      stop(msg)
    }

    ## create placeholders for optional columns so that every data frame
    ## stored in 'stat' has the same layout
    if(!('P' %in% colnames(st))){
      st$P <- NA
    }

    if(!('SE' %in% colnames(st))){
      st$SE <- NA
    }

    ## bug fix: these two branches used to assign st$P <- NA, clobbering
    ## user-supplied p-values instead of creating the missing Chr/Pos columns
    if(!('Chr' %in% colnames(st))){
      st$Chr <- NA
    }

    if(!('Pos' %in% colnames(st))){
      st$Pos <- NA
    }

    if(!('Direction' %in% colnames(st))){
      msg <- paste0('Direction is absent in ', summary.files[i], '. Function meta() assumed equal sample sizes for all SNPs in that study. Violation of this assumption can lead to false positive if summary data of this study is used in pathway analysis')
      warning(msg)
      ## fall back to the sign of BETA as a one-study direction string
      st$Direction <- ifelse(st$BETA == 0, '0', ifelse(st$BETA > 0, '+', '-'))
    }

    if (ambigFlag) st <- ambig.check.data(st, summary.files[i], vars=opt.header3)

    ## Direction strings (e.g. "++-?") must have a consistent width per file
    nc <- unique(nchar(st$Direction))
    if(length(nc) != 1){
      msg <- paste0('String lengths of Direction are unequal in ', summary.files[i])
      stop(msg)
    }

    st <- st[, which(toupper(colnames(st)) %in% toupper(complete.header))]

    ## discard all copies of any duplicated SNP ID
    dup <- duplicated(st$SNP)
    if(any(dup)){
      dup.snps <- unique(st$SNP[dup])
      msg <- paste("SNPs duplicated in ", summary.files[i], " are discarded: ", paste(dup.snps, collapse = " "))
      warning(msg)
      st <- subset(st, !(st$SNP %in% dup.snps))
      if(nrow(st) == 0){
        next
      }
    }

    id.no.SE.P <- which(is.na(st$SE) & is.na(st$P))
    if(length(id.no.SE.P) > 0){
      msg <- paste("For SNPs below, neither SE nor P is provided in", summary.files[i], ":\n", paste(st$SNP[id.no.SE.P], collapse = " "))
      stop(msg)
    }

    st$RefAllele <- toupper(st$RefAllele)
    st$EffectAllele <- toupper(st$EffectAllele)

    ## fill in whichever of SE/P is missing, assuming a 1-df Wald test:
    ## z^2 = (BETA/SE)^2 ~ chisq(1)
    id.no.SE <- which(is.na(st$SE))
    id.no.P <- which(is.na(st$P))

    if(length(id.no.SE) > 0){
      z2 <- qchisq(st$P[id.no.SE], df = 1, lower.tail = FALSE)
      st$SE[id.no.SE] <- abs(st$BETA[id.no.SE]/sqrt(z2))
    }

    if(length(id.no.P) > 0){
      st$P[id.no.P] <- pchisq((st$BETA[id.no.P]/st$SE[id.no.P])^2, df = 1, lower.tail = FALSE)
    }

    fid <- fid + 1
    lam <- c(lam, lambda[i])
    rownames(st) <- st$SNP
    ## drop rows with missing values, but exclude the optional Chr/Pos
    ## columns from the check -- they are all-NA placeholders when absent
    ## from the input file and would otherwise wipe out every row
    chk.cols <- setdiff(colnames(st), c('Chr', 'Pos'))
    st <- st[complete.cases(st[, chk.cols, drop = FALSE]), ]
    stat[[fid]] <- st
    rm(st)
    gc()
  }

  if(length(stat) == 0){
    msg <- "No SNPs to be included in analysis"
    stop(msg)
  }

  lambda <- lam

  list(stat = stat, lambda = lambda)

}
| 4,059 | mit |
509f1f53fdb89e45b0ed818153e19d460ee62f1f | milokmilo/Stranded | R/hp.nqx.nM.R | #' Helligman Pollard mortality
#'
#' Heligman-Pollard mortality. Modified from HPbayes_0.1
#' @param H.out matrix of Heligman-Pollard parameter draws, one row per draw
#' @param age vector of ages at which to evaluate. Default = seq(0, 85, 1)
#' @keywords Heligman Pollard mortality
#' @export
#' @examples
#' hp.nqx.nM()
hp.nqx.nM <- function (H.out, age = seq(0, 85, 1)) {
  # One output row per posterior draw (row of H.out), one column per age.
  out <- matrix(NA, nrow = nrow(H.out), ncol = length(age))
  for (draw in seq_len(nrow(H.out))) {
    # Evaluate the Heligman-Pollard model for this parameter draw.
    out[draw, ] <- mod.nM(theta = H.out[draw, ], x = age)
  }
  return(out)
}
c790040cedf901689039eaa3628e9a8f6f0ff4e5 | danmaclean/h_pseu_analysis | nls_ss_gc/load.R | setup_environment()
# Per-window NLS and secretion-signal counts plus GC content, read from a
# tab-separated file: one row per genomic window on a scaffold.
data <- read.table('nls_ss_gc.csv', sep="\t",
                   header=TRUE,
                   col.names=c('scaffold', 'start', 'stop', 'nls_count', 'ss_count', 'gc_percent'),
                   colClasses=c('factor', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric') )
| 304 | cc0-1.0 |
69462481769202a12aae27afb22d3f0d2f926996 | zhangh12/ARTP3 | R/load.summary.files.R |
load.summary.files <- function(summary.files, lambda, sel.snps, options){
  # Load and harmonise GWAS summary statistics from a set of files.
  #
  # Args:
  #   summary.files: character vector of file paths.
  #   lambda: vector of genomic-control inflation factors, one per file.
  #   sel.snps: optional vector of SNP IDs to keep (NULL keeps all).
  #   options: list; uses $print (verbosity) and $ambig.by.AF (resolve
  #            strand-ambiguous SNPs by allele frequency).
  #
  # Returns: list(stat = <list of per-file data frames>,
  #               lambda = <lambdas of files that contributed >= 1 SNP>).
  
  msg <- paste("Loading summary files:", date())
  if(options$print) message(msg)
  
  ambigFlag <- options$ambig.by.AF
  
  header <- c('SNP', 'RefAllele', 'EffectAllele', 'BETA') # columns that must be provided by users
  opt.header <- c('P', 'SE') # at least one of these must be present
  if (ambigFlag) {
    opt.header3 <- c("RAF", "EAF")
  } else {
    opt.header3 <- NULL
  }
  complete.header <- c(header, opt.header, 'Chr', 'Pos', 'Direction', opt.header3)
  
  nfiles <- length(summary.files)
  stat <- list()
  lam <- NULL
  fid <- 0
  for(i in 1:nfiles){
    # First pass: read a few rows only, to detect which columns are present.
    st <- load.file(summary.files[i], header = TRUE, nrows = 1e4)
    header.map <- colnames(st)
    colnames(st) <- convert.header(colnames(st), complete.header)
    tmp <- (header %in% colnames(st))
    if(!all(tmp)){
      msg <- paste0("Columns below were not found in ", summary.files[i], ":\n", paste(header[!tmp], collapse = " "))
      stop(msg)
    }
    names(header.map) <- colnames(st)
    
    # Second pass: re-read the file, keeping only recognised columns.
    col.class <- sapply(st, class)
    col.id <- which(colnames(st) %in% complete.header)
    col.class[-col.id] <- "NULL"
    col.class[c('SNP', 'RefAllele', 'EffectAllele')] <- 'character'
    names(col.class) <- header.map[names(col.class)]
    try(st <- load.file(summary.files[i], header = TRUE, select = col.class), silent = TRUE)
    colnames(st) <- convert.header(colnames(st), complete.header)
    
    if(!is.null(sel.snps)){
      st <- st[st$SNP %in% sel.snps, ]
    }
    
    if(nrow(st) == 0){
      next
    }
    
    if(!any(opt.header %in% colnames(st))){
      msg <- paste0("Neither SE nor P is not provided in ", summary.files[i])
      stop(msg)
    }
    
    # Create any missing optional statistic column; each is recovered from
    # the other further below.
    if(!('P' %in% colnames(st))){
      st$P <- NA
    }
    
    if(!('SE' %in% colnames(st))){
      st$SE <- NA
    }
    # NOTE (bug fix): the original code handled absent Chr / Pos columns
    # here, but assigned `st$P <- NA` in both branches (copy-paste error),
    # destroying a valid P column. Missing Chr / Pos placeholders are now
    # added after the complete-cases filter below.
    
    if(!('Direction' %in% colnames(st))){
      msg <- paste0('Direction is absent in ', summary.files[i], '. Function meta() assumed equal sample sizes for all SNPs in that study. Violation of this assumption can lead to false positive if summary data of this study is used in pathway analysis')
      warning(msg)
      st$Direction <- ifelse(st$BETA == 0, '0', ifelse(st$BETA > 0, '+', '-'))
    }
    
    if (ambigFlag) st <- ambig.check.data(st, summary.files[i], vars=opt.header3)
    
    # All Direction strings must describe the same number of sub-studies.
    nc <- unique(nchar(st$Direction))
    if(length(nc) != 1){
      msg <- paste0('String lengths of Direction are unequal in ', summary.files[i])
      stop(msg)
    }
    
    st <- st[, which(toupper(colnames(st)) %in% toupper(complete.header))]
    
    dup <- duplicated(st$SNP)
    if(any(dup)){
      dup.snps <- unique(st$SNP[dup])
      msg <- paste("SNPs duplicated in ", summary.files[i], " are discarded: ", paste(dup.snps, collapse = " "))
      warning(msg)
      st <- subset(st, !(st$SNP %in% dup.snps))
      if(nrow(st) == 0){
        next
      }
    }
    
    id.no.SE.P <- which(is.na(st$SE) & is.na(st$P))
    if(length(id.no.SE.P) > 0){
      msg <- paste("For SNPs below, neither SE nor P is not provided in", summary.files[i], ":\n", paste(st$SNP[id.no.SE.P], collapse = " "))
      stop(msg)
    }
    
    st$RefAllele <- toupper(st$RefAllele)
    st$EffectAllele <- toupper(st$EffectAllele)
    
    # Recover SE from P via the two-sided chi-square, and vice versa.
    id.no.SE <- which(is.na(st$SE))
    id.no.P <- which(is.na(st$P))
    if(length(id.no.SE) > 0){
      z2 <- qchisq(st$P[id.no.SE], df = 1, lower.tail = FALSE)
      st$SE[id.no.SE] <- abs(st$BETA[id.no.SE]/sqrt(z2))
    }
    
    if(length(id.no.P) > 0){
      st$P[id.no.P] <- pchisq((st$BETA[id.no.P]/st$SE[id.no.P])^2, df = 1, lower.tail = FALSE)
    }
    
    fid <- fid + 1
    lam <- c(lam, lambda[i])
    rownames(st) <- st$SNP
    st <- st[complete.cases(st), ]
    # Bug fix companion: add the Chr / Pos placeholders only after the
    # completeness filter, so the all-NA placeholders do not wipe out
    # every row of the data frame.
    if(!('Chr' %in% colnames(st))){
      st$Chr <- NA
    }
    if(!('Pos' %in% colnames(st))){
      st$Pos <- NA
    }
    stat[[fid]] <- st
    rm(st)
    gc()
  }
  
  if(length(stat) == 0){
    msg <- "No SNPs to be included in analysis"
    stop(msg)
  }
  
  lambda <- lam
  
  list(stat = stat, lambda = lambda)
}
| 4,059 | mit |
36ef5570caf4d05e2a77fd24ec2527f8dafd4966 | fhernanb/semilleroApps | Beta/server.R | library(shiny)
shinyServer(function(input, output)
{
output$grafico1 <- renderPlot({
curve(dbeta(x, shape1=input$shape1, shape2=input$shape2),
from=0, to=1, ylab="Densidad",
las=1, lwd=3, col="deepskyblue3")
grid()
})
output$med_var <- renderText({
a <- input$shape1
b <- input$shape2
esperanza <- a/(a+b)
varianza <- (a*b) / ((a+b)^2 * (a+b+1))
paste(c("Para esta configuración E(X) =", round(esperanza, 2),
"con Var(X) =", round(varianza, 4)))
})
}) | 531 | gpl-2.0 |
01a897301e8eb2a10a1f413eba54b6f4bb36397e | aloy/qqplotr | revdep/checks.noindex/latrend/old/latrend.Rcheck/tests/testthat/test-lcmm.R | context('LCMM models')
skip_if_not_installed('lcmm')
rngReset()
test_that('default gmm', {
m = lcMethodTestLcmmGMM()
model = latrend(m, testLongData) %>%
expect_silent()
expect_valid_lcModel(model)
})
test_that('gmm with single cluster', {
latrend(lcMethodTestLcmmGMM(), testLongData, nClusters=1) %>%
expect_valid_lcModel()
})
test_that('gmm with empty cluster', {
latrend(lcMethodTestLcmmGMM(), testLongData, nClusters=5) %>%
expect_valid_lcModel()
})
test_that('default gbtm', {
m = lcMethodTestLcmmGBTM()
model = latrend(m, testLongData) %>%
expect_silent()
expect_valid_lcModel(model)
})
test_that('gbtm with nclusters', {
latrend(lcMethodTestLcmmGBTM(), testLongData, nClusters=1) %>%
expect_valid_lcModel()
})
| 760 | gpl-3.0 |
01a897301e8eb2a10a1f413eba54b6f4bb36397e | aloy/qqplotr | revdep/checks.noindex/latrend/new/latrend.Rcheck/tests/testthat/test-lcmm.R | context('LCMM models')
skip_if_not_installed('lcmm')
rngReset()
test_that('default gmm', {
m = lcMethodTestLcmmGMM()
model = latrend(m, testLongData) %>%
expect_silent()
expect_valid_lcModel(model)
})
test_that('gmm with single cluster', {
latrend(lcMethodTestLcmmGMM(), testLongData, nClusters=1) %>%
expect_valid_lcModel()
})
test_that('gmm with empty cluster', {
latrend(lcMethodTestLcmmGMM(), testLongData, nClusters=5) %>%
expect_valid_lcModel()
})
test_that('default gbtm', {
m = lcMethodTestLcmmGBTM()
model = latrend(m, testLongData) %>%
expect_silent()
expect_valid_lcModel(model)
})
test_that('gbtm with nclusters', {
latrend(lcMethodTestLcmmGBTM(), testLongData, nClusters=1) %>%
expect_valid_lcModel()
})
| 760 | gpl-3.0 |
110b896e2b9061b741890666b83ffb4efb8def13 | pbastide/PhylogeneticEM | R/generic_functions.R | # {General functions}
# Copyright (C) {2014} {SR, MM, PB}
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
########################################################
# Here are some general functions used in all the files.
########################################################
##
# getAncestor ( phy, x )
# PARAMETERS:
# @phy (tree) Input tree
# @x (int) Number of a node in the tree
# RETURNS:
# (int) Number of parental node of node x in tree phy
# DEPENDENCIES:
# none
# PURPOSE:
# Get the ancestor of node x
# NOTES:
# none
# REVISIONS:
# 16/05/14 - Initial release
##
# Parent of node `x` in tree `phy`; NA when `x` is the root
# (the root is node Ntip(phy) + 1 by ape's numbering convention).
getAncestor <- function(phy, x){
  if (x == Ntip(phy) + 1) return(NA)
  parent.row <- which(phy$edge[, 2] == x)
  return(phy$edge[parent.row, 1])
}
# Vectorized parent lookup: parent of each node in `x`
# (NA for nodes that are not the child of any edge, i.e. the root).
getAncestors <- function(phy, x){
  child.rows <- match(x, phy$edge[, 2])
  return(phy$edge[child.rows, 1])
}
# Recursively apply FUN to every non-list leaf of a nested list `x`,
# preserving the list structure and names. Extra arguments in `...`
# are forwarded to FUN.
replaceInList <- function (x, FUN, ...) {
  if (!is.list(x)) {
    return(FUN(x, ...))
  }
  for (k in seq_along(x)) {
    x[k] <- list(replaceInList(x[[k]], FUN, ...))
  }
  x
}
##
#' @title Correspondence between edges numbers
#'
#' @description
#' \code{correspondenceEdges} takes edges numbers on an input tree, and gives back
#' their corresponding numbers on the output tree.
#'
#' @param edges vector of index of edges in the tree "from"
#' @param from initial input tree (format "\code{phylo}")
#' @param to aimed output tree (format "\code{phylo}")
#'
#' @return vector of index of edges in the tree "to"
#'
#' @export
#26/05/14
##
correspondenceEdges <- function(edges, from, to){
  # An edge is identified by its child node: edge i of `from` ends at node
  # from$edge[i, 2]. Its counterpart in `to` is the edge ending at the same
  # node (node numbering is assumed shared between the two trees).
  child.match <- match(from$edge[, 2], to$edge[, 2])
  return(child.match[edges])
}
# Backward-compatible alias (historical misspelling).
correspondanceEdges <- correspondenceEdges
##
# compute_times_ca (phy)
# PARAMETERS:
# @phy (tree) input tree
# RETURNS:
# (matrix) : entry (i,j) of the matrix is t_ij, the time of shared ancestry between nodes i and j
# DEPENDENCIES:
# (node.depth.edgelength, mrca)
# PURPOSE:
# Compute t_ij
# NOTES:
# none
# REVISIONS:
# 22/05/14 - Initial release
##
##
#' @title Common Ancestors Times
#'
#' @description
#' \code{compute_times_ca} computes the times t_ij between the root and the common
#' ancestor of two tips i, j.
#'
#' @details
#' This function relies on \code{ape} functions
#' \code{\link[ape]{node.depth.edgelength}} and \code{\link[ape]{mrca}}.
#'
#' @param phy a phylogenetic tree of class \code{\link[ape]{phylo}}.
#'
#' @return a matrix of times of shared evolution, ordered as the tips of the
#' tree. The matrix is of type \code{\link[Matrix]{symmetricMatrix-class}}.
#'
#' @export
##
compute_times_ca <- function(phy) {
  # Depth (distance from the root) of every node, tips first.
  times <- ape::node.depth.edgelength(phy)
  # Most recent common ancestor of every pair of nodes (full = internals too).
  prac <- ape::mrca(phy,full=TRUE)
  # t_ij = depth of mrca(i, j): the time of shared ancestry of i and j.
  times_ca <- matrix(times[prac],dim(prac))
  # attr(times_ca, "ntaxa") <- length(phy$tip.label)
  # times_ca <- phy$root.edge + times_ca # Add the root length
  return(as(times_ca, "symmetricMatrix"))
}
##
# compute_dist_phy (phy)
# PARAMETERS:
# @phy (tree) input tree
# RETURNS:
# (matrix) : entry (i,j) of the matrix is d_ij, the phylogenetic distance between nodes i and j
# DEPENDENCIES:
# (dist.nodes)
# PURPOSE:
# Compute d_ij
# NOTES:
# none
# REVISIONS:
# 22/05/14 - Initial release
##
##
#' @title Phylogenetic Distances
#'
#' @description
#' \code{compute_dist_phy} computes the phylogenetic distances d_ij between all the
#' tips i, j.
#'
#' @details
#' This function relies on \code{ape} function
#' \code{\link[ape]{dist.nodes}}.
#'
#' @param phy a phylogenetic tree of class \code{\link[ape]{phylo}}.
#'
#' @return a matrix of phylogenetic distances, ordered as the tips of the
#' tree. The matrix is of type \code{\link[Matrix]{symmetricMatrix-class}}.
#'
#' @export
##
compute_dist_phy <- function(phy) {
  # Pairwise patristic distances between all nodes (tips and internals).
  dist_phy <- ape::dist.nodes(phy)
  # Keep the tip count as an attribute for downstream subsetting.
  attr(dist_phy, "ntaxa") <- length(phy$tip.label)
  return(as(dist_phy, "symmetricMatrix"))
}
# Rescale an ultrametric tree so that its total height is (roughly) one.
# An epsilon is subtracted from the height before dividing so that, despite
# numerical error, every tip depth ends up at or above 1.
scale.tree <- function(phylo){
  if (!is.ultrametric(phylo)) stop("The tree is not ultrametric")
  ntaxa <- length(phylo$tip.label)
  height <- min(ape::node.depth.edgelength(phylo)[1:ntaxa]) - .Machine$double.eps^0.5# take the min so that any error is above 1
  phylo$edge.length <- phylo$edge.length/height
  return(phylo)
}
###############################################################################
## Functions to wander on the tree
###############################################################################
##
#' @title Generic recursion down the tree.
#'
#' @description
#' \code{recursionDown} uses the function \code{updateDown} to compute
#' daughters rows of matrix param.
#' @details
#' This function is to be used in other more complex function that need to
#' update a quantity from the root to the tips of a tree. Note that the
#' input tree must be in cladewise order.
#'
#' @param phy Input tree, in cladewise order.
#' @param params Matrix of parameters to update by the recursion
#' @param updateDown Function to be used for the update
#' @param ... Arguments to be used by the function updateDown
#'
#' @return Matrix of parameters updated.
#'
#' @keywords internal
#'
##
recursionDown <- function(phy, params, updateDown, ...) {
  if (attr(phy,"order") != "cladewise") stop("The tree must be in cladewise order")
  ## Choose function to subset
  # Callers may override how a node's entry is read from / written to
  # `params` by passing `subset_node` / `allocate_subset_node` through `...`;
  # the defaults treat `params` as a matrix with one row per node.
  if (hasArg(subset_node)){
    subset_node <- list(...)[["subset_node"]]
  } else {
    subset_node <- subset_node.default
  }
  if (hasArg(allocate_subset_node)){
    allocate_subset_node <- list(...)[["allocate_subset_node"]]
  } else {
    allocate_subset_node <- allocate_subset_node.default
  }
  ## Tree recursion from root to tips
  # Cladewise order guarantees a parent's entry is computed before any of
  # its children's, so a single pass over the edges suffices.
  for (e in 1:nrow(phy$edge)) {
    edge <- phy$edge[e, ]
    length <- phy$edge.length[e]
    parent <- edge[1]
    daughter <- edge[2]
    params <- allocate_subset_node(daughter, params,
                                   updateDown(edgeNbr = e,
                                              ancestral = subset_node(parent, params),
                                              length = length, ...))
  }
  return(params)
}
# Default writer for the tree recursions: store `value` as row `node`
# of `matrix` and return the updated matrix.
allocate_subset_node.default <- function(node, matrix, value){
  matrix[node, ] <- value
  matrix
}
# Default reader for the tree recursions: extract row `node` of `matrix`.
subset_node.default <- function(node, matrix){
  matrix[node, ]
}
##
# recursionUp ( phy, params, updateUp, ... )
# PARAMETERS:
# @phy (tree) Input tree, in postorder order
# @params (matrix) Matrix of parameters to update by the recursion
# @updateUp (function) Function to be used for the update
# RETURNS:
# (matrix) Matrix of parameters updated
# DEPENDENCIES:
# none
# PURPOSE:
# Do the recursion from the tips to the root. params is updated row after row.
# NOTES:
# The input tree must be in postorder order
# REVISIONS:
# 21/05/14 - Initial release
##
recursionUp <- function(phy, params, updateUp, ...){
  # Tip-to-root recursion: row `parent` of `params` is recomputed from the
  # rows of all its daughters via `updateUp`. `phy` must be in postorder,
  # so that every daughter is visited before its parent and all edges of a
  # sibling group are consecutive.
  if (attr(phy,"order") != "postorder") stop("The tree must be in postorder order")
  ## Tree recursion
  e <- 1
  while (e <= nrow(phy$edge)) {
    edge <- phy$edge[e, ]
    parent <- edge[1]
    # All edges sharing this parent (the current sibling group).
    ii <- which(phy$edge[,1]==parent)
    daughters <- phy$edge[ii,2]
    # drop = FALSE keeps a one-row matrix when there is a single daughter
    # (was the discouraged shorthand `F`, which can be reassigned).
    params[parent,] <- updateUp(edgesNbr=ii,
                                daughters=daughters,
                                daughtersParams = params[daughters,,drop=FALSE],
                                parent = parent, ...)
    # Jump past the last edge of this sibling group.
    e <- ii[length(ii)]+1
  }
  return(params)
}
# Tip-to-root recursion storing per-node results in a list:
# params[[parent]] is recomputed from the entries of all its daughters.
# `phy` must be in postorder so daughters are visited before parents.
recursionUp_list <- function(phy, params, updateUp, ...){
  if (attr(phy,"order") != "postorder") stop("The tree must be in postorder order")
  n.edges <- nrow(phy$edge)
  ee <- 1
  while (ee <= n.edges) {
    parent <- phy$edge[ee, 1]
    # All edges of the current sibling group share this parent.
    group <- which(phy$edge[, 1] == parent)
    children <- phy$edge[group, 2]
    params[[parent]] <- updateUp(edgesNbr = group,
                                 daughters = children,
                                 daughtersParams = params[children],
                                 parent = parent, ...)
    # Skip to the first edge after this sibling group.
    ee <- group[length(group)] + 1
  }
  params
}
###############################################################################
## Functions to generate trees with fixed topologies
###############################################################################
##
# rtree.sym (n)
# PARAMETERS:
# @n (int) tree with 2^n tips
# RETURNS:
#   (tree) A symmetric tree with 2^n tips
# DEPENDENCIES:
#   read.tree
# PURPOSE:
#   Generate a symmetric tree
# NOTES:
# none
# REVISIONS:
# 26/05/14 - Initial release
##
# Build a fully balanced (symmetric) binary tree with 2^n tips,
# all tips labelled "A", by doubling a Newick string n times.
rtree.sym <- function(n){
  newick <- "A"
  for (k in 1:n) {
    newick <- paste0("(", newick, ",", newick, ")")
  }
  return(read.tree(text = paste0(newick, ";")))
}
##
# rtree.comb (n)
# PARAMETERS:
# @n (int) tree with n tips
# RETURNS:
# (tree) A comb-like tree with n tips
# DEPENDENCIES:
# read.tree
# PURPOSE:
# Generate a comb-like tree
# NOTES:
# none
# REVISIONS:
# 26/05/14 - Initial release
##
# Build a caterpillar (comb-like) tree with n tips, all labelled "A",
# by nesting one extra tip per iteration in a Newick string.
rtree.comb <- function(n){
  if (n == 1) return(read.tree(text="(A);"))
  newick <- "A"
  for (k in 2:n) {
    newick <- paste0("(A,", newick, ")")
  }
  return(read.tree(text = paste0(newick, ";")))
}
###############################################################################
## Functions to test the parameters of the processes
###############################################################################
##
#' @title Check selection strength
#'
#' @description
#' \code{check.selection.strength} checks, if the process is an OU, if the
#' selection strength is not too low, in which case the process is replaced
#' with a BM.
#'
#' @details
#' This function return a process in a form of a character. If the entry
#' process is "BM", then nothing is done. If it is "OU", then the verification
#' of the selection strength is done.
#'
#' @param process : "BM" or "OU"
#' @param selection.strength the selection strength parameter (if OU)
#' @param eps the tolerance for the selection strength
#'
#' @return character : "BM" or "OU"
#'
#' @keywords internal
#16/06/14 - Initial release
##
check.selection.strength <- function(process, selection.strength = NA,
                                     eps = 10^(-6), ...){
  # A BM has no selection strength to check.
  if (process == "BM") {
    return("BM")
  }
  # Degenerate OU: an (almost) zero selection strength is a plain BM.
  if (sum(abs(selection.strength)) < eps) {
    warning(paste0("The selection strength is too low (L1-norm<", eps, "), process is considered to be a simple Brownian Motion"))
    return("BM")
  }
  # Eigenvalues with non-positive real parts make the OU ill-behaved.
  if (any(Re(eigen(selection.strength)$values) < eps)) {
    warning("All the eigen values of the selection strengh do not have a strictly positive real part. That might cause some issue. Proceed with caution.")
  }
  process
}
##
#' @title Test state of root.
#'
#' @description
#' \code{test.root.state} test whether the parameters of root.state given
#' by the user are coherent. If not, it returns a new corrected list to
#' define root.state.
#'
#' @details
#' To test coherence, the following priorities are applied:
#' random > stationary.root > values.root = exp.root = var.root
#'
#' @param root.state A list giving the root state
#' @param process "BM", "OU" or "scOU"
#' @param ... parameters of the process (if OU)
#'
#' @return Coherent list root.state.
#'
#' @keywords internal
# 28/05/14 - Initial release
##
test.root.state <- function(root.state, process=c("BM", "OU", "scOU", "OUBM"), ...) {
  # Validate the process name, then collapse near-zero-selection OUs to BM.
  process <- match.arg(process)
  process <- check.selection.strength(process, ...)
  # Dispatch to the process-specific consistency check.
  if (identical(process, "BM")) {
    test.root.state.BM(root.state)
  } else {
    test.root.state.OU(root.state, ...)
  }
}
test.root.state.BM <- function(root.state, ...) {
  # A BM has no stationary distribution: drop any stationary.root flag.
  if (!is.null(root.state$stationary.root) && root.state$stationary.root){
    warning("The BM does not have a stationary state. root.state$stationary.root is set to NULL")
    root.state$stationary.root <- NULL
  }
  # Random root: a fixed root value is meaningless.
  if (root.state$random && !anyNA(root.state$value.root)) {
    warning("As root state is supposed random, its value is not defined and set to NA")
    root.state$value.root <- NA
    root.state$var.root <- as(root.state$var.root, "dpoMatrix")
  }
  # Fixed root: expectation and variance are meaningless.
  # (Bug fix: the second operand used to re-test exp.root instead of
  # var.root, so a fixed root with a stray var.root went undetected.)
  if (!root.state$random && (!anyNA(root.state$exp.root) || !anyNA(root.state$var.root))) {
    warning("As root state is supposed fixed, its expectation and variance are not defined and set to NA")
    root.state$exp.root <- NA
    root.state$var.root <- NA
  }
  return(root.state)
}
test.root.state.OU <- function(root.state, process, variance, selection.strength, optimal.value, ...) {
  # Random root: a fixed root value is meaningless.
  if (root.state$random && !anyNA(root.state$value.root)) {
    warning("As root state is supposed random, its value is not defined and set to NA")
    root.state$value.root <- NA
    root.state$var.root <- as(root.state$var.root, "dpoMatrix")
  }
  # Fixed root: expectation and variance are meaningless.
  # (Bug fix: the second operand used to re-test exp.root instead of
  # var.root, so a fixed root with a stray var.root went undetected.)
  if (!root.state$random && (!anyNA(root.state$exp.root) || !anyNA(root.state$var.root))) {
    warning("As root state is supposed fixed, its expectation and variance are not defined and set to NA")
    root.state$exp.root <- NA
    root.state$var.root <- NA
  }
  # Default the stationarity flag from the randomness of the root.
  if (is.null(root.state$stationary.root)) {
    warning("root.state$stationary.root was not defined, and is now set to its default value")
    if (root.state$random){
      root.state$stationary.root <- TRUE
    } else {
      root.state$stationary.root <- FALSE
    }
  }
  # A fixed root cannot be at the stationary distribution.
  if (!root.state$random && root.state$stationary.root) {
    warning("As root state is supposed fixed, the root cannot be at its stationary state. root.state$stationary.root is set to FALSE")
    root.state$stationary.root <- FALSE
  }
  # When the root is declared stationary, enforce exp.root = optimal.value
  # and var.root = stationary variance of the OU.
  root.state <- coherence_stationary_case(root.state, optimal.value,
                                          variance, selection.strength)
  return(root.state)
}
# Enforce the implications of a stationary root on an OU process:
# the root expectation must equal the optimal value, and the root variance
# must be the stationary variance Gamma solving vec(Gamma) = (A (+) A)^{-1} vec(R).
# Non-stationary roots are returned unchanged.
coherence_stationary_case <- function(root.state, optimal.value,
                                      variance, selection.strength){
  if (!root.state$stationary.root){
    return(root.state) ## Do nothing
  } else {
    # Align the root expectation with the optimal value.
    if (!isTRUE(all.equal(root.state$exp.root, optimal.value))){
      root.state$exp.root <- optimal.value
      warning("As root is supposed to be in stationary case, root expectation was set to be equal to optimal value.")
    }
    # Align the root variance with the stationary variance of the process.
    root_var_expected <- compute_stationary_variance(variance, selection.strength)
    if(!isTRUE(all.equal(root.state$var.root, root_var_expected))){
      root.state$var.root <- as(root_var_expected, "dpoMatrix")
      warning("As the root is supposed to be in stationary state, root variance Gamma was set to: vec(Gamma) = (A kro_plus A)^{-1}vec(R).")
    }
    return(root.state)
  }
}
##
#' @title Compute the stationary variance matrix
#'
#' @description
#' \code{compute_stationary_variance} computes the stationary variance matrix of
#' an OU process.
#'
#' @param variance the variance (rate matrix) of the process.
#' @param selection.strength the selection strength (alpha) matrix of the
#' process.
#'
#' @return A positive definite Matrix of class \code{\link[Matrix]{dpoMatrix-class}}.
#'
#' @export
##
compute_stationary_variance <- function(variance, selection.strength){
  # No selection strength (e.g. a BM): no stationary variance exists.
  if (is.null(selection.strength)) return(NA)
  if (length(as.vector(selection.strength)) == 1){
    # Scalar case: Gamma = Sigma / (2 * alpha).
    vv <- as.matrix(variance) / (2 * selection.strength)
    vv <- Matrix(vv)
    return(as(vv, "dpoMatrix"))
  } else if (isDiagonal(selection.strength)) {
    # Diagonal case: Gamma_ij = Sigma_ij / (alpha_i + alpha_j).
    dd <- diag(selection.strength)
    vv <- variance / outer(dd, dd, "+")
    # Bug fix: this branch used to end on a plain assignment, so the value
    # fell off the if-chain and was returned invisibly; return explicitly.
    return(as(vv, "dpoMatrix"))
  } else {
    # General case: vec(Gamma) = (A kro_plus A)^{-1} vec(Sigma).
    variance_vec <- as.vector(variance)
    kro_sum_A <- kronecker_sum(selection.strength, selection.strength)
    kro_sum_A_inv <- solve(kro_sum_A)
    root_var_vec <- kro_sum_A_inv %*% variance_vec
    gamma <- matrix(root_var_vec, dim(variance))
    if (!isSymmetric(gamma, tol = (.Machine$double.eps)^(0.7))) stop("Error in computation of stationary variance: matrix computed was not symmetric.")
    gamma <- symmpart(gamma)
    # Project onto the nearest positive-definite matrix before returning.
    gamma <- nearPD(gamma)
    return(gamma$mat)
  }
}
# Invert compute_stationary_variance: recover the rate matrix R of the OU
# from its stationary variance Gamma and selection strength A.
compute_variance_from_stationary <- function(var.root, selection.strength){
  p <- dim(var.root)[1]
  if (p == 1){
    # Univariate case: sigma2 = 2 * alpha * gamma2.
    return(var.root * (2 * selection.strength))
  }
  # Multivariate case: vec(R) = (A kro_plus A) vec(Gamma).
  vec.gamma <- as.vector(var.root)
  A.sum <- kronecker_sum(selection.strength, selection.strength)
  vec.R <- A.sum %*% vec.gamma
  return(matrix(vec.R, dim(var.root)))
}
# Kronecker sum of two square matrices:
#   M (+) N = M x I_n + I_m x N, an (mn) x (mn) matrix,
# where M is m x m and N is n x n.
kronecker_sum <- function(M, N){
  if (!is.matrix(M) || !is.matrix(N))
    stop("Entries of Kronecker sum must be matrices")
  if ((length(dim(M)) != 2) || (length(dim(N)) != 2))
    stop("Entries of Kronecker sum must be matrices")
  if ((dim(M)[1] != dim(M)[2]) || (dim(N)[1] != dim(N)[2]))
    stop("Entries of Kronecker sum must be squared matrice.")
  m <- dim(M)[1]; Im <- diag(1, m, m)
  n <- dim(N)[1]; In <- diag(1, n, n)
  # Bug fix: the identities were swapped (kronecker(M, Im) + kronecker(In, N)),
  # which only conforms when m == n (the only case used in this package).
  # The standard definition below is identical when m == n and also correct
  # for m != n.
  return(kronecker(M, In) + kronecker(Im, N))
}
##
# @title Log Likelihood of a model
#
# @description
# \code{likelihood.OU} computes the likelihhod of the data given a model. This
# is a non-efficient debugging function.
#
# @details
# This function uses functions \code{compute_mean_variance.simple}, \code{compute_times_ca}, \code{compute_dist_phy}, \code{compute_log_likelihood.simple}
#
# @param Y the vector of the data at the tips
# @param phylo a phylogenetic tree
# @param params list of parameters with the correct structure
#
# @return boolean
#
# @keywords internal
#02/10/14 - Initial release
##
# log_likelihood.OU <- function(Y, phylo, params, ...) {
# moments <- compute_mean_variance.simple(phylo = phylo,
# times_shared = compute_times_ca(phylo),
# distances_phylo = compute_dist_phy(phylo),
# process = "OU",
# params_old = params, ...)
# LL <- compute_log_likelihood.simple(phylo = phylo,
# Y_data = Y,
# sim = moments$sim,
# Sigma = moments$Sigma,
# Sigma_YY_inv = moments$Sigma_YY_inv)
# return(LL)
# }
##
#' @title Check dimensions of the parameters
#'
#' @description
#' \code{check_dimensions} checks dimensions of the parameters.
#' If wrong, throw an error.
#'
#' @param p dimension of the trait simulated
#' @param root.state (list) state of the root, with:
#' random : random state (TRUE) or deterministic state (FALSE)
#' value.root : if deterministic, value of the character at the root
#' exp.root : if random, expectation of the character at the root
#' var.root : if random, variance of the character at the root
#' @param shifts (list) position and values of the shifts :
#' edges : vector of the K id of edges where the shifts are
#' values : matrix p x K of values of the shifts on the edges (one column = one shift)
#' relativeTimes : vector of dimension K of relative time of the shift from the
#' parent node of edges
#' @param variance variance-covariance matrix size p x p
#' @param selection.strength matrix of selection strength size p x p (OU)
#' @param optimal.value vector of p optimal values at the root (OU)
#'
#' @return Nothing
#'
#' @keywords internal
#'
# 25/08/15 - Multivariate
##
check_dimensions <- function(p,
                             root.state, shifts, variance,
                             selection.strength = NULL, optimal.value = NULL){
  # Validate (and normalise) every parameter of the process against the
  # trait dimension p; see the roxygen block above for the structures.
  root.state <- check_dimensions.root.state(p, root.state)
  #if (!is.null(unlist(shifts)))
  shifts <- check_dimensions.shifts(p, shifts)
  variance <- check_dimensions.matrix(p, p, variance, "variance")
  variance <- as(variance, "dpoMatrix")
  # Bug fix: the `!is.null` guard below lacked braces, so it only covered
  # the first inner `if`; a NULL selection strength then reached
  # check_dimensions.matrix, which silently turned it into a zero matrix.
  if (!is.null(selection.strength)) {
    # A length-p vector is interpreted as the diagonal of A; a scalar is
    # expanded to alpha * I_p.
    if (is.vector(selection.strength) && length(selection.strength) == p){
      selection.strength <- diag(selection.strength, ncol = length(selection.strength))
    }
    if (is.vector(selection.strength) && length(selection.strength) == 1){
      selection.strength <- diag(rep(selection.strength, p))
    }
    selection.strength <- check_dimensions.matrix(p, p, selection.strength,
                                                  "selection strength")
  }
  if (!is.null(optimal.value)) {
    optimal.value <- check_dimensions.vector(p, optimal.value, "optimal value")
  }
  return(params = list(root.state = root.state,
                       shifts = shifts,
                       variance = variance,
                       selection.strength = selection.strength,
                       optimal.value = optimal.value))
}
# Check/normalize a matrix parameter to dimension p x q.
#
# NULL is replaced by a p x q zero matrix. When p == 1, a plain vector of
# length q is accepted and reshaped into a 1 x q matrix. An error mentioning
# `name` is thrown when dimensions do not match.
check_dimensions.matrix <- function(p, q, matrix, name = "matrix"){
  if (is.null(matrix)) matrix <- matrix(0, p, q)
  if (p == 1){
    if (is.vector(matrix) && length(matrix) != q)
      # BUG FIX: the message used to paste the matrix object itself
      # instead of its name, producing garbage error text.
      stop(paste0(name, " should be a scalar in dimension q = ", q, "."))
    dim(matrix) <- c(1, q)
  }
  if (is.vector(matrix) || !all(dim(matrix) == c(p, q)))
    # BUG FIX: same name-vs-object confusion as above.
    stop(paste0("Dimensions of ", name, " matrix do not match"))
  return(matrix)
}
# Validate that `v` is a plain vector of length `p`; return it unchanged.
# The `name` argument is only used to build informative error messages.
check_dimensions.vector <- function(p, v, name = "vector"){
  if (!is.vector(v)) stop(paste0(name, " should be a vector."))
  if (length(v) != p) stop(paste0("Dimensions of ", name, " do not match"))
  v
}
# Normalize the dimensions of the root state description.
# Fixed root: check the value vector. Random root: check the expectation
# vector and the variance matrix, coercing the latter to "dpoMatrix".
check_dimensions.root.state <- function(p, root.state){
  if (!root.state$random){
    root.state$value.root <- check_dimensions.vector(p, root.state$value.root, "Root Value")
  } else {
    root.state$exp.root <- check_dimensions.vector(p, root.state$exp.root, "Root Expectation")
    root.state$var.root <- check_dimensions.matrix(p, p, root.state$var.root, "root variance")
    root.state$var.root <- as(root.state$var.root, "dpoMatrix")
  }
  root.state
}
# Normalize the dimensions of the shifts description: `values` must be a
# p x K matrix (K = number of shifted edges) and `relativeTimes` a length-K
# vector. An all-zero (or NULL) relativeTimes is re-expanded to rep(0, K).
check_dimensions.shifts <- function(p, shifts){
  n_shifts <- length(shifts$edges)
  shifts$values <- check_dimensions.matrix(p, n_shifts, shifts$values, "shifts values")
  if (sum(shifts$relativeTimes) == 0){ # If all zero, re-formate
    shifts$relativeTimes <- rep(0, n_shifts)
  }
  shifts$relativeTimes <- check_dimensions.vector(n_shifts, shifts$relativeTimes, "shifts relative Times")
  shifts
}
##
#' @title Find a reasonable grid for alpha
#'
#' @description Grid so that
#' 2*ln(2)*quantile(d_ij)/factor_up_alpha < t_{1/2} < factor_down_alpha * ln(2) * h_tree
#' Ensures that for alpha_min, it is almost a BM, and for alpha_max,
#' almost all the tips are decorrelated.
#'
#' @details
#' If \code{quantile_low_distance=0}, then \code{quantile(d_ij)=min(d_ij)}, and, for any
#' two tips i,j, the correlation between i and j is bounded by exp(-factor_up_alpha/2).
#' Those values of alpha will be used for the re-scaling of the tree, which has an
#' exponential term in exp(2*alpha*h). The function makes sure that this number is
#' below the maximal float allowed (equals to \code{.Machine$double.xmax}).
#'
#' @param phy phylogenetic tree of class "\code{phylo}"
#' @param alpha fixed vector of alpha values if already known. Default to NULL.
#' @param nbr_alpha the number of elements in the grid
#' @param factor_up_alpha factor for up scalability
#' @param factor_down_alpha factor for down scalability
#' @param quantile_low_distance quantile for min distance
#' @param log_transform whether to take a log scale for the spacing of alpha
#' values. Default to TRUE.
#' @param allow_negative whether to allow negative values for alpha (Early Burst).
#' See documentation of \code{\link{PhyloEM}} for more details. Default to FALSE.
#' @param ... not used.
#'
#' @return A grid of alpha values
#'
#' @seealso \code{\link{transform_branch_length}}, \code{\link{.Machine}}
#'
#' @export
#'
##
find_grid_alpha <- function(phy, alpha = NULL,
                            nbr_alpha = 10,
                            factor_up_alpha = 2,
                            factor_down_alpha = 3,
                            quantile_low_distance = 0.0001,
                            log_transform = TRUE,
                            allow_negative = FALSE, ...){
  # A user-supplied grid short-circuits the automatic construction.
  if (!is.null(alpha)) return(alpha)
  # Pairwise tip-to-tip distances (cophenetic.phylo, dispatched from ape).
  dtips <- cophenetic(phy)
  # Smallest "typical" positive tip distance; the low quantile guards
  # against one pair of near-identical tips driving alpha_max up.
  d_min <- quantile(dtips[dtips > 0], quantile_low_distance)
  # Depth of the first tip, used as the tree height -- assumes an
  # ultrametric tree (all tips at equal depth). NOTE(review): confirm.
  h_tree <- node.depth.edgelength(phy)[1]
  # Lower bound: phylogenetic half-life much larger than the tree height,
  # so the process is almost a BM.
  alpha_min <- 1 / (factor_down_alpha * h_tree)
  # Upper bound: half-life small compared to the closest tips, so tips
  # are essentially decorrelated.
  alpha_max <- factor_up_alpha / (2 * d_min)
  # The tree re-scaling uses exp(2 * alpha * h_tree); cap alpha so that
  # this quantity stays below the largest representable double.
  alpha_max_machine <- log(.Machine$double.xmax^0.975)/(2*h_tree)
  if (alpha_max > alpha_max_machine){
    warning("The chosen alpha_max was above the machine precision. Taking alpha_max as the largest possible on this machine.")
    alpha_max <- alpha_max_machine
  }
  # With negative alphas allowed (Early Burst), split the budget between
  # the negative and positive halves of the grid.
  if (allow_negative) nbr_alpha <- nbr_alpha %/% 2
  if (log_transform){
    # Log-spaced grid between alpha_min and alpha_max.
    alpha_grid <- exp(seq(log(alpha_min), log(alpha_max), length.out = nbr_alpha))
  } else {
    alpha_grid <- seq(alpha_min, alpha_max, length.out = nbr_alpha)
  }
  if (allow_negative){
    # Most negative alpha such that exp(2 * alpha * h_tree) does not
    # underflow below machine epsilon.
    alpha_min_neg_machine <- log(.Machine$double.eps^0.9)/(2*h_tree)
    # Prepend the negative half (from alpha_min_neg_machine up to
    # -alpha_min) and a 0 (exact BM) before the positive grid.
    if (log_transform){
      alpha_grid <- c(-exp(seq(log(-alpha_min_neg_machine), log(alpha_min), length.out = nbr_alpha)), 0, alpha_grid)
    } else {
      alpha_grid <- c(seq(alpha_min_neg_machine, -alpha_min, length.out = nbr_alpha), 0, alpha_grid)
    }
  }
  return(alpha_grid)
}
##
#' @title Check range of alpha
#'
#' @description Check that the chosen values of alpha are not too large
#' or too small, in order to avoid numerical instabilities.
#'
#' @param alpha a vector of alpha values.
#' @param h_tree the total height of the tree.
#'
#' @keywords internal
#'
##
# Check that the alpha values are compatible with machine precision.
#
# Downstream computations involve exp(2 * alpha * h_tree); alpha must stay
# within bounds where that quantity neither overflows (.Machine$double.xmax)
# nor underflows (.Machine$double.eps). Throws an error on out-of-range
# values; returns NULL invisibly otherwise.
check_range_alpha <- function(alpha, h_tree){
  alpha_max_machine <- log(.Machine$double.xmax^0.98)/(2*h_tree)
  alpha_min_machine <- log(.Machine$double.eps^0.98)/(2*h_tree)
  if (any(alpha > alpha_max_machine)) {
    # BUG FIX: user-facing message misspelled "strength" ("strengh").
    stop(paste0("The value for the selection strength you took is too big, and will lead to numerical instabilities. Please consider using a value below ", alpha_max_machine))
  }
  if (any(alpha < alpha_min_machine)) {
    stop(paste0("The value for the selection strength you took is too small, and will lead to numerical instabilities. Please consider using a value above ", alpha_min_machine))
  }
}
##
#' @title Transform branch length for a re-scaled BM
#'
#' @description Re-scale the branch length of the tree so that a BM running
#' on the new tree produces the same observations at the tips than an OU with
#' parameter alpha.
#'
#' @param phylo A phylogenetic tree of class \code{\link[ape]{phylo}}, with branch
#' lengths.
#' @param alp Value of the selection strength.
#'
#' @return phylo The same phylogenetic tree, with transformed branch lengths.
#'
#' @export
# 25/08/15 - Multivariate
##
# Rescale the branch lengths of `phylo` so that a BM on the rescaled tree
# yields the same tip covariances as an OU with selection strength `alp` on
# the original tree. With alp == 0 (BM case) the tree is returned untouched.
transform_branch_length <- function(phylo, alp){
  if (alp == 0) return(phylo)
  depths <- node.depth.edgelength(phylo)
  h_tree <- depths[1]
  # Constant factor shared by every edge (and the root edge).
  scaling <- 1 / (2 * alp) * exp(- 2 * alp * h_tree)
  # One transformed length per edge, from the depths of its two endpoints
  # (edge[, 1] = parent node, edge[, 2] = child node).
  parent_depth <- depths[phylo$edge[, 1]]
  child_depth <- depths[phylo$edge[, 2]]
  phylo$edge.length <- scaling * (exp(2 * alp * child_depth) - exp(2 * alp * parent_depth))
  # Rescale the root edge too, if the tree has one.
  if (!is.null(phylo$root.edge)){
    phylo$root.edge <- scaling * phylo$root.edge
  }
  phylo
}
##
#' @title Scale variance and selection strength from a linear transform
#'
#' @description Used for process equivalencies on re-scaled trees.
#'
#' @param params Parameters list
#' @param f Factor of the linear transform. If t' = f * t, the function takes
#' parameters from phylo' back to phylo.
#'
#' @return re-scaled parameters
#'
#' @keywords internal
#'
##
# Rescale the variance and selection strength of a parameter list by the
# linear-transform factor `f` (used for process equivalencies on rescaled
# trees). Fields that are absent or NULL are left untouched.
scale_params <- function(params, f){
  for (field in c("variance", "selection.strength")) {
    if (!is.null(params[[field]])) {
      params[[field]] <- f * params[[field]]
    }
  }
  params
}
##
#' @title Split independent parameters into a list of parameters
#'
#' @description \code{split_params_independent} split a params object for a
#' process with p independent traits into p params objects.
#' The reverse operation is done by \code{merge_params_independent}
#'
#' @param params parameters
#'
#' @return A list of p parameters
#'
#' @keywords internal
#'
##
# Split a p-dimensional parameter object for independent traits into a list
# of p univariate parameter objects (inverse of merge_params_independent).
# Diagonal entries of variance / selection strength / root variance become
# the scalar parameters of each trait; row l of the shift values becomes
# trait l's shift vector.
split_params_independent <- function(params){
  # Trait dimension is read off the variance matrix.
  p <- dim(params$variance)[1]
  params_split <- vector(mode = "list", length = p)
  for (l in 1:p){
    # Start from a full copy so unrelated fields are carried over.
    params_split[[l]] <- params
    params_split[[l]]$variance <- params$variance[l, l]
    params_split[[l]]$selection.strength <- params$selection.strength[l, l]
    if (!is.null(params$shifts$edges)){
      # Row l of the p x K shift-values matrix.
      params_split[[l]]$shifts$values <- params$shifts$values[l, ]
    }
    # Root-state components are only split when they carry no NA
    # (anyNA(NULL) is FALSE, so a NULL field stays NULL via NULL[l]).
    if (!anyNA(params$root.state$value.root)){
      params_split[[l]]$root.state$value.root <- params$root.state$value.root[l]
    }
    if (!anyNA(params$root.state$exp.root)){
      params_split[[l]]$root.state$exp.root <- params$root.state$exp.root[l]
    }
    if (!anyNA(params$root.state$var.root)){
      params_split[[l]]$root.state$var.root <- params$root.state$var.root[l, l]
    }
    params_split[[l]]$optimal.value <- params$optimal.value[l]
    # Re-run the dimension checks in dimension 1 to normalize shapes.
    params_split[[l]] <- check_dimensions(1,
                                          params_split[[l]]$root.state,
                                          params_split[[l]]$shifts,
                                          params_split[[l]]$variance,
                                          params_split[[l]]$selection.strength,
                                          params_split[[l]]$optimal.value)
    # Propagate the dimension attribute, if the input carried one.
    if (!is.null(attr(params, "p_dim"))) attr(params_split[[l]], "p_dim") <- 1
  }
  return(params_split)
}
##
#' @title Merge a list of independent parameters into into one parameter
#'
#' @description \code{merge_params_independent} merges a list of p params
#' objects into one param object of dimension p
#' The reverse operation is done by \code{split_params_independent}
#'
#' @param params_split a list of parameters
#'
#' @return A parameter object
#'
#' @keywords internal
#'
##
# Merge a list of p univariate parameter objects into one p-dimensional
# parameter object (inverse of split_params_independent). Scalar variances,
# selection strengths and root variances are placed on matrix diagonals;
# per-trait shift vectors are stacked into a p x K matrix.
merge_params_independent <- function(params_split){
  p <- length(params_split)
  # Use the first element as a template; p == 1 needs no merging.
  params <- params_split[[1]]
  if (p > 1){
    params$variance <- diag(sapply(params_split, function(z) return(as.vector(z$variance))))
    if (!is.null(params$selection.strength)){
      params$selection.strength <- diag(sapply(params_split, function(z) return(z$selection.strength)))
    }
    # Shift values: one row per trait. The K == 1 case is handled
    # separately because sapply drops to a plain vector there.
    if (length(params$shifts$edges) > 1){
      params$shifts$values <- t(sapply(params_split, function(z) return(z$shifts$values)))
    } else if (length(params$shifts$edges) == 1) {
      params$shifts$values <- sapply(params_split, function(z) return(z$shifts$values))
      dim(params$shifts$values) <- c(p,1)
    } else {
      # No shifts: empty p x 0 matrix.
      params$shifts$values <- matrix(0, p, 0)
    }
    # Root-state components are merged only when free of NA (mirrors the
    # guards in split_params_independent).
    if (!anyNA(params$root.state$value.root)){
      params$root.state$value.root <- sapply(params_split, function(z) return(z$root.state$value.root))
    }
    if (!anyNA(params$root.state$exp.root)){
      params$root.state$exp.root <- sapply(params_split, function(z) return(z$root.state$exp.root))
    }
    if (!anyNA(as.vector(params$root.state$var.root))){
      params$root.state$var.root <- diag(sapply(params_split, function(z) return(as.vector(z$root.state$var.root))))
    }
    if (!is.null(params$optimal.value)){
      params$optimal.value <- sapply(params_split, function(z) return(z$optimal.value))
    }
  }
  # Normalize all shapes in dimension p.
  params <- check_dimensions(p,
                             params$root.state,
                             params$shifts,
                             params$variance,
                             params$selection.strength,
                             params$optimal.value)
  # Propagate the dimension attribute, if the inputs carried one.
  if (!is.null(attr(params_split[[1]], "p_dim"))) attr(params, "p_dim") <- p
  return(params)
}
400e1cafca76487c5d413e572bfa3f179c035485 | kalibera/rexp | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | hxfeng/R-3.1.2 | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | andy-thomason/little_r | test/R-tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | mit |
# Code for analysing and visualising traffic data
# License: FreeBSD, http://en.wikipedia.org/wiki/BSD_licenses
# Copyright 2012 Juuso Parkkinen <juuso.parkkinen@gmail.com> and Antti Poikola <antti poikola@gmail.com>. All rights reserved.
library(ggplot2)
library(reshape)
# Folder-name pattern selecting which monthly data folders to process
# (presumably Finnish "kuu(kausi)" = month -- confirm against folder names).
search.pattern <-"kuu" # pattern used to search for folder names which will be included in the analysis
# NOTE(review): absolute, machine-specific paths; the commented relative
# paths below are the portable alternative.
data.folder <- "/Users/apoikola/Documents/github/fillarilaskennat/data/"
output.folder <- "/Users/apoikola/Documents/github/fillarilaskennat/output/"
# data.folder <- "./data/"
# output.folder <- "./output/"
# TODO
# Add place names and coordinates (if found)
# Visualize cycling counters on a map
# coordinate.file <- paste(data.folder, "Laskentapisteet 2011 - Laskentapisteet 2011 FIX.csv", sep = "")
# laskentapisteet <- read.csv(coordinate.file)
# laskentapisteet$KX <- as.numeric(gsub(",", ".", laskentapisteet$KX))
# laskentapisteet$KY <- as.numeric(gsub(",", ".", laskentapisteet$KY))
# hel.plot <- ggplot(hel.coastline.df, aes(x=long, y=lat)) + geom_polygon(aes(group=group), fill="grey80")
# hel.plot <- hel.plot + xlim(bbox.Helsinki.center["x","min"], bbox.Helsinki.center["x","max"]) + ylim(bbox.Helsinki.center["y","min"], bbox.Helsinki.center["y","max"])
# hel.plot3 <- hel.plot2 + geom_text(data=laskentapisteet, aes(x=KX, y=KY, label=X), size=2, hjust=1, vjust=1, angle=-45)
# hel.plot3 <- hel.plot3 + geom_point(data=laskentapisteet, aes(x=KX, y=KY, label=X), colour="red")
# ggsave("HSOpen4/Liikkennelaskentapisteet2_20120309.pdf", plot=hel.plot3, width=8, height=8)
#Clean up the raw data files
# Site-name lookup table read from the readme, skipping 17 header lines;
# fields are split on "=" (V1 = numeric site prefix, V2 = site name).
loc.names <- read.table(paste(data.folder, "pyoralaskennat_readme.txt", sep=""), skip=17, sep="=", fileEncoding="ISO-8859-1")
# One folder per month of raw counter data.
month.folders <- dir(data.folder, pattern=search.pattern)
# Accumulator for all parsed observations (grown with rbind in the loop).
final.df <- c()
# Parse every raw counter file of every monthly folder into a long-format
# data frame (site, date, hour, count) accumulated in final.df.
# NOTE(review): 1:length(...) loops misbehave when the vector is empty
# (1:0 iterates); the inner guard below catches the empty-folder case.
for (mi in 1:length(month.folders)) {
  month.files <- dir(paste(data.folder, month.folders[mi],sep=""), pattern="\\.2") # pattern needed to skip corrupted files which filename ends with .104
  for (fi in 1:length(month.files)) {
    if (length(month.files)==0){
      print(paste("no useful files in folder",month.folders[mi]))
      break
    }
    # Extract site number and name
    filename <- paste(data.folder, month.folders[mi], "/", month.files[fi],sep="")
    # Site number = file basename; the hard-coded [9] assumes the depth of
    # the absolute data.folder path above -- TODO confirm if paths change.
    site.number <- as.numeric(unlist(strsplit(unlist(strsplit(filename, split="\\."))[1], split="/"))[9])
    # Site prefix is normally the first 3 digits; prefix 117 uses 4 digits.
    site.init <- as.numeric(substr(as.character(site.number), 1, 3))
    if (site.init==117)
      site.init <- as.numeric(substr(as.character(site.number), 1, 4))
    site.name <- as.vector(loc.names$V2[match(site.init, loc.names$V1)])
    # Read the raw fixed-format report as one string per line.
    dat.raw <- scan(filename, what=character(), sep="\n", strip.white=TRUE)
    # Separate locations
    loc.rows <- grep("LOCATION", dat.raw)
    loc.list <- list()
    # 2007_06 12001803.207 has only one week of data
    if (length(loc.rows)==1) {
      loc.list[[1]] <- dat.raw[loc.rows[1]:length(dat.raw)]
    }
    else {
      # Slice the file into one chunk per LOCATION header; the last chunk
      # runs to the end of the file.
      for (li in 1:(length(loc.rows)-1)){
        loc.list[[li]] <- dat.raw[loc.rows[li]:(loc.rows[li+1]-1)]
        loc.list[[li+1]] <- dat.raw[loc.rows[li+1]:length(dat.raw)]
      }
    }
    # If clause needed to skip corrupted files recognized by the second location row being longer than 79 characters i.e. 2005_09 11803512.205
    if (nchar(dat.raw[loc.rows[1]])<79){
      # Extract data from each location
      loc.mat <- c()
      for (li in 1:length(loc.list)) {
        date.row <- grep("DATE", loc.list[[li]])
        # Check whether tabs or spaces were used as separator
        if (length(grep("\t", loc.list[[li]][date.row]))>0) {
          # Fix first date row
          loc.list[[li]][date.row] <- gsub("\t/", "/", loc.list[[li]][date.row])
          # Change then all "\t" to " "
          loc.list[[li]][date.row:(date.row+25)] <- gsub("\t", " ", loc.list[[li]][date.row:(date.row+25)])
        }
        # Second token of the DATE row is the first day of the week.
        date.temp <- unlist(strsplit(loc.list[[li]][date.row], split=" "))
        date.temp <- date.temp[-which(date.temp=="")]
        # Build the 7 dates of the week. NOTE(review): gsub("00", "20")
        # presumably repairs a "00xx" century prefix, but would also mangle
        # any other "00" in the date string -- verify on the raw data.
        dates <- gsub("00", "20", as.character(as.Date(date.temp[2])+0:6))
        # 7 days x 24 hours matrix of counts for this location.
        dat.mat <- matrix(NA, nrow=7, ncol=24, dimnames=list(dates, 0:23))
        for (hi in 1:24) {
          # Hour rows start two lines below the DATE row.
          hrow <- date.row + hi + 1
          h.temp <- unlist(strsplit(loc.list[[li]][hrow], split=" "))
          if (!is.na(h.temp)[1]) { # Needed as e.g. 1171001.211 in January is not complete
            if (any(h.temp=="")) # Needed for '\t' -files
              h.temp <- h.temp[-which(h.temp=="")]
          }
          # Tokens 2..8 are the seven daily counts; non-numeric tokens
          # silently become NA.
          dat.mat[,hi] <- suppressWarnings(as.numeric(h.temp[2:8]))
        }
        loc.mat <- rbind(loc.mat, dat.mat)
      }
      # Melt the week x hour matrix into long format and accumulate.
      loc.df <- data.frame(LocationID1=site.number, LocationID2=site.init, melt(loc.mat))
      names(loc.df)[3:5] <- c("Date", "Hour", "Value")
      final.df <- rbind(final.df, loc.df)
    }
  }
  # Inside for loop this writes monthly CSV files
  # if (!is.null(final.df)){
  #   # Reorder based on 1) Location, 2) Date, 3) Hour
  #   final.df <- final.df[order(final.df$LocationID1, final.df$Date, final.df$Hour),]
  #   # Add weekday
  #   final.df$WeekDay <- factor(weekdays(as.Date(final.df$Date)))
  #   final.df <- final.df[c(1:3, 6, 4:5)]
  #
  #   #save(final.df, file=paste(output.folder, paste(month.folders[mi],".RData", sep = "")))
  #   write.csv(final.df, file=paste(output.folder, paste(month.folders[mi],".csv", sep = "")))
  #
  #   final.df <- c()
  # }
}
# Sort, annotate and write out the accumulated observations, then reset
# the accumulator. Skipped entirely when nothing was parsed.
if (!is.null(final.df)){
  # Reorder based on 1) Location, 2) Date, 3) Hour
  final.df <- final.df[order(final.df$LocationID1, final.df$Date, final.df$Hour),]
  # Add weekday
  final.df$WeekDay <- factor(weekdays(as.Date(final.df$Date)))
  # Move the WeekDay column (position 6) next to Date/Hour.
  final.df <- final.df[c(1:3, 6, 4:5)]
  #save(final.df, file=paste(output.folder, paste(month.folders[mi],".RData", sep = "")))
  # Single CSV covering all processed months.
  write.csv(final.df, file=paste(output.folder, paste(search.pattern,"_20140927.csv", sep = "")))
  final.df <- c()
}
| 5,925 | bsd-2-clause |
400e1cafca76487c5d413e572bfa3f179c035485 | o-/Rexperiments | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | andy-thomason/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | jeffreyhorner/R-Judy-Arrays | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | abiyug/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | glycerine/bigbird | r-3.0.2/tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | bsd-2-clause |
400e1cafca76487c5d413e572bfa3f179c035485 | CodeGit/SequenceImp | dependencies-bin/windows/bin/R/tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-3.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | lajus/customr | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
## NOTE(review): regression-test script from the R sources (tests/simple-true.R).
## Every top-level expression is expected to auto-print TRUE when the file is
## run; a FALSE (or an error outside try()) flags a regression.  Do not
## reorder lines -- later tests reuse objects (x, m, ff, l2, x30, ...)
## created by earlier ones, and several lines deliberately emit warnings.
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
     if(is.R()) x[, -1]# not even possible in S+
     else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
## apply() on zero-extent matrices/arrays:
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
## (assigning twice to the same new name "2" must create only one element)
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
          alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
5e22c80e75528b35f99ef70efab51a8948b369b0 | henriquepgomide/drinkless | feasibility.R | # WHO FEASIBILITY STUDY ============================================================================
# Objectives --
# 1 - Compare alcohol consumption after 6 weeks among - intention to treat and treatment completers.
# 2 - Create a model to predict success.
# Libraries
library(car) # Recode
library(lattice) # for graphs
library(caret) # Modelling
library(ez) # for traditional anova ezANOVA
library(reshape2) # Melt function
library(ggplot2) # Graphs
library(nnet) # Regression
# Data import --------------------------------------------------------------
# Both CSVs encode missing data as "NA" or 99 and use "," as the decimal
# separator (Brazilian locale).
drinkLess_cc <- read.csv("banco_artigo.csv", na.strings = c("NA",99), dec=",")
# Drop columns 2 and 7:15 by position -- TODO(review): confirm which
# variables these are against the raw file's codebook.
drinkLess_cc <- drinkLess_cc[,-c(2,7,8,9,10,11,12,13,14,15)]
drinkLess_full <- read.csv("banco_artigo_full.csv", na.strings = c("NA",99), dec=",")
# Final dataframe: full outer join on the participant id, keeping rows
# present in either file.
drinkLess <- merge(drinkLess_full, drinkLess_cc, by = "client_id", all = TRUE)
## Clean unused objects
rm(drinkLess_cc); rm(drinkLess_full)
# Data preprocessing
## Recode vars to numeric (selected columns, by position).
## NOTE(review): if any of these columns were read as factors,
## as.numeric() would return level codes rather than the underlying
## values -- verify against the CSVs.
for (i in c(3,5,9,10,11,12,13,15)) {
  drinkLess[, i] <- as.numeric(drinkLess[,i])
}
## Removing outliers -------------------------------------------------------
## Censor implausible values: any observation more than `k` standard
## deviations from its column mean is set to NA.  The original repeated the
## same |z| > 3 rule verbatim for five columns; it is factored out here so
## the rule lives in one place (and the cutoff is parameterized).
censor_outliers <- function(x, k = 3) {
  z <- as.vector(scale(x))          # z-scores; NA inputs give NA z
  x[!is.na(z) & abs(z) > k] <- NA   # strict inequality, as in the original
  x
}
outlier_cols <- c("alcohol_pre", "alcohol_pos", "login_times",
                  "ave_login_time", "sum_login")
drinkLess[outlier_cols] <- lapply(drinkLess[outlier_cols], censor_outliers)
## Fix reduced_factor ------------------------------------------------------
## Direction of change in consumption: "-" = reduced, "+" = increased;
## no-change ("0") and NA are left as-is.
## BUG FIX: the original overwrote negatives with "-" first, which coerced
## the whole column to character, so the subsequent `> 0` test compared
## STRINGS (e.g. "12" > "0"), which only worked by lexicographic accident
## and is locale-fragile.  Classify on the numeric difference before any
## character coercion.  (The column is now always character, where the
## original left it numeric in the no-negatives corner case.)
consumption_change <- drinkLess$alcohol_pos - drinkLess$alcohol_pre
drinkLess$reduced_factor <- as.character(consumption_change)
drinkLess$reduced_factor[!is.na(consumption_change) & consumption_change < 0] <- "-"
drinkLess$reduced_factor[!is.na(consumption_change) & consumption_change > 0] <- "+"
## Recode vars to appropriate factors (numeric codes -> labels)
### audit risk category
drinkLess$audit <- Recode(drinkLess$audit, "0 = 'Low risk'; 1 = 'Risk'; 2 = 'Dependence'")
### gender
drinkLess$gender <- Recode(drinkLess$gender, "0 = 'Men'; 1 = 'Women'")
### educational level
drinkLess$school <- Recode(drinkLess$school, "0 = 'High School'; 1 = 'College'")
### rcq (readiness-to-change stage)
drinkLess$rcq_factor <- Recode(drinkLess$rcq_factor, "0 = 'Pre'; 1 = 'Con'; 2 = 'Action'")
### completed the 6-week follow-up?
drinkLess$completed <- Recode(drinkLess$completed, "0 = 'Yes'; 1 = 'No'")
# Save data.frame
write.csv(drinkLess, "drinkless_R.csv")
### EXPLORATORY ANALYSIS ====
# NOTE(review): re-reading the CSV just written adds an unnamed row-number
# column and re-derives column types from text; the factor recodes above
# survive only as plain strings after this round-trip.
drinkLess <- read.csv("drinkless_R.csv")
# Attrition rate after 6 weeks
table(drinkLess$completed) # 85.6%
# TREATMENT COMPLETERS ----
# Completers
treatComp <- subset(drinkLess, drinkLess$completed == "Yes")
# Program Evaluation
mean(treatComp$prog_evaluation) # 8.45
# NOTE(review): sd() uses na.rm = TRUE but the mean() above does not; if
# prog_evaluation contains NA the mean prints NA -- verify.
sd(treatComp$prog_evaluation, na.rm = TRUE) # 1.60
# Distribution checks (lattice), conditioned on AUDIT group and gender:
# pre
histogram(~ alcohol_pre | audit * gender, data = treatComp)
qqmath(~ alcohol_pre | audit * gender, data = treatComp)
# pos
histogram(~ alcohol_pos | audit * gender, data = treatComp)
qqmath(~ alcohol_pos | audit * gender, data = treatComp)
# average login time
histogram(~ ave_login_time | audit * gender, data = treatComp)
qqmath(~ ave_login_time | audit * gender, data = treatComp)
# sum logins
histogram(~ sum_login | audit * gender, data = treatComp)
qqmath(~ sum_login | audit * gender, data = treatComp)
# login times
histogram(~ login_times | audit * gender, data = treatComp)
qqmath(~ login_times | audit * gender, data = treatComp)
# Graph analyses suggest the partition of the data using audit as a classifier. Low risk users should be excluded due to floor effects. Standard-doses, login_times, and ave_login_time must be normalized first to proceed to comparisons.
# Check non-zero values
nearZeroVar(drinkLess, saveMetrics=TRUE)
# Table
table(drinkLess$reduced_factor, drinkLess$audit)
hist(drinkLess$sum_login)
hist(log10(drinkLess$alcohol_pos)+1)
#### OBJECTIVE 1 #######################################################################################
# 1 - Compare alcohol consumption after 6 weeks among - intention to treat and treatment completers. ---
# Include age, gender and group in analysis
# Treatment Completers ----
# Drop participant 2302 -- presumably an outlier or invalid record; TODO
# confirm why this id is excluded (the ITT analysis below drops the same id).
treatCompA <- subset(treatComp, treatComp$client_id != 2302 )
# Keep only id, demographics, AUDIT group and the two alcohol measures
# (columns dropped by position).
treatCompA <- treatCompA[, -c(2,7,8,9,10,11,13,14,15,16,17,18)]
# Wide -> long: one row per participant x time point for repeated measures.
treatMelted <- melt(treatCompA, id = c("client_id", "gender", "age", "audit"), measured = c("alcohol_pre", "alcohol_pos"))
names(treatMelted) <- c("id", "sex", "age", "group", "time", "value")
## Normalization of variables
# log(x + 1) transform to reduce the right skew of drink counts
treatMelted$value <- log(treatMelted$value + 1)
qqnorm(treatMelted$value)
# Traditional ANOVA approach
## Set contrasts: one planned contrast comparing the two drinking-risk
## groups (Dependence, Risk) against the Low risk group.  Factor levels
## sort alphabetically as Dependence, Low risk, Risk, hence weights
## (1, -2, 1).
treatMelted$group <- as.factor(treatMelted$group)
alcoholvsLow <- c(1,-2, 1)
## BUG FIX: the original passed cbind(alcoholvsLow, depvsLow, depvsRisk,
## riskvsLow), but depvsLow, depvsRisk and riskvsLow are never defined
## anywhere in this script, so this assignment stopped with
## "object 'depvsLow' not found".  Use the single defined contrast,
## matching the intention-to-treat analysis further below.
contrasts(treatMelted$group, 1) <- cbind(alcoholvsLow)
# Traditional Anova (mixed design: time within subjects; group, sex between)
alcoholModel <- ezANOVA(data = treatMelted, dv = .(value), wid = .(id), within = .(time), between = .(group, sex), detailed = TRUE, type = 3, return_aov = TRUE)
# Print results
alcoholModel
# Graphs -- back-transform to the raw drink scale and plot pre/pos means
# with normal-theory CIs, faceted by group and by sex.
# Data frame
plotdf <- treatMelted
plotdf$group <- factor(plotdf$group, levels=c("Dependence", "Risk", "Low risk"))
plotdf$time <- Recode(plotdf$time, "'alcohol_pre' = 'Pre'; 'alcohol_pos' = 'Pos'")
plotdf$time <- factor(plotdf$time, levels=c("Pre", "Pos"))
# undo the log(x + 1) transform applied above
plotdf$value <- exp(plotdf$value) - 1
# vs. Group
bar2 <- ggplot(plotdf, aes(time, value, fill = group))
bar2 + stat_summary(fun.y = mean, geom = "bar", position = "dodge", colour="black") + stat_summary(fun.data = mean_cl_normal, geom = "errorbar", width = .2) + facet_wrap( ~ group) + labs(x = "Time", y = "Mean number of standard Drinks", fill = "group") + theme_bw(base_size = 18) + scale_fill_manual(values=c("#777777","#e0e0e0","#FFFFFF")) + theme(legend.position = "none")
# vs. Sex
bar3 <- ggplot(plotdf, aes(time, value, fill = sex))
bar3 + stat_summary(fun.y = mean, geom = "bar", position = "dodge", colour="black") + stat_summary(fun.data = mean_cl_normal, geom = "errorbar", width = .2) + facet_wrap( ~ sex) + labs(x = "Time", y = "Mean number of Standard Drinks", fill = "group") + theme_bw(base_size = 18) + scale_fill_manual(values=c("#777777","#e0e0e0","#FFFFFF")) + theme(legend.position = "none")
# Intention to treat #----
# Assumption - missing values as failure (baseline observation carried
# forward: a missing follow-up is imputed as "no change").
## Create a dataframe (same participant exclusion as the completer analysis)
itt <- subset(drinkLess, drinkLess$client_id != 2302)
ittA <- itt[, -c(1,3,8,10,11,12,14,15,16)]
## Impute missing as no change
ittA$alcohol_pos <- ifelse(is.na(ittA$alcohol_pos), ittA$alcohol_pre, ittA$alcohol_pos)
# Prepare data to repeated measures analyses (wide -> long)
ittMelted <- melt(ittA, id = c("client_id", "gender", "age", "audit", "rcq_factor"), measured = c("alcohol_pre", "alcohol_pos"))
# Pick good names for dataframe
names(ittMelted) <- c("id", "sex", "age", "group", "rcq", "time", "value")
## Normalization of variables
# log(x + 1) transform, as in the completer analysis
ittMelted$value <- log(ittMelted$value + 1)
qqnorm(ittMelted$value)
# Traditional ANOVA approach
## Set contrasts: (Dependence + Risk) vs Low risk, weights (1, -2, 1) on
## alphabetically sorted levels
ittMelted$group <- as.factor(ittMelted$group)
alcoholvsLow <- c(1,-2, 1)
contrasts(ittMelted$group, 1) <- cbind(alcoholvsLow)
# Remove NA's
ittMelted <- subset(ittMelted, complete.cases(ittMelted))
# Traditional Anova (mixed design: time within subjects; group, sex between)
itt_alcoholModel <- ezANOVA(data = ittMelted, dv = .(value), wid = .(id), within = .(time), between = .(group, sex), detailed = TRUE, type = 3, return_aov = TRUE)
# Print results
itt_alcoholModel
# Graphs -- same plots as the completer analysis, on the ITT data
# Data frame
plotdf <- ittMelted
plotdf$group <- factor(plotdf$group, levels=c("Dependence", "Risk", "Low risk"))
plotdf$time <- Recode(plotdf$time, "'alcohol_pre' = 'Pre'; 'alcohol_pos' = 'Pos'")
plotdf$time <- factor(plotdf$time, levels=c("Pre", "Pos"))
plotdf$value <- exp(plotdf$value) - 1
# vs. Group
bar2 <- ggplot(plotdf, aes(time, value, fill = group))
bar2 + stat_summary(fun.y = mean, geom = "bar", position = "dodge", colour="black") + stat_summary(fun.data = mean_cl_normal, geom = "errorbar", width = .2) + facet_wrap( ~ group) + labs(x = "Time", y = "Mean number of standard Drinks", fill = "group") + theme_bw(base_size = 18) + scale_fill_manual(values=c("#777777","#e0e0e0","#FFFFFF")) + theme(legend.position = "none")
# vs. Sex
bar3 <- ggplot(plotdf, aes(time, value, fill = sex))
bar3 + stat_summary(fun.y = mean, geom = "bar", position = "dodge", colour="black") + stat_summary(fun.data = mean_cl_normal, geom = "errorbar", width = .2) + facet_wrap( ~ sex) + labs(x = "Time", y = "Mean number of Standard Drinks", fill = "group") + theme_bw(base_size = 18) + scale_fill_manual(values=c("#777777","#e0e0e0","#FFFFFF")) + theme(legend.position = "none")
#### OBJECTIVE 2 #######################################################################################
# 2 - Create a model to explain success. ###############################################################
# Build the regression frame; drop unused columns by position and set the
# reference level of each categorical predictor explicitly.
drinkLessReg <- drinkLess[,-c(1,8,11,14,15,16)]
drinkLessReg$completed <- factor(drinkLessReg$completed, levels = c("Yes", "No"))
drinkLessReg$completed <- relevel(drinkLessReg$completed, "No")
drinkLessReg$audit <- factor(drinkLessReg$audit, levels = c("Dependence", "Low risk", "Risk"))
drinkLessReg$audit <- relevel(drinkLessReg$audit, "Low risk")
## BUG FIX: the original built rcq_factor from drinkLessReg$audit
## (copy-paste error).  audit holds Dependence/Low risk/Risk, none of which
## match the levels Action/Con/Pre, so the column became all-NA.  Source it
## from the rcq_factor column itself.
drinkLessReg$rcq_factor <- factor(drinkLessReg$rcq_factor, levels = c("Action", "Con", "Pre"))
drinkLessReg$rcq_factor <- relevel(drinkLessReg$rcq_factor, "Pre")
drinkLessReg$gender <- factor(drinkLessReg$gender, levels = c("Men", "Women"))
drinkLessReg$gender <- relevel(drinkLessReg$gender, "Men")
# Reproducible train/test split.
set.seed(666)
# Partitioning data: 75% training / 25% testing, stratified on the outcome.
inTrain <- createDataPartition(y = drinkLessReg$completed, p = .75, list = FALSE)
training <- drinkLessReg[inTrain,] # training
testing <- drinkLessReg[-inTrain,] # testing
# Pairwise scatterplots of candidate predictors against completion status.
featurePlot(x=training[,c("age","alcohol_pre", "login_times", "ave_login_time", "sum_login","gender")], y = training$completed, plot="pairs")
# Nested logistic models: intercept-only -> + baseline drinking -> + app
# usage, demographics and AUDIT group.
model0 <- glm(completed ~ 1, data = training, family=binomial())
model1 <- update(model0, .~. + alcohol_pre)
model2 <- update(model1, .~. + sum_login + age + gender + audit)
# summary
summary(model0); summary(model1); summary(model2)
# AIC for the three nested models (lower is better)
model0$aic; model1$aic; model2$aic
# Odds ratios and their 95% profile-likelihood CIs for the full model
round(cbind(exp(model2$coefficients)),3)
round(exp(confint(model2)),3)
# Pseudo R^2 analogues for a logistic regression model.
#
# Args:
#   LogModel: a fitted binomial glm (anything carrying $deviance,
#     $null.deviance and $fitted.values components).
# Prints the Hosmer-Lemeshow, Cox-Snell and Nagelkerke pseudo R^2 values
# and (new, backward-compatible) returns them invisibly as a named list so
# they can also be used programmatically.
# BUG FIX: output label "Lemeshor" corrected to "Lemeshow".
logisticPseudoR2s <- function(LogModel){
  dev <- LogModel$deviance
  nullDev <- LogModel$null.deviance
  modelN <- length(LogModel$fitted.values)
  R.l <- 1 - dev / nullDev                        # Hosmer & Lemeshow
  R.cs <- 1-exp(-(nullDev - dev) / modelN)        # Cox & Snell
  R.n <- R.cs / ( 1 - ( exp (-(nullDev / modelN)))) # Nagelkerke (rescaled C&S)
  cat("Pseudo R^2 for logistic regression \n")
  cat("Hosmer and Lemeshow R^2 ", round(R.l,3), "\n")
  cat("Cox and Snell R^2 ", round(R.cs,3), "\n")
  cat("Nagelkerke R^2 ", round(R.n,3), "\n")
  invisible(list(hosmer_lemeshow = R.l, cox_snell = R.cs, nagelkerke = R.n))
}
# Pseudo R^2 for the full model
logisticPseudoR2s(model2)
# NOTE(review): predict() on a glm defaults to type = "link", so `teste`
# holds continuous log-odds; tabulating it against the observed outcome
# yields one row per distinct prediction.  Probably type = "response" plus
# a 0.5 cutoff was intended -- confirm before relying on this table.
teste <- predict(model2, testing)
table(teste, testing$completed)
# Caret Package ---------------
# Same predictor set fitted through caret with kNN imputation of missing
# values; method = "multinom" reduces to logistic regression for a binary
# outcome.
caretModel <- train(completed ~ alcohol_pre + sum_login + age + gender + audit, data = training, preProcess = "knnImpute", method="multinom")
predictedValues <- predict(caretModel, newdata = testing)
# Check Model Accuracy
confusionMatrix(predictedValues, testing$completed)
caretModel$pred
# Deviance comparison of the nested models
anova(model0, model1, model2)
nearZeroVar(drinkLessReg, saveMetrics = TRUE)
str(training)
| 11,313 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | skyguy94/R | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
## NOTE(review): regression-test script from the R sources (tests/simple-true.R).
## Every top-level expression is expected to auto-print TRUE when the file is
## run; a FALSE (or an error outside try()) flags a regression.  Do not
## reorder lines -- later tests reuse objects (x, m, ff, l2, x30, ...)
## created by earlier ones, and several lines deliberately emit warnings.
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
     if(is.R()) x[, -1]# not even possible in S+
     else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
## apply() on zero-extent matrices/arrays:
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
## (assigning twice to the same new name "2" must create only one element)
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
          alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | limeng12/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
## NOTE(review): regression-test script from the R sources (tests/simple-true.R).
## Every top-level expression is expected to auto-print TRUE when the file is
## run; a FALSE (or an error outside try()) flags a regression.  Do not
## reorder lines -- later tests reuse objects (x, m, ff, l2, x30, ...)
## created by earlier ones, and several lines deliberately emit warnings.
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
     if(is.R()) x[, -1]# not even possible in S+
     else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
## apply() on zero-extent matrices/arrays:
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
## (assigning twice to the same new name "2" must create only one element)
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
          alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | jagdeesh109/RRO | R-src/tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
## NOTE(review): regression-test script from the R sources (tests/simple-true.R).
## Every top-level expression is expected to auto-print TRUE when the file is
## run; a FALSE (or an error outside try()) flags a regression.  Do not
## reorder lines -- later tests reuse objects (x, m, ff, l2, x30, ...)
## created by earlier ones, and several lines deliberately emit warnings.
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
     if(is.R()) x[, -1]# not even possible in S+
     else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
## apply() on zero-extent matrices/arrays:
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
## (assigning twice to the same new name "2" must create only one element)
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
          alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | jeffreyhorner/R-Array-Hash | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | cxxr-devel/cxxr-svn-mirror | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | ArcherCraftStore/ArcherVMPeridot | R-3.1.0/tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | apache-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | ChiWang/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | LeifAndersen/R | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | RevolutionAnalytics/RRO | R-src/tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | WelkinGuan/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | mirror/r | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | cmosetick/RRO | R-src/tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | nathan-russell/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | ArcherSys/ArcherSys | R/tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | mit |
400e1cafca76487c5d413e572bfa3f179c035485 | mathematicalcoffee/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | patperry/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
400e1cafca76487c5d413e572bfa3f179c035485 | hadley/r-source | tests/simple-true.R | ###---- ALL tests here should return TRUE !
###
###---- "Real" Arithmetic; Numerics etc --> ./arith-true.R
### mode checking, etc.
is.recursive(expression(1+3, 2/sqrt(pi)))# fix PR#9
## sum():
all(1:12 == cumsum(rep(1,12)))
x <- rnorm(127); sx <- sum(x); abs((sum(rev(x)) -sx)) < 1e-12 * abs(sx)
## seq():
typeof(1:4) == "integer" #-- fails for 0.2, 0.3,.., 0.9
## Check parsing with L suffix for integer literals.
typeof(1L) == "integer"
typeof(1000L) == "integer"
typeof(1e3L) == "integer"
typeof(1e-3L) == "double" # gives warning
1.L # gives warning
try(parse(text = "12iL")) # gives syntax error
all((0:6) == pi + ((-pi):pi))
all((0:7) == (pi+seq(-pi,pi, length=8))*7/(2*pi))
1 == as.integer(is.na(c(pi,NA)[2]))
1 == as.integer(is.nan(0/0))
## rev():
cc <- c(1:10,10:1) ; all(cc == rev(cc))
## dim[names]():
all(names(c(a=pi, b=1, d=1:4)) == c("a","b", paste("d", 1:4, sep="")))
##P names(c(a=pi, b=1, d=1:4))
ncb <- dimnames(cbind(a=1, yy=1:3))[[2]]
(!is.null(ncb)) && all(ncb == c("a","yy"))
all(cbind(a=1:2, b=1:3, c=1:6) == t(rbind(a=1:2, b=1:3, c=1:6)))
##P rbind(a=1:2, b=1:3, c=1:6)
all(dim(cbind(cbind(I=1,x=1:4), c(a=pi))) == 4:3)# fails in S+
a <- b <- 1:3
all(dimnames(cbind(a, b))[[2]] == c("a","b"))
## rbind PR#338
all(dim(m <- rbind(1:2, diag(2))) == 3:2)
all(m == c(1,1,0, 2,0,1))
## factor():
is.factor(factor(integer()))
all(levels(ordered(rev(gl(3,4)))) == 1:3)# coercion to char
all(levels(factor(factor(9:1)[3:5])) == 5:7)
## crossing bug PR#40
is.factor(ff <- gl(2,3) : gl(3,2)) && length(ff) == 6
all(levels(ff) == t(outer(1:2, 1:3, paste, sep=":")))
## from PR#5
ll <- c("A","B"); ff <- factor(ll); f0 <- ff[, drop=TRUE]
all(f0 == ff) && all(levels(ff) == ll) && is.factor(ff) && is.factor(f0)
### data.frame s :
## from lists [bug PR#100]
x <- NULL
x$x1 <- 1:10
x$x2 <- 0:9
all(dim(dx <- as.data.frame(x)) == c(10,2))
## Logicals: (S is wrong)
l1 <- c(TRUE,FALSE,TRUE)
(! as.logical(as.data.frame(FALSE)[,1]))
all(l1 == as.logical(as.data.frame(l1)[,1]))
## empty data.frames :
x <- data.frame(a=1:3)
x30 <- {
if(is.R()) x[, -1]# not even possible in S+
else structure(list(), row.names = paste(1:3), class = "data.frame")
}
all(dim(x30) == c(3,0))
x01 <- x[-(1:3), , drop = FALSE]
x00 <- x01[,-1]
all(dim(x01) == 0:1)
all(dim(x00) == 0)
all(dim(x) == dim(rbind(x, x01)))
## bugs up to 1.2.3 :
all(dim(x30) == dim(m30 <- as.matrix(x30)))
all(dim(x01) == dim(m01 <- as.matrix(x01)))
all(dim(x30) == dim(as.data.frame(m30)))
all(dim(x01) == dim(as.data.frame(m01)))
all(dim(x01) == dim( data.frame(m01)))
all(dim(x30) == dim( data.frame(m30)))
all(dim(x) == dim(cbind(x, x30)))
## up to 1.4.0 :
all(dim(x30) == dim( data.matrix(x30)))
all(dim(x00) == dim( data.matrix(x00)))
m0 <- matrix(pi, 0,3)
a302 <- array("", dim=c(3,0,2))
identical(apply(m0, 1, dim), NULL)
identical(apply(m0, 2, dim), NULL)
identical(apply(m0, 1,length), integer(0))
identical(apply(m0, 2,length), integer(3))
identical(apply(a302, 1, mode), rep("character",3))
## NO (maybe later?):
## identical(apply(a302, 2, mode), rep("character",0))
is.character(aa <- apply(a302, 2, mode)) && length(aa) == 0
identical(apply(a302, 3, mode), rep("character",2))
identical(apply(a302, 3, length),integer(2))
identical(apply(a302, 3, dim), matrix(as.integer(c(3,0)), 2 ,2))
identical(apply(a302, 1, dim), matrix(as.integer(c(0,2)), 2 ,3))
identical(apply(array(dim=3), 1,length), rep(1:1, 3))
identical(apply(array(dim=0), 1,length), rep(1:1, 0))# = integer(0)
### Subsetting
## bug PR#425
x <- matrix(1:4, 2, 2, dimnames=list(c("abc","ab"), c("cde","cd")))
y <- as.data.frame(x)
all(x["ab",] == c(2,4))
all(y["ab",] == c(2,4))
## from bug PR#447
x <- 1:2 ; x[c("2","2")] <- 4
all.equal(x, c(1:2, "2" = 4))
## stretching
l2 <- list(a=1, b=2)
l2["cc"] <- pi
l2[["d"]] <- 4
l2 $ e <- 55
all.equal(l2, list(a = 1, b = 2, cc = pi, d = 4, e = 55), tolerance = 0)
all.equal(l2["d"], list(d = 4))
l2$d == 4 && l2$d == l2[["d"]]
## bug in R <= 1.1
f1 <- y1 ~ x1
f2 <- y2 ~ x2
f2[2] <- f1[2]
deparse(f2) == "y1 ~ x2"
m <- cbind(a=1:2,b=c(R=10,S=11))
all(sapply(dimnames(m), length) == c(2,2))
## [[ for matrix:
m[[1,2]] == m[[3]] && m[[3]] == m[3] && m[3] == m[1,2]
## bug in R <= 1.1.1 : unclass(*) didn't drop the class!
## to be robust to S4 methods DON'T test for null class
## The test for attr(,"class") is valid, if essentially useless
d1 <- rbind(data.frame(a=1, b = I(TRUE)), new = c(7, "N"))
is.null(attr(unclass(d1$b), "class"))
## bugs in R 1.2.0
format(as.POSIXct(relR120 <- "2000-12-15 11:24:40")) == relR120
format(as.POSIXct(substr(relR120,1,10))) == substr(relR120,1,10)
## rank() with NAs (and ties)
x <- c(3:1,6,4,3,NA,5,0,NA)
rx <- rank(x)
all(rx == c(4.5, 3:2, 8, 6, 4.5, 9, 7, 1, 10))
rxK <- rank(x, na.last = "keep")
all(rx [rx <= 8] == na.omit(rxK))
all(rank(x, na.last = NA) == na.omit(rxK))
## as.list.function() instead of *.default():
identical(as.list(as.list),
alist(x = , ... = , UseMethod("as.list")))
| 4,971 | gpl-2.0 |
e55f266b35492a53326f232dafd79016d0a23705 | geoffrosen/hmp_mapping | requirements/Maaslin/R/Utility.R | #####################################################################################
#Copyright (C) <2012>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in the
#Software without restriction, including without limitation the rights to use, copy,
#modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
#and to permit persons to whom the Software is furnished to do so, subject to
#the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies
#or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# This file is a component of the MaAsLin (Multivariate Associations Using Linear Models),
# authored by the Huttenhower lab at the Harvard School of Public Health
# (contact Timothy Tickle, ttickle@hsph.harvard.edu).
#####################################################################################
# inlinedocs documentation stub for this file; it exists only to carry the
# ##author<< / ##description<< tags consumed by the inlinedocs package.
# NOTE(review): `pArgs` is not defined anywhere in this file, so calling this
# function would error -- presumably it is never invoked at runtime; confirm
# against the inlinedocs convention used by the rest of the project.
inlinedocs <- function(
##author<< Curtis Huttenhower <chuttenh@hsph.harvard.edu> and Timothy Tickle <ttickle@hsph.harvard.edu>
##description<< Collection of minor utility scripts
) { return( pArgs ) }
#source("Constants.R")
funcRename <- function(
### Shortens full clade strings into readable plot labels.
### A terminal clade that is "unclassified" or purely numeric (an OTU id) is
### ambiguous on its own, so it keeps its parent clade for context; any other
### feature is reduced to just its most terminal clade.
astrNames
### Character vector of feature names to shorten
){
  astrLabels <- c()
  for( strCurName in astrNames )
  {
    astrClades <- unlist( strsplit( strCurName, c_cFeatureDelimRex ) )
    iTerminal <- length( astrClades )
    # Keep the parent clade when the terminal clade alone is uninformative.
    fNeedsParent <- ( astrClades[iTerminal] == c_strUnclassified ) || !is.na( as.numeric( astrClades[iTerminal] ) )
    strLabel <- if( fNeedsParent )
    {
      paste( astrClades[( iTerminal - 1 ):iTerminal], collapse = c_cFeatureDelim )
    } else {
      astrClades[iTerminal]
    }
    astrLabels <- c( astrLabels, strLabel )
  }
  return( astrLabels )
  ### Character vector of modified names
}
funcBonferonniCorrectFactorData <- function
### Bonferroni correct a p-value for the number of factor-level comparisons.
### The multiplier is the number of unique levels minus one (at least 1).
(dPvalue,
### P-value to correct
vsFactors,
### Vector of factor values whose unique levels define the comparison count
fIgnoreNAs = TRUE
### When TRUE, string spellings of NA are not counted as levels
){
  vsLevels <- unique( vsFactors )
  if( fIgnoreNAs )
  {
    # Drop the common string spellings of NA before counting levels.
    vsLevels <- setdiff( vsLevels, c("NA","na","Na","nA") )
  }
  iComparisons <- max( 1, length( vsLevels ) - 1 )
  return( dPvalue * iComparisons )
  ### Numeric p-value that is correct for levels (excluding NA levels)
}
funcCalculateTestCounts <- function(
### Calculates the number of hypothesis tests performed during inference.
iDataCount,
### Count of features (data columns) tested
asMetadata,
### All metadata names under consideration
asForced,
### Metadata forced into every model (only relevant for all-v-all)
asRandom,
### Metadata treated as random effects (never tested directly)
fAllvAll
### TRUE when running the all-versus-all flow
){
  iRandomCount = length( intersect( asRandom, asMetadata ) )
  iForcedCount = length( setdiff( intersect( asForced, asMetadata ), asRandom ) )
  iTestedMetadata = length( asMetadata ) - iRandomCount
  # In the all-v-all flow the forced covariates are additionally excluded
  # from the tested set.
  if( fAllvAll )
  {
    iTestedMetadata = iTestedMetadata - iForcedCount
  }
  return( iTestedMetadata * iDataCount )
  ### Integer count of tests
}
funcGetRandomColors=function(
### Generates a vector of random, visibly bright hexadecimal colors.
### Each RGB channel is drawn uniformly from [0.33, 0.99] so colors are
### neither too dark nor fully saturated.
tempNumberColors = 1
### Number of colors to generate (0 yields an empty character vector)
){
  # seq_len() (rather than 1:n) makes a request for 0 colors return an empty
  # result instead of two colors, and vapply() guarantees a character vector
  # even for empty input (sapply() would return an empty list). The previous
  # version also carried a dead `adRet` accumulator that never grew; removed.
  return(vapply(seq_len(tempNumberColors), function(iColor){
    adRGB <- ( runif( 3 ) * 0.66 ) + 0.33
    rgb( adRGB[1], adRGB[2], adRGB[3] )
  }, character(1)))
  ### Character vector of "#RRGGBB" color strings
}
funcCoef2Col <- function(
### Searches through a dataframe and looks for a column that would match the coefficient
### by the name of the column or the column name and level appended together.
strCoef,
### String coefficient name
frmeData,
### Data frame of data
astrCols = c()
### Column names of interest (if NULL is given, all column names are inspected).
){
  #If the coefficient is the intercept there is no data column to return so return null
  if( strCoef %in% c("(Intercept)", "Intercept") ) { return( NULL ) }
  #Remove ` from coefficient
  strCoef <- gsub( "`", "", strCoef )
  #If the coefficient name is not in the data frame
  if( !( strCoef %in% colnames( frmeData ) ) )
  {
    fHit <- FALSE
    #If the column names are not provided, use the column names of the dataframe.
    if( is.null( astrCols ) ){astrCols <- colnames( frmeData )}
    #Search through the different column names (factors)
    for( strFactor in astrCols )
    {
      #Select a column, if it is not a factor or does not begin with the factor's name then skip
      # NOTE(review): class(adCur) != "factor" misses objects carrying more
      # than one class; inherits(adCur, "factor") would be safer -- confirm
      # no multi-class columns occur before changing.
      adCur <- frmeData[,strFactor]
      if( ( class( adCur ) != "factor" ) ||
        ( substr( strCoef, 1, nchar( strFactor ) ) != strFactor ) ) { next }
      #For the factors, create factor-level name combinations to read in factors
      #Then check to see the factor-level combination is the coefficient of interest
      #If it is then store that factor as the coefficient of interest
      #And break
      for( strValue in levels( adCur ) )
      {
        strCur <- paste( strFactor, strValue, sep = c_sFactorNameSep )
        if( strCur == strCoef )
        {
          # Overwrite strCoef with the bare factor name so the final
          # membership test below succeeds.
          strCoef <- strFactor
          fHit <- TRUE
          break
        }
      }
      #If the factor was found, return
      if( fHit ){break }
    }
  }
  #If the original coefficient or the coefficient factor combination name are in the
  #data frame, return the name. Otherwise return NA.
  return( ifelse( ( strCoef %in% colnames( frmeData ) ), strCoef, NA ) )
  ### Coefficient name
}
funcColToMFAValue = function(
### Given a column name, return the MFA values that could be associated with the name
lsColNames,
### String list of column names (as you would get from names(dataframe))
dfData
### Data frame of data the column names refer to
){
  lsMFAValues = c()
  for(sColName in lsColNames)
  {
    axCur = dfData[[sColName]]
    # Logical columns are treated through their factor representation.
    if(is.logical(axCur)){axCur=as.factor(axCur)}
    if(is.factor(axCur))
    {
      lsLevels = levels(axCur)
      # Two-level factors whose levels are both numeric strings are named
      # "column<sep>level" (to disambiguate the bare numbers); otherwise the
      # bare level names themselves are used.
      if((length(lsLevels)==2) && (!is.na(as.numeric(lsLevels[1]))) && (!is.na(as.numeric(lsLevels[2]))))
      {
        lsMFAValues = c(lsMFAValues,paste(sColName,lsLevels[1],sep=c_sMFANameSep1),paste(sColName,lsLevels[2],sep=c_sMFANameSep1))
      }else{
        for(sLevel in levels(axCur))
        {
          lsMFAValues = c(lsMFAValues,sLevel)
        }
      }
    } else {
      # Continuous metadata are represented by the column name itself.
      lsMFAValues = c(lsMFAValues,sColName)
    }
  }
  # Drop NA entries (both the string "NA" and real NA) before returning.
  return(setdiff(lsMFAValues,c("NA",NA)))
  ### Vector of MFA value names
}
funcMFAValue2Col = function(
### Given a value in a column, the column name is returned.
### The value may be a column name itself, a "column<sep>level" combination,
### or a raw value appearing in one of the searched columns.
xValue,
### Value to look up
dfData,
### Data frame to search
aiColumnIndicesToSearch = NULL
### Optional integer column indices restricting the final by-value search
){
  lsColumnNames = names(dfData)
  if(is.null(aiColumnIndicesToSearch))
  {
    aiColumnIndicesToSearch = c(1:dim(dfData)[2])
  }
  # Could be the column name
  if(xValue %in% lsColumnNames){return(xValue)}
  # Could be the column name and value
  # NOTE(review): iValueLength is computed but never used below -- dead code.
  iValueLength = length(xValue)
  for( iColIndex in c(1:length(lsColumnNames) ))
  {
    adCur = dfData[[lsColumnNames[iColIndex]]]
    if(is.factor(adCur))
    {
      # Try both separator conventions for "column<sep>level" names.
      for(strValue in levels(adCur))
      {
        strCurVersion1 <- paste( lsColumnNames[iColIndex], strValue, sep = c_sMFANameSep1 )
        strCurVersion2 <- paste( lsColumnNames[iColIndex], strValue, sep = c_sMFANameSep2 )
        if((xValue == strCurVersion1) || (xValue == strCurVersion2)){return(lsColumnNames[iColIndex])}
      }
    }
  }
  # Could be the value
  for(iColIndex in aiColumnIndicesToSearch)
  {
    if(xValue %in% dfData[[lsColumnNames[iColIndex]]]){return(lsColumnNames[iColIndex])}
  }
  return(NULL)
  ### Matching column name, or NULL when nothing matches
}
funcColorHelper <- function(
### Normalizes a color range: guarantees the returned min is the smaller of
### the two bounds and the max the larger, and fills in a missing center
### value with the midpoint of the range.
dMax = 1,
### Max number
dMin = -1,
### Min number
dMed = NA
### Center value; NA means "use the midpoint"
){
  adOrdered <- sort( c( dMin, dMax ) )
  dCenter <- ifelse( is.na( dMed ), ( dMin + dMax ) / 2.0, dMed )
  return( list( dMin = adOrdered[1], dMax = adOrdered[2], dMed = dCenter ) )
  ### List of min, max and med numbers
}
funcColor <- function(
### Generate a color based on a number that is forced to be between a min and max range.
### The color is based on how far the number is from the center of the given range
### From red to green (high) are produced with default settings
dX,
### Number from which to generate the color
dMax = 1,
### Max possible value
dMin = -1,
### Min possible value
dMed = NA,
### Central value if you don't want to be the average
adMax = c(1, 1, 0),
### Is used to generate the color for the higher values in the range, this can be changed to give different colors set to green
adMin = c(0, 0, 1),
### Is used to generate the color for the lower values in the range, this can be changed to give different colors set to red
adMed = c(0, 0, 0)
### Is used to generate the color for the central values in the range, this can be changed to give different colors set to black
){
  # Normalize the range so dMin <= dMed <= dMax.
  lsTmp <- funcColorHelper( dMax, dMin, dMed )
  dMax <- lsTmp$dMax
  dMin <- lsTmp$dMin
  dMed <- lsTmp$dMed
  # Missing values are drawn in the central color.
  if( is.na( dX ) )
  {
    dX <- dMed
  }
  # Clamp dX into [dMin, dMax].
  if( dX > dMax )
  {
    dX <- dMax
  } else if( dX < dMin )
  {
    dX <- dMin }
  # Linearly interpolate between the central color and the min/max color,
  # depending on which side of the center dX falls.
  if( dX < dMed )
  {
    d <- ( dMed - dX ) / ( dMed - dMin )
    adCur <- ( adMed * ( 1 - d ) ) + ( adMin * d )
  } else {
    d <- ( dMax - dX ) / ( dMax - dMed )
    adCur <- ( adMed * d ) + ( adMax * ( 1 - d ) )
  }
  return( rgb( adCur[1], adCur[2], adCur[3] ) )
  ### RGB object
}
funcColors <- function(
### Generate a range of colors
### Produces iSteps colors evenly spaced across [dMin, dMax] by repeatedly
### calling funcColor; useful as a palette for heatmap-style plots.
dMax = 1,
### Max possible value
dMin = -1,
### Min possible value
dMed = NA,
### Central value if you don't want to be the average
adMax = c(1, 1, 0),
### Is used to generate the color for the higher values in the range, this can be changed to give different colors set to green
adMin = c(0, 0, 1),
### Is used to generate the color for the lower values in the range, this can be changed to give different colors set to red
adMed = c(0, 0, 0),
### Is used to generate the color for the central values in the range, this can be changed to give different colors set to black
iSteps = 64
### Number of intermediary colors made in the range of colors
){
  # Normalize the range so dMin <= dMed <= dMax before stepping through it.
  lsTmp <- funcColorHelper( dMax, dMin, dMed )
  dMax <- lsTmp$dMax
  dMin <- lsTmp$dMin
  dMed <- lsTmp$dMed
  aRet <- c ()
  for( dCur in seq( dMin, dMax, ( dMax - dMin ) / ( iSteps - 1 ) ) )
  {
    aRet <- c(aRet, funcColor( dCur, dMax, dMin, dMed, adMax, adMin, adMed ))
  }
  return( aRet )
  ### List of colors
}
funcGetColor <- function(
### Returns the current graphics "col" parameter as a hexadecimal color string.
) {
  aiChannels <- as.vector( col2rgb( par( "col" ) ) )
  strHex <- sprintf( "#%02X%02X%02X", aiChannels[1], aiChannels[2], aiChannels[3] )
  return( strHex )
  ### Hexadecimal "#RRGGBB" color string
}
funcTrim=function(
### Strips leading and trailing whitespace from each element of a
### character vector.
tempString
### tempString Character vector to be trimmed.
){
  return(sub("[[:space:]]+$", "", sub("^[[:space:]]+", "", tempString)))
  ### Trimmed character vector
}
funcWrite <- function(
### Write a string or a table of data
### This transposes a table before it is written
pOut,
### String or table to write
strFile
### File to which to write
){
  # An NA file name disables logging entirely.
  if(!is.na(strFile))
  {
    # Character/numeric vectors are appended as a single delimited row;
    # anything else is appended through its print() representation.
    if( length( intersect( class( pOut ), c("character", "numeric") ) ) )
    {
      write.table( t(pOut), strFile, quote = FALSE, sep = c_cTableDelimiter, col.names = FALSE, row.names = FALSE, na = "", append = TRUE )
    } else {
      capture.output( print( pOut ), file = strFile, append = TRUE )
    }
  }
}
funcWriteTable <- function(
### Log a table to a file
### Writes the data frame delimited by the project-wide table delimiter;
### an NA file name disables writing.
frmeTable,
### Table to write
strFile,
### File to which to write
fAppend = FALSE
### Append when writing
){
  if(!is.na(strFile))
  {
    # col.names = NA writes an empty top-left cell so the header aligns
    # with the row-name column.
    write.table( frmeTable, strFile, quote = FALSE, sep = c_cTableDelimiter, na = "", col.names = NA, append = fAppend )
  }
}
funcWriteQCReport <- function(
### Write out the quality control report
### Appends a summary section (counts of data/metadata surviving each
### processing stage) followed by a detail section listing the underlying
### indices. The target file is removed first so each run starts fresh.
strProcessFileName,
### File name
lsQCData,
### List of QC data generated by maaslin to be written
liDataDim,
### Dimensions of the data matrix
liMetadataDim
### Dimensions of the metadata matrix
){
  # Start from an empty file; every funcWrite below appends.
  unlink(strProcessFileName)
  funcWrite( paste("Initial Metadata Matrix Size: Rows ",liMetadataDim[1]," Columns ",liMetadataDim[2],sep=""), strProcessFileName )
  funcWrite( paste("Initial Data Matrix Size: Rows ",liDataDim[1]," Columns ",liDataDim[2],sep=""), strProcessFileName )
  funcWrite( paste("\nInitial Data Count: ",length(lsQCData$aiDataInitial),sep=""), strProcessFileName )
  funcWrite( paste("Initial Metadata Count: ",length(lsQCData$aiMetadataInitial),sep=""), strProcessFileName )
  funcWrite( paste("Data Count after preprocess: ",length(lsQCData$aiAfterPreprocess),sep=""), strProcessFileName )
  funcWrite( paste("Removed for missing metadata: ",length(lsQCData$iMissingMetadata),sep=""), strProcessFileName )
  funcWrite( paste("Removed for missing data: ",length(lsQCData$iMissingData),sep=""), strProcessFileName )
  funcWrite( paste("Number of data with outliers: ",length(which(lsQCData$aiDataSumOutlierPerDatum>0)),sep=""), strProcessFileName )
  funcWrite( paste("Number of metadata with outliers: ",length(which(lsQCData$aiMetadataSumOutlierPerDatum>0)),sep=""), strProcessFileName )
  funcWrite( paste("Metadata count which survived clean: ",length(lsQCData$aiMetadataCleaned),sep=""), strProcessFileName )
  funcWrite( paste("Data count which survived clean: ",length(lsQCData$aiDataCleaned),sep=""), strProcessFileName )
  # Model-fitting statistics.
  funcWrite( paste("\nBoostings: ",lsQCData$iBoosts,sep=""), strProcessFileName )
  funcWrite( paste("Boosting Errors: ",lsQCData$iBoostErrors,sep=""), strProcessFileName )
  funcWrite( paste("LMs with no terms suriving boosting: ",lsQCData$iNoTerms,sep=""), strProcessFileName )
  funcWrite( paste("LMs performed: ",lsQCData$iLms,sep=""), strProcessFileName )
  # Optional QC payload from a custom preprocessing script.
  if(!is.null(lsQCData$lsQCCustom))
  {
    funcWrite("Custom preprocess QC data: ", strProcessFileName )
    funcWrite(lsQCData$lsQCCustom, strProcessFileName )
  } else {
    funcWrite("No custom preprocess QC data.", strProcessFileName )
  }
  # Detail section: the raw index vectors behind the counts above.
  funcWrite( "\n#Details###########################", strProcessFileName )
  funcWrite("\nInitial Data Count: ", strProcessFileName )
  funcWrite(lsQCData$aiDataInitial, strProcessFileName )
  funcWrite("\nInitial Metadata Count: ", strProcessFileName )
  funcWrite(lsQCData$aiMetadataInitial, strProcessFileName )
  funcWrite("\nData Count after preprocess: ", strProcessFileName )
  funcWrite(lsQCData$aiAfterPreprocess, strProcessFileName )
  funcWrite("\nRemoved for missing metadata: ", strProcessFileName )
  funcWrite(lsQCData$iMissingMetadata, strProcessFileName )
  funcWrite("\nRemoved for missing data: ", strProcessFileName )
  funcWrite(lsQCData$iMissingData, strProcessFileName )
  funcWrite("\nDetailed outlier indices: ", strProcessFileName )
  for(sFeature in names(lsQCData$liOutliers))
  {
    funcWrite(paste("Feature",sFeature,"Outlier indice(s):", paste(lsQCData$liOutliers[[sFeature]],collapse=",")), strProcessFileName )
  }
  funcWrite("\nMetadata which survived clean: ", strProcessFileName )
  funcWrite(lsQCData$aiMetadataCleaned, strProcessFileName )
  funcWrite("\nData which survived clean: ", strProcessFileName )
  funcWrite(lsQCData$aiDataCleaned, strProcessFileName )
}
funcLMToNoNAFormula <-function(
### Refits a linear model keeping only the predictors whose coefficients
### were estimable (non-NA) in the original fit.
lMod,
### Fitted lm whose NA coefficients should be dropped
frmeTmp,
### Data frame the model was fit against
adCur
### Response vector; referenced by the name "adCur" inside the rebuilt formula
){
  dfCoef = coef(lMod)
  # Names of all estimated (non-NA) coefficients, minus the intercept.
  astrCoefNames = setdiff(names(dfCoef[as.vector(!is.na(dfCoef))==TRUE]),"(Intercept)")
  # Map coefficient names (possibly factor-level combinations) back to the
  # originating data-frame columns.
  astrPredictors = unique(as.vector(sapply(astrCoefNames,funcCoef2Col, frmeData=frmeTmp)))
  strFormula = paste( "adCur ~", paste( sprintf( "`%s`", astrPredictors ), collapse = " + " ), sep = " " )
  return(try( lm(as.formula( strFormula ), data=frmeTmp )))
  ### Refit lm object, or a try-error on failure
}
funcFormulaStrToList <- function(
### Extracts the covariate names from an lm or mixed-model formula string.
### Random-effect terms like "(1|`Site`)" are reduced to their covariate
### name ("Site"); backticks are removed.
strFormula
### Formula string to parse (NULL/NA yields NULL)
){
  lsRetComparisons = c()
  # Nothing to parse for a missing formula.
  if(is.null(strFormula)||is.na(strFormula)){return(lsRetComparisons)}
  # Tokenize the right-hand side on spaces, dropping "+" and empty tokens,
  # then strip backtick quoting.
  strRHS <- unlist(strsplit(strFormula,"~"))[2]
  asTokens <- unlist(strsplit(strRHS," "))
  asComparisons <- gsub("`","",setdiff(asTokens,c("","+")))
  for(strComparison in asComparisons)
  {
    # Split away random-effect decoration "(1|...)" and keep the last piece,
    # which is the covariate name itself.
    asPieces = unlist(strsplit(strComparison, "[\\(\\|\\)]", perl=FALSE))
    lsRetComparisons = c(lsRetComparisons, asPieces[length(asPieces)])
  }
  return(lsRetComparisons)
  ### Character vector of covariate names (NULL when none)
}
funcFormulaListToString <- function(
### Builds an lm or mixed-model formula string from covariate names.
### Returns c(strLMFormula, strMixedModelFormula); exactly one is non-NA
### depending on whether random covariates were supplied. On error (no
### terms, or no fixed covariates) c(NA, NA) is returned.
astrTerms,
### Fixed covariates, or all covariates when using an lm
astrRandomCovariates = NULL
### Random covariates for a mixed model
){
  strRetLMFormula <- NA
  strRetMMFormula <- NA
  # No covariates at all: nothing to build.
  if(is.null(astrTerms)){return(c(strRetLMFormula, strRetMMFormula))}
  # A mixed model cannot be fit without at least one fixed covariate.
  astrFixed <- setdiff(astrTerms, astrRandomCovariates)
  if(length(astrFixed) == 0){return(c(strRetLMFormula, strRetMMFormula))}
  strFixedPart <- paste(sprintf("`%s`", astrFixed), collapse = " + ")
  if(length(astrRandomCovariates) > 0)
  {
    # glmmPQL-style random terms: "1|`name`".
    # NOTE(review): setdiff() keeps only random covariates NOT in astrTerms;
    # the commented-out lmer variant in the original used intersect() --
    # confirm which set callers expect before changing.
    strRandomPart <- paste(sprintf("1|`%s`", setdiff(astrRandomCovariates, astrTerms)), collapse = " + ")
    strRetMMFormula <- paste("adCur ~ ", strFixedPart, " + ", strRandomPart, sep = "")
  } else {
    # Plain lm: all covariates are fixed.
    strRetLMFormula <- paste("adCur ~ ", strFixedPart, sep = "")
  }
  return(c(strRetLMFormula, strRetMMFormula))
  ### c(lm formula or NA, mixed-model formula or NA)
}
4f2d75b1afd17269cc35ac55321f5adc238f27ba | cxxr-devel/cxxr-svn-mirror | src/library/utils/R/Sweave.R | # File src/library/utils/R/Sweave.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### The drivers are now in SweaveDrivers.R
### FIXMEs
### b) It would be nice to allow multiple 'grdevice' options
### Encodings (currently, different from 2.13.0)
###
### SweaveReadFile figures out an encoding, uses it (not currently for
### \SweaveInclude files) and returns it as an attribute. This is
### then passed as an attribute of 'file' to the driver's setup
### routine. Unless it is "" or "ASCII", the RweaveLatex driver
### re-encodes the output back to 'encoding': the Rtangle driver
### leaves it in the encoding of the current locale and records what
### that is in a comment.
###
### SweaveReadFile first looks for a call to one of the LaTeX packages
### inputen[cx] and deduces the vignette encoding from that, falling
### back to the package encoding, then Latin-1 (with a warning). This
### should work OK provided the package encoding is Latin-1: it is
### UTF-8 then LaTeX needs to be told what to do. It also assumes
### that R output is in the current locale: a package with a different
### encoding from the current one might have data in that package's
### encoding.
### Correspondence between input and output is maintained in two
### places: Each chunk has a srclines attribute, recording the input
### lines it corresponds to. Each code chunk will have attached
### srcrefs that duplicate the srclines. We don't need srclines for
### code, but we do need it for doc chunks, and it's easiest to just
### keep it for everything.
## Tangle: extract the code chunks from a Sweave file by running Sweave()
## with the Rtangle driver.
## NOTE(review): the 'syntax' argument is accepted here but not forwarded to
## Sweave(), which re-derives the syntax from the file extension -- confirm
## this matches the intended interface before changing.
Stangle <- function(file, driver = Rtangle(),
                    syntax = getOption("SweaveSyntax"),
                    encoding = "", ...)
    Sweave(file = file, driver = driver, encoding = encoding, ...)
## Weave 'file': read it (resolving \SweaveInput and encodings), then walk it
## line by line, dispatching documentation chunks to driver$writedoc and code
## chunks to driver$runcode. 'srcLinenum'/'srcFilenum' map each line of the
## concatenated text back to its originating file and line for #line
## directives and error reporting.
Sweave <- function(file, driver = RweaveLatex(),
                   syntax = getOption("SweaveSyntax"),
                   encoding = "", ...)
{
    ## The driver may be given as a name, a factory function, or the list
    ## it produces.
    if (is.character(driver)) driver <- get(driver, mode = "function")()
    else if (is.function(driver)) driver <- driver()
    if (is.null(syntax)) syntax <- SweaveGetSyntax(file) # from the extension
    if (is.character(syntax)) syntax <- get(syntax, mode = "list")
    if (.Platform$OS.type == "windows") file <- chartr("\\", "/", file)
    text <- SweaveReadFile(file, syntax, encoding = encoding)
    attr(file, "encoding") <- encoding <- attr(text, "encoding")
    srcFilenames <- attr(text, "files")
    srcFilenum <- attr(text, "srcFilenum")
    srcLinenum <- attr(text, "srcLinenum")
    ## drobj$options is the current set of options for this file.
    drobj <- driver$setup(file = file, syntax = syntax, ...)
    on.exit(driver$finish(drobj, error = TRUE))
    syntax <- attr(text, "syntax") # this is from the file commands.
    ## SWEAVE_OPTIONS overrides options from the environment.
    if (!is.na(envopts <- Sys.getenv("SWEAVE_OPTIONS", NA)))
        drobj$options <-
            SweaveParseOptions(envopts, drobj$options, driver$checkopts)
    drobj$filename <- file
    ## State machine: 'mode' flips between "doc" and "code" as chunk
    ## delimiters are encountered; 'chunk' accumulates the current chunk.
    mode <- "doc"
    chunknr <- 0L
    chunk <- NULL
    chunkopts <- NULL
    namedchunks <- list()
    prevfilenum <- 0L
    prevlinediff <- 0L
    for (linenum in seq_along(text)) {
        line <- text[linenum]
        filenum <- srcFilenum[linenum]
        ## 'linediff' is the offset from position in the concatenated text
        ## back to the line number in the original source file.
        linediff <- srcLinenum[linenum] - linenum
        if(nzchar(Sys.getenv("R_DEBUG_Sweave"))) {
            ## Extensive logging for debugging, needs 'ls' (unix-like or Rtools):
            cat(sprintf("l.%3d: %30s -'%4s'- ", linenum, substr(line,1,30), mode))
            cat(sprintf("%16s\n", system(paste("ls -s",
                                               summary(drobj$output)$description), intern=TRUE)))
        }
        if (length(grep(syntax$doc, line))) { # start new documentation chunk
            if (mode == "doc") {
                if (!is.null(chunk)) drobj <- driver$writedoc(drobj, chunk)
            } else {
                if (!is.null(chunkopts$label))
                    namedchunks[[chunkopts$label]] <- chunk
                if (!is.null(chunk))
                    drobj <- driver$runcode(drobj, chunk, chunkopts)
                mode <- "doc"
            }
            chunk <- NULL
        } else if (length(grep(syntax$code, line))) { # start new code chunk
            if (mode == "doc") {
                if (!is.null(chunk)) drobj <- driver$writedoc(drobj, chunk)
            } else {
                if (!is.null(chunkopts$label))
                    namedchunks[[chunkopts$label]] <- chunk
                if (!is.null(chunk))
                    drobj <- driver$runcode(drobj, chunk, chunkopts)
            }
            mode <- "code"
            chunkopts <- sub(syntax$code, "\\1", line)
            chunkopts <- SweaveParseOptions(chunkopts,
                                            drobj$options,
                                            driver$checkopts)
            ## these #line directives are used for error messages when parsing
            file <- srcFilenames[filenum]
            chunk <- paste0("#line ", linenum+linediff+1L, ' "', basename(file), '"')
            attr(chunk, "srclines") <- linenum + linediff
            attr(chunk, "srcFilenum") <- filenum
            attr(chunk, "srcFilenames") <- srcFilenames
            chunknr <- chunknr + 1L # this is really 'code chunk number'
            chunkopts$chunknr <- chunknr
        } else { # continuation of current chunk
            if (mode == "code" && length(grep(syntax$coderef, line))) {
                chunkref <- sub(syntax$coderef, "\\1", line)
                if (!(chunkref %in% names(namedchunks))) {
                    ## omit unknown references
                    warning(gettextf("reference to unknown chunk %s",
                                     sQuote(chunkref)),
                            call. = TRUE,domain = NA)
                    next
                } else {
                    ## these #line directives are used for error messages
                    ## when parsing
                    file <- srcFilenames[filenum]
                    line <- c(namedchunks[[chunkref]],
                              paste0("#line ", linenum+linediff+1L,
                                     ' "', basename(file), '"'))
                }
            }
            ## Re-emit a #line directive whenever the source file or line
            ## offset changes mid-chunk (e.g. across an \SweaveInput).
            if (mode == "code" &&
                (prevfilenum != filenum ||
                 prevlinediff != linediff)) {
                file <- srcFilenames[filenum]
                line <- c(paste0("#line ", linenum+linediff, ' "', basename(file), '"'),
                          line)
            }
            srclines <- c(attr(chunk, "srclines"), rep(linenum+linediff, length(line)))
            srcfilenum <- c(attr(chunk, "srcFilenum"), rep(filenum, length(line)))
            chunk <- c(chunk, line)
            attr(chunk, "srclines") <- srclines
            attr(chunk, "srcFilenum") <- srcfilenum
            attr(chunk, "srcFilenames") <- srcFilenames
        }
        prevfilenum <- filenum
        prevlinediff <- linediff
    }
    if (!is.null(chunk)) { # write out final chunk
        drobj <-
            if (mode == "doc") driver$writedoc(drobj, chunk)
            else driver$runcode(drobj, chunk, chunkopts)
    }
    on.exit() # clear action to finish with error = TRUE
    drobj$srcFilenames <- srcFilenames
    driver$finish(drobj)
}
## Read a Sweave source file, resolving its encoding, any in-file
## \SweaveSyntax declaration, and (recursively) all \SweaveInput files.
## Returns the text with attributes: "syntax", "files" (all files read),
## "encoding", and "srcLinenum"/"srcFilenum" mapping each line to its origin.
SweaveReadFile <- function(file, syntax, encoding = "")
{
    ## file can be a vector to keep track of recursive calls to
    ## SweaveReadFile. In this case only the first element is
    ## tried to read in, the rest are forbidden names for further
    ## SweaveInput
    f <- file[1L]
    bf <- basename(f)
    df <- dirname(f)
    if (!file.exists(f)) {
        ## Try completing the name with any extension the syntax accepts.
        f <- list.files(df, full.names = TRUE,
                        pattern = paste0(bf, syntax$extension))
        if (length(f) == 0L)
            stop(gettextf("no Sweave file with name %s found",
                          sQuote(file[1L])), domain = NA)
        else if (length(f) > 1L)
            stop(paste(gettextf("%d Sweave files for basename %s found",
                                length(f), sQuote(file[1L])),
                       paste(":\n ", f, collapse="")),
                 domain = NA)
    }
    ## An incomplete last line is not a real problem.
    text <- readLines(f[1L], warn = FALSE)
    srcLinenum <- seq_along(text)
    if (encoding != "bytes") {
        ## now sort out an encoding, if needed.
        enc <- tools:::.getVignetteEncoding(text, convert = TRUE)
        if (enc == "non-ASCII") {
            enc <- if (nzchar(encoding)) {
                encoding
            } else {
                stop(sQuote(basename(file)),
                     " is not ASCII and does not declare an encoding",
                     domain = NA, call. = FALSE)
            }
        } else if (enc == "unknown") {
            stop(sQuote(basename(file)),
                 " declares an encoding that Sweave does not know about",
                 domain = NA, call. = FALSE)
        }
        if (nzchar(enc)) text <- iconv(text, enc, "") else enc <- "ASCII"
    } else enc <- "bytes"
    ## Honour an in-file \SweaveSyntax{...} declaration (first one wins).
    pos <- grep(syntax$syntaxname, text)
    if (length(pos) > 1L)
        warning(gettextf("more than one syntax specification found, using the first one"), domain = NA)
    if (length(pos) > 0L) {
        sname <- sub(syntax$syntaxname, "\\1", text[pos[1L]])
        syntax <- get(sname, mode = "list")
        if (!identical(class(syntax), "SweaveSyntax"))
            stop(gettextf("object %s does not have class \"SweaveSyntax\"",
                          sQuote(sname)), domain = NA)
        text <- text[-pos]
        srcLinenum <- srcLinenum[-pos]
    }
    srcFilenum <- rep(1, length(srcLinenum))
    if (!is.null(syntax$input)) {
        ## Splice each \SweaveInput file in place, keeping the line/file maps
        ## consistent; 'file' doubles as the recursion stack for cycle checks.
        while(length(pos <- grep(syntax$input, text))) {
            pos <- pos[1L]
            ifile <- file.path(df, sub(syntax$input, "\\1", text[pos]))
            if (any(ifile == file)) {
                ## NOTE(review): seq_len() on a character vector errors by
                ## itself; seq_along(file) looks intended here -- confirm
                ## against the upstream sources (error branch only).
                stop(paste(gettextf("recursive Sweave input %s in stack",
                                    sQuote(ifile)),
                           paste("\n ", seq_len(file), ": ",
                                 rev(file), collapse="")),
                     domain = NA)
            }
            itext <- SweaveReadFile(c(ifile, file), syntax, encoding = encoding)
            pre <- seq_len(pos-1L)
            post <- seq_len(length(text) - pos) + pos
            text <- c(text[pre], itext, text[post])
            srcLinenum <- c(srcLinenum[pre], attr(itext, "srcLinenum"),
                            srcLinenum[post])
            srcFilenum <- c(srcFilenum[pre], attr(itext, "srcFilenum")+length(f),
                            srcFilenum[post])
            f <- c(f, attr(itext, "files"))
        }
    }
    attr(text, "syntax") <- syntax
    attr(text, "files") <- f
    attr(text, "encoding") <- enc
    attr(text, "srcLinenum") <- srcLinenum
    attr(text, "srcFilenum") <- srcFilenum
    text
}
###**********************************************************
## Syntax description for the default noweb-style markup ("<<...>>=" / "@").
## Each field is a regular expression matching one construct; 'trans' gives
## the replacement templates used by SweaveSyntConv when converting a file
## INTO this syntax.
SweaveSyntaxNoweb <-
    list(doc = "^@",
         code = "^<<(.*)>>=.*",
         coderef = "^<<(.*)>>.*",
         docopt = "^[[:space:]]*\\\\SweaveOpts\\{([^\\}]*)\\}",
         docexpr = "\\\\Sexpr\\{([^\\}]*)\\}",
         extension = "\\.[rsRS]?nw$",
         syntaxname = "^[[:space:]]*\\\\SweaveSyntax\\{([^\\}]*)\\}",
         input = "^[[:space:]]*\\\\SweaveInput\\{([^\\}]*)\\}",
         trans = list(
             doc = "@",
             code = "<<\\1>>=",
             coderef = "<<\\1>>",
             docopt = "\\\\SweaveOpts{\\1}",
             docexpr = "\\\\Sexpr{\\1}",
             extension = ".Snw",
             syntaxname = "\\\\SweaveSyntax{SweaveSyntaxNoweb}",
             input = "\\\\SweaveInput{\\1}")
         )
class(SweaveSyntaxNoweb) <- "SweaveSyntax"
## LaTeX-style syntax: derived from the noweb syntax, with code chunks
## delimited by \begin{Scode}...\end{Scode} and references via \Scoderef{}.
SweaveSyntaxLatex <- SweaveSyntaxNoweb
SweaveSyntaxLatex$doc <- "^[[:space:]]*\\\\end\\{Scode\\}"
SweaveSyntaxLatex$code <- "^[[:space:]]*\\\\begin\\{Scode\\}\\{?([^\\}]*)\\}?.*"
SweaveSyntaxLatex$coderef <- "^[[:space:]]*\\\\Scoderef\\{([^\\}]*)\\}.*"
SweaveSyntaxLatex$extension <- "\\.[rsRS]tex$"
SweaveSyntaxLatex$trans$doc <- "\\\\end{Scode}"
SweaveSyntaxLatex$trans$code <- "\\\\begin{Scode}{\\1}"
SweaveSyntaxLatex$trans$coderef <- "\\\\Scoderef{\\1}"
SweaveSyntaxLatex$trans$syntaxname <- "\\\\SweaveSyntax{SweaveSyntaxLatex}"
SweaveSyntaxLatex$trans$extension <- ".Stex"
SweaveGetSyntax <- function(file)
{
    ## Scan the search path for objects named like "SweaveSyntax" and return
    ## the first whose file-extension pattern matches 'file'; fall back to
    ## the noweb syntax when nothing matches.
    for (sname in apropos("SweaveSyntax", mode = "list")) {
        syn <- get(sname, mode = "list")
        if (!identical(class(syn), "SweaveSyntax")) next
        if (length(grep(syn$extension, file))) return(syn)
    }
    SweaveSyntaxNoweb
}
## Convert a Sweave file from its current (auto-detected) syntax to the
## target 'syntax', writing the translated text to 'output' (default: the
## basename with the target extension, in the current directory).
SweaveSyntConv <- function(file, syntax, output=NULL)
{
    if (is.character(syntax)) syntax <- get(syntax)
    if (!identical(class(syntax), "SweaveSyntax"))
        stop(gettextf("target syntax not of class %s",
                      dQuote("SweaveSyntax")),
             domain = NA)
    if (is.null(syntax$trans))
        stop("target syntax contains no translation table")
    insynt <- SweaveGetSyntax(file)
    text <- readLines(file)
    if (is.null(output))
        output <- sub(insynt$extension, syntax$trans$extension, basename(file))
    ## Rewrite every construct except the extension entry, which only names
    ## the output file.
    TN <- names(syntax$trans)
    for (n in TN)
        if (n != "extension") text <- gsub(insynt[[n]], syntax$trans[[n]], text)
    cat(text, file = output, sep = "\n")
    cat("Wrote file", output, "\n")
}
###**********************************************************
## Parse a comma-separated "name=value" option string -- from the header
## of a code chunk, an \SweaveOpts{} statement (strangely, left to the
## drivers), or the SWEAVE_OPTIONS environment variable -- into a named
## list merged on top of 'defaults'.  Only the first entry may be
## unnamed, in which case it becomes the chunk 'label'.  If 'check' is
## supplied (typically a driver's option validator) it is applied to the
## result; otherwise the merged list is returned.
SweaveParseOptions <- function(text, defaults = list(), check = NULL)
{
    ## strip surrounding whitespace, then split into "name=value" pieces
    trimmed <- sub("^[[:space:]]*(.*)", "\\1", text)
    trimmed <- sub("(.*[^[:space:]])[[:space:]]*$", "\\1", trimmed)
    pieces <- strsplit(unlist(strsplit(trimmed, "[[:space:]]*,[[:space:]]*")),
                       "[[:space:]]*=[[:space:]]*")
    if (!length(pieces)) return(defaults)
    ## an unnamed leading entry is the chunk label
    if (length(pieces[[1L]]) == 1L)
        pieces[[1L]] <- c("label", pieces[[1L]])
    if (any(vapply(pieces, length, 1L) != 2L))
        stop(gettextf("parse error or empty option in\n%s", text), domain = NA)
    opts <- defaults
    for (p in pieces) opts[[p[1L]]] <- p[2L]
    ## undocumented: strip a trailing ".<engine>" suffix from the label
    if (!is.null(opts[["label"]]) && !is.null(opts[["engine"]]))
        opts[["label"]] <- sub(paste0("\\.", opts[["engine"]], "$"),
                               "", opts[["label"]])
    if (is.null(check)) opts else check(opts)
}
## Really part of the RweaveLatex and Rtangle drivers.
## Return the names of all active hooks from getOption("SweaveHooks"):
## those whose (non-empty) name matches a TRUE logical entry in the
## chunk 'options' and whose value is a function.  With run = TRUE each
## active hook is also executed (its result evaluated in 'envir').
## Returns NULL when no hooks are set at all.
SweaveHooks <- function(options, run = FALSE, envir = .GlobalEnv)
{
    hooks <- getOption("SweaveHooks")
    if (is.null(hooks)) return(NULL)
    active <- character()
    for (nm in names(hooks)) {
        if (nzchar(nm) && is.logical(options[[nm]]) && options[[nm]]) {
            if (is.function(hooks[[nm]])) {
                active <- c(active, nm)
                if (run) eval(hooks[[nm]](), envir = envir)
            }
        }
    }
    active
}
### For R CMD xxxx ------------------------------------------
## Implementation of "R CMD Sweave".  Parses the command-line arguments
## (which arrive from the shell front-end as one string with individual
## arguments separated by the literal token 'nextArg'), runs Sweave() on
## the single input file and, with --pdf, converts the result via
## tools::texi2pdf(), optionally compacting the PDF afterwards.
## NOTE: never returns normally -- it always terminates the R session
## through q() (see do_exit).
.Sweave <- function(args = NULL)
{
    options(warn = 1)
    if (is.null(args)) {
        args <- commandArgs(TRUE)
        args <- paste(args, collapse=" ")
        args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
    }
    Usage <- function() {
        cat("Usage: R CMD Sweave [options] file",
            "",
            "A front-end for Sweave",
            "",
            "Options:",
            " -h, --help print this help message and exit",
            " -v, --version print version info and exit",
            " --driver=name use named Sweave driver",
            " --encoding=enc default encoding 'enc' for file",
            " --options= comma-separated list of Sweave options",
            " --pdf convert to PDF document",
            " --compact= try to compact PDF document:",
            ' "no" (default), "qpdf", "gs", "gs+qpdf", "both"',
            " --compact same as --compact=qpdf",
            "",
            "Report bugs at bugs.r-project.org .",
            sep = "\n")
    }
    do_exit <- function(status = 0L)
        q("no", status = status, runLast = FALSE)
    if (!length(args)) {
        Usage()
        do_exit(1L)
    }
    file <- character()
    driver <- encoding <- options <- ""
    toPDF <- FALSE
    compact <- Sys.getenv("_R_SWEAVE_COMPACT_PDF_", "no")
    ## consume the options one argument at a time; anything that does
    ## not look like an option is collected as an input file name
    while(length(args)) {
        a <- args[1L]
        if (a %in% c("-h", "--help")) {
            Usage()
            do_exit()
        }
        else if (a %in% c("-v", "--version")) {
            cat("Sweave front-end: ",
                R.version[["major"]], ".", R.version[["minor"]],
                " (r", R.version[["svn rev"]], ")\n", sep = "")
            cat("",
                "Copyright (C) 2006-2011 The R Core Team.",
                "This is free software; see the GNU General Public License version 2",
                "or later for copying conditions. There is NO warranty.",
                sep = "\n")
            do_exit()
        } else if (substr(a, 1, 9) == "--driver=") {
            driver <- substr(a, 10, 1000)
        } else if (substr(a, 1, 11) == "--encoding=") {
            encoding <- substr(a, 12, 1000)
        } else if (substr(a, 1, 10) == "--options=") {
            options <- substr(a, 11, 1000)
        } else if (a == "--pdf") {
            toPDF <- TRUE
        } else if (substr(a, 1, 10) == "--compact=") {
            compact <- substr(a, 11, 1000)
        } else if (a == "--compact") {
            compact <- "qpdf"
        } else if (substr(a, 1, 1) == "-") {
            message(gettextf("Warning: unknown option %s", sQuote(a)),
                    domain = NA)
        } else file <- c(file, a)
        args <- args[-1L]
    }
    ## exactly one input file is required
    if(length(file) != 1L) {
        Usage()
        do_exit(1L)
    }
    ## assemble the argument list for the Sweave() call; the value of
    ## --options= is parsed as R code into a list of further arguments
    args <- list(file)
    if(nzchar(driver)) args <- c(args, driver)
    args <- c(args, encoding = encoding)
    if(nzchar(options)) {
        opts <- eval(parse(text = paste("list(", options, ")")))
        args <- c(args, opts)
    }
    do.call(Sweave, args)
    if (toPDF) {
        texfile <- basename(sub("\\.[rsRS][[:alpha:]]+$", ".tex", file))
        tools::texi2pdf(texfile, clean = TRUE)
        ofile <- sub("\\.tex$", ".pdf", texfile)
        message(gettextf("Created PDF document %s", sQuote(ofile)),
                domain = NA)
        if(compact != "no") {
            ## <NOTE>
            ## Same code as used for --compact-vignettes in
            ## .build_packages() ...
            message("Compacting PDF document")
            if(compact %in% c("gs", "gs+qpdf", "both")) {
                gs_cmd <- tools:::find_gs_cmd(Sys.getenv("R_GSCMD", ""))
                gs_quality <- "ebook"
            } else {
                gs_cmd <- ""
                gs_quality <- "none"
            }
            qpdf <- if(compact %in% c("qpdf", "gs+qpdf", "both"))
                Sys.which(Sys.getenv("R_QPDF", "qpdf"))
            else ""
            res <- tools::compactPDF(ofile, qpdf = qpdf,
                                     gs_cmd = gs_cmd,
                                     gs_quality = gs_quality)
            res <- format(res, diff = 1e5)
            if(length(res))
                message(paste(format(res), collapse = "\n"))
        }
    }
    do_exit()
}
## Implementation of "R CMD Stangle".  Parses the command-line arguments
## (separated by the literal token 'nextArg' by the shell front-end) and
## extracts the code chunks of the single input file via Stangle().
## NOTE: never returns normally -- it always terminates the R session
## through q() (see do_exit).
.Stangle <- function(args = NULL)
{
    options(warn = 1)
    if (is.null(args)) {
        args <- commandArgs(TRUE)
        args <- paste(args, collapse=" ")
        args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
    }
    Usage <- function() {
        cat("Usage: R CMD Stangle file",
            "",
            "A front-end for Stangle",
            "",
            "Options:",
            " -h, --help print this help message and exit",
            " -v, --version print version info and exit",
            " --encoding=enc assume encoding 'enc' for file",
            " --options= comma-separated list of Stangle options",
            "",
            "Report bugs at bugs@r-project.org .",
            sep = "\n")
    }
    do_exit <- function(status = 0L)
        q("no", status = status, runLast = FALSE)
    if (!length(args)) {
        Usage()
        do_exit(1L)
    }
    file <- character()
    encoding <- options <- ""
    ## consume the options one argument at a time; anything that does
    ## not look like an option is collected as an input file name
    while(length(args)) {
        a <- args[1L]
        if (a %in% c("-h", "--help")) {
            Usage()
            do_exit()
        }
        else if (a %in% c("-v", "--version")) {
            cat("Stangle front-end: ",
                R.version[["major"]], ".", R.version[["minor"]],
                " (r", R.version[["svn rev"]], ")\n", sep = "")
            cat("",
                "Copyright (C) 2006-2011 The R Core Team.",
                "This is free software; see the GNU General Public License version 2",
                "or later for copying conditions. There is NO warranty.",
                sep = "\n")
            do_exit()
        } else if (substr(a, 1, 11) == "--encoding=") {
            encoding <- substr(a, 12, 1000)
        } else if (substr(a, 1, 10) == "--options=") {
            options <- substr(a, 11, 1000)
        } else if (substr(a, 1, 1) == "-") {
            message(gettextf("Warning: unknown option %s", sQuote(a)),
                    domain = NA)
        } else file <- c(file, a)
        args <- args[-1L]
    }
    ## exactly one input file is required
    if(length(file) != 1L) {
        Usage()
        do_exit(1L)
    }
    ## assemble the argument list for the Stangle() call; the value of
    ## --options= is parsed as R code into a list of further arguments
    args <- list(file)
    args <- c(args, encoding = encoding)
    if(nzchar(options)) {
        opts <- eval(parse(text = paste("list(", options, ")")))
        args <- c(args, opts)
    }
    do.call(Stangle, args)
    do_exit()
}
| 22,324 | gpl-2.0 |
4f2d75b1afd17269cc35ac55321f5adc238f27ba | lajus/customr | src/library/utils/R/Sweave.R | # File src/library/utils/R/Sweave.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### The drivers are now in SweaveDrivers.R
### FIXMEs
### b) It would be nice to allow multiple 'grdevice' options
### Encodings (currently, different from 2.13.0)
###
### SweaveReadFile figures out an encoding, uses it (not currently for
### \SweaveInclude files) and returns it as an attribute. This is
### then passed as an attribute of 'file' to the driver's setup
### routine. Unless it is "" or "ASCII", the RweaveLatex driver
### re-encodes the output back to 'encoding': the Rtangle driver
### leaves it in the encoding of the current locale and records what
### that is in a comment.
###
### SweaveReadFile first looks for a call to one of the LaTeX packages
### inputen[cx] and deduces the vignette encoding from that, falling
### back to the package encoding, then Latin-1 (with a warning). This
### should work OK provided the package encoding is Latin-1: it is
### UTF-8 then LaTeX needs to be told what to do. It also assumes
### that R output is in the current locale: a package with a different
### encoding from the current one might have data in that package's
### encoding.
### Correspondence between input and output is maintained in two
### places: Each chunk has a srclines attribute, recording the input
### lines it corresponds to. Each code chunk will have attached
### srcrefs that duplicate the srclines. We don't need srclines for
### code, but we do need it for doc chunks, and it's easiest to just
### keep it for everything.
## Tangle (extract the code chunks of) a Sweave file: simply Sweave()
## with a tangling driver, Rtangle() by default.
##
## Bug fix: the 'syntax' argument was accepted but not forwarded to
## Sweave(), so a caller-supplied syntax table was silently ignored.
## Forwarding it is backward compatible: the default value is the same
## getOption("SweaveSyntax") that Sweave() itself uses.
Stangle <- function(file, driver = Rtangle(),
                    syntax = getOption("SweaveSyntax"),
                    encoding = "", ...)
    Sweave(file = file, driver = driver, syntax = syntax,
           encoding = encoding, ...)
## Weave an Sweave source file: split it into documentation and code
## chunks (recognised via the regular expressions in 'syntax') and hand
## each chunk to the 'driver'.  The default RweaveLatex() driver weaves
## to LaTeX; Rtangle() (used by Stangle) extracts the code instead.
##
## file     - path of the Sweave source file
## driver   - driver object, or a function (or name of a function)
##            returning one
## syntax   - "SweaveSyntax" object or the name of one; NULL => derived
##            from the file extension by SweaveGetSyntax()
## encoding - default encoding to assume for 'file'
## ...      - passed on to the driver's setup() method
Sweave <- function(file, driver = RweaveLatex(),
                   syntax = getOption("SweaveSyntax"),
                   encoding = "", ...)
{
    if (is.character(driver)) driver <- get(driver, mode = "function")()
    else if (is.function(driver)) driver <- driver()
    if (is.null(syntax)) syntax <- SweaveGetSyntax(file) # from the extension
    if (is.character(syntax)) syntax <- get(syntax, mode = "list")
    if (.Platform$OS.type == "windows") file <- chartr("\\", "/", file)
    ## read the file (and, recursively, any \SweaveInput files); the
    ## result carries source-reference bookkeeping as attributes
    text <- SweaveReadFile(file, syntax, encoding = encoding)
    attr(file, "encoding") <- encoding <- attr(text, "encoding")
    srcFilenames <- attr(text, "files")
    srcFilenum <- attr(text, "srcFilenum")
    srcLinenum <- attr(text, "srcLinenum")
    ## drobj$options is the current set of options for this file.
    drobj <- driver$setup(file = file, syntax = syntax, ...)
    on.exit(driver$finish(drobj, error = TRUE))
    syntax <- attr(text, "syntax") # this is from the file commands.
    if (!is.na(envopts <- Sys.getenv("SWEAVE_OPTIONS", NA)))
        drobj$options <-
            SweaveParseOptions(envopts, drobj$options, driver$checkopts)
    drobj$filename <- file
    ## loop state: current mode ("doc"/"code"), the chunk being
    ## accumulated with its options, and previously seen named chunks
    ## (for <<name>> code references)
    mode <- "doc"
    chunknr <- 0L
    chunk <- NULL
    chunkopts <- NULL
    namedchunks <- list()
    prevfilenum <- 0L
    prevlinediff <- 0L
    for (linenum in seq_along(text)) {
        line <- text[linenum]
        filenum <- srcFilenum[linenum]
        linediff <- srcLinenum[linenum] - linenum
        if(nzchar(Sys.getenv("R_DEBUG_Sweave"))) {
            ## Extensive logging for debugging, needs 'ls' (unix-like or Rtools):
            cat(sprintf("l.%3d: %30s -'%4s'- ", linenum, substr(line,1,30), mode))
            cat(sprintf("%16s\n", system(paste("ls -s",
                                               summary(drobj$output)$description), intern=TRUE)))
        }
        if (length(grep(syntax$doc, line))) { # start new documentation chunk
            if (mode == "doc") {
                if (!is.null(chunk)) drobj <- driver$writedoc(drobj, chunk)
            } else {
                if (!is.null(chunkopts$label))
                    namedchunks[[chunkopts$label]] <- chunk
                if (!is.null(chunk))
                    drobj <- driver$runcode(drobj, chunk, chunkopts)
                mode <- "doc"
            }
            chunk <- NULL
        } else if (length(grep(syntax$code, line))) { # start new code chunk
            if (mode == "doc") {
                if (!is.null(chunk)) drobj <- driver$writedoc(drobj, chunk)
            } else {
                if (!is.null(chunkopts$label))
                    namedchunks[[chunkopts$label]] <- chunk
                if (!is.null(chunk))
                    drobj <- driver$runcode(drobj, chunk, chunkopts)
            }
            mode <- "code"
            chunkopts <- sub(syntax$code, "\\1", line)
            chunkopts <- SweaveParseOptions(chunkopts,
                                            drobj$options,
                                            driver$checkopts)
            ## these #line directives are used for error messages when parsing
            file <- srcFilenames[filenum]
            chunk <- paste0("#line ", linenum+linediff+1L, ' "', basename(file), '"')
            attr(chunk, "srclines") <- linenum + linediff
            attr(chunk, "srcFilenum") <- filenum
            attr(chunk, "srcFilenames") <- srcFilenames
            chunknr <- chunknr + 1L # this is really 'code chunk number'
            chunkopts$chunknr <- chunknr
        } else { # continuation of current chunk
            if (mode == "code" && length(grep(syntax$coderef, line))) {
                chunkref <- sub(syntax$coderef, "\\1", line)
                if (!(chunkref %in% names(namedchunks))) {
                    ## omit unknown references
                    warning(gettextf("reference to unknown chunk %s",
                                     sQuote(chunkref)),
                            call. = TRUE,domain = NA)
                    next
                } else {
                    ## these #line directives are used for error messages
                    ## when parsing
                    file <- srcFilenames[filenum]
                    line <- c(namedchunks[[chunkref]],
                              paste0("#line ", linenum+linediff+1L,
                                     ' "', basename(file), '"'))
                }
            }
            if (mode == "code" &&
                (prevfilenum != filenum ||
                 prevlinediff != linediff)) {
                ## source location changed (e.g. across an \SweaveInput
                ## boundary): emit a fresh #line directive
                file <- srcFilenames[filenum]
                line <- c(paste0("#line ", linenum+linediff, ' "', basename(file), '"'),
                          line)
            }
            srclines <- c(attr(chunk, "srclines"), rep(linenum+linediff, length(line)))
            srcfilenum <- c(attr(chunk, "srcFilenum"), rep(filenum, length(line)))
            chunk <- c(chunk, line)
            attr(chunk, "srclines") <- srclines
            attr(chunk, "srcFilenum") <- srcfilenum
            attr(chunk, "srcFilenames") <- srcFilenames
        }
        prevfilenum <- filenum
        prevlinediff <- linediff
    }
    if (!is.null(chunk)) { # write out final chunk
        drobj <-
            if (mode == "doc") driver$writedoc(drobj, chunk)
            else driver$runcode(drobj, chunk, chunkopts)
    }
    on.exit() # clear action to finish with error = TRUE
    drobj$srcFilenames <- srcFilenames
    driver$finish(drobj)
}
## Read an Sweave source file (and, recursively, every file pulled in
## via \SweaveInput), returning a character vector of lines with
## attributes:
##   "syntax"     - the syntax table in effect (possibly switched by an
##                  in-file \SweaveSyntax{} declaration)
##   "files"      - all files read, in order of first use
##   "encoding"   - the encoding assumed for the text
##   "srcLinenum" - original line number for each returned line
##   "srcFilenum" - index into "files" for each returned line
## 'file' may be a vector: only the first element is read; the remaining
## elements are the current inclusion stack, used to detect recursion.
##
## Bug fix: the recursive-input error message used seq_len(file), which
## errors for a character vector of length > 1; seq_along(file) is what
## was intended (enumerate the inclusion stack).
SweaveReadFile <- function(file, syntax, encoding = "")
{
    f <- file[1L]
    bf <- basename(f)
    df <- dirname(f)
    if (!file.exists(f)) {
        ## try to complete the name using the syntax's known extensions
        f <- list.files(df, full.names = TRUE,
                        pattern = paste0(bf, syntax$extension))
        if (length(f) == 0L)
            stop(gettextf("no Sweave file with name %s found",
                          sQuote(file[1L])), domain = NA)
        else if (length(f) > 1L)
            stop(paste(gettextf("%d Sweave files for basename %s found",
                                length(f), sQuote(file[1L])),
                       paste(":\n ", f, collapse="")),
                 domain = NA)
    }
    ## An incomplete last line is not a real problem.
    text <- readLines(f[1L], warn = FALSE)
    srcLinenum <- seq_along(text)
    if (encoding != "bytes") {
        ## now sort out an encoding, if needed.
        enc <- tools:::.getVignetteEncoding(text, convert = TRUE)
        if (enc == "non-ASCII") {
            enc <- if (nzchar(encoding)) {
                encoding
            } else {
                stop(sQuote(basename(file)),
                     " is not ASCII and does not declare an encoding",
                     domain = NA, call. = FALSE)
            }
        } else if (enc == "unknown") {
            stop(sQuote(basename(file)),
                 " declares an encoding that Sweave does not know about",
                 domain = NA, call. = FALSE)
        }
        if (nzchar(enc)) text <- iconv(text, enc, "") else enc <- "ASCII"
    } else enc <- "bytes"
    ## honour an in-file \SweaveSyntax{} declaration (first one wins)
    pos <- grep(syntax$syntaxname, text)
    if (length(pos) > 1L)
        warning(gettextf("more than one syntax specification found, using the first one"), domain = NA)
    if (length(pos) > 0L) {
        sname <- sub(syntax$syntaxname, "\\1", text[pos[1L]])
        syntax <- get(sname, mode = "list")
        if (!identical(class(syntax), "SweaveSyntax"))
            stop(gettextf("object %s does not have class \"SweaveSyntax\"",
                          sQuote(sname)), domain = NA)
        text <- text[-pos]
        srcLinenum <- srcLinenum[-pos]
    }
    srcFilenum <- rep(1, length(srcLinenum))
    if (!is.null(syntax$input)) {
        ## splice in \SweaveInput files, keeping the line/file
        ## bookkeeping vectors aligned with the growing text
        while(length(pos <- grep(syntax$input, text))) {
            pos <- pos[1L]
            ifile <- file.path(df, sub(syntax$input, "\\1", text[pos]))
            if (any(ifile == file)) {
                stop(paste(gettextf("recursive Sweave input %s in stack",
                                    sQuote(ifile)),
                           paste("\n ", seq_along(file), ": ",
                                 rev(file), collapse="")),
                     domain = NA)
            }
            itext <- SweaveReadFile(c(ifile, file), syntax, encoding = encoding)
            pre <- seq_len(pos-1L)
            post <- seq_len(length(text) - pos) + pos
            text <- c(text[pre], itext, text[post])
            srcLinenum <- c(srcLinenum[pre], attr(itext, "srcLinenum"),
                            srcLinenum[post])
            srcFilenum <- c(srcFilenum[pre], attr(itext, "srcFilenum")+length(f),
                            srcFilenum[post])
            f <- c(f, attr(itext, "files"))
        }
    }
    attr(text, "syntax") <- syntax
    attr(text, "files") <- f
    attr(text, "encoding") <- enc
    attr(text, "srcLinenum") <- srcLinenum
    attr(text, "srcFilenum") <- srcFilenum
    text
}
###**********************************************************
## Syntax table for the classic noweb-style Sweave markup.  Each entry is
## a regular expression recognising one construct on an input line:
##   doc        - line opening a documentation chunk ("@")
##   code       - line opening a code chunk ("<<label, options>>=")
##   coderef    - reference to a previously named chunk ("<<label>>")
##   docopt     - \SweaveOpts{...} statement inside documentation
##   docexpr    - inline \Sexpr{...} expression
##   extension  - file-name extension handled by this syntax
##   syntaxname - \SweaveSyntax{...} declaration selecting a syntax
##   input      - \SweaveInput{...} file inclusion
## 'trans' holds the matching output templates used by SweaveSyntConv()
## when translating a file *into* this syntax.
SweaveSyntaxNoweb <-
    list(doc = "^@",
         code = "^<<(.*)>>=.*",
         coderef = "^<<(.*)>>.*",
         docopt = "^[[:space:]]*\\\\SweaveOpts\\{([^\\}]*)\\}",
         docexpr = "\\\\Sexpr\\{([^\\}]*)\\}",
         extension = "\\.[rsRS]?nw$",
         syntaxname = "^[[:space:]]*\\\\SweaveSyntax\\{([^\\}]*)\\}",
         input = "^[[:space:]]*\\\\SweaveInput\\{([^\\}]*)\\}",
         trans = list(
             doc = "@",
             code = "<<\\1>>=",
             coderef = "<<\\1>>",
             docopt = "\\\\SweaveOpts{\\1}",
             docexpr = "\\\\Sexpr{\\1}",
             extension = ".Snw",
             syntaxname = "\\\\SweaveSyntax{SweaveSyntaxNoweb}",
             input = "\\\\SweaveInput{\\1}")
         )
## S3 class tag checked by SweaveGetSyntax()/SweaveSyntConv()/SweaveReadFile()
class(SweaveSyntaxNoweb) <- "SweaveSyntax"
## LaTeX-style Sweave syntax: derived from the noweb table, with code
## chunks delimited by \begin{Scode}/\end{Scode} environments instead of
## "<<>>="/"@" markers, and files named *.[rsRS]tex.
SweaveSyntaxLatex <- SweaveSyntaxNoweb
SweaveSyntaxLatex$doc <- "^[[:space:]]*\\\\end\\{Scode\\}"
SweaveSyntaxLatex$code <- "^[[:space:]]*\\\\begin\\{Scode\\}\\{?([^\\}]*)\\}?.*"
SweaveSyntaxLatex$coderef <- "^[[:space:]]*\\\\Scoderef\\{([^\\}]*)\\}.*"
SweaveSyntaxLatex$extension <- "\\.[rsRS]tex$"
## matching output templates used by SweaveSyntConv()
SweaveSyntaxLatex$trans$doc <- "\\\\end{Scode}"
SweaveSyntaxLatex$trans$code <- "\\\\begin{Scode}{\\1}"
SweaveSyntaxLatex$trans$coderef <- "\\\\Scoderef{\\1}"
SweaveSyntaxLatex$trans$syntaxname <- "\\\\SweaveSyntax{SweaveSyntaxLatex}"
SweaveSyntaxLatex$trans$extension <- ".Stex"
## Choose a syntax table for 'file' by matching its name against the
## 'extension' pattern of every "SweaveSyntax" object visible on the
## search path; fall back to the noweb syntax when nothing matches.
SweaveGetSyntax <- function(file)
{
    synt <- apropos("SweaveSyntax", mode = "list")
    for (sname in synt) {
        s <- get(sname, mode = "list")
        if (!identical(class(s), "SweaveSyntax")) next
        if (length(grep(s$extension, file))) return(s)
    }
    SweaveSyntaxNoweb
}
## Translate a Sweave file from the syntax implied by its file name into
## another syntax (e.g. noweb markup -> LaTeX markup).  'syntax' may be
## a "SweaveSyntax" object or the name of one; 'output' defaults to the
## input basename carrying the target syntax's extension.
SweaveSyntConv <- function(file, syntax, output=NULL)
{
    if (is.character(syntax)) syntax <- get(syntax)
    if (!identical(class(syntax), "SweaveSyntax"))
        stop(gettextf("target syntax not of class %s", dQuote("SweaveSyntax")),
             domain = NA)
    if (is.null(syntax$trans))
        stop("target syntax contains no translation table")
    insyntax <- SweaveGetSyntax(file)
    lines <- readLines(file)
    if (is.null(output))
        output <- sub(insyntax$extension, syntax$trans$extension,
                      basename(file))
    ## rewrite every construct via the target templates; 'extension' is
    ## a file-name pattern, not a line pattern, so it is skipped
    for (field in setdiff(names(syntax$trans), "extension"))
        lines <- gsub(insyntax[[field]], syntax$trans[[field]], lines)
    cat(lines, file = output, sep = "\n")
    cat("Wrote file", output, "\n")
}
###**********************************************************
## Parses an option string, from
##  - the header of a code chunk
##  - an \SweaveOpts{} statement (strangely, left to the drivers)
##  - the value of environment variable SWEAVE_OPTIONS
##
## The format is name=value pairs with whitespace being discarded
## (and could have been done all at once).  The result is the 'defaults'
## list with the parsed pairs merged on top; if 'check' is supplied
## (typically a driver's option validator) it is applied to the result.
SweaveParseOptions <- function(text, defaults = list(), check = NULL)
{
    ## strip leading and trailing whitespace, split on commas, then
    ## split each piece into a (name, value) pair on "="
    x <- sub("^[[:space:]]*(.*)", "\\1", text)
    x <- sub("(.*[^[:space:]])[[:space:]]*$", "\\1", x)
    x <- unlist(strsplit(x, "[[:space:]]*,[[:space:]]*"))
    x <- strsplit(x, "[[:space:]]*=[[:space:]]*")
    ## only the first option may have no name: the chunk label
    if (length(x)) {
        if (length(x[[1L]]) == 1L) x[[1L]] <- c("label", x[[1L]])
    } else return(defaults)
    if (any(sapply(x, length) != 2L))
        stop(gettextf("parse error or empty option in\n%s", text), domain = NA)
    options <- defaults
    for (k in seq_along(x)) options[[ x[[k]][1L] ]] <- x[[k]][2L]
    ## This is undocumented: strip a trailing ".<engine>" from the label
    if (!is.null(options[["label"]]) && !is.null(options[["engine"]]))
        options[["label"]] <-
            sub(paste0("\\.", options[["engine"]], "$"),
                "", options[["label"]])
    if (!is.null(check)) check(options) else options
}
## really part of the RweaveLatex and Rtangle drivers
## Return the names of all active hooks from getOption("SweaveHooks"):
## those whose (non-empty) name matches a TRUE logical entry in the
## chunk 'options' and whose value is a function.  With run = TRUE each
## active hook is also executed (its result evaluated in 'envir').
## Returns NULL when no hooks are set at all.
SweaveHooks <- function(options, run = FALSE, envir = .GlobalEnv)
{
    if (is.null(SweaveHooks <- getOption("SweaveHooks"))) return(NULL)
    z <- character()
    for (k in names(SweaveHooks))
        if (nzchar(k) && is.logical(options[[k]]) && options[[k]])
            if (is.function(SweaveHooks[[k]])) {
                z <- c(z, k)
                if (run) eval(SweaveHooks[[k]](), envir=envir)
            }
    z # a character vector.
}
### For R CMD xxxx ------------------------------------------
## Implementation of "R CMD Sweave".  Parses the command-line arguments
## (which arrive from the shell front-end as one string with individual
## arguments separated by the literal token 'nextArg'), runs Sweave() on
## the single input file and, with --pdf, converts the result via
## tools::texi2pdf(), optionally compacting the PDF afterwards.
## NOTE: never returns normally -- it always terminates the R session
## through q() (see do_exit).
.Sweave <- function(args = NULL)
{
    options(warn = 1)
    if (is.null(args)) {
        args <- commandArgs(TRUE)
        args <- paste(args, collapse=" ")
        args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
    }
    Usage <- function() {
        cat("Usage: R CMD Sweave [options] file",
            "",
            "A front-end for Sweave",
            "",
            "Options:",
            " -h, --help print this help message and exit",
            " -v, --version print version info and exit",
            " --driver=name use named Sweave driver",
            " --encoding=enc default encoding 'enc' for file",
            " --options= comma-separated list of Sweave options",
            " --pdf convert to PDF document",
            " --compact= try to compact PDF document:",
            ' "no" (default), "qpdf", "gs", "gs+qpdf", "both"',
            " --compact same as --compact=qpdf",
            "",
            "Report bugs at bugs.r-project.org .",
            sep = "\n")
    }
    do_exit <- function(status = 0L)
        q("no", status = status, runLast = FALSE)
    if (!length(args)) {
        Usage()
        do_exit(1L)
    }
    file <- character()
    driver <- encoding <- options <- ""
    toPDF <- FALSE
    compact <- Sys.getenv("_R_SWEAVE_COMPACT_PDF_", "no")
    ## consume the options one argument at a time; anything that does
    ## not look like an option is collected as an input file name
    while(length(args)) {
        a <- args[1L]
        if (a %in% c("-h", "--help")) {
            Usage()
            do_exit()
        }
        else if (a %in% c("-v", "--version")) {
            cat("Sweave front-end: ",
                R.version[["major"]], ".", R.version[["minor"]],
                " (r", R.version[["svn rev"]], ")\n", sep = "")
            cat("",
                "Copyright (C) 2006-2011 The R Core Team.",
                "This is free software; see the GNU General Public License version 2",
                "or later for copying conditions. There is NO warranty.",
                sep = "\n")
            do_exit()
        } else if (substr(a, 1, 9) == "--driver=") {
            driver <- substr(a, 10, 1000)
        } else if (substr(a, 1, 11) == "--encoding=") {
            encoding <- substr(a, 12, 1000)
        } else if (substr(a, 1, 10) == "--options=") {
            options <- substr(a, 11, 1000)
        } else if (a == "--pdf") {
            toPDF <- TRUE
        } else if (substr(a, 1, 10) == "--compact=") {
            compact <- substr(a, 11, 1000)
        } else if (a == "--compact") {
            compact <- "qpdf"
        } else if (substr(a, 1, 1) == "-") {
            message(gettextf("Warning: unknown option %s", sQuote(a)),
                    domain = NA)
        } else file <- c(file, a)
        args <- args[-1L]
    }
    ## exactly one input file is required
    if(length(file) != 1L) {
        Usage()
        do_exit(1L)
    }
    ## assemble the argument list for the Sweave() call; the value of
    ## --options= is parsed as R code into a list of further arguments
    args <- list(file)
    if(nzchar(driver)) args <- c(args, driver)
    args <- c(args, encoding = encoding)
    if(nzchar(options)) {
        opts <- eval(parse(text = paste("list(", options, ")")))
        args <- c(args, opts)
    }
    do.call(Sweave, args)
    if (toPDF) {
        texfile <- basename(sub("\\.[rsRS][[:alpha:]]+$", ".tex", file))
        tools::texi2pdf(texfile, clean = TRUE)
        ofile <- sub("\\.tex$", ".pdf", texfile)
        message(gettextf("Created PDF document %s", sQuote(ofile)),
                domain = NA)
        if(compact != "no") {
            ## <NOTE>
            ## Same code as used for --compact-vignettes in
            ## .build_packages() ...
            message("Compacting PDF document")
            if(compact %in% c("gs", "gs+qpdf", "both")) {
                gs_cmd <- tools:::find_gs_cmd(Sys.getenv("R_GSCMD", ""))
                gs_quality <- "ebook"
            } else {
                gs_cmd <- ""
                gs_quality <- "none"
            }
            qpdf <- if(compact %in% c("qpdf", "gs+qpdf", "both"))
                Sys.which(Sys.getenv("R_QPDF", "qpdf"))
            else ""
            res <- tools::compactPDF(ofile, qpdf = qpdf,
                                     gs_cmd = gs_cmd,
                                     gs_quality = gs_quality)
            res <- format(res, diff = 1e5)
            if(length(res))
                message(paste(format(res), collapse = "\n"))
        }
    }
    do_exit()
}
## Implementation of "R CMD Stangle".  Parses the command-line arguments
## (separated by the literal token 'nextArg' by the shell front-end) and
## extracts the code chunks of the single input file via Stangle().
## NOTE: never returns normally -- it always terminates the R session
## through q() (see do_exit).
.Stangle <- function(args = NULL)
{
    options(warn = 1)
    if (is.null(args)) {
        args <- commandArgs(TRUE)
        args <- paste(args, collapse=" ")
        args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
    }
    Usage <- function() {
        cat("Usage: R CMD Stangle file",
            "",
            "A front-end for Stangle",
            "",
            "Options:",
            " -h, --help print this help message and exit",
            " -v, --version print version info and exit",
            " --encoding=enc assume encoding 'enc' for file",
            " --options= comma-separated list of Stangle options",
            "",
            "Report bugs at bugs@r-project.org .",
            sep = "\n")
    }
    do_exit <- function(status = 0L)
        q("no", status = status, runLast = FALSE)
    if (!length(args)) {
        Usage()
        do_exit(1L)
    }
    file <- character()
    encoding <- options <- ""
    ## consume the options one argument at a time; anything that does
    ## not look like an option is collected as an input file name
    while(length(args)) {
        a <- args[1L]
        if (a %in% c("-h", "--help")) {
            Usage()
            do_exit()
        }
        else if (a %in% c("-v", "--version")) {
            cat("Stangle front-end: ",
                R.version[["major"]], ".", R.version[["minor"]],
                " (r", R.version[["svn rev"]], ")\n", sep = "")
            cat("",
                "Copyright (C) 2006-2011 The R Core Team.",
                "This is free software; see the GNU General Public License version 2",
                "or later for copying conditions. There is NO warranty.",
                sep = "\n")
            do_exit()
        } else if (substr(a, 1, 11) == "--encoding=") {
            encoding <- substr(a, 12, 1000)
        } else if (substr(a, 1, 10) == "--options=") {
            options <- substr(a, 11, 1000)
        } else if (substr(a, 1, 1) == "-") {
            message(gettextf("Warning: unknown option %s", sQuote(a)),
                    domain = NA)
        } else file <- c(file, a)
        args <- args[-1L]
    }
    ## exactly one input file is required
    if(length(file) != 1L) {
        Usage()
        do_exit(1L)
    }
    ## assemble the argument list for the Stangle() call; the value of
    ## --options= is parsed as R code into a list of further arguments
    args <- list(file)
    args <- c(args, encoding = encoding)
    if(nzchar(options)) {
        opts <- eval(parse(text = paste("list(", options, ")")))
        args <- c(args, opts)
    }
    do.call(Stangle, args)
    do_exit()
}
| 22,324 | gpl-2.0 |
0216d238ecf377792fa8d8801760ccac5d2a7f56 | ahopki14/immunoSeqR | R/richness.R | #' richness
#'
#' Computes the richness (total number of unique elements) in a sample
#' @param x A numeric vector or tcr object
#' @param merge If x is a tcr object, the output will be a tcr object with the
#' richness merged into the metadata.
#' @return If x is a vector, the function returns the richness If x is a tcr
#' object, and merge=T, then the function returns a tcr object with richness
#' included in the metadata for all samples.
#' @author Alexander Hopkins
#' @export
## Default (vector) method: richness is the number of elements with a
## strictly positive count.  sum(x > 0, na.rm = TRUE) replaces the
## length(which(...)) anti-idiom; NAs are skipped exactly as which()
## skipped them, and the result is an integer as before.
richness <- function(x, ...) {
  sum(x > 0, na.rm = TRUE)
}
## "tcr" method: one richness value per sample (computed over the
## columns of assay(x)).  With merge = TRUE the values are merged into
## colData(x) (keyed on the 'fn' column) and the updated object is
## returned; otherwise the named numeric vector of richness values.
## Fix: use TRUE/FALSE instead of the reassignable shorthands T/F.
setMethod("richness", "tcr",
    function(x, merge=TRUE){
        ## assay columns are the samples; richness() is the vector method
        r <- apply(assay(x), FUN=richness, MARGIN=2)
        df <- data.frame(Richness=r, fn=names(r))
        if(merge){
            tmp <- DataFrame(
                merge(colData(x), df, by='fn', all.x=TRUE, sort=FALSE)
            )
            # This is necessary to maintain colnames(assay(ds))
            # for some reason...
            rownames(tmp) <- tmp$fn
            stopifnot(tmp$fn == colnames(assay(x))) # make sure they line up
            colData(x) <- tmp
            return(x)
        }
        else{r}
    }
)
| 998 | gpl-3.0 |
f6f04624d4b62a5bffc70e75216a5c3e9e6ac4a3 | kalibera/rexp | src/library/tools/R/Rd2HTML.R |
# File src/library/tools/R/Rd2HTML.R
#
# Copyright (C) 1995-2013 The R Core Team
# Part of the R package, http://www.R-project.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
## Resolve an Rd \link / \linkS4class object into its components.
## Also used by Rd2latex, but only 'topic' and 'dest' there.
##
## arg    - the Rd object holding the link text (must be all TEXT)
## tag    - the Rd tag, "\\link" or "\\linkS4class"
## Rdfile - source file name, for error reporting via stopRd()
## Returns list(topic, dest, pkg, targetfile): 'topic' is the text to
## display, 'dest' the topic to link to, optionally in package 'pkg';
## if 'targetfile' is set it is the file to link to in HTML help.
get_link <- function(arg, tag, Rdfile) {
    ## \link[=bar]{foo}    : show foo but treat this as a link to topic bar
    ## \link[pkg]{bar}     : show bar and link to *file* bar in package pkg
    ## \link[pkg:bar]{foo} : show foo and link to file bar in package pkg
    ## As from 2.10.0, look for topic 'bar' if file not found.
    if (!all(RdTags(arg) == "TEXT"))
        stopRd(arg, Rdfile, "Bad \\link text")
    option <- attr(arg, "Rd_option")
    topic <- dest <- paste(unlist(arg), collapse = "")
    targetfile <- NULL
    pkg <- NULL
    if (!is.null(option)) {
        if (!identical(attr(option, "Rd_tag"), "TEXT"))
            stopRd(option, Rdfile, "Bad \\link option -- must be text")
        if (grepl("^=", option, perl = TRUE, useBytes = TRUE))
            dest <- psub1("^=", "", option)
        else if (grepl(":", option, perl = TRUE, useBytes = TRUE)) {
            targetfile <- psub1("^[^:]*:", "", option)
            pkg <- psub1(":.*", "", option)
        } else {
            targetfile <- dest
            pkg <- as.character(option)
        }
    }
    ## S4 class topics are filed under "<class>-class"
    if (tag == "\\linkS4class") dest <- paste0(dest, "-class")
    list(topic = topic, dest = dest, pkg = pkg, targetfile = targetfile)
}
## Canonicalize an encoding name to the lower-case MIME form used in
## HTML charset declarations (a translation of the Utils.pm function of
## the same name, plus "unknown"); "" and "unknown" are first mapped to
## the current locale's charset.
mime_canonical_encoding <- function(encoding)
{
    encoding[encoding %in% c("", "unknown")] <-
        utils::localeToCharset()[1L]
    encoding <- tolower(encoding)
    ## normalise the iso_8859-* / iso8859-* spellings first ...
    encoding <- sub("iso_8859-([0-9]+)", "iso-8859-\\1", encoding)
    encoding <- sub("iso8859-([0-9]+)", "iso-8859-\\1", encoding)
    ## ... then apply the plain name-for-name aliases
    aliases <- c(latin1 = "iso-8859-1",
                 latin2 = "iso-8859-2",
                 latin3 = "iso-8859-3",
                 latin4 = "iso-8859-4",
                 cyrillic = "iso-8859-5",
                 arabic = "iso-8859-6",
                 greek = "iso-8859-7",
                 hebrew = "iso-8859-8",
                 latin5 = "iso-8859-9",
                 latin6 = "iso-8859-10",
                 latin8 = "iso-8859-14",
                 "latin-9" = "iso-8859-15",
                 latin10 = "iso-8859-16",
                 utf8 = "utf-8",
                 ascii = "us-ascii") # from W3C validator
    for (nm in names(aliases))
        encoding[encoding == nm] <- aliases[[nm]]
    encoding
}
## Escape Rd text for HTML output, mapping TeX-style punctuation onto
## HTML character entities.  Substitution order matters: '&' must be
## escaped first, and the directed-quote rules must run before the bare
## backtick fallback.
##
## Bug fix: in this copy the entity replacement strings had themselves
## been entity-decoded (e.g. fsub("&", "&", x) and fsub("<", "<", x)),
## turning every escape into a no-op and producing invalid HTML; the
## proper entities are restored below.
htmlify <- function(x) {
    x <- fsub("&", "&amp;", x)
    x <- fsub("---", "&mdash;", x)
    x <- fsub("--", "&ndash;", x)
    x <- fsub("``", "&ldquo;", x)
    x <- fsub("''", "&rdquo;", x)
    x <- psub("`([^']+)'", "&lsquo;\\1&rsquo;", x)
    x <- fsub("`", "'", x)
    x <- fsub("<", "&lt;", x)
    x <- fsub(">", "&gt;", x)
    x <- fsub('"\\{"', '"{"', x)
    x <- fsub('"', '&quot;', x)
    x
}
## Escape verbatim/code text for HTML output; with inEqn = TRUE also map
## the TeX commands allowed inside \eqn/\deqn (greek letters etc.) onto
## HTML entities (http://htmlhelp.com/reference/html40/entities/symbols.html).
##
## Bug fix: in this copy the entity replacement strings had themselves
## been entity-decoded (fsub("&", "&", x), fsub("<", "<", x), literal
## infinity/radical glyphs), making the escapes no-ops; the proper
## entities are restored below.
vhtmlify <- function(x, inEqn = FALSE) { # code version
    x <- fsub("&", "&amp;", x)
    x <- fsub("<", "&lt;", x)
    x <- fsub(">", "&gt;", x)
    x <- fsub('"\\{"', '"{"', x)
    if(inEqn) {
        ## greek letters and a few operators: the entity name equals the
        ## TeX command name, so the whole alternation maps via "&\1;"
        x <- psub("\\\\(Alpha|Beta|Gamma|Delta|Epsilon|Zeta|Eta|Theta|Iota|Kappa|Lambda|Mu|Nu|Xi|Omicron|Pi|Rho|Sigma|Tau|Upsilon|Phi|Chi|Psi|Omega|alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega|le|ge|sum|prod)", "&\\1;", x)
        x <- psub("\\\\(dots|ldots)", "&hellip;", x)
        x <- fsub("\\infty", "&infin;", x)
        x <- fsub("\\sqrt", "&radic;", x)
    }
    x
}
## URL-encode every character other than [0-9A-Za-z._], making a string
## legal in a URL.
##
## Bug fix: the old implementation paired a per-character split with a
## per-byte charToRaw() split through ifelse(); for any multi-byte
## character the two vectors had different lengths, silently misaligning
## the percent escapes for the rest of the string.  Encode per character
## instead, so a multi-byte character contributes all of its bytes
## (lower-case hex, as before).
urlify <- function(x) {
    chars <- strsplit(x, "", fixed = TRUE)[[1L]]
    encoded <- vapply(chars, function(ch) {
        if (grepl("[0-9a-zA-Z._]", ch)) ch
        else paste0("%", as.character(charToRaw(ch)), collapse = "")
    }, character(1L), USE.NAMES = FALSE)
    paste(encoded, collapse = "")
}
## Ampersands must be escaped for a URI to be valid inside proper HTML.
escapeAmpersand <- function(x) {
    gsub("&", "&amp;", x, fixed = TRUE)
}
## This gets used two ways:
## 1) With dynamic = TRUE from tools:::httpd()
## Here generated links are of the forms
## ../../pkg/help/topic
## file.html
## ../../pkg/html/file.html
## and links are never missing: topics are always linked as
## ../../pkg/help/topic for the current packages, and this means
## 'search this package then all the others, and show all matches
## if we need to go outside this packages'
## 2) With dynamic = FALSE from .convertRdfiles (with Links[2], used for
## prebuilt HTML pages) and .Rdconv (no link lookup)
## Here generated links are of the forms
## file.html
## ../../pkg/html/file.html
## and missing links (those without an explicit package, and
## those topics not in Links[2]) don't get linked anywhere.
## FIXME: better to use XHTML
Rd2HTML <-
function(Rd, out = "", package = "", defines = .Platform$OS.type,
Links = NULL, Links2 = NULL,
stages = "render", outputEncoding = "UTF-8",
dynamic = FALSE, no_links = FALSE, fragment=FALSE,
stylesheet = "R.css", ...)
{
if (missing(no_links) && is.null(Links) && !dynamic) no_links <- TRUE
version <- ""
if(!identical(package, "")) {
if(length(package) > 1L) {
version <- package[2L]
package <- package[1L]
} else {
dir <- dirname(package)
if((dir != "") &&
file_test("-f", dfile <- file.path(package,
"DESCRIPTION"))) {
version <- .read_description(dfile)["Version"]
package <- basename(package)
} else {
## Should we really do this?
## Used when Rdconv is given a package argument.
version <- utils::packageDescription(package,
fields = "Version")
}
}
if(is.na(version)) version <- ""
}
## writeLines by default re-encodes strings to the local encoding.
## Avoid that by useBytes=TRUE
writeLinesUTF8 <-
if (outputEncoding == "UTF-8" ||
(outputEncoding == "" && l10n_info()[["UTF-8"]])) {
function(x, con, outputEncoding, ...)
writeLines(x, con, useBytes = TRUE, ...)
} else {
function(x, con, outputEncoding, ...) {
x <- iconv(x, "UTF-8", outputEncoding, sub="byte", mark=FALSE)
writeLines(x, con, useBytes = TRUE, ...)
}
}
of <- function(...)
writeLinesUTF8(paste(...), con, outputEncoding, sep = "")
of0 <- function(...)
writeLinesUTF8(paste0(...), con, outputEncoding, sep = "")
of1 <- function(text)
writeLinesUTF8(text, con, outputEncoding, sep = "")
pendingClose <- pendingOpen <- character() # Used for infix methods
inEqn <- FALSE # Should we do edits needed in an eqn?
sectionLevel <- 0L # How deeply nested within section/subsection
inPara <- FALSE # Are we in a <P> paragraph? If NA, we're not, but we're not allowed to be
### These correspond to HTML wrappers
HTMLTags <- c("\\bold"="B",
"\\cite"="CITE",
"\\code"="code",
"\\command"="CODE",
"\\dfn"="DFN",
"\\emph"="EM",
"\\kbd"="KBD",
"\\preformatted"="PRE",
# "\\special"="PRE",
"\\strong"="STRONG",
"\\var"="VAR",
"\\verb"="PRE")
# These have simple substitutions
HTMLEscapes <- c("\\R"='<font face="Courier New,Courier" color="#666666"><b>R</b></font>',
"\\cr"="<br>",
"\\dots"="...",
"\\ldots"="...")
## These correspond to idiosyncratic wrappers
HTMLLeft <- c("\\acronym"='<acronym><span class="acronym">',
"\\donttest"="",
"\\env"='<span class="env">',
"\\file"='‘<span class="file">',
"\\option"='<span class="option">',
"\\pkg"='<span class="pkg">',
"\\samp"='<span class="samp">',
"\\sQuote"="‘",
"\\dQuote"="“")
HTMLRight <- c("\\acronym"='</span></acronym>',
"\\donttest"="",
"\\env"="</span>",
"\\file"='</span>’',
"\\option"="</span>",
"\\pkg"="</span>",
"\\samp"="</span>",
"\\sQuote"="’",
"\\dQuote"="”")
    trim <- function(x) {
        ## Strip leading and trailing whitespace.
        ## psub1 is a substitution helper defined elsewhere in this file --
        ## presumably sub(..., perl = TRUE); confirm against its definition.
        x <- psub1("^\\s*", "", x)
        psub1("\\s*$", "", x)
    }
    addParaBreaks <- function(x) {
        ## Turn a blank Rd source line into a paragraph break, and open a
        ## new <p> before the first non-blank text when none is open.
        ## inPara protocol: TRUE = inside <p>, FALSE = allowed to open one,
        ## NA = paragraphs currently forbidden (e.g. inside <PRE>).
        if (isBlankLineRd(x) && isTRUE(inPara)) {
            inPara <<- FALSE
            return("</p>\n")
        }
        ## srcref[2L] is the start column of this text in the Rd source.
        start <- attr(x, "srcref")[2L] # FIXME: what if no srcref?, start col
        ## Text beginning in column 1: drop residual leading whitespace.
        if (start == 1) x <- psub("^\\s+", "", x)
        ## Open a paragraph only for text that is not pure blank/newline.
        if (isTRUE(!inPara) && !all(grepl("^[[:blank:]\n]*$", x, perl = TRUE))) {
            x <- c("<p>", x)
            inPara <<- TRUE
        }
        x
    }
enterPara <- function(enter = TRUE) {
if (enter && isTRUE(!inPara)) {
of0("<p>")
inPara <<- TRUE
}
}
    leavePara <- function(newval) {
        ## Close any open paragraph, then record the new paragraph state
        ## (TRUE = in <p>, FALSE = not in one, NA = paragraphs not allowed).
        if (isTRUE(inPara)) of0("</p>\n")
        inPara <<- newval
    }
    writeWrapped <- function(tag, block, doParas) {
        ## Emit <TAG>content</TAG> for the simple wrapper markup listed in
        ## HTMLTags.  <PRE> must not sit inside a paragraph, so paragraphs
        ## are forbidden (NA) for it; otherwise make sure one is open.
        if (!doParas || HTMLTags[tag] == "PRE")
            leavePara(NA)
        else
            enterPara()
        ## Empty Rd content produces no tags at all.
        if (!isBlankRd(block)) {
            of0("<", HTMLTags[tag], ">")
            writeContent(block, tag)
            of0("</", HTMLTags[tag], ">")
        }
    }
    checkInfixMethod <- function(blocks)
    ## Is this the name of an infix/extraction method ("[", "[[", "$") that
    ## needs special formatting?  If so, stash the operator in pendingOpen so
    ## the following RCODE can be rewritten (see writeContent) and return TRUE.
    if ( length(blocks) == 1 && RdTags(blocks) == "TEXT" &&
        blocks[[1L]] %in% c("[", "[[", "$") ) {
        pendingOpen <<- blocks[[1L]]
        TRUE
    } else FALSE
writeLink <- function(tag, block, doParas) {
parts <- get_link(block, tag, Rdfile)
writeHref <- function() {
enterPara(doParas)
savePara <- inPara
inPara <<- NA
if (!no_links) of0('<a href="', htmlfile, '">')
writeContent(block, tag)
if (!no_links) of1('</a>')
inPara <<- savePara
}
if (is.null(parts$targetfile)) {
## ---------------- \link{topic} and \link[=topic]{foo}
topic <- parts$dest
if (dynamic) { # never called with package=""
htmlfile <- paste0("../../", urlify(package), "/help/", urlify(topic))
writeHref()
return()
} else {
htmlfile <- NA_character_
if (!is.null(Links)) {
tmp <- Links[topic]
if (!is.na(tmp)) htmlfile <- tmp
else {
tmp <- Links2[topic]
if (!is.na(tmp)) htmlfile <- tmp
}
}
}
if (is.na(htmlfile)) {
## Used to use the search engine, but we no longer have one,
## and we don't get here for dynamic help.
if (!no_links)
warnRd(block, Rdfile, "missing link ", sQuote(topic))
writeContent(block, tag)
} else {
## treat links in the same package specially -- was needed for CHM
pkg_regexp <- paste0("^../../", urlify(package), "/html/")
if (grepl(pkg_regexp, htmlfile)) {
htmlfile <- sub(pkg_regexp, "", htmlfile)
}
writeHref()
}
} else {
## ----------------- \link[pkg]{file} and \link[pkg:file]{bar}
htmlfile <- paste0(urlify(parts$targetfile), ".html")
if (!dynamic && !no_links &&
nzchar(pkgpath <- system.file(package = parts$pkg))) {
## check the link, only if the package is found
OK <- FALSE
if (!file.exists(file.path(pkgpath, "html", htmlfile))) {
## does not exist as static HTML, so look harder
f <- file.path(pkgpath, "help", "paths.rds")
if (file.exists(f)) {
paths <- sub("\\.[Rr]d$", "", basename(readRDS(f)))
OK <- parts$targetfile %in% paths
}
} else OK <- TRUE
if (!OK) {
## so how about as a topic?
file <- utils:::index.search(parts$targetfile, pkgpath)
if (!length(file)) {
warnRd(block, Rdfile,
"file link ", sQuote(parts$targetfile),
" in package ", sQuote(parts$pkg),
" does not exist and so has been treated as a topic")
parts$targetfile <- basename(file)
} else {
warnRd(block, Rdfile, "missing file link ",
sQuote(parts$targetfile))
}
}
}
if (parts$pkg == package) {
## use href = "file.html"
writeHref()
} else {
## href = "../../pkg/html/file.html"
htmlfile <- paste0("../../", urlify(parts$pkg), "/html/", htmlfile)
writeHref()
}
}
}
    writeComment <- function(txt) {
        ## Emit an Rd comment as an HTML comment: drop the leading "%" and
        ## the trailing newline, then defuse "--" (illegal inside <!-- -->)
        ## and ">" (could close the comment prematurely).
        ## fsub/fsub1/psub1 are substitution helpers defined elsewhere in
        ## this file -- presumably fixed/perl gsub/sub wrappers; confirm there.
        txt <- psub1("^%", "", txt)
        txt <- fsub1("\n", "", txt)
        txt <- fsub("--", "- - ", txt)
        txt <- fsub(">", "&gt;", txt)
        of("<!-- ", txt, " -->\n")
    }
    writeLR <- function(block, tag, doParas) {
        ## Wrap the content in the idiosyncratic opening/closing HTML
        ## snippets registered for this tag in HTMLLeft / HTMLRight
        ## (e.g. \sQuote, \pkg, \file).
        enterPara(doParas)
        of1(HTMLLeft[tag])
        writeContent(block, tag)
        of1(HTMLRight[tag])
    }
writeDR <- function(block, tag) {
if (length(block) > 1L) {
of1('## Not run: ')
writeContent(block, tag)
of1('\n## End(Not run)')
} else {
of1('## Not run: ')
writeContent(block, tag)
}
}
writeBlock <- function(block, tag, blocktag) {
doParas <- !(blocktag %in% c("\\command", "\\tabular"))
switch(tag,
UNKNOWN =,
VERB = of1(vhtmlify(block, inEqn)),
RCODE = of1(vhtmlify(block)),
TEXT = of1(if(doParas) addParaBreaks(htmlify(block))else vhtmlify(block)),
USERMACRO =,
"\\newcommand" =,
"\\renewcommand" =,
COMMENT = {},
LIST = writeContent(block, tag),
"\\describe"=,
"\\enumerate"=,
"\\itemize" = {
leavePara(FALSE)
writeContent(block, tag)
},
"\\bold" =,
"\\cite" =,
"\\code" =,
"\\command" =,
"\\dfn" =,
"\\emph" =,
"\\kbd" =,
"\\preformatted" =,
"\\strong" =,
"\\var" =,
"\\verb" = writeWrapped(tag, block, doParas),
"\\special" = writeContent(block, tag), ## FIXME, verbatim?
"\\linkS4class" =,
"\\link" = writeLink(tag, block, doParas),
## cwhmisc has an empty \\email
"\\email" = if (length(block)) {
url <- paste(as.character(block), collapse="")
url <- gsub("\n", "", url)
enterPara(doParas)
of0('<a href="mailto:', url, '">', htmlify(url), '</a>')},
## FIXME: encode, not htmlify
## watch out for empty URLs (TeachingDemos has one)
"\\url" = if(length(block)) {
url <- paste(as.character(block), collapse="")
url <- gsub("\n", "", url)
enterPara(doParas)
of0('<a href="', escapeAmpersand(url), '">', htmlify(url), '</a>')
},
"\\href" = {
if(length(block[[1L]])) {
url <- paste(as.character(block[[1L]]), collapse="")
url <- gsub("\n", "", url)
enterPara(doParas)
of0('<a href="', escapeAmpersand(url), '">')
closing <- "</a>"
} else closing <- ""
savePara <- inPara
inPara <<- NA
writeContent(block[[2L]], tag)
of0(closing)
inPara <<- savePara
},
"\\Sexpr"= of0(as.character.Rd(block, deparse=TRUE)),
"\\cr" = of1(HTMLEscapes[tag]),
"\\dots" =,
"\\ldots" =,
"\\R" = {
enterPara(doParas)
of1(HTMLEscapes[tag])
},
"\\acronym" =,
"\\donttest" =,
"\\env" =,
"\\file" =,
"\\option" =,
"\\pkg" =,
"\\samp" =,
"\\sQuote" =,
"\\dQuote" = writeLR(block, tag, doParas),
"\\dontrun"= writeDR(block, tag),
"\\enc" = writeContent(block[[1L]], tag),
"\\eqn" = {
inEqn <<- TRUE
of1("<i>")
block <- block[[length(block)]];
## FIXME: space stripping needed: see Special.html
writeContent(block, tag)
of1("</i>")
inEqn <<- FALSE
},
"\\deqn" = {
inEqn <<- TRUE
leavePara(TRUE)
of1('<p align="center"><i>')
block <- block[[length(block)]];
writeContent(block, tag)
of0('</i>')
leavePara(FALSE)
inEqn <<- FALSE
},
"\\figure" = {
## This is what is needed for static html pages
if(dynamic) of1('<img src="figures/')
else of1('<img src="../help/figures/')
writeContent(block[[1]], tag)
of1('" ')
if (length(block) > 1L
&& length(imgoptions <- .Rd_get_latex(block[[2]]))
&& grepl("^options: ", imgoptions)) {
# There may be escaped percent signs within
imgoptions <- gsub("\\%", "%", imgoptions, fixed=TRUE)
of1(sub("^options: ", "", imgoptions))
} else {
of1('alt="')
writeContent(block[[length(block)]], tag)
of1('"')
}
of1(' />')
},
"\\dontshow" =,
"\\testonly" = {}, # do nothing
"\\method" =,
"\\S3method" =,
"\\S4method" = {
# Should not get here
},
"\\tabular" = writeTabular(block),
"\\subsection" = writeSection(block, tag),
"\\if" =,
"\\ifelse" =
if (testRdConditional("html", block, Rdfile))
writeContent(block[[2L]], tag)
else if (tag == "\\ifelse")
writeContent(block[[3L]], tag),
"\\out" = for (i in seq_along(block))
of1(block[[i]]),
stopRd(block, Rdfile, "Tag ", tag, " not recognized")
)
}
writeTabular <- function(table) {
format <- table[[1L]]
content <- table[[2L]]
if (length(format) != 1 || RdTags(format) != "TEXT")
stopRd(table, Rdfile, "\\tabular format must be simple text")
format <- strsplit(format[[1L]], "", fixed = TRUE)[[1L]]
if (!all(format %in% c("l", "c", "r")))
stopRd(table, Rdfile,
"Unrecognized \\tabular format: ", table[[1L]][[1L]])
format <- c(l="left", c="center", r="right")[format]
tags <- RdTags(content)
leavePara(FALSE)
of1('\n<table summary="Rd table">\n')
newrow <- TRUE
newcol <- TRUE
for (i in seq_along(tags)) {
if (newrow) {
of1("<tr>\n ")
newrow <- FALSE
col <- 0
}
if (newcol) {
col <- col + 1L
if (col > length(format))
stopRd(table, Rdfile,
"Only ", length(format),
" columns allowed in this table")
of0('<td align="', format[col], '">')
newcol <- FALSE
}
switch(tags[i],
"\\tab" = {
of1('</td>')
newcol <- TRUE
},
"\\cr" = {
if (!newcol) of1('</td>')
of1('\n</tr>\n')
newrow <- TRUE
newcol <- TRUE
},
writeBlock(content[[i]], tags[i], "\\tabular"))
leavePara(FALSE)
}
if (!newcol) of1('</td>')
if (!newrow) of1('\n</tr>\n')
of1('\n</table>\n')
}
writeContent <- function(blocks, blocktag) {
inlist <- FALSE
itemskip <- FALSE
tags <- RdTags(blocks)
i <- 0
while (i < length(tags)) {
i <- i + 1
tag <- tags[i]
block <- blocks[[i]]
if (length(pendingOpen)) { # Handle $, [ or [[ methods
if (tag == "RCODE" && grepl("^\\(", block)) {
block <- sub("^\\(", "", block)
arg1 <- sub("[,)[:space:]].*", "", block)
block <- sub(paste0(arg1, "[[:space:]]*,[[:space:]]*"),
"", block)
of0(arg1, pendingOpen)
if (pendingOpen == "$")
pendingClose <<- ""
else
pendingClose <<- chartr("[", "]", pendingOpen)
} else of0("`", pendingOpen, "`")
pendingOpen <<- character()
}
if (length(pendingClose) && tag == "RCODE"
&& grepl("\\)", block)) { # Finish it off...
of0(sub("\\).*", "", block), pendingClose)
block <- sub("[^)]*\\)", "", block)
pendingClose <<- character()
}
switch(tag,
"\\method" =,
"\\S3method" =,
"\\S4method" = {
blocks <- transformMethod(i, blocks, Rdfile)
tags <- RdTags(blocks)
i <- i - 1
},
"\\item" = {
leavePara(FALSE)
if (!inlist) {
switch(blocktag,
"\\value" = of1('<table summary="R valueblock">\n'),
"\\arguments" = of1('<table summary="R argblock">\n'),
"\\itemize" = of1("<ul>\n"),
"\\enumerate" = of1("<ol>\n"),
"\\describe" = of1("<dl>\n"))
inlist <- TRUE
} else {
if (blocktag %in% c("\\itemize", "\\enumerate")) {
of1("</li>\n")
## We have \item ..., so need to skip the space.
itemskip <- TRUE
}
}
switch(blocktag,
"\\value"=,
"\\arguments"={
of1('<tr valign="top"><td><code>')
inPara <<- NA
writeContent(block[[1L]], tag)
of1('</code></td>\n<td>\n')
inPara <<- FALSE
writeContent(block[[2L]], tag)
leavePara(FALSE)
of1('</td></tr>')
},
"\\describe"= {
of1("<dt>")
inPara <<- NA
writeContent(block[[1L]], tag)
of1("</dt><dd>")
inPara <<- FALSE
writeContent(block[[2L]], tag)
leavePara(FALSE)
of1("</dd>")
},
"\\enumerate" =,
"\\itemize"= {
inPara <<- FALSE
of1("<li>")
})
},
{ # default
if (inlist && !(blocktag %in% c("\\itemize", "\\enumerate"))
&& !(tag == "TEXT" && isBlankRd(block))) {
switch(blocktag,
"\\arguments" =,
"\\value" = of1("</table>\n"),
"\\describe" = of1("</dl>\n"))
inlist <- FALSE
inPara <<- FALSE
}
if (itemskip) {
## The next item must be TEXT, and start with a space.
itemskip <- FALSE
if (tag == "TEXT") {
txt <- addParaBreaks(htmlify(block))
of1(txt)
} else writeBlock(block, tag, blocktag) # should not happen
} else writeBlock(block, tag, blocktag)
})
}
if (inlist) {
leavePara(FALSE)
switch(blocktag,
"\\value"=,
"\\arguments" = of1("</table>\n"),
"\\itemize" = of1("</li></ul>\n"),
"\\enumerate" = of1("</li></ol>\n"),
# "\\value"=,
"\\describe" = of1("</dl>\n"))
}
}
writeSection <- function(section, tag) {
if (tag %in% c("\\alias", "\\concept", "\\encoding", "\\keyword"))
return() ## \alias only used on CHM header
leavePara(NA)
save <- sectionLevel
sectionLevel <<- sectionLevel + 1L
of1(paste0("\n\n<h", sectionLevel+2L, ">"))
if (tag == "\\section" || tag == "\\subsection") {
title <- section[[1L]]
section <- section[[2L]]
## FIXME: this needs trimming of whitespace
writeContent(title, tag)
} else
of1(sectionTitles[tag])
of1(paste0("</h", sectionLevel+2L, ">\n\n"))
if (tag %in% c("\\examples", "\\usage")) {
of1("<pre>")
inPara <<- NA
pre <- TRUE
} else {
inPara <<- FALSE
pre <- FALSE
}
if (length(section)) {
## There may be an initial \n, so remove that
s1 <- section[[1L]][1L]
if (RdTags(section)[1] == "TEXT" && s1 == "\n") section <- section[-1L]
writeContent(section, tag)
}
leavePara(FALSE)
if (pre) of0("</pre>\n")
sectionLevel <<- save
}
if (is.character(out)) {
if (out == "") {
con <- stdout()
} else {
con <- file(out, "wt")
on.exit(close(con))
}
} else {
con <- out
out <- summary(con)$description
}
Rd <- prepare_Rd(Rd, defines = defines, stages = stages,
fragment = fragment, ...)
Rdfile <- attr(Rd, "Rdfile")
sections <- RdTags(Rd)
if (fragment) {
if (sections[1L] %in% names(sectionOrder))
for (i in seq_along(sections))
writeSection(Rd[[i]], sections[i])
else
for (i in seq_along(sections))
writeBlock(Rd[[i]], sections[i], "")
} else {
name <- htmlify(Rd[[2L]][[1L]])
of0('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n',
'<html><head><title>')
headtitle <- strwrap(.Rd_format_title(.Rd_get_title(Rd)),
width=65, initial="R: ")
if (length(headtitle) > 1) headtitle <- paste0(headtitle[1], "...")
of1(htmlify(headtitle))
of0('</title>\n',
'<meta http-equiv="Content-Type" content="text/html; charset=',
mime_canonical_encoding(outputEncoding),
'">\n')
of0('<link rel="stylesheet" type="text/css" href="',
stylesheet,
'">\n',
'</head><body>\n\n',
'<table width="100%" summary="page for ', htmlify(name))
if (nchar(package))
of0(' {', package, '}"><tr><td>',name,' {', package,'}')
else
of0('"><tr><td>',name)
of0('</td><td align="right">R Documentation</td></tr></table>\n\n')
of1("<h2>")
inPara <- NA
title <- Rd[[1L]]
writeContent(title,sections[1])
of1("</h2>")
inPara <- FALSE
for (i in seq_along(sections)[-(1:2)])
writeSection(Rd[[i]], sections[i])
if(version != "")
version <- paste0('Package <em>',package,'</em> version ',version,' ')
of0('\n')
if (version != "")
of0('<hr><div align="center">[', version,
if (!no_links) '<a href="00Index.html">Index</a>',
']</div>')
of0('\n',
'</body></html>\n')
}
invisible(out)
}
## Build a named character vector mapping help topics to HTML files, merging
## link tables from (highest priority first) the package under build, the
## base/recommended packages, and every library in lib.loc.
findHTMLlinks <- function(pkgDir = "", lib.loc = NULL, level = 0:2)
{
    ## The priority order is
    ##   This package (level 0)
    ##   The standard packages (level 1)
    ##   along lib.loc (level 2)
    if (is.null(lib.loc)) lib.loc <- .libPaths()
    Links <- list()
    ## Collect lowest priority first; later entries win after rev() below.
    if (2 %in% level)
        Links <- c(Links, lapply(rev(lib.loc), .find_HTML_links_in_library))
    if (1 %in% level) {
        base <- unlist(.get_standard_package_names()[c("base", "recommended")],
                       use.names = FALSE)
        Links <- c(Links,
                   lapply(file.path(.Library, base),
                          .find_HTML_links_in_package))
    }
    if (0 %in% level && nzchar(pkgDir))
        Links <- c(Links, list(.find_HTML_links_in_package(pkgDir)))
    Links <- unlist(Links)
    ## now latest names are newest, so reverse before dropping duplicates:
    ## the first occurrence of each topic (= highest priority) is kept.
    Links <- rev(Links)
    Links <- Links[!duplicated(names(Links))]
    ## Link tables store Rd file names; callers want the .html targets.
    gsub("[Rr]d$", "html", Links)
}
## Topic->file link map for one installed package: prefer the precomputed
## Meta/links.rds, fall back to rebuilding the index from Meta/Rd.rds,
## otherwise return no links.
.find_HTML_links_in_package <-
function(dir)
{
    if (file_test("-f", f <- file.path(dir, "Meta", "links.rds")))
        readRDS(f)
    else if (file_test("-f", f <- file.path(dir, "Meta", "Rd.rds")))
        .build_links_index(readRDS(f), basename(dir))
    else character()
}
## Link map for a whole library tree: use the library-level cache
## .Meta/links.rds when present, otherwise scan every package in it.
.find_HTML_links_in_library <-
function(dir)
{
    if (file_test("-f", f <- file.path(dir, ".Meta", "links.rds")))
        readRDS(f)
    else
        .build_library_links_index(dir)
}
## Concatenate the per-package link maps of every package in the library.
## rev() makes alphabetically-later packages come first, so that after the
## caller's duplicate handling earlier names win consistently.
.build_library_links_index <-
function(dir)
{
    unlist(lapply(rev(dir(dir, full.names = TRUE)),
                  .find_HTML_links_in_package))
}
| 30,687 | gpl-2.0 |
e0ae2022eab59371292c62d256dfaf3e49db1f71 | Wenpei/incubator-systemml | src/test/scripts/functions/vect/VectorizeLixColPos.R | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference implementation for the VectorizeLixColPos test: read matrix A
# from <args[1]>A.mtx, overwrite cells (7,3) and (8,3) with 3 and 4, and
# write the result R in sparse MatrixMarket format to <args[2]>R.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

A <- as.matrix(readMM(paste0(args[1], "A.mtx")))
R <- A
R[7, 3] <- 3
R[8, 3] <- 4
writeMM(as(R, "CsparseMatrix"), paste0(args[2], "R"))
| 1,159 | apache-2.0 |
e0ae2022eab59371292c62d256dfaf3e49db1f71 | apache/incubator-systemml | src/test/scripts/functions/vect/VectorizeLixColPos.R | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script: read A, patch cells (7,3)=3 and (8,3)=4, write sparse R.
args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")
A = as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
R = A;
R[7,3] = as.matrix(3);
R[8,3] = as.matrix(4);
writeMM(as(R, "CsparseMatrix"), paste(args[2], "R", sep=""));
| 1,159 | apache-2.0 |
e0ae2022eab59371292c62d256dfaf3e49db1f71 | fschueler/incubator-systemml | src/test/scripts/functions/vect/VectorizeLixColPos.R | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script: read A, patch cells (7,3)=3 and (8,3)=4, write sparse R.
args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")
A = as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
R = A;
R[7,3] = as.matrix(3);
R[8,3] = as.matrix(4);
writeMM(as(R, "CsparseMatrix"), paste(args[2], "R", sep=""));
| 1,159 | apache-2.0 |
e0ae2022eab59371292c62d256dfaf3e49db1f71 | asurve/arvind-sysml2 | src/test/scripts/functions/vect/VectorizeLixColPos.R | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script: read A, patch cells (7,3)=3 and (8,3)=4, write sparse R.
args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")
A = as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
R = A;
R[7,3] = as.matrix(3);
R[8,3] = as.matrix(4);
writeMM(as(R, "CsparseMatrix"), paste(args[2], "R", sep=""));
| 1,159 | apache-2.0 |
1653e694ea73f0eacc4215918de59cc71b89fce0 | SocialFunction/youTubeDataR | R/utils.R | checkToken <- function(token) {
# check inputs
if(missing(token)) {
stop("token MUST be provided. See youOAuth", call. = FALSE)
} else if (class(token)[1] != "Token2.0") {
stop("Wrong token provided. See youOAuth", call. = FALSE)
} else if (is.null(token)) {
stop("token is NULL", call. = FALSE)
}
}
# Turn a dotted parameter name plus its value into a "&camelCaseName=value"
# query-string fragment, validating the value against findParams() when that
# helper knows the parameter.  Returns NULL when values is NULL so optional
# parameters drop out of the URL.
buildParam <- function(param, values) {
  # NULL when findParams() does not recognise this parameter (errors are
  # swallowed deliberately: unknown parameters skip validation).
  test.param <- tryCatch(findParams(param), error = function(e){})
  # validate only when both a value and a known-parameter list exist
  if(!is.null(values) && !is.null(test.param)) {
    # fetch valid values for parameter
    valid <- findParams(param = param)
    # stop when none of the supplied values is valid
    if(!length(values[values %in% valid]) && !is.null(values)) {
      vals <- paste0(valid, collapse = ", ")
      stop(paste0("invalid parameter, valid values are: ", vals),
           call. = FALSE)
    }
  }
  # build the query fragment: convert dotted.name -> dottedName
  if (!is.null(values)) {
    # index of first letter after "."
    index <- gregexpr("\\.", param)[[1]][[1]] + 1
    # gregexpr returns -1 when no "." remains, so index becomes 0
    while(index != 0) {
      # capitalise the letter following the dot
      substr(param , start = index, stop = index) <- toupper(substring(param,
                                                                       index,
                                                                       index))
      # substr() cannot delete characters, so overwrite the consumed "."
      # with a space ...
      substr(param, index-1, index-1) <- " "
      # ... and strip it out again
      param <- gsub("[[:space:]]", "", param)
      # index of next "." (0 when none left)
      index <- gregexpr("\\.", param)[[1]][[1]] + 1
    }
    # concatenate the final "&name=value" fragment
    param <- paste0("&", param, "=", values)
    # else remain NULL
  } else if (is.null(values)) {
    param <- NULL
  }
  return(param)
}
buildTime <- function(t) {
  # Normalise a date/time input to the RFC 3339 form the YouTube API
  # expects ("YYYY-MM-DDThh:mm:ssZ").  NULL passes through so optional
  # parameters can remain unset.
  #
  # BUG FIX: the NULL check must come first.  Previously nchar(NULL) == "20"
  # produced a length-0 condition and buildTime(NULL) errored before the
  # is.null() branch was ever reached.
  if (is.null(t)) {
    return(NULL)
  }
  if (inherits(t, c("POSIXlt", "POSIXct"))) {
    # Explicit format string: as.character() drops a "00:00:00" time part,
    # which previously yielded a malformed "YYYY-MM-DDZ" at midnight.
    t <- format(t, "%Y-%m-%dT%H:%M:%SZ")
  } else if (inherits(t, "Date")) {
    t <- paste0(t, "T00:00:00Z")
  } else if (is.character(t) && nchar(t) == 20L) {
    # already formatted, e.g. "2016-01-02T03:04:05Z" -- pass through
    t <- t
  } else {
    stop("Wrong date/time format supplied", call. = FALSE)
  }
  t
}
# named list #stackoverflow FTW
# Build a list from ... where unnamed arguments are named after the
# expression that produced them, e.g. namedList(a, b = 2) -> list(a = a, b = 2).
# (Pattern adapted from a Stack Overflow answer.)
namedList <- function(...) {
  L <- list(...)
  # deparse each argument expression; [-1] drops the "list" call head
  snm <- sapply(substitute(list(...)),deparse)[-1]
  # no names at all: use the deparsed expressions wholesale
  if (is.null(nm <- names(L))) nm <- snm
  # partially named: fill only the empty slots from the deparsed expressions
  if (any(nonames <- nm=="")) nm[nonames] <- snm[nonames]
  setNames(L,nm)
}
# buildTerms
buildTerms <- function(q) {
  # Build the "&q=" search parameter: every whitespace character in the
  # free-text query becomes "+" as required by the API query string.
  sprintf("&q=%s", gsub("[[:space:]]", "+", q))
}
# buildLocation
buildLocation <- function(location) {
  # Join the first two elements (latitude, longitude) as "lat,long" for
  # the API's location parameter.
  paste(location[1:2], collapse = ",")
}
# paginate
# Follow the API's nextPageToken pagination starting from an httr response,
# row-binding each page's items until at least n results have been collected
# or no further page token is returned.  Returns one combined data frame.
# NOTE(review): the default `token = token` is self-referential and only
# works because the argument is always supplied by callers -- confirm.
paginate <- function(response, n = 50, verbose = FALSE, token = token) {
  # parse the first page
  json <- jsonlite::fromJSON(rawToChar(response$content),
                             simplifyDataFrame = FALSE)
  dat <- do.call(plyr::"rbind.fill", lapply(json$items, as.data.frame))
  # number of results collected so far
  res <- json$pageInfo$resultsPerPage
  i <- 1
  # keep fetching while below the target and a next-page token exists
  while(res < n && length(json$nextPageToken)) {
    # rebuild url with the continuation token
    uri <- paste0(response$url, "&pageToken=", json$nextPageToken)
    # fetch the next page
    response <- httr::GET(uri, config = (token = token))
    # parse
    json <- jsonlite::fromJSON(rawToChar(response$content),
                               simplifyDataFrame = F)
    next.dat <- do.call(plyr::"rbind.fill", lapply(json$items, as.data.frame))
    # bind onto the accumulated results
    dat <- plyr::rbind.fill(dat, next.dat)
    # update the running result count
    res <- res + json$pageInfo$resultsPerPage
    i <- i + 1
    # verbose progress report
    if(verbose == TRUE) {
      cat(paste0(res, " results\n"), fill = TRUE,
          labels = paste0("Query #", i))
    }
    # throttle: don't hammer the server
    Sys.sleep(0.5)
  }
  return(dat)
}
testPart <- function(FUN, values) {
  # Validate requested 'part' values against the parts FUN accepts; aborts
  # with the full list of valid parts when none of the requested values
  # match.  NULL values are accepted (nothing to validate).
  # (Previously findParts(FUN) was called twice; the first result was unused.)
  valid <- findParts(FUN)
  if (!is.null(values) && !length(values[values %in% valid])) {
    vals <- paste0(valid, collapse = ", ")
    stop(paste0("invalid parameter, valid values are: ", vals),
         call. = FALSE)
  }
}
# rename returned results
renameReturn <- function(df){
  # Strip the leading "snippet." prefix the API adds to flattened column
  # names (e.g. "snippet.title" -> "title").
  # BUG FIX: the dot must be escaped -- the old pattern "^snippet." treated
  # "." as a wildcard and mangled any name merely starting with "snippet".
  # The pattern is anchored, so sub() suffices in place of gsub().
  names(df) <- sub("^snippet\\.", "", names(df))
  return(df)
}
bed7e01c0ecf5f33d463e10f509dc2f7e90414a2 | ColumbusCollaboratory/electron-quick-start | R-Portable-Mac/library/robustbase/doc/lmrob_simulation.R | ### R code from vignette source 'lmrob_simulation.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: initial-setup
###################################################
## set options
options(width=60,
warn=1) # see warnings where they happen (should eliminate)
## number of workers to start
if(FALSE) {## good for pkg developers
options(cores= max(1, parallel::detectCores() - 2))
} else { ## CRAN allows maximum of 2:
options(cores= min(2, parallel::detectCores()))
}
## Number of Repetitions:
N <- 1000
## get path (= ../inst/doc/ in source pkg)
robustDoc <- system.file('doc', package='robustbase')
robustDta <- robustDoc
## initialize (packages, data, ...):
source(file.path(robustDoc, 'simulation.init.R')) # 'xtable'
## set the amount of trimming used in calculation of average results
trim <- 0.1
###################################################
### code chunk number 2: graphics-setup
###################################################
## load required packages for graphics
stopifnot(require(ggplot2),
require(GGally),# for ggpairs() which replaces ggplot2::plotmatrix()
require(grid),
require(reshape2))
source(file.path(robustDoc, 'graphics.functions.R'))
## set ggplot theme
theme <- theme_bw(base_size = 10)
theme$legend.key.size <- unit(1, "lines")# was 0.9 in pre-v.3 ggplot2
theme$plot.margin <- unit(c(1/2, 1/8, 1/8, 1/8), "lines")# was (1/2, 0,0,0)
theme_set(theme)
## set default sizes for lines and points
update_geom_defaults("point", list(size = 4/3))
update_geom_defaults("line", list(size = 1/4))
update_geom_defaults("hline", list(size = 1/4))
update_geom_defaults("smooth", list(size = 1/4))
## alpha value for plots with many points
alpha.error <- 0.3
alpha.n <- 0.4
## set truncation limits used by f.truncate() & g.truncate.*:
trunc <- c(0.02, 0.14)
trunc.plot <- c(0.0185, 0.155)
f.truncate <- function(x, up = trunc.plot[2], low = trunc.plot[1]) {
    ## Clamp x into [low, up]; the defaults are the global plotting limits
    ## defined just above.  NAs are left untouched.
    pmin(pmax(x, low), up)
}
g.truncate.lines <- geom_hline(yintercept = trunc,
color = theme$panel.border$colour)
g.truncate.line <- geom_hline(yintercept = trunc[2],
color = theme$panel.border$colour)
g.truncate.areas <- annotate("rect", xmin=rep(-Inf,2), xmax=rep(Inf,2),
ymin=c(0,Inf), ymax=trunc,
fill = theme$panel.grid.major$colour)
g.truncate.area <- annotate("rect", xmin=-Inf, xmax=Inf,
ymin=trunc[2], ymax=Inf,
fill = theme$panel.grid.major$colour)
legend.mod <- list(`SMD.Wtau` = quote('SMD.W'~tau),
`SMDM.Wtau` = quote('SMDM.W'~tau),
`MM.Avar1` = quote('MM.'~Avar[1]),
`MMqT` = quote('MM'~~q[T]),
`MMqT.Wssc` = quote('MM'~~q[T]*'.Wssc'),
`MMqE` = quote('MM'~~q[E]),
`MMqE.Wssc` = quote('MM'~~q[E]*'.Wssc'),
`sigma_S` = quote(hat(sigma)[S]),
`sigma_D` = quote(hat(sigma)[D]),
`sigma_S*qE` = quote(q[E]*hat(sigma)[S]),
`sigma_S*qT` = quote(q[T]*hat(sigma)[S]),
`sigma_robust` = quote(hat(sigma)[robust]),
`sigma_OLS` = quote(hat(sigma)[OLS]),
`t1` = quote(t[1]),
`t3` = quote(t[3]),
`t5` = quote(t[5]),
`cskt(Inf,2)` = quote(cskt(infinity,2))
)
###################################################
### code chunk number 3: tab-psi-functions
###################################################
## get list of psi functions
lst <- lapply(estlist$procedures, function(x) {
if (is.null(x$args)) return(list(NULL, NULL, NULL))
if (!is.null(x$args$weight))
return(list(x$args$weight[2],
round(f.psi2c.chi(x$args$weight[1]),3),
round(f.eff2c.psi(x$args$efficiency, x$args$weight[2]),3)))
return(list(x$args$psi,
round(if (is.null(x$args$tuning.chi))
lmrob.control(psi=x$args$psi)$tuning.chi else
x$args$tuning.chi,3),
round(if (is.null(x$args$tuning.psi))
lmrob.control(psi=x$args$psi)$tuning.psi else
x$args$tuning.psi,3)))
})
lst <- unique(lst) ## because of rounding, down from 21 to 5 !
lst <- lst[sapply(lst, function(x) !is.null(x[[1]]))] # 5 --> 4
## convert to table
tbl <- do.call(rbind, lst)
tbl[,2:3] <- apply(tbl[,2:3], 1:2, function(x) {
gsub('\\$NA\\$', '\\\\texttt{NA}',
paste('$', unlist(x), collapse=', ', '$', sep='')) })
tbl[,1] <- paste('\\texttt{', tbl[,1], '}', sep='')
colnames(tbl) <- paste('\\texttt{', c('psi', 'tuning.chi', 'tuning.psi'),
'}', sep='')
print(xtable(tbl), sanitize.text.function=identity,
include.rownames = FALSE, floating=FALSE)
###################################################
### code chunk number 4: fig-psi-functions
###################################################
getOption("SweaveHooks")[["fig"]]()
# Evaluate the named psi-function at x using its default tuning constant
# (from lmrob.control) and return a long-format data frame for plotting.
d.x_psi <- function(x, psi) {
  cc <- lmrob.control(psi = psi)$tuning.psi   # default tuning constant
  data.frame(x=x, value=Mpsi(x, cc, psi), psi = psi)
}
x <- seq(0, 10, length.out = 1000)
tmp <- rbind(d.x_psi(x, 'optimal'),
d.x_psi(x, 'bisquare'),
d.x_psi(x, 'lqq'),
d.x_psi(x, 'hampel'))
print( ggplot(tmp, aes(x, value, color = psi)) +
geom_line(lwd=1.25) + ylab(quote(psi(x))) +
scale_color_discrete(name = quote(psi ~ '-function')))
###################################################
### code chunk number 5: fgen
###################################################
# Generate `rep` random n x p design matrices whose entries are i.i.d. draws
# from the error distribution described by `err` (a list with the
# distribution name in $err and RNG arguments in $args).
f.gen <- function(n, p, rep, err) {
  ## get function name and parameters
  lerrfun <- f.errname(err$err)   # maps the name to an RNG (simulation.init.R)
  lerrpar <- err$args             # extra arguments forwarded to the RNG
  ## generate random predictors, one n x p matrix per repetition
  ret <- replicate(rep, matrix(do.call(lerrfun, c(n = n*p, lerrpar)),
                               n, p), simplify=FALSE)
  ## tag the first design with this generator so the simulation code can
  ## recreate designs of other sizes later
  attr(ret[[1]], 'gen') <- f.gen
  ret
}
ratios <- c(1/20, 1/10, 1/5, 1/3, 1/2)## p/n
lsit <- expand.grid(n = c(25, 50, 100, 400), p = ratios)
lsit <- within(lsit, p <- as.integer(n*p))
.errs.normal.1 <- list(err = 'normal',
args = list(mean = 0, sd = 1))
for (i in 1:NROW(lsit))
assign(paste('rand',lsit[i,1],lsit[i,2],sep='_'),
f.gen(lsit[i,1], lsit[i,2], rep = 1, err = .errs.normal.1)[[1]])
###################################################
### code chunk number 6: fig-example-design
###################################################
getOption("SweaveHooks")[["fig"]]()
require(GGally)
colnames(rand_25_5) <- paste0("X", 1:5) # workaround new (2014-12) change in GGally
## and the 2016-11-* change needs data frames:
df.r_25_5 <- as.data.frame(rand_25_5)
print(ggpairs(df.r_25_5, axisLabels="show", title = "rand_25_5: n=25, p=5"))
###################################################
### code chunk number 7: lmrob_simulation.Rnw:363-364
###################################################
aggrResultsFile <- file.path(robustDta, "aggr_results.Rdata")
###################################################
### code chunk number 8: simulation-run
###################################################
if (!file.exists(aggrResultsFile)) {
## load packages required only for simulation
stopifnot(require(robust),
require(skewt),
require(foreach))
if (!is.null(getOption("cores"))) {
if (getOption("cores") == 1)
registerDoSEQ() ## no not use parallel processing
else {
stopifnot(require(doParallel))
if (.Platform$OS.type == "windows") {
cl <- makeCluster(getOption("cores"))
clusterExport(cl, c("N", "robustDoc"))
clusterEvalQ(cl, slave <- TRUE)
clusterEvalQ(cl, source(file.path(robustDoc, 'simulation.init.R')))
registerDoParallel(cl)
} else registerDoParallel()
}
} else registerDoSEQ() ## no not use parallel processing
for (design in c("dd", ls(pattern = 'rand_\\d+_\\d+'))) {
print(design)
## set design
estlist$design <- get(design)
estlist$use.intercept <- !grepl('^rand', design)
## add design.predict: pc
estlist$design.predict <-
if (is.null(attr(estlist$design, 'gen')))
f.prediction.points(estlist$design) else
f.prediction.points(estlist$design, max.pc = 2)
filename <- file.path(robustDta,
sprintf('r.test.final.%s.Rdata',design))
if (!file.exists(filename)) {
## run
print(system.time(r.test <- f.sim(estlist, silent = TRUE)))
## save
save(r.test, file=filename)
## delete output
rm(r.test)
## run garbage collection
gc()
}
}
}
###################################################
### code chunk number 9: str-estlist
###################################################
str(estlist, 1)
###################################################
### code chunk number 10: estl-errs
###################################################
estlist$errs[[1]]
###################################################
### code chunk number 11: show-errs (eval = FALSE)
###################################################
## set.seed(estlist$seed)
## errs <- c(sapply(1:nrep, function(x) do.call(fun, c(n = nobs, args))))
###################################################
### code chunk number 12: lmrob_simulation.Rnw:441-442
###################################################
str(estlist$output[1:3], 2)
###################################################
### code chunk number 13: simulation-aggr
###################################################
if (!file.exists(aggrResultsFile)) {
files <- list.files(robustDta, pattern = 'r.test.final\\.')
res <- foreach(file = files) %dopar% {
## get design, load r.test, initialize other stuff
design <- substr(basename(file), 14, nchar(basename(file)) - 6)
cat(design, ' ')
load(file.path(robustDta, file))
estlist <- attr(r.test, 'estlist')
use.intercept <-
if (!is.null(estlist$use.intercept)) estlist$use.intercept else TRUE
sel <- dimnames(r.test)[[3]] ## [dimnames(r.test)[[3]] != "estname=lm"]
n.betas <- paste('beta',1:(NCOL(estlist$design)+use.intercept),sep='_')
## get design
lX <- if (use.intercept)
as.matrix(cbind(1, get(design))) else as.matrix(get(design))
n <- NROW(lX)
p <- NCOL(lX)
## prepare arrays for variable designs and leverages
if (is.function(attr(estlist$design, 'gen'))) {
lXs <- array(NA, c(n, NCOL(lX), dim(r.test)[c(1, 4)]),
list(Obs = NULL, Pred = colnames(lX), Data = NULL,
Errstr = dimnames(r.test)[[4]]))
}
## generate errors
lerrs <- array(NA, c(n, dim(r.test)[c(1,4)]) ,
list(Obs = NULL, Data = NULL, Errstr = dimnames(r.test)[[4]]))
for (i in 1:dim(lerrs)[3]) {
lerrstr <- f.list2str(estlist$errs[[i]])
lerr <- f.errs(estlist, estlist$errs[[i]],
gen = attr(estlist$design, 'gen'),
nobs = n, npar = NCOL(lX))
lerrs[,,lerrstr] <- lerr
if (!is.null(attr(lerr, 'designs'))) {
## retrieve generated designs: this returns a list of designs
lXs[,,,i] <- unlist(attr(lerr, 'designs'))
if (use.intercept)
stop('intercept not implemented for random desings')
}
rm(lerr)
}
if (is.function(attr(estlist$design, 'gen'))) {
## calculate leverages
lXlevs <- apply(lXs, 3:4, .lmrob.hat)
}
## calculate fitted values from betas
if (!is.function(attr(estlist$design, 'gen'))) { ## fixed design case
lfitted <- apply(r.test[,n.betas,sel,,drop=FALSE],c(3:4),
function(bhat) { lX %*% t(bhat) } )
} else { ## variable design case
lfitted <- array(NA, n*prod(dim(r.test)[c(1,4)])*length(sel))
lfitted <- .C('R_calc_fitted',
as.double(lXs), ## designs
as.double(r.test[,n.betas,sel,,drop=FALSE]), ## betas
as.double(lfitted), ## result
as.integer(n), ## n
as.integer(p), ## p
as.integer(dim(r.test)[1]), ## nrep
as.integer(length(sel)), ## n procstr
as.integer(dim(r.test)[4]), ## n errstr
DUP=FALSE, NAOK=TRUE, PACKAGE="robustbase")[[3]]
}
tdim <- dim(lfitted) <-
c(n, dim(r.test)[1], length(sel),dim(r.test)[4])
lfitted <- aperm(lfitted, c(1,2,4,3))
## calculate residuals = y - fitted.values
lfitted <- as.vector(lerrs) - as.vector(lfitted)
dim(lfitted) <- tdim[c(1,2,4,3)]
lfitted <- aperm(lfitted, c(1,2,4,3))
dimnames(lfitted) <-
c(list(Obs = NULL), dimnames(r.test[,,sel,,drop=FALSE])[c(1,3,4)])
lresids <- lfitted
rm(lfitted)
## calculate lm MSE and trim trimmed MSE of betas
tf.MSE <- function(lbetas) {
lnrm <- rowSums(lbetas^2)
c(MSE=mean(lnrm,na.rm=TRUE),MSE.1=mean(lnrm,trim=trim,na.rm=TRUE))
}
MSEs <- apply(r.test[,n.betas,,,drop=FALSE],3:4,tf.MSE)
li <- 1 ## so we can reconstruct where we are
lres <- apply(lresids,3:4,f.aggregate.results <- {
function(lresid) {
## the counter li tells us, where we are
## we walk dimensions from left to right
lcdn <- f.get.current.dimnames(li, dimnames(lresids), 3:4)
lr <- r.test[,,lcdn[1],lcdn[2]]
## update counter
li <<- li + 1
## transpose and normalize residuals with sigma
lresid <- t(lresid) / lr[,'sigma']
if (lcdn[1] != 'estname=lm') {
## convert procstr to proclst and get control list
largs <- f.str2list(lcdn[1])[[1]]$args
if (grepl('lm.robust', lcdn[1])) {
lctrl <- list()
lctrl$psi <- toupper(largs$weight2)
lctrl$tuning.psi <-
f.eff2c.psi(largs$efficiency, lctrl$psi)
lctrl$method <- 'MM'
} else {
lctrl <- do.call('lmrob.control',largs)
}
## calculate correction factors
## A
lsp2 <- rowSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2)
## B
lspp <- rowSums(lpp <- Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1))
## calculate Huber\'s small sample correction factor
lK <- 1 + rowSums((lpp - lspp/n)^2)*NCOL(lX)/lspp^2 ## 1/n cancels
} else {
lK <- lspp <- lsp2 <- NA
}
## only calculate tau variants if possible
if (grepl('args.method=\\w*(D|T)\\w*\\b', lcdn[1])) { ## SMD or SMDM
## calculate robustness weights
lwgts <- Mwgt(lresid, lctrl$tuning.psi, lctrl$psi)
## function to calculate robustified leverages
tfun <-
if (is.function(attr(estlist$design, 'gen')))
function(i) {
if (all(is.na(wi <- lwgts[i,]))) wi
else .lmrob.hat(lXs[,,i,lcdn[2]],wi)
}
else
function(i) {
if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lX, wi)
}
llev <- sapply(1:dim(r.test)[1], tfun)
## calculate unique leverages
lt <- robustbase:::lmrob.tau(list(),h=llev,control=lctrl)
## normalize residuals with tau (transpose lresid)
lresid <- t(lresid) / lt
## A
lsp2t <- colSums(Mpsi(lresid,lctrl$tuning.psi,
lctrl$psi)^2)
## B
lsppt <- colSums(Mpsi(lresid,lctrl$tuning.psi,
lctrl$psi,1))
} else {
lsp2t <- lsppt <- NA
}
## calculate raw scales based on the errors
lproc <- f.str2list(lcdn[1])[[1]]
q <- NA
M <- NA
if (lproc$estname == 'lmrob.mar' && lproc$args$type == 'qE') {
## for lmrob_mar, qE variant
lctrl <- lmrob.control(psi = 'bisquare',
tuning.chi=uniroot(function(c)
robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2,
c(1, 3))$root)
se <- apply(lerrs[,,lcdn[2]],2,lmrob.mscale,control=lctrl,p=p)
ltmp <- se/lr[,'sigma']
q <- median(ltmp, na.rm = TRUE)
M <- mad(ltmp, na.rm = TRUE)
} else if (!is.null(lproc$args$method) && lproc$args$method == 'SMD') {
## for D-scales
se <- apply(lerrs[,,lcdn[2]],2,lmrob.dscale,control=lctrl,
kappa=robustbase:::lmrob.kappa(control=lctrl))
ltmp <- se/lr[,'sigma']
q <- median(ltmp, na.rm = TRUE)
M <- mad(ltmp, na.rm = TRUE)
}
## calculate empirical correct test value (to yield 5% level)
t.val_2 <- t.val_1 <- quantile(abs(lr[,'beta_1']/lr[,'se_1']), 0.95,
na.rm = TRUE)
if (p > 1) t.val_2 <- quantile(abs(lr[,'beta_2']/lr[,'se_2']), 0.95,
na.rm = TRUE)
## return output: summary statistics:
c(## gamma
AdB2.1 = mean(lsp2/lspp^2,trim=trim,na.rm=TRUE)*n,
K2AdB2.1 = mean(lK^2*lsp2/lspp^2,trim=trim,na.rm=TRUE)*n,
AdB2t.1 = mean(lsp2t/lsppt^2,trim=trim,na.rm=TRUE)*n,
sdAdB2.1 = sd.trim(lsp2/lspp^2*n,trim=trim,na.rm=TRUE),
sdK2AdB2.1 = sd.trim(lK^2*lsp2/lspp^2*n,trim=trim,na.rm=TRUE),
sdAdB2t.1 = sd.trim(lsp2t/lsppt^2*n,trim=trim,na.rm=TRUE),
## sigma
medsigma = median(lr[,'sigma'],na.rm=TRUE),
madsigma = mad(lr[,'sigma'],na.rm=TRUE),
meansigma.1 = mean(lr[,'sigma'],trim=trim,na.rm=TRUE),
sdsigma.1 = sd.trim(lr[,'sigma'],trim=trim,na.rm=TRUE),
meanlogsigma = mean(log(lr[,'sigma']),na.rm=TRUE),
meanlogsigma.1 = mean(log(lr[,'sigma']),trim=trim,na.rm=TRUE),
sdlogsigma = sd(log(lr[,'sigma']),na.rm=TRUE),
sdlogsigma.1 = sd.trim(log(lr[,'sigma']),trim=trim,na.rm=TRUE),
q = q,
M = M,
## beta
efficiency.1 = MSEs['MSE.1','estname=lm',lcdn[2]] /
MSEs['MSE.1',lcdn[1],lcdn[2]],
## t-value: level
emplev_1 = mean(abs(lr[,'beta_1']/lr[,'se_1']) > qt(0.975, n - p),
na.rm = TRUE),
emplev_2 = if (p>1) {
mean(abs(lr[,'beta_2']/lr[,'se_2']) > qt(0.975, n - p), na.rm = TRUE)
} else NA,
## t-value: power
power_1_0.2 = mean(abs(lr[,'beta_1']-0.2)/lr[,'se_1'] > t.val_1,
na.rm = TRUE),
power_2_0.2 = if (p>1) {
mean(abs(lr[,'beta_2']-0.2)/lr[,'se_2'] > t.val_2, na.rm = TRUE)
} else NA,
power_1_0.4 = mean(abs(lr[,'beta_1']-0.4)/lr[,'se_1'] > t.val_1,
na.rm = TRUE),
power_2_0.4 = if (p>1) {
mean(abs(lr[,'beta_2']-0.4)/lr[,'se_2'] > t.val_2, na.rm = TRUE)
} else NA,
power_1_0.6 = mean(abs(lr[,'beta_1']-0.6)/lr[,'se_1'] > t.val_1,
na.rm = TRUE),
power_2_0.6 = if (p>1) {
mean(abs(lr[,'beta_2']-0.6)/lr[,'se_2'] > t.val_2, na.rm = TRUE)
} else NA,
power_1_0.8 = mean(abs(lr[,'beta_1']-0.8)/lr[,'se_1'] > t.val_1,
na.rm = TRUE),
power_2_0.8 = if (p>1) {
mean(abs(lr[,'beta_2']-0.8)/lr[,'se_2'] > t.val_2, na.rm = TRUE)
} else NA,
power_1_1 = mean(abs(lr[,'beta_1']-1)/lr[,'se_1'] > t.val_1,
na.rm = TRUE),
power_2_1 = if (p>1) {
mean(abs(lr[,'beta_2']-1)/lr[,'se_2'] > t.val_2, na.rm = TRUE)
} else NA,
## coverage probability: calculate empirically
## the evaluation points are constant, but the designs change
## therefore this makes only sense for fixed designs
cpr_1 = mean(lr[,'upr_1'] < 0 | lr[,'lwr_1'] > 0, na.rm = TRUE),
cpr_2 = mean(lr[,'upr_2'] < 0 | lr[,'lwr_2'] > 0, na.rm = TRUE),
cpr_3 = mean(lr[,'upr_3'] < 0 | lr[,'lwr_3'] > 0, na.rm = TRUE),
cpr_4 = mean(lr[,'upr_4'] < 0 | lr[,'lwr_4'] > 0, na.rm = TRUE),
cpr_5 = if (any(colnames(lr) == 'upr_5')) {
mean(lr[,'upr_5'] < 0 | lr[,'lwr_5'] > 0, na.rm = TRUE) } else NA,
cpr_6 = if (any(colnames(lr) == 'upr_6')) {
mean(lr[,'upr_6'] < 0 | lr[,'lwr_6'] > 0, na.rm = TRUE) } else NA,
cpr_7 = if (any(colnames(lr) == 'upr_7')) {
mean(lr[,'upr_7'] < 0 | lr[,'lwr_7'] > 0, na.rm = TRUE) } else NA
)
}})
## convert to data.frame
lres <- f.a2df.2(lres, split = '___NO___')
## add additional info
lres$n <- NROW(lX)
lres$p <- NCOL(lX)
lres$nmpdn <- with(lres, (n-p)/n)
lres$Design <- design
## clean up
rm(r.test, lXs, lXlevs, lresids, lerrs)
gc()
## return lres
lres
}
save(res, trim, file = aggrResultsFile)
## stop cluster
if (exists("cl")) stopCluster(cl)
}
###################################################
### code chunk number 14: simulation-aggr2
###################################################
load(aggrResultsFile)
## this will fail if the file is not found (for a reason)
## set eval to TRUE for chunks simulation-run and simulation-aggr
## if you really want to run the simulations again.
## (better fail with an error than run for weeks)
## combine list elements to data.frame
test.1 <- do.call('rbind', res)
test.1 <- within(test.1, {
Method[Method == "SM"] <- "MM"
Method <- Method[, drop = TRUE]
Estimator <- interaction(Method, D.type, drop = TRUE)
Estimator <- f.rename.level(Estimator, 'MM.S', 'MM')
Estimator <- f.rename.level(Estimator, 'SMD.D', 'SMD')
Estimator <- f.rename.level(Estimator, 'SMDM.D', 'SMDM')
Estimator <- f.rename.level(Estimator, 'MM.qT', 'MMqT')
Estimator <- f.rename.level(Estimator, 'MM.qE', 'MMqE')
Estimator <- f.rename.level(Estimator, 'MM.rob', 'MMrobust')
Estimator <- f.rename.level(Estimator, 'lsq.lm', 'OLS')
Est.Scale <- f.rename.level(Estimator, 'MM', 'sigma_S')
Est.Scale <- f.rename.level(Est.Scale, 'MMrobust', 'sigma_robust')
Est.Scale <- f.rename.level(Est.Scale, 'MMqE', 'sigma_S*qE')
Est.Scale <- f.rename.level(Est.Scale, 'MMqT', 'sigma_S*qT')
Est.Scale <- f.rename.level(Est.Scale, 'SMDM', 'sigma_D')
Est.Scale <- f.rename.level(Est.Scale, 'SMD', 'sigma_D')
Est.Scale <- f.rename.level(Est.Scale, 'OLS', 'sigma_OLS')
Psi <- f.rename.level(Psi, 'hampel', 'Hampel')
})
## add interaction of Method and Cov
test.1 <- within(test.1, {
method.cov <- interaction(Estimator, Cov, drop=TRUE)
levels(method.cov) <-
sub('\\.+vcov\\.(a?)[wacrv1]*', '\\1', levels(method.cov))
method.cov <- f.rename.level(method.cov, "MMa", "MM.Avar1")
method.cov <- f.rename.level(method.cov, "MMrobust.Default", "MMrobust.Wssc")
method.cov <- f.rename.level(method.cov, "MM", "MM.Wssc")
method.cov <- f.rename.level(method.cov, "SMD", "SMD.Wtau")
method.cov <- f.rename.level(method.cov, "SMDM", "SMDM.Wtau")
method.cov <- f.rename.level(method.cov, "MMqT", "MMqT.Wssc")
method.cov <- f.rename.level(method.cov, "MMqE", "MMqE.Wssc")
method.cov <- f.rename.level(method.cov, "OLS.Default", "OLS")
## ratio: the closest 'desired ratios' instead of exact p/n;
## needed in plots only for stat_*(): median over "close" p/n's:
ratio <- ratios[apply(abs(as.matrix(1/ratios) %*% t(as.matrix(p / n)) - 1),
2, which.min)]
})
## calculate expected values of psi^2 and psi'
test.1$Ep2 <- test.1$Epp <- NA
for(Procstr in levels(test.1$Procstr)) {
args <- f.str2list(Procstr)[[1]]$args
if (is.null(args)) next
lctrl <- do.call('lmrob.control',args)
test.1$Ep2[test.1$Procstr == Procstr] <-
robustbase:::lmrob.E(psi(r)^2, lctrl, use.integrate = TRUE)
test.1$Epp[test.1$Procstr == Procstr] <-
robustbase:::lmrob.E(psi(r,1), lctrl, use.integrate = TRUE)
}
## drop some observations, separate fixed and random designs
test.fixed <- droplevels(subset(test.1, n == 20)) ## n = 20 -- fixed design
test.1 <- droplevels(subset(test.1, n != 20)) ## n !=20 -- random designs
test.lm <- droplevels(subset(test.1, Function == 'lm')) # lm = OLS
test.1 <- droplevels(subset(test.1, Function != 'lm')) # Rob := all "robust"
test.lm$Psi <- NULL
test.lm.2 <- droplevels(subset(test.lm, Error == 'N(0,1)')) # OLS for N(*)
test.2 <- droplevels(subset(test.1, Error == 'N(0,1)' & Function != 'lm'))# Rob for N(*)
## subsets
test.3 <- droplevels(subset(test.2, Method != 'SMDM'))# Rob, not SMDM for N(*)
test.4 <- droplevels(subset(test.1, Method != 'SMDM'))# Rob, not SMDM for all
###################################################
### code chunk number 15: fig-meanscale
###################################################
getOption("SweaveHooks")[["fig"]]()
## ## exp(mean(log(sigma))): this looks almost identical to mean(sigma)
print(ggplot(test.3, aes(p/n, exp(meanlogsigma.1), color = Est.Scale)) +
stat_summary(aes(x=ratio), # <- "rounded p/n": --> median over "neighborhood"
fun.y=median, geom='line') +
geom_point(aes(shape = factor(n)), alpha = alpha.n) +
geom_hline(yintercept = 1) +
g.scale_y_log10_1() +
facet_wrap(~ Psi) +
ylab(quote('geometric ' ~ mean(hat(sigma)))) +
scale_shape_discrete(quote(n)) +
scale_colour_discrete("Scale Est.", labels=lab(test.3$Est.Scale)))
###################################################
### code chunk number 16: fig-sdscale-1
###################################################
getOption("SweaveHooks")[["fig"]]()
print(ggplot(test.3, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
geom_point(aes(shape = factor(n)), alpha = alpha.n) +
ylab(quote(sd(log(hat(sigma)))*sqrt(n))) +
facet_wrap(~ Psi) +
geom_point (data=test.lm.2, alpha=alpha.n, aes(color = Est.Scale)) +
stat_summary(data=test.lm.2, aes(x=ratio, color = Est.Scale),
fun.y=median, geom='line') +
scale_shape_discrete(quote(n)) +
scale_colour_discrete("Scale Est.",
labels= lab(test.3 $Est.Scale,
test.lm.2$Est.Scale)))
###################################################
### code chunk number 17: fig-sdscale-all
###################################################
getOption("SweaveHooks")[["fig"]]()
print(ggplot(test.4,
aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) +
ylim(with(test.4, range(sdlogsigma.1*sqrt(n)))) +
ylab(quote(sd(log(hat(sigma)))*sqrt(n))) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
geom_point(aes(shape = Error), alpha = alpha.error) +
facet_wrap(~ Psi) +
## "FIXME" (?): the next 'test.lm' one give warnings
geom_point (data=test.lm, aes(color = Est.Scale), alpha=alpha.n) +
##-> Warning: Removed 108 rows containing missing values (geom_point).
stat_summary(data=test.lm, aes(x = ratio, color = Est.Scale),
fun.y=median, geom='line') +
##-> Warning: Removed 108 rows containing non-finite values (stat_summary).
g.scale_shape(labels=lab(test.4$Error)) +
scale_colour_discrete("Scale Est.",
labels=lab(test.4 $Est.Scale,
test.lm$Est.Scale)))
###################################################
### code chunk number 18: fig-qscale
###################################################
getOption("SweaveHooks")[["fig"]]()
t3est2 <- droplevels(subset(test.3, Estimator %in% c("SMD", "MMqE")))
print(ggplot(t3est2,
aes(p/n, q, color = Est.Scale)) + ylab(quote(q)) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
geom_point(aes(shape = factor(n)), alpha = alpha.n) +
geom_hline(yintercept = 1) +
g.scale_y_log10_1() +
facet_wrap(~ Psi) +
scale_shape_discrete(quote(n)) +
scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale)))
###################################################
### code chunk number 19: fig-Mscale
###################################################
getOption("SweaveHooks")[["fig"]]()
print(ggplot(t3est2,
aes(p/n, M/q, color = Est.Scale)) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
geom_point(aes(shape = factor(n)), alpha = alpha.n) +
g.scale_y_log10_0.05() +
facet_wrap(~ Psi) +
ylab(quote(M/q)) +
scale_shape_discrete(quote(n)) +
scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale)))
###################################################
### code chunk number 20: fig-qscale-all
###################################################
getOption("SweaveHooks")[["fig"]]()
## q (median of se/sigma) for the SMD and MMqE estimators, bisquare psi,
## faceted over all simulated error distributions.
t1.bi <- droplevels(subset(test.1, Estimator %in% c("SMD", "MMqE") &
                             Psi == 'bisquare'))
print(ggplot(t1.bi,
             aes(p/n, q, color = Est.Scale)) +
        stat_summary(aes(x=ratio), fun.y=median, geom='line') +
        geom_point(aes(shape = factor(n)), alpha = alpha.n) +
        geom_hline(yintercept = 1) +
        g.scale_y_log10_1() +
        facet_wrap(~ Error) + ## labeller missing!
        ylab(quote(q)) +
        scale_shape_discrete(quote(n)) +
        ## BUG FIX: was lab(tmp$Est.Scale) -- 'tmp' is a stale object from an
        ## earlier chunk (no Est.Scale column); label the data actually plotted
        scale_colour_discrete("Scale Est.", labels=lab(t1.bi$Est.Scale)),
      legend.mod = legend.mod)
###################################################
### code chunk number 21: fig-Mscale-all
###################################################
getOption("SweaveHooks")[["fig"]]()
## M/q (normalized MAD of se/sigma) for the same SMD / MMqE, bisquare-psi
## subset (t1.bi, built in the previous chunk), faceted by error distribution.
print(ggplot(t1.bi,
             aes(p/n, M/q, color = Est.Scale)) +
        stat_summary(aes(x=ratio), fun.y=median, geom='line') +
        geom_point(aes(shape = factor(n)), alpha = alpha.n) +
        g.scale_y_log10_0.05() +
        facet_wrap(~ Error) +
        ylab(quote(M/q)) +
        scale_shape_discrete(quote(n)) +
        ## BUG FIX: was lab(tmp$Est.Scale) -- 'tmp' is a stale object from an
        ## earlier chunk (no Est.Scale column); label the data actually plotted
        scale_colour_discrete("Scale Est.", labels=lab(t1.bi$Est.Scale)),
      legend.mod = legend.mod)
###################################################
### code chunk number 22: fig-efficiency
###################################################
getOption("SweaveHooks")[["fig"]]()
print(ggplot(test.2, aes(p/n, efficiency.1, color = Estimator)) +
geom_point(aes(shape = factor(n)), alpha = alpha.n) +
geom_hline(yintercept = 0.95) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
facet_wrap(~ Psi) +
ylab(quote('efficiency of' ~~ hat(beta))) +
g.scale_shape(quote(n)) +
scale_colour_discrete(name = "Estimator",
labels = lab(test.2$Estimator)))
###################################################
### code chunk number 23: fig-efficiency-all
###################################################
getOption("SweaveHooks")[["fig"]]()
t.1xt1 <- droplevels(subset(test.1, Error != 't1'))
print(ggplot(t.1xt1,
aes(p/n, efficiency.1, color = Estimator)) +
ylab(quote('efficiency of '~hat(beta))) +
geom_point(aes(shape = Error), alpha = alpha.error) +
geom_hline(yintercept = 0.95) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
g.scale_shape(values=c(16,17,15,3,7,8,9,1,2,4)[-4],
labels=lab(t.1xt1$Error)) +
facet_wrap(~ Psi) +
scale_colour_discrete(name = "Estimator",
labels = lab(t.1xt1$Estimator)))
###################################################
### code chunk number 24: fig-AdB2-1
###################################################
getOption("SweaveHooks")[["fig"]]()
t.2o. <- droplevels(subset(test.2, !is.na(AdB2t.1)))
print(ggplot(t.2o., aes(p/n, AdB2.1/(1-p/n), color = Estimator)) +
geom_point(aes(shape=factor(n)), alpha = alpha.n) +
geom_point(aes(y=K2AdB2.1/(1-p/n)), alpha = alpha.n) +
geom_point(aes(y=AdB2t.1), alpha = alpha.n) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
stat_summary(aes(x=ratio, y=K2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype=2) +
stat_summary(aes(x=ratio, y=AdB2t.1), fun.y=median, geom='line', linetype=3) +
geom_hline(yintercept = 1/0.95) +
g.scale_y_log10_1() +
scale_shape_discrete(quote(n)) +
scale_colour_discrete(name = "Estimator", labels = lab(t.2o.$Estimator)) +
ylab(quote(mean(hat(gamma)))) +
facet_wrap(~ Psi))
###################################################
### code chunk number 25: fig-sdAdB2-1
###################################################
getOption("SweaveHooks")[["fig"]]()
t.2ok <- droplevels(subset(test.2, !is.na(sdAdB2t.1)))
print(ggplot(t.2ok,
aes(p/n, sdAdB2.1/(1-p/n), color = Estimator)) +
geom_point(aes(shape=factor(n)), alpha = alpha.n) +
geom_point(aes(y=sdK2AdB2.1/(1-p/n)), alpha = alpha.n) +
geom_point(aes(y=sdAdB2t.1), alpha = alpha.n) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
stat_summary(aes(x=ratio, y=sdK2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype= 2) +
stat_summary(aes(x=ratio, y=sdAdB2t.1), fun.y=median, geom='line', linetype= 3) +
g.scale_y_log10_0.05() +
scale_shape_discrete(quote(n)) +
scale_colour_discrete(name = "Estimator", labels=lab(t.2ok$Estimator)) +
ylab(quote(sd(hat(gamma)))) +
facet_wrap(~ Psi))
###################################################
### code chunk number 26: fig-emp-level
###################################################
getOption("SweaveHooks")[["fig"]]()
t.2en0 <- droplevels(subset(test.2, emplev_1 != 0))
print(ggplot(t.2en0,
aes(p/n, f.truncate(emplev_1), color = method.cov)) +
g.truncate.lines + g.truncate.areas +
geom_point(aes(shape = factor(n)), alpha = alpha.n) +
scale_shape_discrete(quote(n)) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
geom_hline(yintercept = 0.05) +
g.scale_y_log10_0.05() +
scale_colour_discrete(name = "Estimator", labels=lab(t.2en0$method.cov)) +
ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) +
facet_wrap(~ Psi))
###################################################
### code chunk number 27: fig-lqq-level
###################################################
getOption("SweaveHooks")[["fig"]]()
tmp <- droplevels(subset(test.1, Psi == 'lqq' & emplev_1 != 0))
print(ggplot(tmp, aes(p/n, f.truncate(emplev_1), color = method.cov)) +
ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) +
g.truncate.line + g.truncate.area +
geom_point(aes(shape = factor(n)), alpha = alpha.n) +
stat_summary(aes(x=ratio), fun.y=median, geom='line') +
geom_hline(yintercept = 0.05) +
g.scale_y_log10_0.05() +
g.scale_shape(quote(n)) +
scale_colour_discrete(name = "Estimator", labels=lab(tmp$method.cov)) +
facet_wrap(~ Error)
,
legend.mod = legend.mod
)
###################################################
### code chunk number 28: fig-power-1-0_2
###################################################
getOption("SweaveHooks")[["fig"]]()
t2.25 <- droplevels(subset(test.2, n == 25))# <-- fixed n ==> no need for 'ratio'
tL2.25 <- droplevels(subset(test.lm.2, n == 25))
scale_col_D2.25 <- scale_colour_discrete(name = "Estimator (Cov. Est.)",
labels=lab(t2.25 $method.cov,
tL2.25$method.cov))
print(ggplot(t2.25,
aes(p/n, power_1_0.2, color = method.cov)) +
ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.2) )) +
geom_point(# aes(shape = Error),
alpha = alpha.error) +
stat_summary(fun.y=median, geom='line') +
geom_point (data=tL2.25, alpha = alpha.n) +
stat_summary(data=tL2.25, fun.y=median, geom='line') +
## g.scale_shape("Error", labels=lab(t2.25$Error)) +
scale_col_D2.25 +
facet_wrap(~ Psi)
)
###################################################
### code chunk number 29: fig-power-1-0_4
###################################################
getOption("SweaveHooks")[["fig"]]()
print(ggplot(t2.25,
aes(p/n, power_1_0.4, color = method.cov)) +
ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.4) )) +
geom_point(alpha = alpha.error) +
stat_summary(fun.y=median, geom='line') +
geom_point (data=tL2.25, alpha = alpha.n) +
stat_summary(data=tL2.25,
fun.y=median, geom='line') +
## g.scale_shape("Error", labels=lab(t2.25$Error)) +
scale_col_D2.25 +
facet_wrap(~ Psi)
)
###################################################
### code chunk number 30: fig-power-1-0_6
###################################################
getOption("SweaveHooks")[["fig"]]()
print(ggplot(t2.25,
aes(p/n, power_1_0.6, color = method.cov)) +
ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.6) )) +
geom_point(# aes(shape = Error),
alpha = alpha.error) +
stat_summary(fun.y=median, geom='line') +
geom_point (data=tL2.25, alpha = alpha.n) +
stat_summary(data=tL2.25, fun.y=median, geom='line') +
scale_col_D2.25 +
facet_wrap(~ Psi)
)
###################################################
### code chunk number 31: fig-power-1-0_8
###################################################
getOption("SweaveHooks")[["fig"]]()
print(ggplot(t2.25,
aes(p/n, power_1_0.8, color = method.cov)) +
ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.8) )) +
geom_point(alpha = alpha.error) +
stat_summary(fun.y=median, geom='line') +
geom_point (data=tL2.25, alpha = alpha.n) +
stat_summary(data=tL2.25, fun.y=median, geom='line') +
g.scale_shape("Error", labels=lab(t2.25$Error)) +
scale_col_D2.25 +
facet_wrap(~ Psi)
)
###################################################
### code chunk number 32: fig-power-1-1
###################################################
getOption("SweaveHooks")[["fig"]]()
print(ggplot(t2.25,
aes(p/n, power_1_1, color = method.cov)) +
ylab(quote("empirical power "~ list (H[0] : beta[1] == 1) )) +
geom_point(alpha = alpha.error) +
stat_summary(fun.y=median, geom='line') +
geom_point (data=tL2.25, alpha = alpha.n) +
stat_summary(data=tL2.25, fun.y=median, geom='line') +
## g.scale_shape("Error", labels=lab(t2.25$Error)) +
scale_col_D2.25 +
facet_wrap(~ Psi)
)
###################################################
### code chunk number 33: fig-pred-points
###################################################
getOption("SweaveHooks")[["fig"]]()
pp <- f.prediction.points(dd)[1:7,]
## Worked in older ggplot2 -- now plotmatrix() is gone, to be replaced by GGally::ggpairs):
## tmp <- plotmatrix(pp)$data
## tmp$label <- as.character(1:7)
## print(plotmatrix(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5))
tmp <- ggpairs(pp)$data
tmp$label <- as.character(1:7) # and now?
## ggpairs() + geom_text() does *NOT* work {ggpairs has own class}
## print(ggpairs(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5))
print( ggpairs(dd) )## now (2016-11) fine
###################################################
### code chunk number 34: fig-cpr
###################################################
getOption("SweaveHooks")[["fig"]]()
n.cprs <- names(test.fixed)[grep('cpr', names(test.fixed))] # test.fixed: n=20 => no 'x=ratio'
test.5 <- melt(test.fixed[,c('method.cov', 'Error', 'Psi', n.cprs)])
test.5 <- within(test.5, {
Point <- as.numeric(do.call('rbind', strsplit(levels(variable), '_'))[,2])[variable]
})
print(ggplot(test.5,
aes(Point, f.truncate(value), color = method.cov)) +
geom_point(aes(shape = Error), alpha = alpha.error) +
g.truncate.line + g.truncate.area +
stat_summary(fun.y=median, geom='line') +
geom_hline(yintercept = 0.05) +
g.scale_y_log10_0.05() +
g.scale_shape(labels=lab(test.5$Error)) +
scale_colour_discrete(name = "Estimator (Cov. Est.)",
labels=lab(test.5$method.cov)) +
ylab("empirical level of confidence intervals") +
facet_wrap(~ Psi)
)
###################################################
### code chunk number 35: maxbias-fn
###################################################
## Henning (1994) eq 33:
## Expected rho of the scaled residual under a noncentral chi-square(1)
## distribution (Henning (1994), eq. 33); vectorized over all arguments.
## '...' is forwarded to lmrob.control() (e.g. psi = 'bisquare').
g <- Vectorize(function(s, theta, mu, ...) {
  lctrl <- lmrob.control(...)
  ## rho (chi) function with the breakdown tuning constant of the psi family
  rho <- function(x)
    Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0)
  ## NOTE(review): the argument ((1 + theta^2)/s^2 * x)^2 squares an already
  ## squared quantity -- verify against eq. 33 of the reference.  The lower
  ## integration limit -Inf is harmless since dchisq vanishes for x < 0.
  integrate(function(x) rho(((1 + theta^2)/s^2*x)^2)*dchisq(x, 1, mu^2/(1 + theta^2)),
            -Inf, Inf)$value
})
## Martin et al 1989 Section 3.2: for mu = 0
## E[chi(sqrt(1 + theta^2)/s * r)] with the S-scale (breakdown) tuning
## constant: tuning.psi is overwritten by tuning.chi before the call.
## 'mu' is accepted for interface compatibility but unused here.
g.2 <- Vectorize(function(s, theta, mu, ...) {
  lctrl <- lmrob.control(...)
  lctrl$tuning.psi <- lctrl$tuning.chi
  ## lmrob.E evaluates the quoted expression in 'r' (use.integrate = TRUE)
  robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)})
## Same expectation, but keeping the MM (efficiency) tuning constants.
g.2.MM <- Vectorize(function(s, theta, mu, ...) {
  lctrl <- lmrob.control(...)
  robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)})
## Henning (1994) eq 30, one parameter case
## Double integral of rho((y - x*theta - mu)/s) against two independent
## standard-normal densities (x: carrier, y: response); vectorized.
g.3 <- Vectorize(function(s, theta, mu, ...) {
  lctrl <- lmrob.control(...)
  rho <- function(x)
    Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0)
  ## inner integral over the carrier x for a fixed response value y
  int.x <- Vectorize(function(y) {
    integrate(function(x) rho((y - x*theta - mu)/s)*dnorm(x)*dnorm(y),-Inf, Inf)$value })
  integrate(int.x,-Inf, Inf)$value
})
## Numerically invert the expectation g(s) = value in the scale s, using
## the cheaper one-dimensional formula g.2 when mu == 0 and the double
## integral g.3 otherwise.  Returns the root found in (0.1, 100);
## uniroot() stops with an error if no sign change exists there.
inv.g1 <- function(value, theta, mu, ...) {
  gfun <- if (mu == 0) g.2 else g.3
  root.fn <- function(s) gfun(s, theta, mu, ...) - value
  uniroot(root.fn, c(0.1, 100))$root
}
## As inv.g1, but for the MM (efficiency) tuning constants; instead of
## stopping, returns NA with a warning when uniroot() cannot bracket a
## root in (0.01, 100).
inv.g1.MM <- function(value, theta, mu, ...) {
  ## NOTE(review): g.3.MM is not defined in the visible part of this file
  ## (only g.2.MM and g.3 are), so a call with mu != 0 would fail with
  ## "object 'g.3.MM' not found".  All visible callers pass mu = 0 --
  ## confirm before using this with mu != 0.
  g <- if (mu == 0) g.2.MM else g.3.MM
  ret <- tryCatch(uniroot(function(s) g(s, theta, mu, ...) - value, c(0.01, 100)),
                  error = function(e)e)
  if (inherits(ret, 'error')) {
    warning('inv.g1.MM: ', value, ' ', theta, ' ', mu,' -> Error: ', ret$message)
    NA
  } else {
    ret$root
  }
}
## Extremal scale values under epsilon-contamination (theta = mu = 0):
## the smallest / largest scale solving the corresponding g-equations.
s.min <- function(epsilon, ...) inv.g1(0.5/(1 - epsilon), 0, 0, ...)
s.max <- function(epsilon, ...) inv.g1((0.5-epsilon)/(1-epsilon), 0, 0, ...)
## NOTE(review): '^' binds tighter than '/', so the expression inside sqrt
## is s.max / (s.min^2) - 1; confirm against the reference whether
## (s.max/s.min)^2 - 1 was intended.
BS <- Vectorize(function(epsilon, ...) {
  sqrt(s.max(epsilon, ...)/s.min(epsilon, ...)^2 - 1) })
## Lower maximum-asymptotic-bias bound, evaluated at the largest scale
## s.max (these l/u values are plotted as bias bounds in the figure chunk).
l <- Vectorize(function(epsilon, ...) {
  sigma_be <- s.max(epsilon, ...)
  sqrt((sigma_be/inv.g1.MM(g.2.MM(sigma_be,0,0,...) +
                           epsilon/(1-epsilon),0,0,...))^2 - 1) })
## Upper bound: the analogous quantity at the smallest scale s.min,
## clamped from below by l(epsilon).
u <- Vectorize(function(epsilon, ...) {
  gamma_be <- s.min(epsilon, ...)
  max(l(epsilon, ...),
      sqrt((gamma_be/inv.g1.MM(g.2.MM(gamma_be,0,0,...) +
                               epsilon/(1-epsilon),0,0,...))^2 - 1)) })
###################################################
### code chunk number 36: max-asymptotic-bias
###################################################
asymptMBFile <- file.path(robustDta, 'asymptotic.max.bias.Rdata')
if (!file.exists(asymptMBFile)) {
x <- seq(0, 0.35, length.out = 100)
rmb <- rbind(data.frame(l=l(x, psi = 'hampel'),
u=u(x, psi = 'hampel'), psi = 'Hampel'),
data.frame(l=l(x, psi = 'lqq'),
u=u(x, psi = 'lqq'), psi = 'lqq'),
data.frame(l=l(x, psi = 'bisquare'),
u=u(x, psi = 'bisquare'), psi = 'bisquare'),
data.frame(l=l(x, psi = 'optimal'),
u=u(x, psi = 'optimal'), psi = 'optimal'))
rmb$x <- x
save(rmb, file=asymptMBFile)
} else load(asymptMBFile)
###################################################
### code chunk number 37: fig-max-asymptotic-bias
###################################################
getOption("SweaveHooks")[["fig"]]()
## Plot lower (solid) and upper (dashed) max-asymptotic-bias bounds per psi.
bias.plot <- ggplot(rmb, aes(x, l, color = psi)) +
  geom_line() +
  geom_line(aes(x, u, color = psi), linetype = 2) +
  xlab(quote("amount of contamination" ~~ epsilon)) +
  ylab("maximum asymptotic bias bounds") +
  coord_cartesian(ylim = c(0, 10)) +
  scale_y_continuous(breaks = 1:10) +
  scale_colour_hue(quote(psi ~ '-function'))
print(bias.plot)
| 44,217 | cc0-1.0 |
e0ae2022eab59371292c62d256dfaf3e49db1f71 | niketanpansare/systemml | src/test/scripts/functions/vect/VectorizeLixColPos.R | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script: read matrix A, set two cells in column 3, and write the
# result as a sparse MatrixMarket file.  args[1]/args[2] are in/out dir prefixes.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

A <- as.matrix(readMM(paste0(args[1], "A.mtx")))
R <- A
# Plain scalars suffice for single-cell assignment; as.matrix() was redundant.
R[7, 3] <- 3
R[8, 3] <- 4
writeMM(as(R, "CsparseMatrix"), paste0(args[2], "R"))
| 1,159 | apache-2.0 |
e0ae2022eab59371292c62d256dfaf3e49db1f71 | deroneriksson/incubator-systemml | src/test/scripts/functions/vect/VectorizeLixColPos.R | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script: read matrix A, set two cells in column 3, and write the
# result as a sparse MatrixMarket file.  args[1]/args[2] are in/out dir prefixes.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

A <- as.matrix(readMM(paste0(args[1], "A.mtx")))
R <- A
# Plain scalars suffice for single-cell assignment; as.matrix() was redundant.
R[7, 3] <- 3
R[8, 3] <- 4
writeMM(as(R, "CsparseMatrix"), paste0(args[2], "R"))
| 1,159 | apache-2.0 |
e0ae2022eab59371292c62d256dfaf3e49db1f71 | gweidner/incubator-systemml | src/test/scripts/functions/vect/VectorizeLixColPos.R | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script: read matrix A, set two cells in column 3, and write the
# result as a sparse MatrixMarket file.  args[1]/args[2] are in/out dir prefixes.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

A <- as.matrix(readMM(paste0(args[1], "A.mtx")))
R <- A
# Plain scalars suffice for single-cell assignment; as.matrix() was redundant.
R[7, 3] <- 3
R[8, 3] <- 4
writeMM(as(R, "CsparseMatrix"), paste0(args[2], "R"))
| 1,159 | apache-2.0 |
e0ae2022eab59371292c62d256dfaf3e49db1f71 | gweidner/systemml | src/test/scripts/functions/vect/VectorizeLixColPos.R | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script: read matrix A, set two cells in column 3, and write the
# result as a sparse MatrixMarket file.  args[1]/args[2] are in/out dir prefixes.
args <- commandArgs(TRUE)
options(digits = 22)
library("Matrix")

A <- as.matrix(readMM(paste0(args[1], "A.mtx")))
R <- A
# Plain scalars suffice for single-cell assignment; as.matrix() was redundant.
R[7, 3] <- 3
R[8, 3] <- 4
writeMM(as(R, "CsparseMatrix"), paste0(args[2], "R"))
| 1,159 | apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.