content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getChromStateOR.R
\name{getChromStateOR}
\alias{getChromStateOR}
\title{Significant variable regression by chromatin state}
\usage{
getChromStateOR(significative, chromstate, varname)
}
\arguments{
\item{significative}{numerical. Variable to take into account as the significance variable; could be FDR, p-value, ...}
\item{chromstate}{vector. Chromatin state values}
\item{varname}{string. Chromatin state name. For example: "TssA","TssAFlnk","TxFlnk","TxWk","Tx","EnhG","Enh","ZNF.Rpts","Het","TssBiv","BivFlnk","EnhBiv","ReprPC","ReprPCWk" or "Quies"}
}
\value{
}
\description{
Get the regression by chromatin state, taking into account a significance variable parameter such as FDR values
}
| /man/getChromStateOR.Rd | permissive | dpelegri/EASIER | R | false | true | 766 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getChromStateOR.R
\name{getChromStateOR}
\alias{getChromStateOR}
\title{Significant variable regression by chromatin state}
\usage{
getChromStateOR(significative, chromstate, varname)
}
\arguments{
\item{significative}{numerical. Variable to take into account as the significance variable; could be FDR, p-value, ...}
\item{chromstate}{vector. Chromatin state values}
\item{varname}{string. Chromatin state name. For example: "TssA","TssAFlnk","TxFlnk","TxWk","Tx","EnhG","Enh","ZNF.Rpts","Het","TssBiv","BivFlnk","EnhBiv","ReprPC","ReprPCWk" or "Quies"}
}
\value{
}
\description{
Get the regression by chromatin state, taking into account a significance variable parameter such as FDR values
}
|
## The following two function work in tandem to reduce required
# computations by saving the calculated inverse of a non-degenerate
# matrix, A, and only recalculating the inverse if the matrix, A,
# has changed.
## Function creating list of four items to
# 1) set the value of the matrix,
# 2) get the value of the matrix,
# 3) set the value of the matrix inverse,
# 4) get the value of the matrix inverse.
## Construct a "cache-aware matrix": a list of four accessor closures that
## share one environment holding the matrix `x` and its lazily computed
## inverse. Storing a new matrix discards any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL

  # Store a new matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    inverse_cache <<- NULL
  }

  # Return the current matrix.
  get <- function() {
    x
  }

  # Record a freshly computed inverse in the shared environment.
  setInv <- function(calcInverse) {
    inverse_cache <<- calcInverse
  }

  # Return the cached inverse (NULL when nothing has been cached yet).
  getInv <- function() {
    inverse_cache
  }

  # Expose the accessors under their public names.
  list(set = set,
       get = get,
       setInv = setInv,
       getInv = getInv)
}
## This function calculates the inverse of a "matrix object"
# (as defined by the previous function) only if the underlying
# input matrix (A, above) has been changed.
## Return the inverse of the "matrix object" created by makeCacheMatrix(),
## computing it only when no cached value exists.
##
## x   : list produced by makeCacheMatrix() (get/set/getInv/setInv closures).
## ... : further arguments forwarded to solve() (e.g. `tol`).
##
## Side effect: on a cache miss, the freshly computed inverse is stored back
## into `x` via x$setInv() so later calls can reuse it; cache hits emit a
## message and return immediately.
cacheSolve <- function(x, ...) {
  # Return a matrix that is the inverse of 'x'
  cheXInv <- x$getInv()
  # If a matrix inverse already exists for this matrix, retrieve
  # existing inverse (and note so).
  if (!is.null(cheXInv)) {
    message("Retrieving cached data.")
    return(cheXInv) # exits function here if inverse found
  }
  # Else, calculate new matrix inverse, output, and save.
  data <- x$get()
  # Bug fix: forward `...` so caller-supplied solve() options are honoured;
  # the original accepted `...` but silently discarded it.
  cheXInv <- solve(data, ...)
  x$setInv(cheXInv)
  cheXInv
}
| /cachematrix.R | no_license | pmhallum/ProgrammingAssignment2 | R | false | false | 1,576 | r | ## The following two function work in tandem to reduce required
# computations by saving the calculated inverse of a non-degenerate
# matrix, A, and only recalculating the inverse if the matrix, A,
# has changed.
## Function creating list of four items to
# 1) set the value of the matrix,
# 2) get the value of the matrix,
# 3) set the value of the matrix inverse,
# 4) get the value of the matrix inverse.
## Construct a "cache-aware matrix": a list of four accessor closures that
## share one environment holding the matrix `x` and its lazily computed
## inverse. Storing a new matrix discards any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL

  # Store a new matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    inverse_cache <<- NULL
  }

  # Return the current matrix.
  get <- function() {
    x
  }

  # Record a freshly computed inverse in the shared environment.
  setInv <- function(calcInverse) {
    inverse_cache <<- calcInverse
  }

  # Return the cached inverse (NULL when nothing has been cached yet).
  getInv <- function() {
    inverse_cache
  }

  # Expose the accessors under their public names.
  list(set = set,
       get = get,
       setInv = setInv,
       getInv = getInv)
}
## This function calculates the inverse of a "matrix object"
# (as defined by the previous function) only if the underlying
# input matrix (A, above) has been changed.
## Return the inverse of the "matrix object" created by makeCacheMatrix(),
## computing it only when no cached value exists.
##
## x   : list produced by makeCacheMatrix() (get/set/getInv/setInv closures).
## ... : further arguments forwarded to solve() (e.g. `tol`).
##
## Side effect: on a cache miss, the freshly computed inverse is stored back
## into `x` via x$setInv() so later calls can reuse it; cache hits emit a
## message and return immediately.
cacheSolve <- function(x, ...) {
  # Return a matrix that is the inverse of 'x'
  cheXInv <- x$getInv()
  # If a matrix inverse already exists for this matrix, retrieve
  # existing inverse (and note so).
  if (!is.null(cheXInv)) {
    message("Retrieving cached data.")
    return(cheXInv) # exits function here if inverse found
  }
  # Else, calculate new matrix inverse, output, and save.
  data <- x$get()
  # Bug fix: forward `...` so caller-supplied solve() options are honoured;
  # the original accepted `...` but silently discarded it.
  cheXInv <- solve(data, ...)
  x$setInv(cheXInv)
  cheXInv
}
|
#===========
#run_model.R
#===========
#This script demonstrates the VisionEval framework for the RSPM model.

#Load libraries
#--------------
library(visioneval)

#Initialize model
#----------------
initializeModel(
  ParamDir = "defs",
  RunParamFile = "run_parameters.json",
  GeoFile = "geo.csv",
  ModelParamFile = "model_parameters.json",
  LoadDatastore = FALSE,
  DatastoreName = NULL,
  SaveDatastore = TRUE
)

#Run all demo modules for all years
#----------------------------------
#Each entry names a module, its owning package, and its RunFor setting.
#The entries run in exactly this order once per model year.
ModuleCalls <- list(
  c("CreateHouseholds",                 "VESimHouseholds",      "AllYears"),
  c("PredictWorkers",                   "VESimHouseholds",      "AllYears"),
  c("AssignLifeCycle",                  "VESimHouseholds",      "AllYears"),
  c("PredictIncome",                    "VESimHouseholds",      "AllYears"),
  c("PredictHousing",                   "VELandUse",            "AllYears"),
  c("LocateEmployment",                 "VELandUse",            "AllYears"),
  c("AssignDevTypes",                   "VELandUse",            "AllYears"),
  c("Calculate4DMeasures",              "VELandUse",            "AllYears"),
  c("CalculateUrbanMixMeasure",         "VELandUse",            "AllYears"),
  c("AssignParkingRestrictions",        "VELandUse",            "AllYears"),
  c("AssignDemandManagement",           "VELandUse",            "AllYears"),
  c("AssignTransitService",             "VETransportSupply",    "AllYears"),
  c("AssignRoadMiles",                  "VETransportSupply",    "AllYears"),
  c("AssignDrivers",                    "VEHouseholdVehicles",  "AllYears"),
  c("AssignVehicleOwnership",           "VEHouseholdVehicles",  "AllYears"),
  c("AssignVehicleType",                "VEHouseholdVehicles",  "AllYears"),
  c("CreateVehicleTable",               "VEHouseholdVehicles",  "AllYears"),
  c("AssignVehicleAge",                 "VEHouseholdVehicles",  "AllYears"),
  c("CalculateHouseholdDvmt",           "VEHouseholdTravel",    "AllYears"),
  c("CalculateAltModeTrips",            "VEHouseholdTravel",    "AllYears"),
  c("DivertSovTravel",                  "VEHouseholdTravel",    "AllYears"),
  c("CalculateBaseRoadDvmt",            "VERoadPerformance",    "BaseYear"),
  c("CalculateFutureRoadDvmt",          "VERoadPerformance",    "NotBaseYear"),
  c("CalculateRoadPerformance",         "VERoadPerformance",    "AllYears"),
  c("AssignHhVehiclePowertrain",        "VEEnergyAndEmissions", "AllYears"),
  c("AssignHhVehicleDvmtSplit",         "VEEnergyAndEmissions", "AllYears"),
  c("AssignHhVehicleDvmt",              "VEEnergyAndEmissions", "AllYears"),
  c("CalculateCarbonIntensity",         "VEEnergyAndEmissions", "AllYears"),
  c("CalculateMpgMpkwhAdjustments",     "VEEnergyAndEmissions", "AllYears"),
  c("CalculateHhEnergyAndEmissions",    "VEEnergyAndEmissions", "AllYears"),
  c("CalculateComEnergyAndEmissions",   "VEEnergyAndEmissions", "AllYears"),
  c("CalculatePtranEnergyAndEmissions", "VEEnergyAndEmissions", "AllYears")
)
for (Year in getYears()) {
  for (Call in ModuleCalls) {
    runModule(Call[1], Call[2], RunFor = Call[3], RunYear = Year)
  }
}
| /sources/models/VERSPM/Test1/run_model.R | permissive | n8mauer/VisionEval | R | false | false | 4,107 | r | #===========
#run_model.R
#===========
#This script demonstrates the VisionEval framework for the RSPM model.

#Load libraries
#--------------
library(visioneval)

#Initialize model
#----------------
initializeModel(
  ParamDir = "defs",
  RunParamFile = "run_parameters.json",
  GeoFile = "geo.csv",
  ModelParamFile = "model_parameters.json",
  LoadDatastore = FALSE,
  DatastoreName = NULL,
  SaveDatastore = TRUE
)

#Run all demo modules for all years
#----------------------------------
#Each entry names a module, its owning package, and its RunFor setting.
#The entries run in exactly this order once per model year.
ModuleCalls <- list(
  c("CreateHouseholds",                 "VESimHouseholds",      "AllYears"),
  c("PredictWorkers",                   "VESimHouseholds",      "AllYears"),
  c("AssignLifeCycle",                  "VESimHouseholds",      "AllYears"),
  c("PredictIncome",                    "VESimHouseholds",      "AllYears"),
  c("PredictHousing",                   "VELandUse",            "AllYears"),
  c("LocateEmployment",                 "VELandUse",            "AllYears"),
  c("AssignDevTypes",                   "VELandUse",            "AllYears"),
  c("Calculate4DMeasures",              "VELandUse",            "AllYears"),
  c("CalculateUrbanMixMeasure",         "VELandUse",            "AllYears"),
  c("AssignParkingRestrictions",        "VELandUse",            "AllYears"),
  c("AssignDemandManagement",           "VELandUse",            "AllYears"),
  c("AssignTransitService",             "VETransportSupply",    "AllYears"),
  c("AssignRoadMiles",                  "VETransportSupply",    "AllYears"),
  c("AssignDrivers",                    "VEHouseholdVehicles",  "AllYears"),
  c("AssignVehicleOwnership",           "VEHouseholdVehicles",  "AllYears"),
  c("AssignVehicleType",                "VEHouseholdVehicles",  "AllYears"),
  c("CreateVehicleTable",               "VEHouseholdVehicles",  "AllYears"),
  c("AssignVehicleAge",                 "VEHouseholdVehicles",  "AllYears"),
  c("CalculateHouseholdDvmt",           "VEHouseholdTravel",    "AllYears"),
  c("CalculateAltModeTrips",            "VEHouseholdTravel",    "AllYears"),
  c("DivertSovTravel",                  "VEHouseholdTravel",    "AllYears"),
  c("CalculateBaseRoadDvmt",            "VERoadPerformance",    "BaseYear"),
  c("CalculateFutureRoadDvmt",          "VERoadPerformance",    "NotBaseYear"),
  c("CalculateRoadPerformance",         "VERoadPerformance",    "AllYears"),
  c("AssignHhVehiclePowertrain",        "VEEnergyAndEmissions", "AllYears"),
  c("AssignHhVehicleDvmtSplit",         "VEEnergyAndEmissions", "AllYears"),
  c("AssignHhVehicleDvmt",              "VEEnergyAndEmissions", "AllYears"),
  c("CalculateCarbonIntensity",         "VEEnergyAndEmissions", "AllYears"),
  c("CalculateMpgMpkwhAdjustments",     "VEEnergyAndEmissions", "AllYears"),
  c("CalculateHhEnergyAndEmissions",    "VEEnergyAndEmissions", "AllYears"),
  c("CalculateComEnergyAndEmissions",   "VEEnergyAndEmissions", "AllYears"),
  c("CalculatePtranEnergyAndEmissions", "VEEnergyAndEmissions", "AllYears")
)
for (Year in getYears()) {
  for (Call in ModuleCalls) {
    runModule(Call[1], Call[2], RunFor = Call[3], RunYear = Year)
  }
}
|
# Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev script before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
###################################
#### CURRENT FILE: DEV SCRIPT #####
###################################

# Engineering

## Dependencies ----
## Declare every runtime dependency of the app; one use_package() call each.
pkg_deps <- c(
  "thinkr", "ggplot2", "fullPage", "bslib", "usmap", "bs4Dash", "dplyr",
  "urbnmapr", "urbnthemes", "plotly", "waiter", "hrbrthemes", "ggtext"
)
for (pkg in pkg_deps) {
  usethis::use_package(pkg)
}

## Add modules ----
## Create a module infrastructure in R/
for (module in c("population_65_plus", "states", "about")) {
  golem::add_module(name = module)
}

## Add helper functions ----
## Creates fct_* and utils_* files
golem::add_fct("helpers")
golem::add_fct("plots")
golem::add_utils("helpers")

## External resources
## Creates .js and .css files at inst/app/www
golem::add_js_file("script")
golem::add_js_handler("handlers")
golem::add_css_file("custom")

## Add internal datasets ----
## If you have data in your package
usethis::use_data_raw(name = "my_dataset", open = TRUE)

## Tests ----
## Add one line by test you want to create
usethis::use_test("app")

# Documentation

## Vignette ----
usethis::use_vignette("ACLOlderAmericansProfile")
devtools::build_vignettes()

## Code coverage ----
## (You'll need GitHub there)
usethis::use_github()
usethis::use_travis()
usethis::use_appveyor()

# You're now set! ----
# go to dev/03_deploy.R
rstudioapi::navigateToFile("dev/03_deploy.R")
| /dev/02_dev.R | no_license | kgilds/ACLOlderAmericansProfile | R | false | false | 2,058 | r | # Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev script before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
###################################
#### CURRENT FILE: DEV SCRIPT #####
###################################

# Engineering

## Dependencies ----
## Declare every runtime dependency of the app; one use_package() call each.
pkg_deps <- c(
  "thinkr", "ggplot2", "fullPage", "bslib", "usmap", "bs4Dash", "dplyr",
  "urbnmapr", "urbnthemes", "plotly", "waiter", "hrbrthemes", "ggtext"
)
for (pkg in pkg_deps) {
  usethis::use_package(pkg)
}

## Add modules ----
## Create a module infrastructure in R/
for (module in c("population_65_plus", "states", "about")) {
  golem::add_module(name = module)
}

## Add helper functions ----
## Creates fct_* and utils_* files
golem::add_fct("helpers")
golem::add_fct("plots")
golem::add_utils("helpers")

## External resources
## Creates .js and .css files at inst/app/www
golem::add_js_file("script")
golem::add_js_handler("handlers")
golem::add_css_file("custom")

## Add internal datasets ----
## If you have data in your package
usethis::use_data_raw(name = "my_dataset", open = TRUE)

## Tests ----
## Add one line by test you want to create
usethis::use_test("app")

# Documentation

## Vignette ----
usethis::use_vignette("ACLOlderAmericansProfile")
devtools::build_vignettes()

## Code coverage ----
## (You'll need GitHub there)
usethis::use_github()
usethis::use_travis()
usethis::use_appveyor()

# You're now set! ----
# go to dev/03_deploy.R
rstudioapi::navigateToFile("dev/03_deploy.R")
|
\name{consensus_heatmap-ConsensusPartition-method}
\alias{consensus_heatmap,ConsensusPartition-method}
\alias{consensus_heatmap}
\title{
Heatmap for the consensus matrix
}
\description{
Heatmap for the consensus matrix
}
\usage{
\S4method{consensus_heatmap}{ConsensusPartition}(object, k, internal = FALSE,
anno = get_anno(object), anno_col = get_anno_col(object),
show_row_names = FALSE, ...)
}
\arguments{
\item{object}{A \code{\link{ConsensusPartition-class}} object.}
\item{k}{Number of partitions.}
\item{internal}{Used internally.}
\item{anno}{A data frame of annotations for the original matrix columns. By default it uses the annotations specified in \code{\link{consensus_partition}} or \code{\link{run_all_consensus_partition_methods}}.}
\item{anno_col}{A list of colors (color is defined as a named vector) for the annotations. If \code{anno} is a data frame, \code{anno_col} should be a named list where names correspond to the column names in \code{anno}.}
\item{show_row_names}{Whether plot row names on the consensus heatmap (which are the column names in the original matrix)}
\item{...}{other arguments}
}
\details{
For row i and column j in the consensus matrix, the value of corresponding x_ij
is the probability of sample i and sample j being in a same group from all partitions.
There are following heatmaps from left to right:
\itemize{
\item probability of the sample to stay in the corresponding group
\item silhouette scores, which measure the distance of an item to the second closest subgroup.
\item predicted classes.
\item consensus matrix.
\item more annotations if provided as \code{anno}
}
One thing that is very important to note is that since we already know the consensus classes from consensus
partition, in the heatmap, only rows or columns within the group is clustered.
}
\value{
No value is returned.
}
\seealso{
\code{\link{membership_heatmap,ConsensusPartition-method}}
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
data(cola_rl)
consensus_heatmap(cola_rl["sd", "hclust"], k = 3)
}
| /man/consensus_heatmap-ConsensusPartition-method.rd | permissive | liangdp1984/cola | R | false | false | 2,066 | rd | \name{consensus_heatmap-ConsensusPartition-method}
\alias{consensus_heatmap,ConsensusPartition-method}
\alias{consensus_heatmap}
\title{
Heatmap for the consensus matrix
}
\description{
Heatmap for the consensus matrix
}
\usage{
\S4method{consensus_heatmap}{ConsensusPartition}(object, k, internal = FALSE,
anno = get_anno(object), anno_col = get_anno_col(object),
show_row_names = FALSE, ...)
}
\arguments{
\item{object}{A \code{\link{ConsensusPartition-class}} object.}
\item{k}{Number of partitions.}
\item{internal}{Used internally.}
\item{anno}{A data frame of annotations for the original matrix columns. By default it uses the annotations specified in \code{\link{consensus_partition}} or \code{\link{run_all_consensus_partition_methods}}.}
\item{anno_col}{A list of colors (color is defined as a named vector) for the annotations. If \code{anno} is a data frame, \code{anno_col} should be a named list where names correspond to the column names in \code{anno}.}
\item{show_row_names}{Whether plot row names on the consensus heatmap (which are the column names in the original matrix)}
\item{...}{other arguments}
}
\details{
For row i and column j in the consensus matrix, the value of corresponding x_ij
is the probability of sample i and sample j being in a same group from all partitions.
There are following heatmaps from left to right:
\itemize{
\item probability of the sample to stay in the corresponding group
\item silhouette scores, which measure the distance of an item to the second closest subgroup.
\item predicted classes.
\item consensus matrix.
\item more annotations if provided as \code{anno}
}
One thing that is very important to note is that since we already know the consensus classes from consensus
partition, in the heatmap, only rows or columns within the group is clustered.
}
\value{
No value is returned.
}
\seealso{
\code{\link{membership_heatmap,ConsensusPartition-method}}
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
data(cola_rl)
consensus_heatmap(cola_rl["sd", "hclust"], k = 3)
}
|
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
# Reads the raw UCI household power data (';'-separated; missing values are
# coded "?", so numeric columns load as factors) and writes figure/plot1.png.
household_power_data_raw <- read.csv("exdata-data-household_power_consumption/household_power_consumption.txt",sep=";")
household_power <- household_power_data_raw
# Parse the date and time columns in place.
household_power$Date <- as.Date(household_power$Date,"%d/%m/%Y")
household_power$Time <- strptime(household_power$Time,"%H:%M:%S")
# Build a combined POSIX timestamp column.
# Bug fix: the original pasted the *converted* Date/Time columns and then
# re-parsed them with "%d/%m/%Y %H:%M:%S"; after conversion the pasted text
# no longer matches that format, so every datetime came out NA. Paste the
# raw character columns instead.
x <- paste(household_power_data_raw$Date, household_power_data_raw$Time)
datetimes <- strptime(x, "%d/%m/%Y %H:%M:%S")
household_power <- cbind (household_power,datetimes)
# Keep only the two days of interest (comparison coerces the strings to Date).
correct_dates_household_power <- household_power[household_power$Date == '2007-02-01'| household_power$Date == '2007-02-02' ,]
# Convert a factor whose levels are numeric strings to numeric values;
# non-numeric levels such as "?" become NA and are ignored by hist().
as.numeric.factor <- function(x) {as.numeric(levels(x))[x]}
png("./figure/plot1.png")
hist(x=(as.numeric.factor(correct_dates_household_power$Global_active_power)), xlab="Global Active Power (kilowatts)",col="red",main="Global Active Power")
dev.off()
| /plot1.R | no_license | dgilliga/ExData_Plotting1 | R | false | false | 871 | r |
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
# Reads the raw UCI household power data (';'-separated; missing values are
# coded "?", so numeric columns load as factors) and writes figure/plot1.png.
household_power_data_raw <- read.csv("exdata-data-household_power_consumption/household_power_consumption.txt",sep=";")
household_power <- household_power_data_raw
# Parse the date and time columns in place.
household_power$Date <- as.Date(household_power$Date,"%d/%m/%Y")
household_power$Time <- strptime(household_power$Time,"%H:%M:%S")
# Build a combined POSIX timestamp column.
# Bug fix: the original pasted the *converted* Date/Time columns and then
# re-parsed them with "%d/%m/%Y %H:%M:%S"; after conversion the pasted text
# no longer matches that format, so every datetime came out NA. Paste the
# raw character columns instead.
x <- paste(household_power_data_raw$Date, household_power_data_raw$Time)
datetimes <- strptime(x, "%d/%m/%Y %H:%M:%S")
household_power <- cbind (household_power,datetimes)
# Keep only the two days of interest (comparison coerces the strings to Date).
correct_dates_household_power <- household_power[household_power$Date == '2007-02-01'| household_power$Date == '2007-02-02' ,]
# Convert a factor whose levels are numeric strings to numeric values;
# non-numeric levels such as "?" become NA and are ignored by hist().
as.numeric.factor <- function(x) {as.numeric(levels(x))[x]}
png("./figure/plot1.png")
hist(x=(as.numeric.factor(correct_dates_household_power$Global_active_power)), xlab="Global Active Power (kilowatts)",col="red",main="Global Active Power")
dev.off()
|
library(influxdbr)
library(xts)
library(mlbench)
require(caret)

# Read train file
trainFile = read.csv("Data/BPHTrain.csv")

# Create a prediction model using the conditional inference random forest.
control = trainControl(method="repeatedcv", number=10, repeats=3)
set.seed(7)
cforest_trained <- train(Infestation~., data=trainFile, method="cforest", trControl=control)
imp <- varImp(cforest_trained, scale = FALSE)

# Create the prediction model using stochastic gradient boosting; this is
# the model actually used for scoring below.
gbm_trained <- train(Infestation~., data=trainFile, method="gbm", trControl=control)
# Get the variable importance (note: overwrites the cforest importance above).
imp <- varImp(gbm_trained, scale = FALSE)

# Open connection to InfluxDB.
dbcon = influxdbr::influx_connection()

# Poll loop: read the newest sensor record every 15 seconds (ideally data
# from sensors will be collected every day, but here it is scaled down to
# 15 seconds for testing), score it, and write the risk back.
while (TRUE)
{
  result <- influxdbr::influx_select(con = dbcon, db = "iotDb",
                                     field_keys = "*", measurement = "InfestationF1",
                                     limit = 1,
                                     order_desc = TRUE,
                                     return_xts = FALSE)
  resultDf = as.data.frame(result)
  resultVector = as.vector(t(resultDf))
  # Bug fix: as.vector() yields a dimensionless vector, so the original
  # resultVector[5, ] raised "incorrect number of dimensions" at runtime.
  # Index with a single subscript instead. NOTE(review): assumes the
  # timestamp is the 5th field of the record -- confirm against the
  # measurement schema.
  timestamp = as.double(resultVector[5])
  # Predict the infestation risk.
  predictResult = predict(gbm_trained, resultDf, type = "prob")
  # Write the prediction back to the database.
  xtsObj <- xts(predictResult$yes, order.by=as.POSIXct(timestamp,origin="1970-01-01"))
  # Give column names.
  names(xtsObj) <- "risk"
  influxdbr::influx_write(con = dbcon,
                          db = "iotDb",
                          x = xtsObj,
                          measurement = "infestationP4",
                          precision = c("ns")
                          )
  # Wait before polling again.
  Sys.sleep(15)
}
| /BphPrediction.R | no_license | divmohan/PestInfestationPrediction | R | false | false | 1,878 | r | library(influxdbr)
library(xts)
library(mlbench)
require(caret)

# Read train file
trainFile = read.csv("Data/BPHTrain.csv")

# Create a prediction model using the conditional inference random forest.
control = trainControl(method="repeatedcv", number=10, repeats=3)
set.seed(7)
cforest_trained <- train(Infestation~., data=trainFile, method="cforest", trControl=control)
imp <- varImp(cforest_trained, scale = FALSE)

# Create the prediction model using stochastic gradient boosting; this is
# the model actually used for scoring below.
gbm_trained <- train(Infestation~., data=trainFile, method="gbm", trControl=control)
# Get the variable importance (note: overwrites the cforest importance above).
imp <- varImp(gbm_trained, scale = FALSE)

# Open connection to InfluxDB.
dbcon = influxdbr::influx_connection()

# Poll loop: read the newest sensor record every 15 seconds (ideally data
# from sensors will be collected every day, but here it is scaled down to
# 15 seconds for testing), score it, and write the risk back.
while (TRUE)
{
  result <- influxdbr::influx_select(con = dbcon, db = "iotDb",
                                     field_keys = "*", measurement = "InfestationF1",
                                     limit = 1,
                                     order_desc = TRUE,
                                     return_xts = FALSE)
  resultDf = as.data.frame(result)
  resultVector = as.vector(t(resultDf))
  # Bug fix: as.vector() yields a dimensionless vector, so the original
  # resultVector[5, ] raised "incorrect number of dimensions" at runtime.
  # Index with a single subscript instead. NOTE(review): assumes the
  # timestamp is the 5th field of the record -- confirm against the
  # measurement schema.
  timestamp = as.double(resultVector[5])
  # Predict the infestation risk.
  predictResult = predict(gbm_trained, resultDf, type = "prob")
  # Write the prediction back to the database.
  xtsObj <- xts(predictResult$yes, order.by=as.POSIXct(timestamp,origin="1970-01-01"))
  # Give column names.
  names(xtsObj) <- "risk"
  influxdbr::influx_write(con = dbcon,
                          db = "iotDb",
                          x = xtsObj,
                          measurement = "infestationP4",
                          precision = c("ns")
                          )
  # Wait before polling again.
  Sys.sleep(15)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rekognition_operations.R
\name{rekognition_detect_text}
\alias{rekognition_detect_text}
\title{Detects text in the input image and converts it into machine-readable
text}
\usage{
rekognition_detect_text(Image, Filters = NULL)
}
\arguments{
\item{Image}{[required] The input image as base64-encoded bytes or an Amazon S3 object. If you
use the AWS CLI to call Amazon Rekognition operations, you can't pass
image bytes.
If you are using an AWS SDK to call Amazon Rekognition, you might not
need to base64-encode image bytes passed using the \code{Bytes} field. For
more information, see Images in the Amazon Rekognition developer guide.}
\item{Filters}{Optional parameters that let you set the criteria that the text must
meet to be included in your response.}
}
\description{
Detects text in the input image and converts it into machine-readable text.
See \url{https://www.paws-r-sdk.com/docs/rekognition_detect_text/} for full documentation.
}
\keyword{internal}
| /cran/paws.machine.learning/man/rekognition_detect_text.Rd | permissive | paws-r/paws | R | false | true | 1,044 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rekognition_operations.R
\name{rekognition_detect_text}
\alias{rekognition_detect_text}
\title{Detects text in the input image and converts it into machine-readable
text}
\usage{
rekognition_detect_text(Image, Filters = NULL)
}
\arguments{
\item{Image}{[required] The input image as base64-encoded bytes or an Amazon S3 object. If you
use the AWS CLI to call Amazon Rekognition operations, you can't pass
image bytes.
If you are using an AWS SDK to call Amazon Rekognition, you might not
need to base64-encode image bytes passed using the \code{Bytes} field. For
more information, see Images in the Amazon Rekognition developer guide.}
\item{Filters}{Optional parameters that let you set the criteria that the text must
meet to be included in your response.}
}
\description{
Detects text in the input image and converts it into machine-readable text.
See \url{https://www.paws-r-sdk.com/docs/rekognition_detect_text/} for full documentation.
}
\keyword{internal}
|
# day03_05_crawling.R
# Scrape the Wikipedia "World Tourism rankings" table with rvest and
# visualise international arrivals as bar and pie charts.
# FIX: the original Korean comments were mojibake-garbled and several
# comment continuation lines had lost their leading '#', which made the
# script unparsable; comments rewritten in English, code unchanged.
# NOTE(review): install.packages() is normally run once interactively,
# not on every execution; kept for parity with the course material.
install.packages('rvest')
library(rvest)
# CSS selector reminder: '#' selects an id, '.' selects a class.
tour_rank <- read_html('https://en.wikipedia.org/wiki/World_Tourism_rankings')
tour_rank
# All tables with class "wikitable"; the country ranking is the first one.
tables <- html_nodes(tour_rank, '.wikitable')
df <- html_table(tables[1])[[1]]
df
# Inspect the column structure.
str(df)
# Keep only rank, destination and the most recent arrivals column.
df1 <- df[, c(1, 2, 3)]
# The original column names are long; shorten them.
colnames(df1) <- c('rank', 'des', 'tour')
head(df1)
str(df1)
# 'tour' is character data (e.g. "89.4 million"); strip the unit so it
# can be converted to numeric.  gsub(pattern, replacement, x)
df1$tour
df1$tour <- gsub(' million', '', df1$tour)
df1
# Confirm the type before conversion.
class(df1$tour)
df1$tour <- as.numeric(df1$tour)
# Add each destination's share of total arrivals (ratio, in percent).
sum(df1$tour)
df1$tour / sum(df1$tour)
sum(df1$tour / sum(df1$tour))  # sanity check: shares sum to 1
temp <- round(df1$tour / sum(df1$tour), 2) *100
df1['ratio'] <- temp
df1
# Bar chart of arrivals per destination.
barplot(df1$tour, names.arg = df1$des, col = 1:10)
barplot(df1$tour, names.arg = df1$des, col = rainbow(10))
# Proportions read better as a pie chart than as a bar chart.
pie(df1$tour)
pie(df1$tour, col = rainbow(10), labels = df1$des)
# Label each slice with its name and percentage share.
ratio <- paste0(df1$des,'(',df1$ratio,'%',')')
pie(df1$tour, col = rainbow(10), labels = ratio)
| /day03/day03_05_crawling.R | no_license | Kyungpyo-Kang/R_Bigdata | R | false | false | 1,830 | r | # day03_05_crawling.R
# Scrape the Wikipedia "World Tourism rankings" table with rvest and
# visualise international arrivals as bar and pie charts.
# FIX: the original Korean comments were mojibake-garbled and several
# comment continuation lines had lost their leading '#', which made the
# script unparsable; comments rewritten in English, code unchanged.
install.packages('rvest')
library(rvest)
# CSS selector reminder: '#' selects an id, '.' selects a class.
tour_rank <- read_html('https://en.wikipedia.org/wiki/World_Tourism_rankings')
tour_rank
# All tables with class "wikitable"; the country ranking is the first one.
tables <- html_nodes(tour_rank, '.wikitable')
df <- html_table(tables[1])[[1]]
df
# Inspect the column structure.
str(df)
# Keep only rank, destination and the most recent arrivals column.
df1 <- df[, c(1, 2, 3)]
# The original column names are long; shorten them.
colnames(df1) <- c('rank', 'des', 'tour')
head(df1)
str(df1)
# 'tour' is character data (e.g. "89.4 million"); strip the unit so it
# can be converted to numeric.  gsub(pattern, replacement, x)
df1$tour
df1$tour <- gsub(' million', '', df1$tour)
df1
# Confirm the type before conversion.
class(df1$tour)
df1$tour <- as.numeric(df1$tour)
# Add each destination's share of total arrivals (ratio, in percent).
sum(df1$tour)
df1$tour / sum(df1$tour)
sum(df1$tour / sum(df1$tour))  # sanity check: shares sum to 1
temp <- round(df1$tour / sum(df1$tour), 2) *100
df1['ratio'] <- temp
df1
# Bar chart of arrivals per destination.
barplot(df1$tour, names.arg = df1$des, col = 1:10)
barplot(df1$tour, names.arg = df1$des, col = rainbow(10))
# Proportions read better as a pie chart than as a bar chart.
pie(df1$tour)
pie(df1$tour, col = rainbow(10), labels = df1$des)
# Label each slice with its name and percentage share.
ratio <- paste0(df1$des,'(',df1$ratio,'%',')')
pie(df1$tour, col = rainbow(10), labels = ratio)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/here_batch_geocoder.R
\name{here_batch_geocoder}
\alias{here_batch_geocoder}
\title{Batch Geocoding with Here API}
\usage{
here_batch_geocoder(df, search_var, country, here_id, here_code)
}
\arguments{
\item{df}{A data.frame containing the columns of the searchText and the Country where this address is located - data.frame/tibble}
\item{search_var}{The column which contains the address/text to be searched by the Here API}
\item{country}{The column which contains the country where this address is located}
\item{here_id}{The developer application ID that uniquely identify the user}
\item{here_code}{The developer application Code that uniquely identify the user}
}
\value{
Given a valid app_id, and app_code, the script returns a data.frame containing
the searched text/address, and the latitude and longitude, if it found any.
}
\description{
\code{here_batch_geocoder} Returns a data.frame containing the latitude and longitude of a list of addresses.
This should only be used to search large volumes of data with Here Geolocation Services.
}
\details{
This function is used by passing a data.frame that contains a column with the text to be searched in the Here API,
and a column with the country to which this address/text refers. It also leverages two other functions from this (heRe) package:
`here_get_job_status` and `here_download_job`. This process allows the user to upload a data.frame with many observations
to be filled with latitude and longitude data from the Here API, and returns a data.frame containing the searchText
and the latitude and longitude pair.
}
\examples{
\dontrun{
df <-
data.frame(address = c("27 King's College Cir, Toronto", "Avenida Joao Pessoa, Porto Alegre"),
country = c("CAN", "BRA"))
here_batch_geocoder(df, search_var = address, country = country, '<YOUR_APP_ID>', '<YOUR_APP_CODE>')
}
}
| /man/here_batch_geocoder.Rd | no_license | paeselhz/heRe | R | false | true | 1,946 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/here_batch_geocoder.R
\name{here_batch_geocoder}
\alias{here_batch_geocoder}
\title{Batch Geocoding with Here API}
\usage{
here_batch_geocoder(df, search_var, country, here_id, here_code)
}
\arguments{
\item{df}{A data.frame containing the columns of the searchText and the Country where this address is located - data.frame/tibble}
\item{search_var}{The column which contains the address/text to be searched by the Here API}
\item{country}{The column which contains the country where this address is located}
\item{here_id}{The developer application ID that uniquely identify the user}
\item{here_code}{The developer application Code that uniquely identify the user}
}
\value{
Given a valid app_id, and app_code, the script returns a data.frame containing
the searched text/address, and the latitude and longitude, if it found any.
}
\description{
\code{here_batch_geocoder} Returns a data.frame containing the latitude and longitude of a list of addresses.
This should only be used to search large volumes of data with Here Geolocation Services.
}
\details{
This function is used by passing a data.frame, that contains a column with the text to be searched in the Here API,
and a column with the Country which this address/text refers to. It also leverages two other functions from this (heRe) package:
`here_get_job_status` and `here_download_job`. This process occurs to allow the user to upload a data.frame with many observations
to be fulfilled by latitude and longitude data from the Here API, and return a data.frame containing the searchText,
and the latitude and longitude pair.
}
\examples{
\dontrun{
df <-
data.frame(address = c("27 King's College Cir, Toronto", "Avenida Joao Pessoa, Porto Alegre"),
country = c("CAN", "BRA"))
here_batch_geocoder(df, search_var = address, country = country, '<YOUR_APP_ID>', '<YOUR_APP_CODE>')
}
}
|
# A pair of functions that cache the inverse of a matrix.
# This function creates a special "matrix" object that can cache its inverse.
# Example use:
# n <- 10
# x <- matrix(sample(1:n^2), nrow = n)
# y <- makeCacheMatrix(x)
# Build a cache-aware matrix wrapper: a list of four closures sharing the
# enclosing environment, which stores both the matrix `x` and its
# lazily computed inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until cacheSolve() stores one
  list(
    # Replace the wrapped matrix and invalidate any cached inverse.
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # Return the wrapped matrix.
    get = function() x,
    # Store a computed inverse in the cache.
    setsolve = function(value) inv <<- value,
    # Return the cached inverse (NULL if not yet computed).
    getsolve = function() inv
  )
}
# This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cacheSolve retrieves the inverse from the cache. It is assumed that the matrix supplied is always invertible.
# Example use:
# s <- cacheSolve(y) # first time run
# s <- cacheSolve(y) # getting cached data
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "matrix" produced by
  # makeCacheMatrix(), reusing the cached result when one is available.
  # Extra arguments in `...` are forwarded to solve().
  # The wrapped matrix is assumed to be invertible.
  cached <- x$getsolve()
  if (is.null(cached)) {
    # First call (or cache invalidated by $set): compute and store.
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | nirski/ProgrammingAssignment2 | R | false | false | 1,755 | r | # A pair of functions that cache the inverse of a matrix.
# This function creates a special "matrix" object that can cache its inverse.
# Example use:
# n <- 10
# x <- matrix(sample(1:n^2), nrow = n)
# y <- makeCacheMatrix(x)
makeCacheMatrix <- function(x = matrix()) {
# initialize the inverse
m <- NULL
# method to set the original
set <- function(y) {
x <<- y
m <<- NULL
}
# method to get the original
get <- function() {
x
}
# method to set the inverse
setsolve <- function(solve) {
m <<- solve
}
# method to get the inverse
getsolve <- function() {
m
}
# return a list with the methods
list(
set = set,
get = get,
setsolve = setsolve,
getsolve = getsolve
)
}
# This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cacheSolve retrieves the inverse from the cache. It is assumed that the matrix supplied is always invertible.
# Example use:
# s <- cacheSolve(y) # first time run
# s <- cacheSolve(y) # getting cached data
cacheSolve <- function(x, ...) {
# use the method to get the inverse from the special "matrix" object
m <- x$getsolve()
# if the inverse is already set, return the cached inverse
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# if not, get the original
data <- x$get()
# compute the inverse
m <- solve(data, ...)
# cache the inverse to the special "matrix" object
x$setsolve(m)
# and return the inverse
m
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/p_depends.R
\name{p_depends}
\alias{p_depends}
\alias{p_depends_reverse}
\title{Package Dependencies}
\usage{
p_depends(package, local = FALSE, character.only = FALSE, ...)
p_depends_reverse(package, local = FALSE, character.only = FALSE, ...)
}
\arguments{
\item{package}{Name of the package you want the list of dependencies/reverse
dependencies for.}
\item{local}{logical. If \code{TRUE} checks user's local library for
existence; if \code{FALSE} \href{http://cran.r-project.org/}{CRAN} for the
package.}
\item{character.only}{logical. If \code{TRUE} the input is a variable
containing the package name.}
\item{\ldots}{other arguments passed to
\code{\link[tools]{package_dependencies}} and
\code{\link[tools]{dependsOnPkgs}}.}
}
\value{
Returns a list of dependencies/reverse dependencies.
}
\description{
\code{p_depends} - Get \href{http://cran.r-project.org/}{CRAN} or local
package dependencies.
\code{p_depends_reverse} - Get \href{http://cran.r-project.org/}{CRAN} or
local reverse dependencies.
}
\examples{
p_depends(lattice)
p_depends_reverse(lattice)
\dontrun{
## dependencies from CRAN
p_depends(pacman)
p_depends_reverse("pacman")
## local dependencies
p_depends(pacman, local = TRUE)
p_depends_reverse("qdap", local = TRUE)
}
}
\seealso{
\code{\link[pacman]{p_info}},
\code{\link[tools]{package_dependencies}},
\code{\link[tools]{dependsOnPkgs}}
}
\keyword{dependencies}
\keyword{dependency}
| /man/p_depends.Rd | no_license | khughitt/pacman | R | false | false | 1,504 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/p_depends.R
\name{p_depends}
\alias{p_depends}
\alias{p_depends_reverse}
\title{Package Dependencies}
\usage{
p_depends(package, local = FALSE, character.only = FALSE, ...)
p_depends_reverse(package, local = FALSE, character.only = FALSE, ...)
}
\arguments{
\item{package}{Name of the package you want the list of dependencies/reverse
dependencies for.}
\item{local}{logical. If \code{TRUE} checks user's local library for
existence; if \code{FALSE} \href{http://cran.r-project.org/}{CRAN} for the
package.}
\item{character.only}{logical. If \code{TRUE} the input is a variable
containing the package name.}
\item{\ldots}{other arguments passed to
\code{\link[tools]{package_dependencies}} and
\code{\link[tools]{dependsOnPkgs}}.}
}
\value{
Returns a list of dependencies/reverse dependencies.
}
\description{
\code{p_depends} - Get \href{http://cran.r-project.org/}{CRAN} or local
package dependencies.
\code{p_depends_reverse} - Get \href{http://cran.r-project.org/}{CRAN} or
local reverse dependencies.
}
\examples{
p_depends(lattice)
p_depends_reverse(lattice)
\dontrun{
## dependencies from CRAN
p_depends(pacman)
p_depends_reverse("pacman")
## local dependencies
p_depends(pacman, local = TRUE)
p_depends_reverse("qdap", local = TRUE)
}
}
\seealso{
\code{\link[pacman]{p_info}},
\code{\link[tools]{package_dependencies}},
\code{\link[tools]{dependsOnPkgs}}
}
\keyword{dependencies}
\keyword{dependency}
|
#devtools::install_github("rystanley/genepopedit")
#devtools::install_github("bwringe/hybriddetective")
#devtools::install_github("bwringe/parallelnewhybrid")
#modified from script by Bradley Martin
# Load the analysis packages quietly; genepop is currently unused but
# kept commented out for reference.
suppressMessages(library("parallelnewhybrid"))
suppressMessages(library("hybriddetective"))
suppressMessages(library("genepopedit"))
suppressMessages(library("diveRsity"))
# suppressMessages(library("genepop"))
##*** SET WORKING DIRECTORY HERE ***###
# Store working directory as object (not referenced later in this
# script; presumably kept for interactive use).
my_path <- getwd()
###############################################################################
# Command-line interface -- presumably invoked as:
#   Rscript run_newhyb.R <NewHybrids path> <input files path> <burnin> <sweeps>
# TODO(review): confirm the argument order against the calling pipeline.
args = commandArgs(trailingOnly=TRUE)
#print(args)
my.NH <- as.character(args[1])     # path to the NewHybrids installation
my.Files <- as.character(args[2])  # location of the input files
burn <- as.numeric(args[3])        # MCMC burn-in iterations
sweeps <- as.numeric(args[4])      # MCMC sweeps after burn-in
get_os <- function(){
  # Identify the host platform, normalised to lower case
  # ("osx", "linux", "windows", ...).
  info <- Sys.info()
  if (is.null(info)) {
    # Rare builds where Sys.info() is unavailable: fall back to the
    # platform/build descriptors.
    name <- .Platform$OS.type
    if (grepl("^darwin", R.version$os))
      name <- "osx"
    if (grepl("linux-gnu", R.version$os))
      name <- "linux"
  } else {
    name <- info['sysname']
    if (name == 'Darwin')
      name <- "osx"
  }
  tolower(name)
}
# Run NewHybrids in parallel with the OS-appropriate wrapper.
# BUG FIX: the original condition tested `sim.type == "linux"`, but
# `sim.type` is never defined anywhere in this script, so the Linux
# branch always failed with "object 'sim.type' not found".  The detected
# platform string lives in `os`.
os <- get_os()
if (os == "osx"){
  parallelnh_OSX(my.Files, where.NH = my.NH, burnin=burn, sweeps=sweeps)
} else if (os == "linux"){
  parallelnh_LINUX(my.Files, where.NH = my.NH, burnin=burn, sweeps=sweeps)
} else {
  print("ERROR: Unsupported OS")
}
| /run_newhyb.R | no_license | tkchafin/newhyb_pipeline | R | false | false | 1,365 | r | #devtools::install_github("rystanley/genepopedit")
#devtools::install_github("bwringe/hybriddetective")
#devtools::install_github("bwringe/parallelnewhybrid")
#modified from script by Bradley Martin
suppressMessages(library("parallelnewhybrid"))
suppressMessages(library("hybriddetective"))
suppressMessages(library("genepopedit"))
suppressMessages(library("diveRsity"))
# suppressMessages(library("genepop"))
##*** SET WORKING DIRECTORY HERE ***###
# Store working directory as object.
my_path <- getwd()
###############################################################################
args = commandArgs(trailingOnly=TRUE)
#print(args)
my.NH <- as.character(args[1])
my.Files <- as.character(args[2])
burn <- as.numeric(args[3])
sweeps <- as.numeric(args[4])
get_os <- function(){
sysinf <- Sys.info()
if (!is.null(sysinf)){
os <- sysinf['sysname']
if (os == 'Darwin')
os <- "osx"
} else { ## mystery machine
os <- .Platform$OS.type
if (grepl("^darwin", R.version$os))
os <- "osx"
if (grepl("linux-gnu", R.version$os))
os <- "linux"
}
tolower(os)
}
# Run NewHybrids in parallel with the OS-appropriate wrapper.
# BUG FIX: the original condition tested `sim.type == "linux"`, but
# `sim.type` is never defined anywhere in this script, so the Linux
# branch always failed with "object 'sim.type' not found".  The detected
# platform string lives in `os`.
os <- get_os()
if (os == "osx"){
  parallelnh_OSX(my.Files, where.NH = my.NH, burnin=burn, sweeps=sweeps)
} else if (os == "linux"){
  parallelnh_LINUX(my.Files, where.NH = my.NH, burnin=burn, sweeps=sweeps)
} else {
  print("ERROR: Unsupported OS")
}
|
# MEPS (Medical Expenditure Panel Survey) table: survey-weighted mean
# expenditures (XP) by medical condition and employment status.
# NOTE(review): this file appears to be a code TEMPLATE -- tokens such as
# .year., .yy., .FYC., .RX. look like placeholders substituted by a
# generation step; the script is not valid R until they are filled in.
# TODO confirm against the generating pipeline.
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Adjust variance estimation for strata that contain a single PSU
# (see the survey package's survey.lonely.psu option).
options(survey.lonely.psu="adjust")
# Load FYC (full-year consolidated) file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonise design/weight variable names that changed across survey years.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative AGE* values are recoded to NA; AGELAST = latest non-missing age.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Employment Status: latest non-missing of the three round variables.
if(year == 1996)
FYC <- FYC %>% mutate(EMPST53 = EMPST96, EMPST42 = EMPST2, EMPST31 = EMPST1)
FYC <- FYC %>%
mutate_at(vars(EMPST53, EMPST42, EMPST31), funs(replace(., .< 0, NA))) %>%
mutate(employ_last = coalesce(EMPST53, EMPST42, EMPST31))
# Recode to a labelled factor; persons under 16 are out of scope.
FYC <- FYC %>% mutate(
employed = 1*(employ_last==1) + 2*(employ_last > 1),
employed = replace(employed, is.na(employed) & AGELAST < 16, 9),
employed = recode_factor(employed, .default = "Missing", .missing = "Missing",
"1" = "Employed",
"2" = "Not employed",
"9" = "Inapplicable (age < 16)"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(employed,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files (prescriptions, inpatient, ER, outpatient,
# office-based, home health)
RX <- read.xport('C:/MEPS/.RX..ssp') %>% rename(EVNTIDX = LINKIDX)
IPT <- read.xport('C:/MEPS/.IP..ssp')
ERT <- read.xport('C:/MEPS/.ER..ssp')
OPT <- read.xport('C:/MEPS/.OP..ssp')
OBV <- read.xport('C:/MEPS/.OB..ssp')
HHT <- read.xport('C:/MEPS/.HH..ssp')
# Stack events (condition data not collected for dental visits and other medical expenses)
stacked_events <- stack_events(RX, IPT, ERT, OPT, OBV, HHT)
# Roll the payer-specific columns into private (PR) and other (OZ) totals.
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, EVNTIDX,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
# Read in event-condition linking file
clink1 = read.xport('C:/MEPS/.CLNK..ssp') %>%
select(DUPERSID,CONDIDX,EVNTIDX)
# Read in conditions file and merge with condition_codes, link file
# (condition_codes is presumably provided by the surrounding pipeline --
# it is not defined in this script; TODO confirm.)
cond <- read.xport('C:/MEPS/.Conditions..ssp') %>%
select(DUPERSID, CONDIDX, CCCODEX) %>%
mutate(CCS_Codes = as.numeric(as.character(CCCODEX))) %>%
left_join(condition_codes, by = "CCS_Codes") %>%
full_join(clink1, by = c("DUPERSID", "CONDIDX")) %>%
distinct(DUPERSID, EVNTIDX, Condition, .keep_all=T)
# Merge events with conditions-link file and FYCsub; drop events with no
# condition or negative expenditure.
all_events <- full_join(stacked_events, cond, by=c("DUPERSID","EVNTIDX")) %>%
filter(!is.na(Condition),XP.yy.X >= 0) %>%
mutate(count = 1) %>%
full_join(FYCsub, by = "DUPERSID")
# Sum by person, condition, across event
all_pers <- all_events %>%
group_by(employed,ind, DUPERSID, VARSTR, VARPSU, PERWT.yy.F, Condition, count) %>%
summarize_at(vars(SF.yy.X, PR.yy.X, MR.yy.X, MD.yy.X, OZ.yy.X, XP.yy.X),sum) %>% ungroup
# Complex-survey design object for variance estimation.
PERSdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = all_pers,
nest = TRUE)
# Mean total expenditure by condition and employment status.
results <- svyby(~XP.yy.X, by = ~Condition + employed, FUN = svymean, design = PERSdsgn)
print(results)
| /mepstrends/hc_cond/json/code/r/meanEXP__Condition__employed__.r | permissive | RandomCriticalAnalysis/MEPS-summary-tables | R | false | false | 3,500 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Employment Status
if(year == 1996)
FYC <- FYC %>% mutate(EMPST53 = EMPST96, EMPST42 = EMPST2, EMPST31 = EMPST1)
FYC <- FYC %>%
mutate_at(vars(EMPST53, EMPST42, EMPST31), funs(replace(., .< 0, NA))) %>%
mutate(employ_last = coalesce(EMPST53, EMPST42, EMPST31))
FYC <- FYC %>% mutate(
employed = 1*(employ_last==1) + 2*(employ_last > 1),
employed = replace(employed, is.na(employed) & AGELAST < 16, 9),
employed = recode_factor(employed, .default = "Missing", .missing = "Missing",
"1" = "Employed",
"2" = "Not employed",
"9" = "Inapplicable (age < 16)"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(employed,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files
RX <- read.xport('C:/MEPS/.RX..ssp') %>% rename(EVNTIDX = LINKIDX)
IPT <- read.xport('C:/MEPS/.IP..ssp')
ERT <- read.xport('C:/MEPS/.ER..ssp')
OPT <- read.xport('C:/MEPS/.OP..ssp')
OBV <- read.xport('C:/MEPS/.OB..ssp')
HHT <- read.xport('C:/MEPS/.HH..ssp')
# Stack events (condition data not collected for dental visits and other medical expenses)
stacked_events <- stack_events(RX, IPT, ERT, OPT, OBV, HHT)
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, EVNTIDX,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
# Read in event-condition linking file
clink1 = read.xport('C:/MEPS/.CLNK..ssp') %>%
select(DUPERSID,CONDIDX,EVNTIDX)
# Read in conditions file and merge with condition_codes, link file
cond <- read.xport('C:/MEPS/.Conditions..ssp') %>%
select(DUPERSID, CONDIDX, CCCODEX) %>%
mutate(CCS_Codes = as.numeric(as.character(CCCODEX))) %>%
left_join(condition_codes, by = "CCS_Codes") %>%
full_join(clink1, by = c("DUPERSID", "CONDIDX")) %>%
distinct(DUPERSID, EVNTIDX, Condition, .keep_all=T)
# Merge events with conditions-link file and FYCsub
all_events <- full_join(stacked_events, cond, by=c("DUPERSID","EVNTIDX")) %>%
filter(!is.na(Condition),XP.yy.X >= 0) %>%
mutate(count = 1) %>%
full_join(FYCsub, by = "DUPERSID")
# Sum by person, condition, across event
all_pers <- all_events %>%
group_by(employed,ind, DUPERSID, VARSTR, VARPSU, PERWT.yy.F, Condition, count) %>%
summarize_at(vars(SF.yy.X, PR.yy.X, MR.yy.X, MD.yy.X, OZ.yy.X, XP.yy.X),sum) %>% ungroup
PERSdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = all_pers,
nest = TRUE)
results <- svyby(~XP.yy.X, by = ~Condition + employed, FUN = svymean, design = PERSdsgn)
print(results)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_tar.R
\name{URLs_IMAGEWOOF_160}
\alias{URLs_IMAGEWOOF_160}
\title{IMAGEWOOF_160 dataset}
\usage{
URLs_IMAGEWOOF_160(filename = "IMAGEWOOF_160", untar = TRUE)
}
\arguments{
\item{filename}{the name of the file}
\item{untar}{logical, whether to untar the '.tgz' file}
}
\value{
None
}
\description{
download IMAGEWOOF_160 dataset
}
| /man/URLs_IMAGEWOOF_160.Rd | permissive | Cdk29/fastai | R | false | true | 418 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_tar.R
\name{URLs_IMAGEWOOF_160}
\alias{URLs_IMAGEWOOF_160}
\title{IMAGEWOOF_160 dataset}
\usage{
URLs_IMAGEWOOF_160(filename = "IMAGEWOOF_160", untar = TRUE)
}
\arguments{
\item{filename}{the name of the file}
\item{untar}{logical, whether to untar the '.tgz' file}
}
\value{
None
}
\description{
download IMAGEWOOF_160 dataset
}
|
#' @title Censored Mixed-Effects Models with Autoregressive Correlation Structure and DEC for Normal and t-Student Errors
#' @import numDeriv
#' @import TruncatedNormal
#' @import LaplacesDemon
#' @import tcltk
#' @import MASS
#' @import stats
#' @import relliptical
#' @import expm
#' @description This function fits left-, right- or interval-censored mixed-effects linear models, with autoregressive errors of order \code{p}, using the EM algorithm. It returns estimates, standard errors and predictions of future observations.
#' @param y Vector \code{1 x n} of censored responses, where \code{n} is the sum of the number of observations of each individual
#' @param x Design matrix of the fixed effects of order \code{n x s}, corresponding to vector of fixed effects.
#' @param z Design matrix of the random effects of order\code{n x b}, corresponding to vector of random effects.
#' @param cc Vector of censoring indicators of length \code{n}, where \code{n} is the total of observations. For each observation: \code{0} if non-censored, \code{1} if censored.
#' @param nj Vector \code{1 x m} with the number of observations for each subject, where \code{m} is the total number of individuals.
#' @param tt Vector \code{1 x n} with the time the measurements were made, where \code{n} is the total number of measurements for all individuals. Default it's considered regular times.
#' @param struc \code{UNC},\code{ARp},\code{DEC},\code{SYM} or \code{DEC(AR)} for uncorrelated ,autoregressive, DEC(phi1,phi2), DEC(phi1,phi2=1), DEC(DEC(phi1,phi2=1)) structure, respectively
#' @param order Order of the autoregressive process. Must be a positive integer value.
#' @param initial List with the initial values in the next orden: betas,sigma2,alphas,phi and nu. If it is not indicated it will be provided automatically. Default is \code{NULL}
#' @param nu.fixed Logical. Should estimate the parameter "nu" for the t-student distribution?. If is False indicates the value in the list of initial values. Default is \code{FALSE}
#' @param typeModel \code{Normal} for Normal distribution and \code{Student} for t-Student distribution. Default is \code{Normal}
#' @param cens.type \code{left} for left censoring, \code{right} for right censoring and \code{interval} for interval censoring. Default is \code{left}
#' @param LI Vector censoring lower limit indicator of length \code{n}. For each observation: \code{0} if non-censored, \code{-inf} if censored. It is only indicated for when \code{cens.type} is \code{both}. Default is \code{NULL}
#' @param LS Vector censoring upper limit indicator of length \code{n}. For each observation: \code{0} if non-censored, \code{inf} if censored.It is only indicated for when \code{cens.type} is \code{both}. Default is \code{NULL}
#' @param MaxIter The maximum number of iterations of the EM algorithm. Default is \code{200}
#' @param error The convergence maximum error. Default is \code{0.0001}
#' @param Prev Indicator of the prediction process. Available at the moment only for the \code{typeModel=normal} case. Default is \code{FALSE}
#' @param isubj Vector indicator of subject included in the prediction process. Default is \code{NULL}
#' @param step Number of steps for prediction. Default is \code{NULL}
#' @param xpre Design matrix of the fixed effects to be predicted. Default is \code{NULL}.
#' @param zpre Design matrix of the random effects to be predicted. Default is \code{NULL}.
#' @return returns list of class \dQuote{ARpMMEC}:
#' \item{FixEffect}{Data frame with: estimate, standar errors and confidence intervals of the fixed effects.}
#' \item{Sigma2}{Data frame with: estimate, standar errors and confidence intervals of the variance of the white noise process.}
#' \item{Phi}{Data frame with: estimate, standar errors and confidence intervals of the autoregressive parameters.}
#' \item{RandEffect}{Data frame with: estimate, standar errors and confidence intervals of the random effects.}
#' \item{nu}{the parameter "nu" for the t-student distribution}
#' \item{Est}{Vector of parameters estimate (fixed Effects, sigma2, phi, random effects).}
#' \item{SE}{Vector of the standard errors of (fixed Effects, sigma2, phi, random effects).}
#' \item{Residual}{Vector of the marginal residuals.}
#' \item{loglik}{Log-likelihood value.}
#' \item{AIC}{Akaike information criterion.}
#' \item{BIC}{Bayesian information criterion.}
#' \item{AICc}{Corrected Akaike information criterion.}
#' \item{iter}{Number of iterations until convergence.}
#' \item{Yfit}{Vector "y" fitted}
#' \item{MI}{Information matrix}
#' \item{Prev}{Predicted values (if xpre and zpre is not \code{NULL}).}
#' \item{time}{Processing time.}
#' \item{others}{The first and second moments of the random effect and vector Y}
#' @references Olivari, R. C., Garay, A. M., Lachos, V. H., & Matos, L. A. (2021). Mixed-effects
#' models for censored data with autoregressive errors. Journal of Biopharmaceutical Statistics, 31(3), 273-294.
#' \doi{10.1080/10543406.2020.1852246}
#' @examples
#' \dontrun{
#'p.cens = 0.1
#'m = 10
#'D = matrix(c(0.049,0.001,0.001,0.002),2,2)
#'sigma2 = 0.30
#'phi = 0.6
#'beta = c(1,2,1)
#'nj=rep(4,10)
#'tt=rep(1:4,length(nj))
#'x<-matrix(runif(sum(nj)*length(beta),-1,1),sum(nj),length(beta))
#'z<-matrix(runif(sum(nj)*dim(D)[1],-1,1),sum(nj),dim(D)[1])
#'data=ARpMMEC.sim(m,x,z,tt,nj,beta,sigma2,D,phi,struc="ARp",typeModel="Normal",p.cens=p.cens)
#'
#'teste1=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Normal",MaxIter = 2)
#'teste2=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Student",MaxIter = 2)
#'
#'xx=matrix(runif(6*length(beta),-1,1),6,length(beta))
#'zz=matrix(runif(6*dim(D)[1],-1,1),6,dim(D)[1])
#'isubj=c(1,4,5)
#'teste3=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Normal",
#' MaxIter = 2,Prev=TRUE,step=2,isubj=isubj,xpre=xx,zpre=zz)
#'teste3$Prev
#'
#' }
#'
#'
#' @export
#'
#'
#'
ARpMMEC.est <- function(y, x, z, tt, cc, nj, struc = "UNC", order = 1, initial = NULL, nu.fixed = TRUE,
                        typeModel = "Normal", cens.type = "left", LI = NULL, LS = NULL, MaxIter = 200,
                        error = 0.0001, Prev = FALSE, step = NULL, isubj = NULL, xpre = NULL, zpre = NULL)
{
  ## Fit a censored linear mixed-effects model whose within-subject errors follow
  ## an AR(p), DEC, DEC(AR), SYM or uncorrelated ("UNC") structure, using the EM
  ## algorithm for Normal or Student-t errors. Validates all inputs, dispatches
  ## to the appropriate EM routine, prints a fitted-model summary, and returns
  ## an object of class "ARpMMEC" with estimates, standard errors,
  ## model-selection criteria and (optionally) predictions.
  ##
  ## y, x, z, tt, cc, nj : response vector, fixed/random-effects design matrices,
  ##   measurement times, censoring indicators (0 = observed, 1 = censored) and
  ##   per-subject observation counts.
  ## struc, order        : error correlation structure and AR order (ARp only).
  ## initial, nu.fixed   : optional initial values; handling of Student-t "nu".
  ## typeModel, cens.type, LI, LS : error distribution and censoring scheme.
  ## MaxIter, error      : EM stopping rules.
  ## Prev, step, isubj, xpre, zpre : prediction controls (Normal model only;
  ##   the Student-t routines below do not receive them).
  m <- length(y); N <- sum(nj); p <- dim(x)[2]; q1 <- dim(z)[2]; m1 <- m*p; m2 <- m*q1
  ## ---- Coerce inputs to the expected shapes ----
  if(is.matrix(y)) y <- y[as.vector(!is.na(as.vector(t(y))))]
  if(is.matrix(cc)) cc <- cc[as.vector(!is.na(as.vector(t(cc))))]
  if (!is.matrix(x)) x <- as.matrix(x)
  ## BUG FIX: the original assigned the coerced z to x ("x = as.matrix(z)"),
  ## clobbering the fixed-effects design matrix whenever z was not a matrix.
  if (!is.matrix(z)) z <- as.matrix(z)
  if( is.matrix(nj)) nj <- nj[as.vector(!is.na(as.vector(t(nj))))]
  ## ---- Validate arguments ----
  if(!is.numeric(y)) stop("y must be a numeric vector. Check documentation!")
  if(sum(is.na(y))>0) stop("Vector y does not support NA values.")
  if(!is.vector(y)) stop("y must be a vector.Check documentation!")
  if(length(y)!=nrow(as.matrix(x))) stop("x does not have the same number of lines than y.")
  if(length(y)!=length(cc)) stop("cc does not have the same length than y.")
  ## BUG FIX: this message referred to x although the check is on z.
  if(length(y)!=nrow(as.matrix(z))) stop("z does not have the same number of lines than y.")
  if(length(y)!=sum(nj)) stop("not compatible sizes between the response y and the repetited measures nj")
  if(length(y)==0) stop("The parameter y must be provided.")
  if(length(y)!=length(tt)) stop("not compatible sizes between the response y and the vector time tt")
  if(!is.numeric(x)) stop("x must be a numeric matrix. Check documentation!")
  if(sum(is.na(x))>0) stop("There are some NA values in x.")
  if(!is.matrix(x)) stop("x must be a matrix. Check documentation!")
  if(det(t(x)%*%x)==0) stop("the columns of x must be linearly independent.")
  if(length(x)==0) stop("The parameter x must be provided.")
  if(!is.numeric(z)) stop("z must be a numeric matrix. Check documentation!")
  if(!is.matrix(z)) stop("z must be a matrix. Check documentation!")
  if(sum(is.na(z))>0) stop("There are some NA values in z.")
  if(length(z)==0) stop("The parameter z must be provided.")
  if(!is.numeric(cc)) stop("cc must be a numeric vector. Check documentation!")
  if(!is.vector(cc)) stop("cc must be a vector.Check documentation!")
  if(sum(is.na(cc))>0) stop("There are some NA values in cc.")
  if(sum(cc%in%c(0,1))<length(cc)) stop("The elements of the vector cc must be 0 or 1.")
  if(length(cc)==0) stop("The parameter cc must be provided.")
  if(!is.numeric(nj)) stop("nj must be a numeric vector. Check documentation!")
  if(!is.vector(nj)) stop("nj must be a vector. Check documentation!")
  if(sum(is.na(nj))>0) stop("There are some NA values in nj")
  if(length(nj)==0) stop("The parameter nj must be provided.")
  if(struc!="DEC"&struc!="DEC(AR)"&struc!="SYM"&struc!="ARp"&struc!="UNC") stop("Struc must be UNC, DEC, DEC(AR), SYM or ARp. Check documentation!")
  if(struc=="ARp"){
    ## BUG FIX: typo "Orde" in the original message.
    if(!is.numeric(order) ) stop("Order must be a number. Check documentation!")
    if(length(order)!=1) stop("Order must be a value.")
    if(is.numeric(order))
    { if(order!=round(order)|order<=0) stop("Order must be a positive integer value.")}}
  if(!is.null(initial))
  { if(!is.null(initial$betas))
  {if(!is.numeric(initial$betas)) stop("betas must be a numeric vector. Check documentation!")
    if(!is.vector(initial$betas)) stop("betas must be a vector. Check documentation!")
    if(length(initial$betas)!=ncol(x)) stop("not compatible sizes between the matrix x and parameter betas.")}
    if(!is.null(initial$sigma2))
    {if(!is.numeric(initial$sigma2)) stop("sigma2 must be a scalar.")
      if(length(initial$sigma2)>1) stop("sigma2 must be a scalar.")}
    if(!is.null(initial$alphas))
    {if(!is.matrix(initial$alphas)) stop("alphas must be a matrix.")
      ## BUG FIX: the original compared the upper- and lower-triangle extracts
      ## element-wise; those extracts are not each other's transpose, and the
      ## comparison produced a length > 1 condition inside if(). Use
      ## isSymmetric() (unname() so asymmetric dimnames do not matter).
      if(!isSymmetric(unname(initial$alphas))) stop("alphas must be a symmetric matrix.")
      if(dim(initial$alphas)[2]!=ncol(z)) stop("not compatible sizes between the matrix z and parameter alphas.")}
    if(struc=="ARp"){
      if(!is.null(initial$phi))
      {if(!is.numeric(initial$phi)) stop("phi must be a numeric vector. Check documentation!")
        if(length(initial$phi)!=order) stop("not compatible sizes between the value Arp and parameter phi. Check documentation!")}
    }
  }
  if(typeModel!='Normal'& typeModel!='Student') stop('typeModel must be Normal or Student. Check documentation!')
  if(cens.type!="left" & cens.type!="right" & cens.type!="interval")stop('cens.type must be left, right or interval. Check documentation!')
  if(cens.type=="interval"&is.null(LI)) stop("The parameter LI must be provided.. Check documentation!")
  if(cens.type=="interval"&is.null(LS)) stop("The parameter LS must be provided.. Check documentation!")
  if(!is.null(LI)&!is.numeric(LI)) stop("LI must be a numeric vector. Check documentation!")
  if(!is.null(LS)&!is.numeric(LS)) stop("LS must be a numeric vector. Check documentation!")
  if(length(LS)!=length(LI)) stop("not compatible sizes between the vectors LI and LS. Check documentation!")
  if(cens.type=="interval") {
    if(length(y)!=length(LI)) stop("not compatible sizes between the vectors y and LI. Check documentation!")
    if(length(y)!=length(LS)) stop("not compatible sizes between the vectors y and LS. Check documentation!")
  }
  if (!is.numeric(MaxIter)) stop("MaxIter must be a positive number. Check documentation!")
  if (length(MaxIter) > 1) stop("MaxIter parameter must be a scalar")
  if (MaxIter <0) stop("MaxIter parameter must be positive number")
  if (!is.numeric(error)) stop("error must be a positive number. Check documentation!")
  if (length(error) > 1) stop("error parameter must be a scalar")
  if (error <0) stop("error parameter must be positive number")
  if (Prev) {
    if(is.null(step)|is.null(xpre)|is.null(zpre)|is.null(isubj)) stop("step, isubj, xpre, zpre needs to be provided. Check documentation!")
    if (!is.numeric(isubj)) stop("isubj must be a numeric vector. Check documentation!")
    if (!is.numeric(step)) stop("step must be a positive number. Check documentation!")
    if (step <0) stop("step parameter must be positive number")
    if (length(step) > 1) stop("step parameter must be a scalar")
    if (ncol(xpre)!=ncol(as.matrix(x))) stop("xpre must have the same number of columns than x")
    if (sum(is.na(xpre))>0) stop("There are some NA values in xpre")
    if (!is.numeric(xpre)) stop("xpred must be a numeric matrix")
    if (ncol(zpre)!=ncol(as.matrix(z))) stop("zpre must have the same number of columns than z")
    if (sum(is.na(zpre))>0) stop("There are some NA values in zpre")
    if (!is.numeric(zpre)) stop("zpred must be a numeric matrix")
    if(nrow(xpre)!=length(isubj)*step) stop("not compatible sizes between xpre and isubj. Check documentation!")
    if(nrow(zpre)!=length(isubj)*step) stop("not compatible sizes between zpre and isubj. Check documentation!")
  }
  ## ---- Fit by EM, dispatching on error distribution and structure ----
  ## UNC is handled by the AR routines with Arp = "UNC" as a sentinel value.
  if(typeModel=="Normal"){
    if(struc=="ARp"){
      out<-EMCensArpN(cc=cc,y=y,x=x,z=z,tt=tt,nj=nj, Arp=order, initial=initial, cens.type=cens.type, LI=LI,LS=LS,MaxIter=MaxIter,ee=error,
                      Prev=Prev,step=step,isubj=isubj ,xpre=xpre,zpre=zpre)}
    if(struc=="UNC"){
      out<-EMCensArpN(cc=cc,y=y,x=x,z=z,tt=tt,nj=nj, Arp=struc, initial=initial, cens.type=cens.type, LI=LI,LS=LS,MaxIter=MaxIter,ee=error,
                      Prev=Prev,step=step,isubj=isubj ,xpre=xpre,zpre=zpre)}
    if(struc=="DEC"|struc=="DEC(AR)"|struc=="SYM"){
      out<-EMCensDECN(cc=cc,y=y,x=x,z=z,tt=tt,nj=nj, struc=struc, initial=initial, cens.type=cens.type, LI=LI,LS=LS,MaxIter=MaxIter,ee=error,
                      Prev=Prev,step=step,isubj=isubj ,xpre=xpre,zpre=zpre)}
  }
  if(typeModel=="Student"){
    ## NOTE(review): the Student-t routines do not receive the prediction
    ## arguments, so Prev is effectively available only for typeModel="Normal".
    if(struc=="ARp"){
      out<-EMCensArpT(cc=cc,y=y,x=x,z=z,ttc=tt,nj=nj,Arp=order,initial=initial,cens.type=cens.type,LL=LI,LU=LS,nu.fixed=nu.fixed,
                      iter.max=MaxIter,precision=error)
    }
    if(struc=="UNC"){
      out<-EMCensArpT(cc=cc,y=y,x=x,z=z,ttc=tt,nj=nj,Arp=struc,initial=initial,cens.type=cens.type,LL=LI,LU=LS,nu.fixed=nu.fixed,
                      iter.max=MaxIter,precision=error)
    }
    if(struc=="DEC"|struc=="DEC(AR)"|struc=="SYM"){
      out<-EMCensDECT(cc=cc,y=y,x=x,z=z,ttc=tt,nj=nj,struc=struc,initial=initial,cens.type=cens.type,LL=LI,LU=LS,nu.fixed=nu.fixed,
                      iter.max=MaxIter,precision=error)
    }
  }
  ## ---- Print the fitted-model summary ----
  ## The ARp and DEC summaries in the original were two near-identical 55-line
  ## blocks; they differ only in the title, the "order"/"Case" line and whether
  ## the autoregressive-parameters table is shown, so share one local helper.
  printFit <- function(title.line, case.label, case.value, show.phi) {
    cat('\n')
    cat('---------------------------------------------------\n')
    cat(title.line)
    cat('---------------------------------------------------\n')
    cat('\n')
    cat(case.label, case.value)
    cat('\n')
    cat("Distribution =",typeModel)
    cat('\n')
    if(typeModel=="Student") cat("nu =",out$nu)
    cat('\n')
    cat("Subjects =",length(nj),";",'Observations =',sum(nj))
    cat('\n')
    cat('\n')
    cat('-----------\n')
    cat('Estimates\n')
    cat('-----------\n')
    cat('\n')
    cat('- Fixed effects \n')
    cat('\n')
    print(out$tableB)
    cat('\n')
    cat('\n')
    cat('- Sigma^2 \n')
    cat('\n')
    print(out$tableS)
    cat('\n')
    cat('\n')
    if (show.phi) {
      cat('- Autoregressives parameters\n')
      cat('\n')
      print(out$tableP)
      cat('\n')
      cat('\n')
    }
    cat('- Random effects \n')
    cat('\n')
    print(out$tableA)
    cat('\n')
    cat('\n')
    cat('------------------------\n')
    cat('Model selection criteria\n')
    cat('------------------------\n')
    cat('\n')
    critFin <- c(out$loglik, out$AIC, out$BIC)
    critFin <- round(t(as.matrix(critFin)),digits=3)
    dimnames(critFin) <- list(c("Value"),c("Loglik", "AIC", "BIC"))
    print(critFin)
    cat('\n')
    cat('-------\n')
    cat('Details\n')
    cat('-------\n')
    cat('\n')
    cat("Convergence reached? =",(out$iter < MaxIter))
    cat('\n')
    cat('Iterations =',out$iter,"/",MaxIter)
    cat('\n')
    cat("Processing time =",out$time,units(out$time))
    cat('\n')
  }
  if (struc == "ARp") {
    printFit('Autoregressive censored mixed-effects models \n',
             "Autoregressive order =", order, show.phi = TRUE)
  } else {
    printFit('DEC censored mixed-effects models \n',
             "Case =", struc, show.phi = (struc != "UNC"))
  }
  ## ---- Assemble the returned object ----
  if(typeModel=="Student") nu<-out$nu
  if(typeModel=="Normal") nu<-NULL
  obj.out <- list(FixEffect=out$tableB, Sigma2=out$tableS, Phi=out$tableP,RandEffect=out$tableA, nu=nu,
                  Est=c(out$beta1, sigma2=out$sigmae, phi=out$phi, RnEffect=out$dd), SE=out$SE,Residual=out$residual,
                  loglik=out$loglik, AIC=out$AIC, BIC=out$BIC, AICc=out$AICcorr, iter=out$iter, Yfit=out$yfit,
                  MI=out$MI, Prev=out$Prev, time=out$time, others=list(ubi = out$ubi, ubbi = out$ubbi, uybi = out$uybi, uyi = out$uyi, uyyi = out$uyyi,varbeta=out$varbeta,yog=out$yorg) )
  class(obj.out) <- "ARpMMEC"
  return(obj.out)
}
| /R/ARpMMEC.est.R | no_license | cran/ARpLMEC | R | false | false | 19,429 | r | #' @title Censored Mixed-Effects Models with Autoregressive Correlation Structure and DEC for Normal and t-Student Errors
#' @import numDeriv
#' @import TruncatedNormal
#' @import LaplacesDemon
#' @import tcltk
#' @import MASS
#' @import stats
#' @import relliptical
#' @import expm
#' @description This function fits left-, right- or interval-censored mixed-effects linear models, with autoregressive errors of order \code{p}, using the EM algorithm. It returns estimates, standard errors and predictions of future observations.
#' @param y Vector \code{1 x n} of censored responses, where \code{n} is the sum of the number of observations of each individual
#' @param x Design matrix of the fixed effects of order \code{n x s}, corresponding to vector of fixed effects.
#' @param z Design matrix of the random effects of order\code{n x b}, corresponding to vector of random effects.
#' @param cc Vector of censoring indicators of length \code{n}, where \code{n} is the total of observations. For each observation: \code{0} if non-censored, \code{1} if censored.
#' @param nj Vector \code{1 x m} with the number of observations for each subject, where \code{m} is the total number of individuals.
#' @param tt Vector \code{1 x n} with the time the measurements were made, where \code{n} is the total number of measurements for all individuals. Default it's considered regular times.
#' @param struc \code{UNC},\code{ARp},\code{DEC},\code{SYM} or \code{DEC(AR)} for uncorrelated ,autoregressive, DEC(phi1,phi2), DEC(phi1,phi2=1), DEC(DEC(phi1,phi2=1)) structure, respectively
#' @param order Order of the autoregressive process. Must be a positive integer value.
#' @param initial List with the initial values in the next orden: betas,sigma2,alphas,phi and nu. If it is not indicated it will be provided automatically. Default is \code{NULL}
#' @param nu.fixed Logical. Should the parameter "nu" for the t-Student distribution be estimated? If \code{FALSE}, indicate its value in the list of initial values. Default is \code{TRUE}
#' @param typeModel \code{Normal} for Normal distribution and \code{Student} for t-Student distribution. Default is \code{Normal}
#' @param cens.type \code{left} for left censoring, \code{right} for right censoring and \code{interval} for intervalar censoring. Default is \code{left}
#' @param LI Vector censoring lower limit indicator of length \code{n}. For each observation: \code{0} if non-censored, \code{-inf} if censored. It is only indicated for when \code{cens.type} is \code{both}. Default is \code{NULL}
#' @param LS Vector censoring upper limit indicator of length \code{n}. For each observation: \code{0} if non-censored, \code{inf} if censored.It is only indicated for when \code{cens.type} is \code{both}. Default is \code{NULL}
#' @param MaxIter The maximum number of iterations of the EM algorithm. Default is \code{200}
#' @param error The convergence maximum error. Default is \code{0.0001}
#' @param Prev Indicator of the prediction process. Available at the moment only for the \code{typeModel=normal} case. Default is \code{FALSE}
#' @param isubj Vector indicator of subject included in the prediction process. Default is \code{NULL}
#' @param step Number of steps for prediction. Default is \code{NULL}
#' @param xpre Design matrix of the fixed effects to be predicted. Default is \code{NULL}.
#' @param zpre Design matrix of the random effects to be predicted. Default is \code{NULL}.
#' @return returns list of class \dQuote{ARpMMEC}:
#' \item{FixEffect}{Data frame with: estimate, standard errors and confidence intervals of the fixed effects.}
#' \item{Sigma2}{Data frame with: estimate, standard errors and confidence intervals of the variance of the white noise process.}
#' \item{Phi}{Data frame with: estimate, standard errors and confidence intervals of the autoregressive parameters.}
#' \item{RandEffect}{Data frame with: estimate, standard errors and confidence intervals of the random effects.}
#' \item{nu}{the parameter "nu" for the t-student distribution}
#' \item{Est}{Vector of parameters estimate (fixed Effects, sigma2, phi, random effects).}
#' \item{SE}{Vector of the standard errors of (fixed Effects, sigma2, phi, random effects).}
#' \item{Residual}{Vector of the marginal residuals.}
#' \item{loglik}{Log-likelihood value.}
#' \item{AIC}{Akaike information criterion.}
#' \item{BIC}{Bayesian information criterion.}
#' \item{AICc}{Corrected Akaike information criterion.}
#' \item{iter}{Number of iterations until convergence.}
#' \item{Yfit}{Vector "y" fitted}
#' \item{MI}{Information matrix}
#' \item{Prev}{Predicted values (if xpre and zpre is not \code{NULL}).}
#' \item{time}{Processing time.}
#' \item{others}{The first and second moments of the random effect and vector Y}
#' @references Olivari, R. C., Garay, A. M., Lachos, V. H., & Matos, L. A. (2021). Mixed-effects
#' models for censored data with autoregressive errors. Journal of Biopharmaceutical Statistics, 31(3), 273-294.
#' \doi{10.1080/10543406.2020.1852246}
#' @examples
#' \dontrun{
#'p.cens = 0.1
#'m = 10
#'D = matrix(c(0.049,0.001,0.001,0.002),2,2)
#'sigma2 = 0.30
#'phi = 0.6
#'beta = c(1,2,1)
#'nj=rep(4,10)
#'tt=rep(1:4,length(nj))
#'x<-matrix(runif(sum(nj)*length(beta),-1,1),sum(nj),length(beta))
#'z<-matrix(runif(sum(nj)*dim(D)[1],-1,1),sum(nj),dim(D)[1])
#'data=ARpMMEC.sim(m,x,z,tt,nj,beta,sigma2,D,phi,struc="ARp",typeModel="Normal",p.cens=p.cens)
#'
#'teste1=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Normal",MaxIter = 2)
#'teste2=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Student",MaxIter = 2)
#'
#'xx=matrix(runif(6*length(beta),-1,1),6,length(beta))
#'zz=matrix(runif(6*dim(D)[1],-1,1),6,dim(D)[1])
#'isubj=c(1,4,5)
#'teste3=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Normal",
#' MaxIter = 2,Prev=TRUE,step=2,isubj=isubj,xpre=xx,zpre=zz)
#'teste3$Prev
#'
#' }
#'
#'
#' @export
#'
#'
#'
ARpMMEC.est <- function(y, x, z, tt, cc, nj, struc = "UNC", order = 1, initial = NULL, nu.fixed = TRUE,
                        typeModel = "Normal", cens.type = "left", LI = NULL, LS = NULL, MaxIter = 200,
                        error = 0.0001, Prev = FALSE, step = NULL, isubj = NULL, xpre = NULL, zpre = NULL)
{
  ## Fit a censored linear mixed-effects model whose within-subject errors follow
  ## an AR(p), DEC, DEC(AR), SYM or uncorrelated ("UNC") structure, using the EM
  ## algorithm for Normal or Student-t errors. Validates all inputs, dispatches
  ## to the appropriate EM routine, prints a fitted-model summary, and returns
  ## an object of class "ARpMMEC" with estimates, standard errors,
  ## model-selection criteria and (optionally) predictions.
  ##
  ## y, x, z, tt, cc, nj : response vector, fixed/random-effects design matrices,
  ##   measurement times, censoring indicators (0 = observed, 1 = censored) and
  ##   per-subject observation counts.
  ## struc, order        : error correlation structure and AR order (ARp only).
  ## initial, nu.fixed   : optional initial values; handling of Student-t "nu".
  ## typeModel, cens.type, LI, LS : error distribution and censoring scheme.
  ## MaxIter, error      : EM stopping rules.
  ## Prev, step, isubj, xpre, zpre : prediction controls (Normal model only;
  ##   the Student-t routines below do not receive them).
  m <- length(y); N <- sum(nj); p <- dim(x)[2]; q1 <- dim(z)[2]; m1 <- m*p; m2 <- m*q1
  ## ---- Coerce inputs to the expected shapes ----
  if(is.matrix(y)) y <- y[as.vector(!is.na(as.vector(t(y))))]
  if(is.matrix(cc)) cc <- cc[as.vector(!is.na(as.vector(t(cc))))]
  if (!is.matrix(x)) x <- as.matrix(x)
  ## BUG FIX: the original assigned the coerced z to x ("x = as.matrix(z)"),
  ## clobbering the fixed-effects design matrix whenever z was not a matrix.
  if (!is.matrix(z)) z <- as.matrix(z)
  if( is.matrix(nj)) nj <- nj[as.vector(!is.na(as.vector(t(nj))))]
  ## ---- Validate arguments ----
  if(!is.numeric(y)) stop("y must be a numeric vector. Check documentation!")
  if(sum(is.na(y))>0) stop("Vector y does not support NA values.")
  if(!is.vector(y)) stop("y must be a vector.Check documentation!")
  if(length(y)!=nrow(as.matrix(x))) stop("x does not have the same number of lines than y.")
  if(length(y)!=length(cc)) stop("cc does not have the same length than y.")
  ## BUG FIX: this message referred to x although the check is on z.
  if(length(y)!=nrow(as.matrix(z))) stop("z does not have the same number of lines than y.")
  if(length(y)!=sum(nj)) stop("not compatible sizes between the response y and the repetited measures nj")
  if(length(y)==0) stop("The parameter y must be provided.")
  if(length(y)!=length(tt)) stop("not compatible sizes between the response y and the vector time tt")
  if(!is.numeric(x)) stop("x must be a numeric matrix. Check documentation!")
  if(sum(is.na(x))>0) stop("There are some NA values in x.")
  if(!is.matrix(x)) stop("x must be a matrix. Check documentation!")
  if(det(t(x)%*%x)==0) stop("the columns of x must be linearly independent.")
  if(length(x)==0) stop("The parameter x must be provided.")
  if(!is.numeric(z)) stop("z must be a numeric matrix. Check documentation!")
  if(!is.matrix(z)) stop("z must be a matrix. Check documentation!")
  if(sum(is.na(z))>0) stop("There are some NA values in z.")
  if(length(z)==0) stop("The parameter z must be provided.")
  if(!is.numeric(cc)) stop("cc must be a numeric vector. Check documentation!")
  if(!is.vector(cc)) stop("cc must be a vector.Check documentation!")
  if(sum(is.na(cc))>0) stop("There are some NA values in cc.")
  if(sum(cc%in%c(0,1))<length(cc)) stop("The elements of the vector cc must be 0 or 1.")
  if(length(cc)==0) stop("The parameter cc must be provided.")
  if(!is.numeric(nj)) stop("nj must be a numeric vector. Check documentation!")
  if(!is.vector(nj)) stop("nj must be a vector. Check documentation!")
  if(sum(is.na(nj))>0) stop("There are some NA values in nj")
  if(length(nj)==0) stop("The parameter nj must be provided.")
  if(struc!="DEC"&struc!="DEC(AR)"&struc!="SYM"&struc!="ARp"&struc!="UNC") stop("Struc must be UNC, DEC, DEC(AR), SYM or ARp. Check documentation!")
  if(struc=="ARp"){
    ## BUG FIX: typo "Orde" in the original message.
    if(!is.numeric(order) ) stop("Order must be a number. Check documentation!")
    if(length(order)!=1) stop("Order must be a value.")
    if(is.numeric(order))
    { if(order!=round(order)|order<=0) stop("Order must be a positive integer value.")}}
  if(!is.null(initial))
  { if(!is.null(initial$betas))
  {if(!is.numeric(initial$betas)) stop("betas must be a numeric vector. Check documentation!")
    if(!is.vector(initial$betas)) stop("betas must be a vector. Check documentation!")
    if(length(initial$betas)!=ncol(x)) stop("not compatible sizes between the matrix x and parameter betas.")}
    if(!is.null(initial$sigma2))
    {if(!is.numeric(initial$sigma2)) stop("sigma2 must be a scalar.")
      if(length(initial$sigma2)>1) stop("sigma2 must be a scalar.")}
    if(!is.null(initial$alphas))
    {if(!is.matrix(initial$alphas)) stop("alphas must be a matrix.")
      ## BUG FIX: the original compared the upper- and lower-triangle extracts
      ## element-wise; those extracts are not each other's transpose, and the
      ## comparison produced a length > 1 condition inside if(). Use
      ## isSymmetric() (unname() so asymmetric dimnames do not matter).
      if(!isSymmetric(unname(initial$alphas))) stop("alphas must be a symmetric matrix.")
      if(dim(initial$alphas)[2]!=ncol(z)) stop("not compatible sizes between the matrix z and parameter alphas.")}
    if(struc=="ARp"){
      if(!is.null(initial$phi))
      {if(!is.numeric(initial$phi)) stop("phi must be a numeric vector. Check documentation!")
        if(length(initial$phi)!=order) stop("not compatible sizes between the value Arp and parameter phi. Check documentation!")}
    }
  }
  if(typeModel!='Normal'& typeModel!='Student') stop('typeModel must be Normal or Student. Check documentation!')
  if(cens.type!="left" & cens.type!="right" & cens.type!="interval")stop('cens.type must be left, right or interval. Check documentation!')
  if(cens.type=="interval"&is.null(LI)) stop("The parameter LI must be provided.. Check documentation!")
  if(cens.type=="interval"&is.null(LS)) stop("The parameter LS must be provided.. Check documentation!")
  if(!is.null(LI)&!is.numeric(LI)) stop("LI must be a numeric vector. Check documentation!")
  if(!is.null(LS)&!is.numeric(LS)) stop("LS must be a numeric vector. Check documentation!")
  if(length(LS)!=length(LI)) stop("not compatible sizes between the vectors LI and LS. Check documentation!")
  if(cens.type=="interval") {
    if(length(y)!=length(LI)) stop("not compatible sizes between the vectors y and LI. Check documentation!")
    if(length(y)!=length(LS)) stop("not compatible sizes between the vectors y and LS. Check documentation!")
  }
  if (!is.numeric(MaxIter)) stop("MaxIter must be a positive number. Check documentation!")
  if (length(MaxIter) > 1) stop("MaxIter parameter must be a scalar")
  if (MaxIter <0) stop("MaxIter parameter must be positive number")
  if (!is.numeric(error)) stop("error must be a positive number. Check documentation!")
  if (length(error) > 1) stop("error parameter must be a scalar")
  if (error <0) stop("error parameter must be positive number")
  if (Prev) {
    if(is.null(step)|is.null(xpre)|is.null(zpre)|is.null(isubj)) stop("step, isubj, xpre, zpre needs to be provided. Check documentation!")
    if (!is.numeric(isubj)) stop("isubj must be a numeric vector. Check documentation!")
    if (!is.numeric(step)) stop("step must be a positive number. Check documentation!")
    if (step <0) stop("step parameter must be positive number")
    if (length(step) > 1) stop("step parameter must be a scalar")
    if (ncol(xpre)!=ncol(as.matrix(x))) stop("xpre must have the same number of columns than x")
    if (sum(is.na(xpre))>0) stop("There are some NA values in xpre")
    if (!is.numeric(xpre)) stop("xpred must be a numeric matrix")
    if (ncol(zpre)!=ncol(as.matrix(z))) stop("zpre must have the same number of columns than z")
    if (sum(is.na(zpre))>0) stop("There are some NA values in zpre")
    if (!is.numeric(zpre)) stop("zpred must be a numeric matrix")
    if(nrow(xpre)!=length(isubj)*step) stop("not compatible sizes between xpre and isubj. Check documentation!")
    if(nrow(zpre)!=length(isubj)*step) stop("not compatible sizes between zpre and isubj. Check documentation!")
  }
  ## ---- Fit by EM, dispatching on error distribution and structure ----
  ## UNC is handled by the AR routines with Arp = "UNC" as a sentinel value.
  if(typeModel=="Normal"){
    if(struc=="ARp"){
      out<-EMCensArpN(cc=cc,y=y,x=x,z=z,tt=tt,nj=nj, Arp=order, initial=initial, cens.type=cens.type, LI=LI,LS=LS,MaxIter=MaxIter,ee=error,
                      Prev=Prev,step=step,isubj=isubj ,xpre=xpre,zpre=zpre)}
    if(struc=="UNC"){
      out<-EMCensArpN(cc=cc,y=y,x=x,z=z,tt=tt,nj=nj, Arp=struc, initial=initial, cens.type=cens.type, LI=LI,LS=LS,MaxIter=MaxIter,ee=error,
                      Prev=Prev,step=step,isubj=isubj ,xpre=xpre,zpre=zpre)}
    if(struc=="DEC"|struc=="DEC(AR)"|struc=="SYM"){
      out<-EMCensDECN(cc=cc,y=y,x=x,z=z,tt=tt,nj=nj, struc=struc, initial=initial, cens.type=cens.type, LI=LI,LS=LS,MaxIter=MaxIter,ee=error,
                      Prev=Prev,step=step,isubj=isubj ,xpre=xpre,zpre=zpre)}
  }
  if(typeModel=="Student"){
    ## NOTE(review): the Student-t routines do not receive the prediction
    ## arguments, so Prev is effectively available only for typeModel="Normal".
    if(struc=="ARp"){
      out<-EMCensArpT(cc=cc,y=y,x=x,z=z,ttc=tt,nj=nj,Arp=order,initial=initial,cens.type=cens.type,LL=LI,LU=LS,nu.fixed=nu.fixed,
                      iter.max=MaxIter,precision=error)
    }
    if(struc=="UNC"){
      out<-EMCensArpT(cc=cc,y=y,x=x,z=z,ttc=tt,nj=nj,Arp=struc,initial=initial,cens.type=cens.type,LL=LI,LU=LS,nu.fixed=nu.fixed,
                      iter.max=MaxIter,precision=error)
    }
    if(struc=="DEC"|struc=="DEC(AR)"|struc=="SYM"){
      out<-EMCensDECT(cc=cc,y=y,x=x,z=z,ttc=tt,nj=nj,struc=struc,initial=initial,cens.type=cens.type,LL=LI,LU=LS,nu.fixed=nu.fixed,
                      iter.max=MaxIter,precision=error)
    }
  }
  ## ---- Print the fitted-model summary ----
  ## The ARp and DEC summaries in the original were two near-identical 55-line
  ## blocks; they differ only in the title, the "order"/"Case" line and whether
  ## the autoregressive-parameters table is shown, so share one local helper.
  printFit <- function(title.line, case.label, case.value, show.phi) {
    cat('\n')
    cat('---------------------------------------------------\n')
    cat(title.line)
    cat('---------------------------------------------------\n')
    cat('\n')
    cat(case.label, case.value)
    cat('\n')
    cat("Distribution =",typeModel)
    cat('\n')
    if(typeModel=="Student") cat("nu =",out$nu)
    cat('\n')
    cat("Subjects =",length(nj),";",'Observations =',sum(nj))
    cat('\n')
    cat('\n')
    cat('-----------\n')
    cat('Estimates\n')
    cat('-----------\n')
    cat('\n')
    cat('- Fixed effects \n')
    cat('\n')
    print(out$tableB)
    cat('\n')
    cat('\n')
    cat('- Sigma^2 \n')
    cat('\n')
    print(out$tableS)
    cat('\n')
    cat('\n')
    if (show.phi) {
      cat('- Autoregressives parameters\n')
      cat('\n')
      print(out$tableP)
      cat('\n')
      cat('\n')
    }
    cat('- Random effects \n')
    cat('\n')
    print(out$tableA)
    cat('\n')
    cat('\n')
    cat('------------------------\n')
    cat('Model selection criteria\n')
    cat('------------------------\n')
    cat('\n')
    critFin <- c(out$loglik, out$AIC, out$BIC)
    critFin <- round(t(as.matrix(critFin)),digits=3)
    dimnames(critFin) <- list(c("Value"),c("Loglik", "AIC", "BIC"))
    print(critFin)
    cat('\n')
    cat('-------\n')
    cat('Details\n')
    cat('-------\n')
    cat('\n')
    cat("Convergence reached? =",(out$iter < MaxIter))
    cat('\n')
    cat('Iterations =',out$iter,"/",MaxIter)
    cat('\n')
    cat("Processing time =",out$time,units(out$time))
    cat('\n')
  }
  if (struc == "ARp") {
    printFit('Autoregressive censored mixed-effects models \n',
             "Autoregressive order =", order, show.phi = TRUE)
  } else {
    printFit('DEC censored mixed-effects models \n',
             "Case =", struc, show.phi = (struc != "UNC"))
  }
  ## ---- Assemble the returned object ----
  if(typeModel=="Student") nu<-out$nu
  if(typeModel=="Normal") nu<-NULL
  obj.out <- list(FixEffect=out$tableB, Sigma2=out$tableS, Phi=out$tableP,RandEffect=out$tableA, nu=nu,
                  Est=c(out$beta1, sigma2=out$sigmae, phi=out$phi, RnEffect=out$dd), SE=out$SE,Residual=out$residual,
                  loglik=out$loglik, AIC=out$AIC, BIC=out$BIC, AICc=out$AICcorr, iter=out$iter, Yfit=out$yfit,
                  MI=out$MI, Prev=out$Prev, time=out$time, others=list(ubi = out$ubi, ubbi = out$ubbi, uybi = out$uybi, uyi = out$uyi, uyyi = out$uyyi,varbeta=out$varbeta,yog=out$yorg) )
  class(obj.out) <- "ARpMMEC"
  return(obj.out)
}
|
setwd('~/Workspace/pim_kinase')
library(ggplot2)
library(grid)
library(limma)
library(survival)
TCGA.BRCA <- function() {
  ## Loader shim for the TCGA BRCA expression dataset.
  ## source() runs with local = FALSE by default, so tcga_brca.r evaluates in
  ## the global environment; presumably it redefines TCGA.BRCA there (otherwise
  ## the call below would recurse forever) -- TODO confirm against tcga_brca.r.
  ## The call then resolves to the freshly sourced definition, so this wrapper
  ## effectively replaces itself on first use and returns the dataset.
  source('~/Workspace/pim_kinase/TCGA_BRCA_exp_HiSeqV2-2014-08-28/tcga_brca.r')
  TCGA.BRCA()
}
ISPY1_DATASET <- function() {
  ## Loader shim for the I-SPY 1 dataset: sourcing ispy1.r (local = FALSE,
  ## i.e. into the global environment) is expected to define ISPY1(), which is
  ## then called to build and return the dataset -- confirm against ispy1.r.
  source('~/Workspace/pim_kinase/ispy1_082814/ispy1.r')
  ISPY1()
}
GSE25066_DATASET <- function() {
  ## Load the GSE25066 cohort and drop samples that originated from the I-SPY
  ## trial (those are analyzed separately), then re-synchronize the design
  ## table with the expression matrix.
  source('~/Workspace/pim_kinase/gse25066_091114/gse25066.r')
  dataset <- GSE25066()
  not.ispy <- dataset$design$source != 'ISPY'
  dataset$design <- subset(dataset$design, not.ispy)
  syncArrayData(dataset)
}
YAU_DATASET <- function() {
  ## Load the Yau et al. cohort and keep only samples with complete
  ## distant-metastasis-free survival annotation (both event and time),
  ## then re-synchronize the design table with the expression matrix.
  source('~/Workspace/pim_kinase/YauGeneExp-2011-11-11/yau_gene_exp.r')
  dataset <- YauGeneExp()
  has.survival <- !is.na(dataset$design$e_dmfs) & !is.na(dataset$design$t_dmfs)
  dataset$design <- subset(dataset$design, has.survival)
  syncArrayData(dataset)
}
discretizeSamplesByQuantile <- function(exprs, time, event, .quantile) {
  ## Dichotomize samples at the given expression percentile (0-100) and score
  ## the resulting split with a Cox model log-rank (score) test.
  ## Returns list(P = log-rank p-value, quantile = the percentile used,
  ## groups = per-sample group labels 1/2).
  cut.rank <- round(.quantile * length(exprs) / 100)
  grp <- discretizeSamplesByRank(exprs, cut.rank)
  fit <- coxph(Surv(time, event) ~ grp)
  list(P = summary(fit)$sctest['pvalue'],
       quantile = .quantile,
       groups = grp)
}
discretizeSamplesByRank <- function(exprs, rank) {
  ## Use the rank-th smallest expression value as the dichotomization cutoff.
  ## (order() rather than sort() keeps NA handling identical for NA inputs.)
  ranked <- exprs[order(exprs)]
  discretizeSamples(exprs, ranked[rank])
}
discretizeSamples <- function(exprs, cutoff) {
  ## Assign each sample to a low (1) or high (2) expression group:
  ## values below the cutoff fall in group 1, values at or above it in group 2.
  ifelse(exprs >= cutoff, 2, 1)
}
getOptimalQuantile <- function(gene.exprs, time, event, min.quantile = 0.1) {
  ## Scan every expression cut point between the min.quantile and
  ## (1 - min.quantile) quantiles and return the split with the smallest
  ## Cox log-rank p-value, as list(P, quantile, groups).
  n <- length(gene.exprs)
  lo <- ceiling(min.quantile * n)
  hi <- floor((1 - min.quantile) * n)
  candidates <- lapply(lo:hi, function(cut.rank) {
    grp <- discretizeSamplesByRank(gene.exprs, cut.rank)
    fit <- coxph(Surv(time, event) ~ grp)
    list(P = summary(fit)$sctest['pvalue'],
         quantile = cut.rank / n * 100,
         groups = grp)
  })
  best <- which.min(vapply(candidates, function(cand) cand$P, numeric(1)))
  candidates[[best]]
}
drawKMPlot <- function(filename, title.prefix, gene.exprs, time, event,
                       .quantile = NULL, ...) {
  ## Save a Kaplan-Meier plot for samples dichotomized by expression level.
  ## filename: output JPEG path; title.prefix: plot title stem;
  ## gene.exprs/time/event: expression vector and survival outcome;
  ## .quantile: fixed split percentile, or NULL to search for the percentile
  ## with the smallest log-rank p-value; ...: extra arguments passed to plot().
  temp <- if (is.null(.quantile)) {
    getOptimalQuantile(gene.exprs, time, event)
  } else {
    discretizeSamplesByQuantile(gene.exprs, time, event, .quantile)
  }
  fit <- survfit(Surv(time, event) ~ temp$groups)
  jpeg(filename, res = 600, height = 9, width = 9, units = 'in')
  ## BUG FIX: register the device shutdown immediately so a failure in plot()
  ## or legend() no longer leaves a dangling open graphics device.
  on.exit(dev.off(), add = TRUE)
  col <- c('black', 'red')
  title <- sprintf('%s (n = %i)\nLog-rank p = %.3e',
                   title.prefix, length(temp$groups), temp$P)
  par(mar = c(5, 6, 4, 2))
  plot(fit, col = col, main = title, ylim = c(0.3, 1), cex.lab = 2,
       cex.axis = 1.7, cex.main = 1.5, las = 1, lwd = 3, font.lab = 2, ...)
  legend('topright',
         legend = c(
           sprintf('< %.2f percentile (%i)', temp$quantile,
                   sum(temp$groups == 1)),
           sprintf('>= %.2f percentile (%i)', temp$quantile,
                   sum(temp$groups == 2))),
         col = col,
         lty = c(1, 1),
         lwd = c(5, 5),
         cex = 1.7)
}
## Summarizes data.
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summarized
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  ## Summarize `measurevar` within the groups defined by `groupvars`:
  ## returns a data frame with N, the group mean (named after measurevar),
  ## sd, standard error of the mean, and the CI half-width (default 95%).
  ## Kept as library() rather than plyr:: so plyr stays attached for
  ## downstream code that calls its functions unqualified.
  library(plyr)
  ## Count that can optionally ignore NA values.
  countValid <- function(v, na.rm = FALSE) {
    if (na.rm) sum(!is.na(v))
    else length(v)
  }
  ## Per-group N / mean / sd.
  summ <- ddply(data, groupvars, .drop = .drop,
                .fun = function(chunk, col) {
                  vals <- chunk[[col]]
                  c(N = countValid(vals, na.rm = na.rm),
                    mean = mean(vals, na.rm = na.rm),
                    sd = sd(vals, na.rm = na.rm)
                  )
                },
                measurevar
  )
  ## Rename the generic "mean" column after the measured variable.
  summ <- rename(summ, c("mean" = measurevar))
  summ$se <- summ$sd / sqrt(summ$N)
  ## t-based CI multiplier: for conf.interval = .95 this uses the .975
  ## quantile with df = N - 1 (vectorized over groups).
  ci.mult <- qt(conf.interval / 2 + .5, summ$N - 1)
  summ$ci <- summ$se * ci.mult
  summ
}
generateDataForPlot <- function(gene.exprs, status) {
  ## Median-center the expression vector, drop samples without a status label,
  ## run unpaired pairwise t-tests (pooled SD, no p adjustment) between status
  ## groups, and compute per-group summary statistics for plotting.
  ## Returns list(plot.data = summarySE() output with "status\n(n=...)" labels,
  ## significance = matrix of pairwise t-test p-values).
  centered <- gene.exprs - median(gene.exprs)
  dat <- data.frame(status = status, expr = centered)
  dat <- subset(dat, !is.na(dat$status))
  pairwise.p <- pairwise.t.test(dat$expr, dat$status, p.adj = 'none')$p.value
  summ <- summarySE(dat, measurevar = 'expr', groupvars = 'status')
  summ$status <- as.factor(sprintf('%s\n(n=%i)', summ$status, summ$N))
  list(plot.data = summ,
       significance = pairwise.p)
}
drawBarPlot <- function(filename, df) {
  ## Save a bar plot of mean expression per status group (with SE error bars)
  ## to `filename` as a 4.5 x 6.75 inch, 600 dpi image.
  ## `df` is the $plot.data component from generateDataForPlot(): one row per
  ## status group with columns status, expr (group mean) and se.
  plot <- ggplot(data = df, aes(x = status, y = expr)) +
    geom_bar(aes(fill = status), position = position_dodge(),
             stat = 'identity', color = 'black', size = 1.3,
             width = 0.6) +
    ## Error bar drawn only on the side of the bar pointing away from zero.
    geom_errorbar(aes(ymin = ifelse(expr < 0, expr - se, expr),
                      ymax = ifelse(expr < 0, expr, expr + se)),
                  width = 0.2, position = position_dodge(.7), size = 1) +
    geom_hline(yintercept = 0, size = 1.5) +
    scale_fill_manual(values = c('black', 'grey', 'white')) +
    ## NOTE(review): this ylab is suppressed by axis.title.y = element_blank()
    ## in the theme below -- confirm whether the label was meant to be shown.
    ylab('Median-centered log expression') +
    theme_bw() +
    theme(
      legend.position = 'none',
      axis.line = element_line(size = 1.3),
      axis.ticks = element_line(size = 1.5),
      axis.ticks.length = unit(3, 'mm'),
      axis.text.x = element_text(size = 20, face = 'bold',
                                 angle = 45, hjust = 1, vjust = 1),
      axis.text.y = element_text(size = 20),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      panel.border = element_blank(),
      panel.grid = element_blank())
  ggsave(filename = filename, plot = plot, height = 6.75, width = 4.5,
         dpi = 600)
}
univariateAnalysis <- function(gene.exprs, group, time, event) {
  ## Univariate Cox proportional-hazards analysis of standardized expression
  ## against survival, computed overall and within each level of `group`
  ## (samples with NA group are dropped). Returns a data.frame (rows:
  ## "Overall" plus one per group) with N, formatted HR (95% CI) and the
  ## likelihood-ratio test p-value.
  valid <- !is.na(group)
  .gene.exprs <- scale(gene.exprs[valid])  # z-score: HR is per SD of expression
  .group <- group[valid]
  .time <- time[valid]
  .event <- event[valid]
  ## Fit a Cox model on a logical subset of the (valid) samples.
  subsetAnalysis <- function(subset) {
    ..exprs <- .gene.exprs[subset]
    ..time <- .time[subset]
    ..event <- .event[subset]
    reg <- coxph(Surv(..time, ..event) ~ ..exprs)
    summ <- summary(reg)
    conf.int <- summ$conf.int
    p <- summ$logtest['pvalue']
    data.frame(N = length(..exprs),
               HazardRatio = sprintf('%.3f (%.3f - %.3f)',
                                     conf.int[, 1],
                                     conf.int[, 3],
                                     conf.int[, 4]),
               p = p,
               stringsAsFactors = FALSE)  # was F; TRUE/FALSE cannot be shadowed
  }
  ## First column selects everyone ("Overall"); remaining columns one per group.
  subsets <- cbind(
    rep(TRUE, length(.gene.exprs)),      # was T
    sapply(unique(.group), function(g) .group == g))
  res <- apply(subsets, 2, subsetAnalysis)
  comp.groups <- names(res)
  comp.groups[1] <- 'Overall'
  ## BUG FIX (robustness): the original called rbind.fill() unqualified, which
  ## only worked because summarySE() happened to attach plyr earlier; use the
  ## namespaced call so this function does not depend on attachment order.
  res <- plyr::rbind.fill(res)
  rownames(res) <- comp.groups
  res
}
## Per-category correlation between two numeric vectors.
##
## Splits (x, y) by `cats` and runs cor.test() within each category,
## returning one row per category with the correlation estimate, the
## p-value and the group size. Groups where cor.test() fails (e.g. too
## few complete pairs) yield NA estimate/p but keep their N.
corrAnalysis <- function(x, y, cats) {
  one.group <- function(sub) {
    n <- nrow(sub)
    tryCatch({
      ct <- cor.test(sub$x, sub$y)
      data.frame(cor = ct$estimate, p = ct$p.value, N = n)
    }, error = function(err) data.frame(cor = NA, p = NA, N = n))
  }
  plyr::ddply(data.frame(x, y, cats), ~ cats, one.group)
}
##########
## Main ##
##########

## TCGA: differential PIM1 expression across receptor-defined subtypes
tcga <- TCGA.BRCA()
tcga.df <- generateDataForPlot(as.numeric(tcga$exprs['PIM1', ]),
                               factor(tcga$design$status,
                                      levels = c('HER2+', 'HR+HER2-', 'TN')))
drawBarPlot('tcga_PIM1_exprs_barplot.jpg', tcga.df$plot.data)
write.csv(tcga.df$significance, file = 'tcga_PIM1_exprs_pairwise_t.csv')

## ISPY1: KM survival in HR- patients, subtype bar plot, and univariate
## Cox analysis. ID '5292' corresponds to PIM1 (per the output file names).
ispy1 <- ISPY1_DATASET()
sample.subset <- !is.na(ispy1$design$hr) & ispy1$design$hr == 'HR-'
drawKMPlot('ispy1_PIM1_kaplan_meier.jpg',
           'I-SPY',
           as.numeric(ispy1$exprs['5292', sample.subset]),
           ispy1$design[sample.subset, 'rfs.t'],
           ispy1$design[sample.subset, 'rfs.e'],
           xlab = 'Time (years)')
ispy.df <- generateDataForPlot(as.numeric(ispy1$exprs['5292', ]),
                               factor(ispy1$design$status,
                                      levels = c('HER2+', 'HR+HER2-', 'TN')))
drawBarPlot('ispy1_PIM1_exprs_barplot.jpg', ispy.df$plot.data)
write.csv(ispy.df$significance, file = 'ispy1_PIM1_exprs_pairwise_t.csv')
ispy1.res <- univariateAnalysis(as.numeric(ispy1$exprs['5292', ]),
                                ispy1$design$hr,
                                ispy1$design$rfs.t,
                                ispy1$design$rfs.e)
write.csv(ispy1.res, file = 'ispy1_univariate_analysis.csv')

## GSE25066 (I-SPY overlap removed by the loader): same KM / bar plot /
## univariate workflow using distant relapse-free survival (drfs_*).
gse25066 <- GSE25066_DATASET()
sample.subset <- !is.na(gse25066$design$hr) & gse25066$design$hr == 'HR-'
drawKMPlot('gse25066_PIM1_kaplan_meier.jpg',
           'Pooled Neoadjuvant Chemotherapy Treated',
           as.numeric(gse25066$exprs['5292', sample.subset]),
           gse25066$design[sample.subset, 'drfs_t'],
           gse25066$design[sample.subset, 'drfs_e'],
           xlab = 'Time (years)')
gse25066.df <- generateDataForPlot(as.numeric(gse25066$exprs['5292', ]),
                                   factor(gse25066$design$status,
                                          levels = c('HER2+',
                                                     'HR+HER2-',
                                                     'TN')))
drawBarPlot('gse25066_PIM1_exprs_barplot.jpg', gse25066.df$plot.data)
write.csv(gse25066.df$significance, file = 'gse25066_PIM1_exprs_pairwise_t.csv')
gse25066.res <- univariateAnalysis(as.numeric(gse25066$exprs['5292', ]),
                                   gse25066$design$hr,
                                   gse25066$design$drfs_t,
                                   gse25066$design$drfs_e)
write.csv(gse25066.res, file = 'gse25066_univariate_analysis.csv')

## Yau: ER status stands in for HR status, gene symbols index the
## expression matrix, and distant metastasis-free survival is used.
yau <- YAU_DATASET()
sample.subset <- !is.na(yau$design$er_status) & yau$design$er_status == 'ER-'
drawKMPlot('yau_PIM1_kaplan_meier.jpg',
           'Pooled Node-Negative Adjuvant Chemotherapy Naive',
           as.numeric(yau$exprs['PIM1', sample.subset]),
           time = yau$design[sample.subset, 't_dmfs'],
           event = yau$design[sample.subset, 'e_dmfs'],
           xlab = 'Time (years)')
yau.df <- generateDataForPlot(as.numeric(yau$exprs['PIM1', ]),
                              factor(yau$design$status,
                                     levels = c('HER2+',
                                                'ER+HER2-',
                                                'ER-HER2-')))
drawBarPlot('yau_PIM1_exprs_barplot.jpg', yau.df$plot.data)
write.csv(yau.df$significance, file = 'yau_PIM1_exprs_pairwise_t.csv')
yau.res <- univariateAnalysis(as.numeric(yau$exprs['PIM1', ]),
                              yau$design$er_status,
                              yau$design$t_dmfs,
                              yau$design$e_dmfs)
write.csv(yau.res, file = 'yau_univariate_analysis.csv')

##
## MYC Univariate Analysis (ID '4609' corresponds to MYC per file names)
##
ispy1.myc.uva <- univariateAnalysis(as.numeric(ispy1$exprs['4609', ]),
                                    ispy1$design$hr,
                                    ispy1$design$rfs.t,
                                    ispy1$design$rfs.e)
write.csv(ispy1.myc.uva, file = 'ispy1_MYC_univariate_analysis.csv')
gse25066.myc.uva <- univariateAnalysis(as.numeric(gse25066$exprs['4609', ]),
                                       gse25066$design$hr,
                                       gse25066$design$drfs_t,
                                       gse25066$design$drfs_e)
write.csv(gse25066.myc.uva, file = 'gse25066_MYC_univariate_analysis.csv')
yau.myc.uva <- univariateAnalysis(as.numeric(yau$exprs['MYC', ]),
                                  yau$design$er_status,
                                  yau$design$t_dmfs,
                                  yau$design$e_dmfs)
write.csv(yau.myc.uva, file = 'yau_MYC_univariate_analysis.csv')

##
## Correlation between MYC and PIM1 (per subtype, all four datasets)
##
tcga.myc.pim1.corr <- corrAnalysis(as.numeric(tcga$exprs['PIM1', ]),
                                   as.numeric(tcga$exprs['MYC', ]),
                                   factor(tcga$design$status))
write.csv(tcga.myc.pim1.corr, file = 'tcga_MYC_PIM1_corr.csv')
ispy.myc.pim1.corr <- corrAnalysis(as.numeric(ispy1$exprs['5292', ]),
                                   as.numeric(ispy1$exprs['4609', ]),
                                   factor(ispy1$design$status))
write.csv(ispy.myc.pim1.corr, file = 'ispy1_MYC_PIM1_corr.csv')
gse25066.myc.pim1.corr <- corrAnalysis(as.numeric(gse25066$exprs['5292', ]),
                                       as.numeric(gse25066$exprs['4609', ]),
                                       factor(gse25066$design$status))
write.csv(gse25066.myc.pim1.corr, file = 'gse25066_MYC_PIM1_corr.csv')
yau.myc.pim1.corr <- corrAnalysis(as.numeric(yau$exprs['PIM1', ]),
                                  as.numeric(yau$exprs['MYC', ]),
                                  factor(yau$design$status))
write.csv(yau.myc.pim1.corr, file = 'yau_MYC_PIM1_corr.csv')

##
## DE Analysis CDKN1B (ID '1027' corresponds to CDKN1B per file names)
##
tcga.df2 <- generateDataForPlot(as.numeric(tcga$exprs['CDKN1B', ]),
                                factor(tcga$design$status,
                                       levels = c('HER2+', 'HR+HER2-', 'TN')))
drawBarPlot('tcga_CDKN1B_exprs_barplot.jpg', tcga.df2$plot.data)
write.csv(tcga.df2$significance, file = 'tcga_CDKN1B_exprs_pairwise_t.csv')
ispy.df2 <- generateDataForPlot(as.numeric(ispy1$exprs['1027', ]),
                                factor(ispy1$design$status,
                                       levels = c('HER2+', 'HR+HER2-', 'TN')))
drawBarPlot('ispy1_CDKN1B_exprs_barplot.jpg', ispy.df2$plot.data)
write.csv(ispy.df2$significance, file = 'ispy1_CDKN1B_exprs_pairwise_t.csv')
gse25066.df2 <- generateDataForPlot(as.numeric(gse25066$exprs['1027', ]),
                                    factor(gse25066$design$status,
                                           levels = c('HER2+',
                                                      'HR+HER2-',
                                                      'TN')))
drawBarPlot('gse25066_CDKN1B_exprs_barplot.jpg', gse25066.df2$plot.data)
write.csv(gse25066.df2$significance, file = 'gse25066_CDKN1B_exprs_pairwise_t.csv')
yau.df2 <- generateDataForPlot(as.numeric(yau$exprs['CDKN1B', ]),
                               factor(yau$design$status,
                                      levels = c('HER2+',
                                                 'ER+HER2-',
                                                 'ER-HER2-')))
drawBarPlot('yau_CDKN1B_exprs_barplot.jpg', yau.df2$plot.data)
write.csv(yau.df2$significance, file = 'yau_CDKN1B_exprs_pairwise_t.csv')

##
## Correlation between CDKN1B and PIM1
##
tcga.cdkn1b.pim1.corr <- corrAnalysis(as.numeric(tcga$exprs['PIM1', ]),
                                      as.numeric(tcga$exprs['CDKN1B', ]),
                                      factor(tcga$design$status))
write.csv(tcga.cdkn1b.pim1.corr, file = 'tcga_CDKN1B_PIM1_corr.csv')
ispy.cdkn1b.pim1.corr <- corrAnalysis(as.numeric(ispy1$exprs['5292', ]),
                                      as.numeric(ispy1$exprs['1027', ]),
                                      factor(ispy1$design$status))
write.csv(ispy.cdkn1b.pim1.corr, file = 'ispy1_CDKN1B_PIM1_corr.csv')
gse25066.cdkn1b.pim1.corr <- corrAnalysis(as.numeric(gse25066$exprs['5292', ]),
                                          as.numeric(gse25066$exprs['1027', ]),
                                          factor(gse25066$design$status))
write.csv(gse25066.cdkn1b.pim1.corr, file = 'gse25066_CDKN1B_PIM1_corr.csv')
yau.cdkn1b.pim1.corr <- corrAnalysis(as.numeric(yau$exprs['PIM1', ]),
                                     as.numeric(yau$exprs['CDKN1B', ]),
                                     factor(yau$design$status))
write.csv(yau.cdkn1b.pim1.corr, file = 'yau_CDKN1B_PIM1_corr.csv')

##
## CDKN1B Univariate Analysis
##
ispy1.cdkn1b.uva <- univariateAnalysis(as.numeric(ispy1$exprs['1027', ]),
                                       ispy1$design$hr,
                                       ispy1$design$rfs.t,
                                       ispy1$design$rfs.e)
write.csv(ispy1.cdkn1b.uva, file = 'ispy1_CDKN1B_univariate_analysis.csv')
gse25066.cdkn1b.uva <- univariateAnalysis(as.numeric(gse25066$exprs['1027', ]),
                                          gse25066$design$hr,
                                          gse25066$design$drfs_t,
                                          gse25066$design$drfs_e)
write.csv(gse25066.cdkn1b.uva, file = 'gse25066_CDKN1B_univariate_analysis.csv')
yau.cdkn1b.uva <- univariateAnalysis(as.numeric(yau$exprs['CDKN1B', ]),
                                     yau$design$er_status,
                                     yau$design$t_dmfs,
                                     yau$design$e_dmfs)
write.csv(yau.cdkn1b.uva, file = 'yau_CDKN1B_univariate_analysis.csv')
| /pim_analysis.r | no_license | snjvb/pim_kinase | R | false | false | 17,904 | r | setwd('~/Workspace/pim_kinase')
library(ggplot2)
library(grid)
library(limma)
library(survival)
## Load the TCGA BRCA dataset (exprs matrix + design table).
## NOTE(review): sourcing tcga_brca.r presumably redefines TCGA.BRCA()
## (source() evaluates in the global environment by default), so the
## recursive-looking call below dispatches to that new definition --
## confirm against tcga_brca.r.
TCGA.BRCA <- function() {
  source('~/Workspace/pim_kinase/TCGA_BRCA_exp_HiSeqV2-2014-08-28/tcga_brca.r')
  TCGA.BRCA()
}
## Load the I-SPY 1 dataset: sources ispy1.r (expected to define ISPY1())
## and returns its result.
ISPY1_DATASET <- function() {
  source('~/Workspace/pim_kinase/ispy1_082814/ispy1.r')
  ISPY1()
}
## Load GSE25066 and drop samples that overlap with the I-SPY 1 cohort
## (design$source == 'ISPY') so the two datasets stay independent.
## syncArrayData() -- presumably defined by the sourced script -- realigns
## the expression matrix with the filtered design table.
GSE25066_DATASET <- function() {
  source('~/Workspace/pim_kinase/gse25066_091114/gse25066.r')
  ## remove ISPY1 samples from dataset
  data <- GSE25066()
  data$design <- subset(data$design, data$design$source != 'ISPY')
  syncArrayData(data)
}
## Load the Yau et al. cohort and keep only samples with complete distant
## metastasis-free survival data (both event e_dmfs and time t_dmfs present).
YAU_DATASET <- function() {
  source('~/Workspace/pim_kinase/YauGeneExp-2011-11-11/yau_gene_exp.r')
  ## remove samples without survival data from dataset
  data <- YauGeneExp()
  data$design <- subset(data$design,
                        !is.na(data$design$e_dmfs) &
                        !is.na(data$design$t_dmfs))
  syncArrayData(data)
}
## Split samples into low (1) / high (2) expression groups at a given
## percentile and score the split with a Cox model score (log-rank) test.
##
## Args:
##   exprs:     numeric expression vector.
##   time:      survival times, aligned with exprs.
##   event:     event indicators, aligned with exprs.
##   .quantile: percentile (0-100) at which to place the cutoff.
##
## Returns list(P = score-test p-value, quantile = requested percentile,
## groups = per-sample group assignments, 1 = below cutoff).
discretizeSamplesByQuantile <- function(exprs, time, event, .quantile) {
  rank <- round(.quantile * length(exprs) / 100)
  ## Clamp to a valid index: round() can yield 0 (for small .quantile) or
  ## length(exprs) + 1 (for .quantile near 100), which would otherwise
  ## produce an empty/NA cutoff and crash coxph() downstream.
  rank <- min(max(rank, 1L), length(exprs))
  groups <- discretizeSamplesByRank(exprs, rank)
  reg <- coxph(Surv(time, event) ~ groups)
  list(P = summary(reg)$sctest['pvalue'],
       quantile = .quantile,
       groups = groups)
}
## Dichotomize `exprs` at its rank-th smallest value: 1 = below the
## cutoff, 2 = at or above it.
discretizeSamplesByRank <- function(exprs, rank) {
  ordered.exprs <- exprs[order(exprs)]
  discretizeSamples(exprs, ordered.exprs[rank])
}
## Map each expression value to group 1 (below cutoff) or 2 (otherwise);
## NA inputs remain NA.
discretizeSamples <- function(exprs, cutoff) {
  2 - (exprs < cutoff)
}
## Scan candidate expression cutoffs and return the split that minimizes
## the Cox score-test p-value.
##
## Candidate cutoffs are every rank between the min.quantile and
## (1 - min.quantile) quantiles, so neither group can be smaller than
## min.quantile of the cohort. NOTE(review): this optimal-cutpoint search
## inflates the significance of the returned p-value (multiple testing).
##
## Returns a list(P, quantile, groups) like discretizeSamplesByQuantile().
getOptimalQuantile <- function(gene.exprs, time, event, min.quantile = 0.1) {
  start <- ceiling(min.quantile * length(gene.exprs))
  end <- floor((1 - min.quantile) * length(gene.exprs))
  p.dist <- lapply(start:end, function(rank) {
    groups <- discretizeSamplesByRank(gene.exprs, rank)
    reg <- coxph(Surv(time, event) ~ groups)
    list(P = summary(reg)$sctest['pvalue'],
         quantile = rank / length(gene.exprs) * 100,
         groups = groups)
  })
  ## keep the candidate split with the smallest p-value
  optimal <- which.min(sapply(p.dist, function(x) x$P))
  p.dist[[optimal]]
}
## Draw a two-group Kaplan-Meier plot for one gene, splitting samples at
## either a fixed percentile (.quantile) or, when .quantile is NULL, at
## the percentile minimizing the log-rank p-value (getOptimalQuantile).
## Writes a 9in x 9in, 600 dpi JPEG to `filename`; extra arguments in
## `...` are forwarded to plot() (e.g. xlab).
drawKMPlot <- function(filename, title.prefix, gene.exprs, time, event,
                       .quantile = NULL, ...) {
  temp <- if (is.null(.quantile)) {
    getOptimalQuantile(gene.exprs, time, event)
  } else {
    discretizeSamplesByQuantile(gene.exprs, time, event, .quantile)
  }
  fit <- survfit(Surv(time, event) ~ temp$groups)

  jpeg(filename, res = 600, height = 9, width = 9, units = 'in')
  col <- c('black', 'red')  # low-expression group black, high group red
  title <- sprintf('%s (n = %i)\nLog-rank p = %.3e',
                   title.prefix, length(temp$groups), temp$P)
  par(mar = c(5, 6, 4, 2))
  plot(fit, col = col, main = title, ylim = c(0.3, 1), cex.lab = 2,
       cex.axis = 1.7, cex.main = 1.5, las = 1, lwd = 3, font.lab = 2, ...)
  legend('topright',
         legend = c(
             sprintf('< %.2f percentile (%i)', temp$quantile,
                     sum(temp$groups == 1)),
             sprintf('>= %.2f percentile (%i)', temp$quantile,
                     sum(temp$groups == 2))),
         col = col,
         lty = c(1, 1),
         lwd = c(5, 5),
         cex = 1.7)
  dev.off()  # NOTE(review): no on.exit(dev.off()); device leaks on error
}
## Summarizes data.
## Gives count, mean, standard deviation, standard error of the mean,
## and confidence interval (default 95%). (Standard "cookbook" helper;
## attaches plyr as a side effect, which other functions in this file
## rely on for rbind.fill.)
##   data: a data frame.
##   measurevar: the name of a column that contains the variable to be summarized
##   groupvars: a vector containing names of columns that contain grouping variables
##   na.rm: a boolean that indicates whether to ignore NA's
##   conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)  # provides ddply() and rename(); attached globally

  # New version of length which can handle NA's: if na.rm==T, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }

  # This does the summary. For each group's data frame, return a vector with
  # N, mean, and sd
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N = length2(xx[[col]], na.rm=na.rm),
                     mean = mean (xx[[col]], na.rm=na.rm),
                     sd = sd (xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )

  # Rename the "mean" column to the measured variable's name
  datac <- rename(datac, c("mean" = measurevar))

  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean

  # Confidence interval multiplier for standard error
  # Calculate t-statistic for confidence interval:
  # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult

  return(datac)
}
## Prepare per-subtype expression summaries for bar plotting.
##
## Median-centers the expression vector, drops samples with missing
## subtype labels, runs unadjusted pairwise t-tests between subtypes,
## and summarises mean/SD/SE/CI per subtype via summarySE().
##
## Returns a list with:
##   plot.data    - per-group summary stats; group labels are relabelled
##                  as "<status>\n(n=<N>)" for axis display
##   significance - matrix of unadjusted pairwise t-test p-values
generateDataForPlot <- function(gene.exprs, status) {
  centered <- gene.exprs - median(gene.exprs)
  plot.df <- data.frame(status = status, expr = centered)
  plot.df <- subset(plot.df, !is.na(plot.df$status))

  ## Pairwise comparisons between subtype groups (pooled SD, no adjustment)
  pairwise.p <- pairwise.t.test(plot.df$expr, plot.df$status,
                                p.adj = 'none')$p.value

  summ <- summarySE(plot.df, measurevar = 'expr', groupvars = 'status')
  ## Append group sizes to the group labels
  summ$status <- as.factor(sprintf('%s\n(n=%i)', summ$status, summ$N))

  list(plot.data = summ,
       significance = pairwise.p)
}
## Render a grouped bar plot of median-centered expression (one bar per
## subtype) with one-sided SE error bars, and save it to `filename` as a
## 6.75in x 4.5in, 600 dpi image.
##
## `df` is the `plot.data` component produced by generateDataForPlot():
## one row per group with columns `status`, `expr` and `se`.
drawBarPlot <- function(filename, df) {
  bars <- geom_bar(aes(fill = status), position = position_dodge(),
                   stat = 'identity', color = 'black', size = 1.3,
                   width = 0.6)
  ## Error bars extend away from zero only (downwards for negative bars)
  error.bars <- geom_errorbar(aes(ymin = ifelse(expr < 0, expr - se, expr),
                                  ymax = ifelse(expr < 0, expr, expr + se)),
                              width = 0.2, position = position_dodge(.7),
                              size = 1)
  bare.theme <- theme(
    legend.position = 'none',
    axis.line = element_line(size = 1.3),
    axis.ticks = element_line(size = 1.5),
    axis.ticks.length = unit(3, 'mm'),
    axis.text.x = element_text(size = 20, face = 'bold',
                               angle = 45, hjust = 1, vjust = 1),
    axis.text.y = element_text(size = 20),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    panel.border = element_blank(),
    panel.grid = element_blank())

  plot <- ggplot(data = df, aes(x = status, y = expr)) +
    bars +
    error.bars +
    geom_hline(yintercept = 0, size = 1.5) +
    scale_fill_manual(values = c('black', 'grey', 'white')) +
    ylab('Median-centered log expression') +
    theme_bw() +
    bare.theme

  ggsave(filename = filename, plot = plot, height = 6.75, width = 4.5,
         dpi = 600)
}
## Cox proportional-hazards regression of survival on standardized gene
## expression, fit on the full cohort and within each level of `group`.
##
## Args:
##   gene.exprs: numeric vector of expression values (one per sample).
##   group:      grouping variable (e.g. hormone-receptor status); samples
##               with NA group are dropped before standardization.
##   time:       survival/follow-up times, aligned with gene.exprs.
##   event:      event indicators, aligned with gene.exprs.
##
## Returns a data.frame with one row per stratum ('Overall' first, then
## one per unique group level) and columns N, HazardRatio (point estimate
## plus 95% CI formatted as text) and p (likelihood-ratio test p-value).
univariateAnalysis <- function(gene.exprs, group, time, event) {
  valid <- !is.na(group)
  ## standardize so hazard ratios are per-SD increments of expression
  .gene.exprs <- scale(gene.exprs[valid])
  .group <- group[valid]
  .time <- time[valid]
  .event <- event[valid]

  ## Fit a univariate Cox model on the samples selected by `subset`
  ## (a logical vector) and return a one-row summary data.frame.
  subsetAnalysis <- function(subset) {
    ..exprs <- .gene.exprs[subset]
    ..time <- .time[subset]
    ..event <- .event[subset]

    reg <- coxph(Surv(..time, ..event) ~ ..exprs)
    summ <- summary(reg)
    conf.int <- summ$conf.int
    p <- summ$logtest['pvalue']

    data.frame(N = length(..exprs),
               HazardRatio = sprintf('%.3f (%.3f - %.3f)',
                                     conf.int[, 1],
                                     conf.int[, 3],
                                     conf.int[, 4]),
               p = p,
               stringsAsFactors = F)
  }

  ## First column selects every sample; the sapply() columns (named by
  ## group level) select one group each. apply() propagates these column
  ## names to the result list, which become the strata row names below.
  subsets <- cbind(
    rep(T, length(.gene.exprs)),
    sapply(unique(.group), function(g) .group == g))
  res <- apply(subsets, 2, subsetAnalysis)
  comp.groups <- names(res)
  comp.groups[1] <- 'Overall'  # the unnamed all-samples column
  ## rbind.fill comes from plyr, which is attached by summarySE() --
  ## NOTE(review): fragile implicit dependency, confirm plyr is loaded.
  res <- rbind.fill(res)
  rownames(res) <- comp.groups
  res
}
## Per-category correlation between two numeric vectors.
##
## Splits (x, y) by `cats` and runs cor.test() within each category,
## returning one row per category with the correlation estimate, the
## p-value and the group size. Groups where cor.test() fails (e.g. too
## few complete pairs) yield NA estimate/p but keep their N.
corrAnalysis <- function(x, y, cats) {
  one.group <- function(sub) {
    n <- nrow(sub)
    tryCatch({
      ct <- cor.test(sub$x, sub$y)
      data.frame(cor = ct$estimate, p = ct$p.value, N = n)
    }, error = function(err) data.frame(cor = NA, p = NA, N = n))
  }
  plyr::ddply(data.frame(x, y, cats), ~ cats, one.group)
}
##########
## Main ##
##########

## TCGA: differential PIM1 expression across receptor-defined subtypes
tcga <- TCGA.BRCA()
tcga.df <- generateDataForPlot(as.numeric(tcga$exprs['PIM1', ]),
                               factor(tcga$design$status,
                                      levels = c('HER2+', 'HR+HER2-', 'TN')))
drawBarPlot('tcga_PIM1_exprs_barplot.jpg', tcga.df$plot.data)
write.csv(tcga.df$significance, file = 'tcga_PIM1_exprs_pairwise_t.csv')

## ISPY1: KM survival in HR- patients, subtype bar plot, and univariate
## Cox analysis. ID '5292' corresponds to PIM1 (per the output file names).
ispy1 <- ISPY1_DATASET()
sample.subset <- !is.na(ispy1$design$hr) & ispy1$design$hr == 'HR-'
drawKMPlot('ispy1_PIM1_kaplan_meier.jpg',
           'I-SPY',
           as.numeric(ispy1$exprs['5292', sample.subset]),
           ispy1$design[sample.subset, 'rfs.t'],
           ispy1$design[sample.subset, 'rfs.e'],
           xlab = 'Time (years)')
ispy.df <- generateDataForPlot(as.numeric(ispy1$exprs['5292', ]),
                               factor(ispy1$design$status,
                                      levels = c('HER2+', 'HR+HER2-', 'TN')))
drawBarPlot('ispy1_PIM1_exprs_barplot.jpg', ispy.df$plot.data)
write.csv(ispy.df$significance, file = 'ispy1_PIM1_exprs_pairwise_t.csv')
ispy1.res <- univariateAnalysis(as.numeric(ispy1$exprs['5292', ]),
                                ispy1$design$hr,
                                ispy1$design$rfs.t,
                                ispy1$design$rfs.e)
write.csv(ispy1.res, file = 'ispy1_univariate_analysis.csv')

## GSE25066 (I-SPY overlap removed by the loader): same KM / bar plot /
## univariate workflow using distant relapse-free survival (drfs_*).
gse25066 <- GSE25066_DATASET()
sample.subset <- !is.na(gse25066$design$hr) & gse25066$design$hr == 'HR-'
drawKMPlot('gse25066_PIM1_kaplan_meier.jpg',
           'Pooled Neoadjuvant Chemotherapy Treated',
           as.numeric(gse25066$exprs['5292', sample.subset]),
           gse25066$design[sample.subset, 'drfs_t'],
           gse25066$design[sample.subset, 'drfs_e'],
           xlab = 'Time (years)')
gse25066.df <- generateDataForPlot(as.numeric(gse25066$exprs['5292', ]),
                                   factor(gse25066$design$status,
                                          levels = c('HER2+',
                                                     'HR+HER2-',
                                                     'TN')))
drawBarPlot('gse25066_PIM1_exprs_barplot.jpg', gse25066.df$plot.data)
write.csv(gse25066.df$significance, file = 'gse25066_PIM1_exprs_pairwise_t.csv')
gse25066.res <- univariateAnalysis(as.numeric(gse25066$exprs['5292', ]),
                                   gse25066$design$hr,
                                   gse25066$design$drfs_t,
                                   gse25066$design$drfs_e)
write.csv(gse25066.res, file = 'gse25066_univariate_analysis.csv')

## Yau: ER status stands in for HR status, gene symbols index the
## expression matrix, and distant metastasis-free survival is used.
yau <- YAU_DATASET()
sample.subset <- !is.na(yau$design$er_status) & yau$design$er_status == 'ER-'
drawKMPlot('yau_PIM1_kaplan_meier.jpg',
           'Pooled Node-Negative Adjuvant Chemotherapy Naive',
           as.numeric(yau$exprs['PIM1', sample.subset]),
           time = yau$design[sample.subset, 't_dmfs'],
           event = yau$design[sample.subset, 'e_dmfs'],
           xlab = 'Time (years)')
yau.df <- generateDataForPlot(as.numeric(yau$exprs['PIM1', ]),
                              factor(yau$design$status,
                                     levels = c('HER2+',
                                                'ER+HER2-',
                                                'ER-HER2-')))
drawBarPlot('yau_PIM1_exprs_barplot.jpg', yau.df$plot.data)
write.csv(yau.df$significance, file = 'yau_PIM1_exprs_pairwise_t.csv')
yau.res <- univariateAnalysis(as.numeric(yau$exprs['PIM1', ]),
                              yau$design$er_status,
                              yau$design$t_dmfs,
                              yau$design$e_dmfs)
write.csv(yau.res, file = 'yau_univariate_analysis.csv')

##
## MYC Univariate Analysis (ID '4609' corresponds to MYC per file names)
##
ispy1.myc.uva <- univariateAnalysis(as.numeric(ispy1$exprs['4609', ]),
                                    ispy1$design$hr,
                                    ispy1$design$rfs.t,
                                    ispy1$design$rfs.e)
write.csv(ispy1.myc.uva, file = 'ispy1_MYC_univariate_analysis.csv')
gse25066.myc.uva <- univariateAnalysis(as.numeric(gse25066$exprs['4609', ]),
                                       gse25066$design$hr,
                                       gse25066$design$drfs_t,
                                       gse25066$design$drfs_e)
write.csv(gse25066.myc.uva, file = 'gse25066_MYC_univariate_analysis.csv')
yau.myc.uva <- univariateAnalysis(as.numeric(yau$exprs['MYC', ]),
                                  yau$design$er_status,
                                  yau$design$t_dmfs,
                                  yau$design$e_dmfs)
write.csv(yau.myc.uva, file = 'yau_MYC_univariate_analysis.csv')

##
## Correlation between MYC and PIM1 (per subtype, all four datasets)
##
tcga.myc.pim1.corr <- corrAnalysis(as.numeric(tcga$exprs['PIM1', ]),
                                   as.numeric(tcga$exprs['MYC', ]),
                                   factor(tcga$design$status))
write.csv(tcga.myc.pim1.corr, file = 'tcga_MYC_PIM1_corr.csv')
ispy.myc.pim1.corr <- corrAnalysis(as.numeric(ispy1$exprs['5292', ]),
                                   as.numeric(ispy1$exprs['4609', ]),
                                   factor(ispy1$design$status))
write.csv(ispy.myc.pim1.corr, file = 'ispy1_MYC_PIM1_corr.csv')
gse25066.myc.pim1.corr <- corrAnalysis(as.numeric(gse25066$exprs['5292', ]),
                                       as.numeric(gse25066$exprs['4609', ]),
                                       factor(gse25066$design$status))
write.csv(gse25066.myc.pim1.corr, file = 'gse25066_MYC_PIM1_corr.csv')
yau.myc.pim1.corr <- corrAnalysis(as.numeric(yau$exprs['PIM1', ]),
                                  as.numeric(yau$exprs['MYC', ]),
                                  factor(yau$design$status))
write.csv(yau.myc.pim1.corr, file = 'yau_MYC_PIM1_corr.csv')

##
## DE Analysis CDKN1B (ID '1027' corresponds to CDKN1B per file names)
##
tcga.df2 <- generateDataForPlot(as.numeric(tcga$exprs['CDKN1B', ]),
                                factor(tcga$design$status,
                                       levels = c('HER2+', 'HR+HER2-', 'TN')))
drawBarPlot('tcga_CDKN1B_exprs_barplot.jpg', tcga.df2$plot.data)
write.csv(tcga.df2$significance, file = 'tcga_CDKN1B_exprs_pairwise_t.csv')
ispy.df2 <- generateDataForPlot(as.numeric(ispy1$exprs['1027', ]),
                                factor(ispy1$design$status,
                                       levels = c('HER2+', 'HR+HER2-', 'TN')))
drawBarPlot('ispy1_CDKN1B_exprs_barplot.jpg', ispy.df2$plot.data)
write.csv(ispy.df2$significance, file = 'ispy1_CDKN1B_exprs_pairwise_t.csv')
gse25066.df2 <- generateDataForPlot(as.numeric(gse25066$exprs['1027', ]),
                                    factor(gse25066$design$status,
                                           levels = c('HER2+',
                                                      'HR+HER2-',
                                                      'TN')))
drawBarPlot('gse25066_CDKN1B_exprs_barplot.jpg', gse25066.df2$plot.data)
write.csv(gse25066.df2$significance, file = 'gse25066_CDKN1B_exprs_pairwise_t.csv')
yau.df2 <- generateDataForPlot(as.numeric(yau$exprs['CDKN1B', ]),
                               factor(yau$design$status,
                                      levels = c('HER2+',
                                                 'ER+HER2-',
                                                 'ER-HER2-')))
drawBarPlot('yau_CDKN1B_exprs_barplot.jpg', yau.df2$plot.data)
write.csv(yau.df2$significance, file = 'yau_CDKN1B_exprs_pairwise_t.csv')

##
## Correlation between CDKN1B and PIM1
##
tcga.cdkn1b.pim1.corr <- corrAnalysis(as.numeric(tcga$exprs['PIM1', ]),
                                      as.numeric(tcga$exprs['CDKN1B', ]),
                                      factor(tcga$design$status))
write.csv(tcga.cdkn1b.pim1.corr, file = 'tcga_CDKN1B_PIM1_corr.csv')
ispy.cdkn1b.pim1.corr <- corrAnalysis(as.numeric(ispy1$exprs['5292', ]),
                                      as.numeric(ispy1$exprs['1027', ]),
                                      factor(ispy1$design$status))
write.csv(ispy.cdkn1b.pim1.corr, file = 'ispy1_CDKN1B_PIM1_corr.csv')
gse25066.cdkn1b.pim1.corr <- corrAnalysis(as.numeric(gse25066$exprs['5292', ]),
                                          as.numeric(gse25066$exprs['1027', ]),
                                          factor(gse25066$design$status))
write.csv(gse25066.cdkn1b.pim1.corr, file = 'gse25066_CDKN1B_PIM1_corr.csv')
yau.cdkn1b.pim1.corr <- corrAnalysis(as.numeric(yau$exprs['PIM1', ]),
                                     as.numeric(yau$exprs['CDKN1B', ]),
                                     factor(yau$design$status))
write.csv(yau.cdkn1b.pim1.corr, file = 'yau_CDKN1B_PIM1_corr.csv')

##
## CDKN1B Univariate Analysis
##
ispy1.cdkn1b.uva <- univariateAnalysis(as.numeric(ispy1$exprs['1027', ]),
                                       ispy1$design$hr,
                                       ispy1$design$rfs.t,
                                       ispy1$design$rfs.e)
write.csv(ispy1.cdkn1b.uva, file = 'ispy1_CDKN1B_univariate_analysis.csv')
gse25066.cdkn1b.uva <- univariateAnalysis(as.numeric(gse25066$exprs['1027', ]),
                                          gse25066$design$hr,
                                          gse25066$design$drfs_t,
                                          gse25066$design$drfs_e)
write.csv(gse25066.cdkn1b.uva, file = 'gse25066_CDKN1B_univariate_analysis.csv')
yau.cdkn1b.uva <- univariateAnalysis(as.numeric(yau$exprs['CDKN1B', ]),
                                     yau$design$er_status,
                                     yau$design$t_dmfs,
                                     yau$design$e_dmfs)
write.csv(yau.cdkn1b.uva, file = 'yau_CDKN1B_univariate_analysis.csv')
|
testlist <- list(doy = NaN, latitude = c(2.77448001762435e+180, 2.36468317547528e+179, NaN, 2.77448001762435e+180, NaN, 2.85279195360568e+180, 2.77448001762435e+180, 6.34899725744383e-66, 2.7744800222921e+180, NaN, NaN, -5.48545699190201e+303, NaN, NaN, 1.18182239014916e-125, NaN, NaN, NaN, NaN, NaN, NaN, 1.08408895213916e-19, -1.05658865685293e+270, 1.29035286663029e+214, -1.60283297694471e-180, 2.75983466855771e+181, 2.68373944684602e+199, 1.1241466614968e+79, 7.29112203137456e-304, -5.70159252079474e+303, 7.29111856797089e-304, 1.21339378369184e-67, 6.22623865398035e-109, 3.62473289151349e+228, 7.03104782847522e-24, 1.41117821684455e+277, 1.41117821684533e+277, NaN, 1.92105781554008e-168, 2.31320649354954e-23, 2.13028483652966e-314, 1.37982759729425e-309, 8.07123252740795e-65, -2.92949919635097e+299, -5.51157045666091e+303, 9.61276163712138e+281, 7.90763413601096e-299, 1.09661312858685e-72, 8.25805381456012e-317, 7.73970547159204e-304, -1.94906280695994e+289, -2.36776797154032e-150, 8.73901580341246e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(2.77448001762435e+180, NaN, 2.77448001761239e+180, NaN, NaN, NaN, NaN, NaN, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/libFuzzer_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1612735593-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,308 | r | testlist <- list(doy = NaN, latitude = c(2.77448001762435e+180, 2.36468317547528e+179, NaN, 2.77448001762435e+180, NaN, 2.85279195360568e+180, 2.77448001762435e+180, 6.34899725744383e-66, 2.7744800222921e+180, NaN, NaN, -5.48545699190201e+303, NaN, NaN, 1.18182239014916e-125, NaN, NaN, NaN, NaN, NaN, NaN, 1.08408895213916e-19, -1.05658865685293e+270, 1.29035286663029e+214, -1.60283297694471e-180, 2.75983466855771e+181, 2.68373944684602e+199, 1.1241466614968e+79, 7.29112203137456e-304, -5.70159252079474e+303, 7.29111856797089e-304, 1.21339378369184e-67, 6.22623865398035e-109, 3.62473289151349e+228, 7.03104782847522e-24, 1.41117821684455e+277, 1.41117821684533e+277, NaN, 1.92105781554008e-168, 2.31320649354954e-23, 2.13028483652966e-314, 1.37982759729425e-309, 8.07123252740795e-65, -2.92949919635097e+299, -5.51157045666091e+303, 9.61276163712138e+281, 7.90763413601096e-299, 1.09661312858685e-72, 8.25805381456012e-317, 7.73970547159204e-304, -1.94906280695994e+289, -2.36776797154032e-150, 8.73901580341246e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(2.77448001762435e+180, NaN, 2.77448001761239e+180, NaN, NaN, NaN, NaN, NaN, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
##
## isj: island scrub-jay occupancy example
##
load("isj.Rdata")  # loads data frame `isj` (columns include isj, elev, forest, chap and coordinates in cols 2:3)
head(isj)

## Map elevation in greyscale (darker = higher, after 0-1 normalization);
## rows 1:307 (presumably the surveyed sites) overlaid as red crosses.
plot(isj[,2:3],type="n",main="Elevation",asp=1)
na.idx=which(is.na(isj$elev))
elev.star=isj$elev[-na.idx]
norm.elev=(elev.star-min(elev.star))/(max(elev.star)-min(elev.star))
points(isj[-na.idx,2:3],pch=20,col=grey(1-norm.elev))
points(isj[1:307,2:3],pch=4,col="red")

## Keep rows with observed occupancy and non-missing elevation, then fit a
## binomial GLM with linear elevation and quadratic forest/chaparral terms.
occ=isj[-which(is.na(isj$isj)),]
occ=occ[-which(is.na(occ$elev)),]
fit=glm(isj~poly(elev,1)+poly(forest,2)+poly(chap,2),family=binomial,data=occ)
summary(fit)

resids=residuals(fit)
plot(resids)

## Predict occupancy probability wherever covariates are complete and map
## it in greyscale; residual magnitudes (rescaled to [0, 2]) are drawn as
## red circles to look for spatial structure the model leaves unexplained.
plot(isj[,2:3],type="n",main="Elevation",asp=1)
isj.nona=na.omit(isj[,-1])
str(isj.nona)
phat=predict(fit,newdata=isj.nona,type="response")
plot(isj[,2:3],type="n",main="Plotting Residuals in Space",asp=1)
points(isj.nona[,1:2],pch=20,col=grey(1-phat))
points(isj[1:307,2:3],pch=1,col="red",cex=(resids-min(resids))/(max(resids)-min(resids))*2)
##
## Nile: annual flow at Ashwan, 1871-1970 (built-in `Nile` time series)
##
plot(Nile,main="Annual flow of the river Nile at Ashwan")
## date when Ashwan dam was constructed
abline(v=1902,col="blue")

## create a step covariate: 0 before the dam (1871-1901), 1 from 1902 on
y=as.numeric(Nile)
x=rep(0,length(y))
1902-1871  # = 31, so 1902 is the 32nd observation
x[32:length(x)]=1
plot(x)
pairs(cbind(y,x))

## fit linear model (mean shift at 1902)
fit=lm(y~x)
summary(fit)
ypred=predict(fit)
res=resid(fit)

## examine model fit graphically
plot(Nile,main="Annual flow of the river Nile at Ashwan - Data with Fitted Model")
points(1871:1970,ypred,type="l",col="red")

## examine residuals for normality
par(mfrow=c(1,2))
hist(res,main="Histogram of Model Residuals")
qqnorm(res)
qqline(res,col="red")
par(mfrow=c(1,1))

## residuals over time, colored by sign (palette 2 = red for positive,
## 4 = blue for negative)
pncol=rep(2,length(res))
pncol[res<0] <- 4
plot(1871:1970,res,pch=20,type="l",main="Model Residuals Over Time",col=1)
points(1871:1970,res,pch=20,type="p",col=pncol,cex=3)
abline(h=0)

## examine residuals for serial correlation: lag-1 scatter/regression + ACF
r=res[-1]
r.prev=res[-length(res)]
plot(r,r.prev)
abline(lm(r~r.prev))
summary(lm(r~r.prev))
acf(res)

## Specify and fit a Linear Model with AR(1) time series correlated random effect
library(nlme)
group=rep(1,length(y))  # defined but not used in the gls() call below
t=1871:1970
fit.gls=gls(y~x,correlation=corAR1(form= ~t))
summary(fit.gls)
intervals(fit.gls)
| /ExampleCode/CorrelatedResids.r | no_license | TheMetaphysicalCrook/STAT511 | R | false | false | 2,066 | r | ##
## isj: island scrub-jay occupancy example
##
load("isj.Rdata")  # loads data frame `isj` (columns include isj, elev, forest, chap and coordinates in cols 2:3)
head(isj)

## Map elevation in greyscale (darker = higher, after 0-1 normalization);
## rows 1:307 (presumably the surveyed sites) overlaid as red crosses.
plot(isj[,2:3],type="n",main="Elevation",asp=1)
na.idx=which(is.na(isj$elev))
elev.star=isj$elev[-na.idx]
norm.elev=(elev.star-min(elev.star))/(max(elev.star)-min(elev.star))
points(isj[-na.idx,2:3],pch=20,col=grey(1-norm.elev))
points(isj[1:307,2:3],pch=4,col="red")

## Keep rows with observed occupancy and non-missing elevation, then fit a
## binomial GLM with linear elevation and quadratic forest/chaparral terms.
occ=isj[-which(is.na(isj$isj)),]
occ=occ[-which(is.na(occ$elev)),]
fit=glm(isj~poly(elev,1)+poly(forest,2)+poly(chap,2),family=binomial,data=occ)
summary(fit)

resids=residuals(fit)
plot(resids)

## Predict occupancy probability wherever covariates are complete and map
## it in greyscale; residual magnitudes (rescaled to [0, 2]) are drawn as
## red circles to look for spatial structure the model leaves unexplained.
plot(isj[,2:3],type="n",main="Elevation",asp=1)
isj.nona=na.omit(isj[,-1])
str(isj.nona)
phat=predict(fit,newdata=isj.nona,type="response")
plot(isj[,2:3],type="n",main="Plotting Residuals in Space",asp=1)
points(isj.nona[,1:2],pch=20,col=grey(1-phat))
points(isj[1:307,2:3],pch=1,col="red",cex=(resids-min(resids))/(max(resids)-min(resids))*2)
##
## Nile: annual flow at Ashwan, 1871-1970 (built-in `Nile` time series)
##
plot(Nile,main="Annual flow of the river Nile at Ashwan")
## date when Ashwan dam was constructed
abline(v=1902,col="blue")

## create a step covariate: 0 before the dam (1871-1901), 1 from 1902 on
y=as.numeric(Nile)
x=rep(0,length(y))
1902-1871  # = 31, so 1902 is the 32nd observation
x[32:length(x)]=1
plot(x)
pairs(cbind(y,x))

## fit linear model (mean shift at 1902)
fit=lm(y~x)
summary(fit)
ypred=predict(fit)
res=resid(fit)

## examine model fit graphically
plot(Nile,main="Annual flow of the river Nile at Ashwan - Data with Fitted Model")
points(1871:1970,ypred,type="l",col="red")

## examine residuals for normality
par(mfrow=c(1,2))
hist(res,main="Histogram of Model Residuals")
qqnorm(res)
qqline(res,col="red")
par(mfrow=c(1,1))

## residuals over time, colored by sign (palette 2 = red for positive,
## 4 = blue for negative)
pncol=rep(2,length(res))
pncol[res<0] <- 4
plot(1871:1970,res,pch=20,type="l",main="Model Residuals Over Time",col=1)
points(1871:1970,res,pch=20,type="p",col=pncol,cex=3)
abline(h=0)

## examine residuals for serial correlation: lag-1 scatter/regression + ACF
r=res[-1]
r.prev=res[-length(res)]
plot(r,r.prev)
abline(lm(r~r.prev))
summary(lm(r~r.prev))
acf(res)

## Specify and fit a Linear Model with AR(1) time series correlated random effect
library(nlme)
group=rep(1,length(y))  # defined but not used in the gls() call below
t=1871:1970
fit.gls=gls(y~x,correlation=corAR1(form= ~t))
summary(fit.gls)
intervals(fit.gls)
|
## Plot homicide-probability trends for the most dangerous Philadelphia
## zipcodes. boot.R is assumed to define `homicides` (with Zipcode, Year,
## Probability columns), `zipcodes`, and to load ggplot2 for qplot() --
## TODO confirm against library/boot.R.
source('library/boot.R')

## Mean homicide probability per zipcode (across all years)
mean.homicides <- sapply(zipcodes, function (z) {with(subset(homicides, Zipcode == z), mean(Probability))})

## Keep zipcodes whose mean probability exceeds the threshold
threshold <- 0.05
most.dangerous.zipcodes <- zipcodes[which(mean.homicides > threshold)]
worst.homicides <- subset(homicides, Zipcode %in% most.dangerous.zipcodes)
## Re-factor so only the retained zipcodes appear as levels (legend)
worst.homicides$Zipcode <- as.factor(as.character(worst.homicides$Zipcode))

## One line per zipcode, written to a JPEG; print() forces rendering
## inside the non-interactive device
jpeg('visualizations/Homicide Trends in Philly\'s Most Dangerous Zipcodes.jpg')
print(qplot(Year, Probability,
    data = worst.homicides,
    color = Zipcode,
    geom = 'line',
    main = 'Homicide Trends by Zipcode'))
dev.off()
| /visualizations/historical_trends.R | no_license | johnmyleswhite/analytics_x_prize | R | false | false | 644 | r | source('library/boot.R')
mean.homicides <- sapply(zipcodes, function (z) {with(subset(homicides, Zipcode == z), mean(Probability))})
threshold <- 0.05
most.dangerous.zipcodes <- zipcodes[which(mean.homicides > threshold)]
worst.homicides <- subset(homicides, Zipcode %in% most.dangerous.zipcodes)
worst.homicides$Zipcode <- as.factor(as.character(worst.homicides$Zipcode))
jpeg('visualizations/Homicide Trends in Philly\'s Most Dangerous Zipcodes.jpg')
print(qplot(Year, Probability,
data = worst.homicides,
color = Zipcode,
geom = 'line',
main = 'Homicide Trends by Zipcode'))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Measure.R
\name{makeMeasure}
\alias{makeMeasure}
\alias{Measure}
\title{Construct performance measure.}
\usage{
makeMeasure(
id,
minimize,
properties = character(0L),
fun,
extra.args = list(),
aggr = test.mean,
best = NULL,
worst = NULL,
name = id,
note = ""
)
}
\arguments{
\item{id}{(`character(1)`)\cr
Name of measure.}
\item{minimize}{(`logical(1)`)\cr
Should the measure be minimized?
Default is `TRUE`.}
\item{properties}{([character])\cr
Set of measure properties. Some standard property names include:
\describe{
\item{classif}{Is the measure applicable for classification?}
\item{classif.multi}{Is the measure applicable for multi-class classification?}
\item{multilabel}{Is the measure applicable for multilabel classification?}
\item{regr}{Is the measure applicable for regression?}
\item{surv}{Is the measure applicable for survival?}
\item{cluster}{Is the measure applicable for cluster?}
\item{costsens}{Is the measure applicable for cost-sensitive learning?}
\item{req.pred}{Is prediction object required in calculation? Usually the case.}
\item{req.truth}{Is truth column required in calculation? Usually the case.}
\item{req.task}{Is task object required in calculation? Usually not the case}
\item{req.model}{Is model object required in calculation? Usually not the case.}
\item{req.feats}{Are feature values required in calculation? Usually not the case.}
\item{req.prob}{Are predicted probabilities required in calculation? Usually not the case, example would be AUC.}
}
Default is `character(0)`.}
\item{fun}{(`function(task, model, pred, feats, extra.args)`)\cr
Calculates the performance value. Usually you will only need the prediction
object `pred`.
\describe{
\item{`task` ([Task])}{
The task.}
\item{`model` ([WrappedModel])}{
The fitted model.}
\item{`pred` ([Prediction])}{
Prediction object.}
\item{`feats` ([data.frame])}{
The features.}
\item{`extra.args` ([list])}{
See below.}
}}
\item{extra.args}{([list])\cr
List of extra arguments which will always be passed to `fun`.
Can be changed after construction via [setMeasurePars].
Default is empty list.}
\item{aggr}{([Aggregation])\cr
Aggregation function, which is used to aggregate the values measured
on test / training sets of the measure to a single value.
Default is [test.mean].}
\item{best}{(`numeric(1)`)\cr
Best obtainable value for measure.
Default is -`Inf` or `Inf`, depending on `minimize`.}
\item{worst}{(`numeric(1)`)\cr
Worst obtainable value for measure.
Default is `Inf` or -`Inf`, depending on `minimize`.}
\item{name}{([character]) \cr
Name of the measure. Default is `id`.}
\item{note}{([character]) \cr
Description and additional notes for the measure. Default is \dQuote{}.}
}
\value{
\link{Measure}.
}
\description{
A measure object encapsulates a function to evaluate the performance of a prediction.
Information about already implemented measures can be obtained here: [measures].
A learner is trained on a training set d1, results in a model m and predicts another set d2
(which may be a different one or the training set) resulting in the prediction.
The performance measure can now be defined using all of the information of the original task,
the fitted model and the prediction.
Object slots:
\describe{
\item{id (character(1))}{See argument.}
\item{minimize (logical(1))}{See argument.}
\item{properties (character)}{See argument.}
\item{fun (function)}{See argument.}
\item{extra.args (list)}{See argument.}
\item{aggr (Aggregation)}{See argument.}
\item{best (numeric(1))}{See argument.}
\item{worst (numeric(1))}{See argument.}
\item{name (character(1))}{See argument.}
\item{note (character(1))}{See argument.}
}
}
\examples{
f = function(task, model, pred, extra.args)
sum((pred$data$response - pred$data$truth)^2)
makeMeasure(id = "my.sse", minimize = TRUE, properties = c("regr", "response"), fun = f)
}
\seealso{
Other performance:
\code{\link{ConfusionMatrix}},
\code{\link{calculateConfusionMatrix}()},
\code{\link{calculateROCMeasures}()},
\code{\link{estimateRelativeOverfitting}()},
\code{\link{makeCostMeasure}()},
\code{\link{makeCustomResampledMeasure}()},
\code{\link{measures}},
\code{\link{performance}()},
\code{\link{setAggregation}()},
\code{\link{setMeasurePars}()}
}
\concept{performance}
| /man/makeMeasure.Rd | no_license | fdrennan/mlr | R | false | true | 4,420 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Measure.R
\name{makeMeasure}
\alias{makeMeasure}
\alias{Measure}
\title{Construct performance measure.}
\usage{
makeMeasure(
id,
minimize,
properties = character(0L),
fun,
extra.args = list(),
aggr = test.mean,
best = NULL,
worst = NULL,
name = id,
note = ""
)
}
\arguments{
\item{id}{(`character(1)`)\cr
Name of measure.}
\item{minimize}{(`logical(1)`)\cr
Should the measure be minimized?
Default is `TRUE`.}
\item{properties}{([character])\cr
Set of measure properties. Some standard property names include:
\describe{
\item{classif}{Is the measure applicable for classification?}
\item{classif.multi}{Is the measure applicable for multi-class classification?}
\item{multilabel}{Is the measure applicable for multilabel classification?}
\item{regr}{Is the measure applicable for regression?}
\item{surv}{Is the measure applicable for survival?}
\item{cluster}{Is the measure applicable for cluster?}
\item{costsens}{Is the measure applicable for cost-sensitive learning?}
\item{req.pred}{Is prediction object required in calculation? Usually the case.}
\item{req.truth}{Is truth column required in calculation? Usually the case.}
\item{req.task}{Is task object required in calculation? Usually not the case}
\item{req.model}{Is model object required in calculation? Usually not the case.}
\item{req.feats}{Are feature values required in calculation? Usually not the case.}
\item{req.prob}{Are predicted probabilities required in calculation? Usually not the case, example would be AUC.}
}
Default is `character(0)`.}
\item{fun}{(`function(task, model, pred, feats, extra.args)`)\cr
Calculates the performance value. Usually you will only need the prediction
object `pred`.
\describe{
\item{`task` ([Task])}{
The task.}
\item{`model` ([WrappedModel])}{
The fitted model.}
\item{`pred` ([Prediction])}{
Prediction object.}
\item{`feats` ([data.frame])}{
The features.}
\item{`extra.args` ([list])}{
See below.}
}}
\item{extra.args}{([list])\cr
List of extra arguments which will always be passed to `fun`.
Can be changed after construction via [setMeasurePars].
Default is empty list.}
\item{aggr}{([Aggregation])\cr
Aggregation function, which is used to aggregate the values measured
on test / training sets of the measure to a single value.
Default is [test.mean].}
\item{best}{(`numeric(1)`)\cr
Best obtainable value for measure.
Default is -`Inf` or `Inf`, depending on `minimize`.}
\item{worst}{(`numeric(1)`)\cr
Worst obtainable value for measure.
Default is `Inf` or -`Inf`, depending on `minimize`.}
\item{name}{([character]) \cr
Name of the measure. Default is `id`.}
\item{note}{([character]) \cr
Description and additional notes for the measure. Default is \dQuote{}.}
}
\value{
\link{Measure}.
}
\description{
A measure object encapsulates a function to evaluate the performance of a prediction.
Information about already implemented measures can be obtained here: [measures].
A learner is trained on a training set d1, results in a model m and predicts another set d2
(which may be a different one or the training set) resulting in the prediction.
The performance measure can now be defined using all of the information of the original task,
the fitted model and the prediction.
Object slots:
\describe{
\item{id (character(1))}{See argument.}
\item{minimize (logical(1))}{See argument.}
\item{properties (character)}{See argument.}
\item{fun (function)}{See argument.}
\item{extra.args (list)}{See argument.}
\item{aggr (Aggregation)}{See argument.}
\item{best (numeric(1))}{See argument.}
\item{worst (numeric(1))}{See argument.}
\item{name (character(1))}{See argument.}
\item{note (character(1))}{See argument.}
}
}
\examples{
f = function(task, model, pred, extra.args)
sum((pred$data$response - pred$data$truth)^2)
makeMeasure(id = "my.sse", minimize = TRUE, properties = c("regr", "response"), fun = f)
}
\seealso{
Other performance:
\code{\link{ConfusionMatrix}},
\code{\link{calculateConfusionMatrix}()},
\code{\link{calculateROCMeasures}()},
\code{\link{estimateRelativeOverfitting}()},
\code{\link{makeCostMeasure}()},
\code{\link{makeCustomResampledMeasure}()},
\code{\link{measures}},
\code{\link{performance}()},
\code{\link{setAggregation}()},
\code{\link{setMeasurePars}()}
}
\concept{performance}
|
# Take the file topicHierarchy.tab and convert to JSON nested format
# Note this could probably be done much easier just by adding a JSON bracket to each line of the text file?
library(RJSONIO)
try(setwd("~/OneDrive - Cardiff University/Research/Bristol/CHIELD/CHIELD_Online/processing/"))
# use readLines to work out max depth
# (because read.delim will misalign columns without this info)
h= readLines("../data/tabular/topicHierarchy.tab", encoding = "UTF-8")
maxDepth = max(sapply(strsplit(h,"\t"),length))
h = read.delim2("../data/tabular/topicHierarchy.tab",sep="\t",header = F,fileEncoding = "UTF-8",encoding = "UTF-8",as.is=T,col.names = paste0("V",1:maxDepth))
# Turn tabbed list into table with all rows filled out
columnsWithData = which(apply(h,2,function(X){sum(X!="")>0}))
h = h[,columnsWithData]
rowsWithData = which(apply(h,1,function(X){sum(X!="")>0}))
h = h[rowsWithData,]
#h$leaf = (h[,ncol(h)]!="")
# Indent level of a hierarchy row: the position of its deepest non-empty cell.
# (For an all-empty row this falls through to max(integer(0)), i.e. -Inf.)
depth <- function(row) {
  filled <- which(row != "")
  max(filled)
}
# Identify leaf nodes
# (basically any node where the next line has
# the same or higher indent)
h$leaf = TRUE
hcols = 1:(ncol(h)-1)
for(i in 1:(nrow(h)-1)){
if(depth(h[i,hcols]) < depth(h[i+1,hcols])){
h[i,]$leaf = FALSE
}
}
# Fill in columns so that higher tiers
# get copied into all rows
for(i in 2:nrow(h)){
maxCol = max(which(h[i,1:(ncol(h)-1)]!=""))
for(j in 1:maxCol){
if(h[i,j]==""){
h[i,j] = h[i-1,j]
}
}
}
h = h[h$leaf,]
## Filter hierarchy to only those included in CHIELD
## (Alternative is to add rest to "unclassified")
l = read.csv("../data/db/CausalLinks.csv",stringsAsFactors = F,encoding = "UTF-8",fileEncoding = "UTF-8")
Ctopics = (l$Topic)
if(sum(!is.na(Ctopics))>0){
Ctopics = unlist(strsplit(Ctopics,";"))
Ctopics = gsub(" +$","",Ctopics)
Ctopics = gsub("^ +","",Ctopics)
Ctopics = unique(Ctopics[!is.na(Ctopics)])
# Capitalise first letter
#Ctopics = tolower(Ctopics)
#Ctopics = sapply(Ctopics,function(X){
# X = strsplit(X,"")[[1]]
# X[1] = toupper(X[1])
# paste(X,collapse="")
#})
Ctopics = Ctopics[!is.na(Ctopics)]
Ctopics = Ctopics[Ctopics!="NA"]
Ctopics = Ctopics[Ctopics!=""]
# All leaves in hierarchy
hLeaves = apply(h,1,function(X){
X[depth(X[1:(length(X)-1)])]
})
filterHierarchy=TRUE
if(filterHierarchy){
h = h[sapply(hLeaves,function(X){tolower(X) %in% tolower(Ctopics)}),]
}
# Add topics not in hierarchy to "unclassified"
unc = Ctopics[!Ctopics %in% hLeaves]
unc = unc[!is.na(unc)]
unc = unc[unc!="NA"]
uncm = matrix("",nrow = length(unc),ncol=ncol(h))
uncm[,1]="Unclassified"
uncm[,2]=unc
uncm[,ncol(uncm)] = TRUE
colnames(uncm) = names(h)
h = rbind(h,uncm)
# Turn data frame into nested JSON:
  # Recursively convert the filled-out hierarchy table `x` into a nested
  # list of {label, leaf, children} nodes for toJSON().
  # Relies on `Ctopics` from the surrounding scope (topics present in CHIELD)
  # to decide whether an internal node is itself a selectable topic.
  makeList<-function(x){
    # An internal node has a non-empty entry somewhere after the first column.
    isInternalNode = FALSE
    if(ncol(x)>2){
      if(sum(x[,2:ncol(x)]!="")>0){
        isInternalNode = TRUE
      }
    }
    if(isInternalNode){
      # Group the remaining columns by the first column's values and
      # recurse into each group to build that node's children.
      listSplit<-split(x[-1],x[1],drop=T)
      lapply(names(listSplit),function(y){
        list(
          label=y,
          # Marked a leaf when the label is itself a CHIELD topic.
          leaf = y %in% Ctopics[!is.na(Ctopics)],
          children=makeList(listSplit[[y]]))
      })
    }else{
      # Terminal column: emit one leaf node per row.
      # NOTE(review): leaf is the string "true" here but a logical above --
      # presumably both are truthy to the consuming JS; confirm.
      lapply(seq(nrow(x[1])),function(y){
        list(
          leaf="true",
          label=x[,1][y]
        )
      })
    }
  }
h2 = makeList(h[,1:(ncol(h)-1)])
} else{
# No topics in database
h2 = list()
}
jsonOut<-toJSON(list(label="Topics",
expanded="true",
children=h2),
pretty = T)
cat(jsonOut, file="../app/Site/json/topicHierarchy.json")
| /processing/makeTopicHierarchy.R | no_license | CHIELDOnline/CHIELD | R | false | false | 3,561 | r | # Take the file topicHierarchy.tab and convert to JSON nested format
# Note this could probably be done much easier just by adding a JSON bracket to each line of the text file?
library(RJSONIO)
try(setwd("~/OneDrive - Cardiff University/Research/Bristol/CHIELD/CHIELD_Online/processing/"))
# use readLines to work out max depth
# (because read.delim will misalign columns without this info)
h= readLines("../data/tabular/topicHierarchy.tab", encoding = "UTF-8")
maxDepth = max(sapply(strsplit(h,"\t"),length))
h = read.delim2("../data/tabular/topicHierarchy.tab",sep="\t",header = F,fileEncoding = "UTF-8",encoding = "UTF-8",as.is=T,col.names = paste0("V",1:maxDepth))
# Turn tabbed list into table with all rows filled out
columnsWithData = which(apply(h,2,function(X){sum(X!="")>0}))
h = h[,columnsWithData]
rowsWithData = which(apply(h,1,function(X){sum(X!="")>0}))
h = h[rowsWithData,]
#h$leaf = (h[,ncol(h)]!="")
# Indent level of a hierarchy row: the position of its deepest non-empty cell.
# (For an all-empty row this falls through to max(integer(0)), i.e. -Inf.)
depth <- function(row) {
  filled <- which(row != "")
  max(filled)
}
# Identify leaf nodes
# (basically any node where the next line has
# the same or higher indent)
h$leaf = TRUE
hcols = 1:(ncol(h)-1)
for(i in 1:(nrow(h)-1)){
if(depth(h[i,hcols]) < depth(h[i+1,hcols])){
h[i,]$leaf = FALSE
}
}
# Fill in columns so that higher tiers
# get copied into all rows
for(i in 2:nrow(h)){
maxCol = max(which(h[i,1:(ncol(h)-1)]!=""))
for(j in 1:maxCol){
if(h[i,j]==""){
h[i,j] = h[i-1,j]
}
}
}
h = h[h$leaf,]
## Filter hierarchy to only those included in CHIELD
## (Alternative is to add rest to "unclassified")
l = read.csv("../data/db/CausalLinks.csv",stringsAsFactors = F,encoding = "UTF-8",fileEncoding = "UTF-8")
Ctopics = (l$Topic)
if(sum(!is.na(Ctopics))>0){
Ctopics = unlist(strsplit(Ctopics,";"))
Ctopics = gsub(" +$","",Ctopics)
Ctopics = gsub("^ +","",Ctopics)
Ctopics = unique(Ctopics[!is.na(Ctopics)])
# Capitalise first letter
#Ctopics = tolower(Ctopics)
#Ctopics = sapply(Ctopics,function(X){
# X = strsplit(X,"")[[1]]
# X[1] = toupper(X[1])
# paste(X,collapse="")
#})
Ctopics = Ctopics[!is.na(Ctopics)]
Ctopics = Ctopics[Ctopics!="NA"]
Ctopics = Ctopics[Ctopics!=""]
# All leaves in hierarchy
hLeaves = apply(h,1,function(X){
X[depth(X[1:(length(X)-1)])]
})
filterHierarchy=TRUE
if(filterHierarchy){
h = h[sapply(hLeaves,function(X){tolower(X) %in% tolower(Ctopics)}),]
}
# Add topics not in hierarchy to "unclassified"
unc = Ctopics[!Ctopics %in% hLeaves]
unc = unc[!is.na(unc)]
unc = unc[unc!="NA"]
uncm = matrix("",nrow = length(unc),ncol=ncol(h))
uncm[,1]="Unclassified"
uncm[,2]=unc
uncm[,ncol(uncm)] = TRUE
colnames(uncm) = names(h)
h = rbind(h,uncm)
# Turn data frame into nested JSON:
  # Recursively convert the filled-out hierarchy table `x` into a nested
  # list of {label, leaf, children} nodes for toJSON().
  # Relies on `Ctopics` from the surrounding scope (topics present in CHIELD)
  # to decide whether an internal node is itself a selectable topic.
  makeList<-function(x){
    # An internal node has a non-empty entry somewhere after the first column.
    isInternalNode = FALSE
    if(ncol(x)>2){
      if(sum(x[,2:ncol(x)]!="")>0){
        isInternalNode = TRUE
      }
    }
    if(isInternalNode){
      # Group the remaining columns by the first column's values and
      # recurse into each group to build that node's children.
      listSplit<-split(x[-1],x[1],drop=T)
      lapply(names(listSplit),function(y){
        list(
          label=y,
          # Marked a leaf when the label is itself a CHIELD topic.
          leaf = y %in% Ctopics[!is.na(Ctopics)],
          children=makeList(listSplit[[y]]))
      })
    }else{
      # Terminal column: emit one leaf node per row.
      # NOTE(review): leaf is the string "true" here but a logical above --
      # presumably both are truthy to the consuming JS; confirm.
      lapply(seq(nrow(x[1])),function(y){
        list(
          leaf="true",
          label=x[,1][y]
        )
      })
    }
  }
h2 = makeList(h[,1:(ncol(h)-1)])
} else{
# No topics in database
h2 = list()
}
jsonOut<-toJSON(list(label="Topics",
expanded="true",
children=h2),
pretty = T)
cat(jsonOut, file="../app/Site/json/topicHierarchy.json")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/housekeeping.R
\name{GetPleguezuelosManzanoIndelSignaturesData}
\alias{GetPleguezuelosManzanoIndelSignaturesData}
\title{Returns the Pleguezuelos-Manzano indel reference signature data}
\usage{
GetPleguezuelosManzanoIndelSignaturesData()
}
\value{
A data.frame of the Pleguezuelos-Manzano indel mutational signatures.
}
\description{
This function returns the Pleguezuelos-Manzano small insertion and deletion
(indel) mutational signatures data.
}
| /man/GetPleguezuelosManzanoIndelSignaturesData.Rd | no_license | Honglab-Research/MutaliskR | R | false | true | 526 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/housekeeping.R
\name{GetPleguezuelosManzanoIndelSignaturesData}
\alias{GetPleguezuelosManzanoIndelSignaturesData}
\title{Returns the Pleguezuelos-Manzano indel reference signature data}
\usage{
GetPleguezuelosManzanoIndelSignaturesData()
}
\value{
A data.frame of the Pleguezuelos-Manzano indel mutational signatures.
}
\description{
This function returns the Pleguezuelos-Manzano small insertion and deletion
(indel) mutational signatures data.
}
|
# Elastic-net (alpha = 0.9) Gaussian regression on the NSCLC training set
# via 10-fold cross-validation; the fitted coefficient path is appended to a log.
library(glmnet)
# Column 1 is the response; columns 4+ are the predictors.
# NOTE(review): columns 2-3 are skipped -- presumably identifiers; confirm.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.9,family="gaussian",standardize=TRUE)
# Append the glmnet fit summary (df, %dev, lambda path) to the log file.
sink('./NSCLC_089.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/NSCLC/NSCLC_089.R | no_license | esbgkannan/QSMART | R | false | false | 347 | r | library(glmnet)
# Column 1 is the response; columns 4+ are the predictors.
# NOTE(review): columns 2-3 are skipped -- presumably identifiers; confirm.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.9,family="gaussian",standardize=TRUE)
# Append the glmnet fit summary (df, %dev, lambda path) to the log file.
sink('./NSCLC_089.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.R
\name{plotSimulatedThresholds}
\alias{plotSimulatedThresholds}
\title{Plots simulation data for QTLseq analysis}
\usage{
plotSimulatedThresholds(SNPset = NULL, popStruc = "F2", bulkSize,
depth = NULL, replications = 10000, filter = 0.3,
intervals = c(95, 99))
}
\arguments{
\item{SNPset}{optional. Either supply your data set to extract read depths from or supply depth vector.}
\item{popStruc}{the population structure. Defaults to "F2" and assumes "RIL" otherwise.}
\item{bulkSize}{non-negative integer. The number of individuals in each bulk}
\item{depth}{optional integer vector. A read depth for which to replicate SNP-index calls. If read depth is defined SNPset will be ignored.}
\item{replications}{integer. The number of bootstrap replications.}
\item{filter}{numeric. An optional minimum SNP-index filter}
\item{intervals}{numeric vector. Confidence intervals supplied as two-sided percentiles. i.e. If intervals = '95' will return the two sided 95\% confidence interval, 2.5\% on each side.}
}
\value{
Plots a deltaSNP by depth plot. Helps if the user wants to know the the delta SNP index needed to pass a certain CI at a specified depth.
}
\description{
as described in Takagi et al., (2013). Genotypes are randomly assigned for
each individual in the bulk, based on the population structure. The total
alternative allele frequency in each bulk is calculated at each depth used to simulate
delta SNP-indices, with a user-defined number of bootstrapped replications.
The requested confidence intervals are then calculated from the bootstraps.
This function plots the simulated confidence intervals by the read depth.
}
\examples{
plotSimulatedThresholds(SNPset = NULL, popStruc = "F2", bulkSize = 25, depth = 1:150, replications = 10000, filter = 0.3, intervals = c(95, 99))
}
| /man/plotSimulatedThresholds.Rd | no_license | bioShaun/QTLseqr | R | false | true | 1,910 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.R
\name{plotSimulatedThresholds}
\alias{plotSimulatedThresholds}
\title{Plots simulation data for QTLseq analysis}
\usage{
plotSimulatedThresholds(SNPset = NULL, popStruc = "F2", bulkSize,
depth = NULL, replications = 10000, filter = 0.3,
intervals = c(95, 99))
}
\arguments{
\item{SNPset}{optional. Either supply your data set to extract read depths from or supply depth vector.}
\item{popStruc}{the population structure. Defaults to "F2" and assumes "RIL" otherwise.}
\item{bulkSize}{non-negative integer. The number of individuals in each bulk}
\item{depth}{optional integer vector. A read depth for which to replicate SNP-index calls. If read depth is defined SNPset will be ignored.}
\item{replications}{integer. The number of bootstrap replications.}
\item{filter}{numeric. An optional minimum SNP-index filter}
\item{intervals}{numeric vector. Confidence intervals supplied as two-sided percentiles. i.e. If intervals = '95' will return the two sided 95\% confidence interval, 2.5\% on each side.}
}
\value{
Plots a deltaSNP by depth plot. Helps if the user wants to know the the delta SNP index needed to pass a certain CI at a specified depth.
}
\description{
as described in Takagi et al., (2013). Genotypes are randomly assigned for
each individual in the bulk, based on the population structure. The total
alternative allele frequency in each bulk is calculated at each depth used to simulate
delta SNP-indices, with a user-defined number of bootstrapped replications.
The requested confidence intervals are then calculated from the bootstraps.
This function plots the simulated confidence intervals by the read depth.
}
\examples{
plotSimulatedThresholds(SNPset = NULL, popStruc = "F2", bulkSize = 25, depth = 1:150, replications = 10000, filter = 0.3, intervals = c(95, 99))
}
|
#!/usr/bin/env Rscript
# Set up a GEMSTAT run (one enhancer per gene) initialised from the weighted
# linear-model results stored in the saved workspace.
library(GenomicRanges)
load("ensemble_construction_workplace.RData")
GEMSTAT_init_BOlinear(.exp.nu = 1,
                      .dir="/shared-mounts/sinhas/tabebor2/ER_Project/Model/GEMSTAT/based_on_linear",
                      .GEMSTAT_call = "/shared-mounts/sinhas/tabebor2/GEMSTAT_git/my_fork/GEMSTAT/src/seq2expr",
                      TF_names = names(TF.motifs.Shrinked.count),
                      model_evaluations = Sim_Ann_weighted_148_restart_all_models_eval,
                      # model_gene_ind = c(1,2),
                      model_parameters = Sim_Ann_weighted_148_restart_parameters, # fixed: missing comma made the call unparseable
                      nu_enh_per_gene = 1,
                      TF_KD_evaluation = Sim_Ann_weighted_148_restart_TF_KD,
                      quantile_nu = 4,
                      enhancer_GRang = ER.associated.reg.elements_gte4nzKD_atl1p1n_52_seq_chopped_scoredPerPiece_filtered_perGene$Chopped_GRanges,
                      enhancer_Seq = ER.associated.reg.elements_gte4nzKD_atl1p1n_52_seq_chopped_scoredPerPiece_filtered_perGene$Chopped_Seq,
                      real_exp_mat = my_CommonDifExpMat_16_ERassoc_gte4nzKD_atl1p1n_52,
                      TF_expression_mat = TF.Exp.Shrinked.microarray_ENTREZ_PerDataset_unique_mat_dJun_ER01,
                      motif_list = TF.motifs.Shrinked.count,
                      # NOTE(review): zero-length vectors presumably mean
                      # "use the callee's defaults" -- confirm in GEMSTAT_init_BOlinear.
                      annotation_thresh=numeric(0),
                      annotation_range=numeric(0),
                      initial_bind_w=numeric(0),
                      bind_w_range=numeric(0),
                      initial_alpha=numeric(0),
                      alpha_range=numeric(0),
                      coop_tf_mat=numeric(0),
                      initial_coop_weight=numeric(0),
                      coop_weight_range=numeric(0),
                      coop_type=character(0),
                      coop_dist=integer(0),
                      coop_orientation=integer(0),
                      .one_qbtm_per_enh=TRUE,
                      initial_qBTM=numeric(0),
                      qBTMrange=numeric(0),
                      .one_beta_per_enh=TRUE,
                      initial_pi_beta=numeric(0),
                      pi_beta_upper=numeric(0),
                      pi_beta_lower=numeric(0),
                      annotation_thresh_ff=integer(0),
                      initial_bind_w_ff=integer(0),
                      initial_alpha_ff=integer(0),
                      initial_coop_weight_ff=integer(0),
                      initial_qBTM_ff=integer(0),
                      initial_pi_beta_ff=integer(0),
                      nu_enhacners = length(unlist(ER.associated.reg.elements_gte4nzKD_atl1p1n_52_seq_chopped_scoredPerPiece_filtered_perGene$Chopped_Seq)),
                      .ensemble_mode = FALSE,
                      nu_samples = 0,
                      na = 2,
                      .job_file_name = "based_on_linear_1enh_perGene.job"
                      #,annotation_thresh_ff_ens = annotation_thresh_ff,
                      #initial_coop_weight_ff_ens = initial_coop_weight_ff,
                      # initial_qBTM_ff_ens = initial_qBTM_ff
) | /Ensemble_based_on_linear_creator.R | no_license | ShayanBordbar/ER_scripts | R | false | false | 3,134 | r | #!/usr/bin/env Rscript
# Set up a GEMSTAT run (one enhancer per gene) initialised from the weighted
# linear-model results stored in the saved workspace.
library(GenomicRanges)
load("ensemble_construction_workplace.RData")
GEMSTAT_init_BOlinear(.exp.nu = 1,
                      .dir="/shared-mounts/sinhas/tabebor2/ER_Project/Model/GEMSTAT/based_on_linear",
                      .GEMSTAT_call = "/shared-mounts/sinhas/tabebor2/GEMSTAT_git/my_fork/GEMSTAT/src/seq2expr",
                      TF_names = names(TF.motifs.Shrinked.count),
                      model_evaluations = Sim_Ann_weighted_148_restart_all_models_eval,
                      # model_gene_ind = c(1,2),
                      model_parameters = Sim_Ann_weighted_148_restart_parameters, # fixed: missing comma made the call unparseable
                      nu_enh_per_gene = 1,
                      TF_KD_evaluation = Sim_Ann_weighted_148_restart_TF_KD,
                      quantile_nu = 4,
                      enhancer_GRang = ER.associated.reg.elements_gte4nzKD_atl1p1n_52_seq_chopped_scoredPerPiece_filtered_perGene$Chopped_GRanges,
                      enhancer_Seq = ER.associated.reg.elements_gte4nzKD_atl1p1n_52_seq_chopped_scoredPerPiece_filtered_perGene$Chopped_Seq,
                      real_exp_mat = my_CommonDifExpMat_16_ERassoc_gte4nzKD_atl1p1n_52,
                      TF_expression_mat = TF.Exp.Shrinked.microarray_ENTREZ_PerDataset_unique_mat_dJun_ER01,
                      motif_list = TF.motifs.Shrinked.count,
                      # NOTE(review): zero-length vectors presumably mean
                      # "use the callee's defaults" -- confirm in GEMSTAT_init_BOlinear.
                      annotation_thresh=numeric(0),
                      annotation_range=numeric(0),
                      initial_bind_w=numeric(0),
                      bind_w_range=numeric(0),
                      initial_alpha=numeric(0),
                      alpha_range=numeric(0),
                      coop_tf_mat=numeric(0),
                      initial_coop_weight=numeric(0),
                      coop_weight_range=numeric(0),
                      coop_type=character(0),
                      coop_dist=integer(0),
                      coop_orientation=integer(0),
                      .one_qbtm_per_enh=TRUE,
                      initial_qBTM=numeric(0),
                      qBTMrange=numeric(0),
                      .one_beta_per_enh=TRUE,
                      initial_pi_beta=numeric(0),
                      pi_beta_upper=numeric(0),
                      pi_beta_lower=numeric(0),
                      annotation_thresh_ff=integer(0),
                      initial_bind_w_ff=integer(0),
                      initial_alpha_ff=integer(0),
                      initial_coop_weight_ff=integer(0),
                      initial_qBTM_ff=integer(0),
                      initial_pi_beta_ff=integer(0),
                      nu_enhacners = length(unlist(ER.associated.reg.elements_gte4nzKD_atl1p1n_52_seq_chopped_scoredPerPiece_filtered_perGene$Chopped_Seq)),
                      .ensemble_mode = FALSE,
                      nu_samples = 0,
                      na = 2,
                      .job_file_name = "based_on_linear_1enh_perGene.job"
                      #,annotation_thresh_ff_ens = annotation_thresh_ff,
                      #initial_coop_weight_ff_ens = initial_coop_weight_ff,
                      # initial_qBTM_ff_ens = initial_qBTM_ff
# initial_qBTM_ff_ens = initial_qBTM_ff
) |
\name{residuals.dppm}
\alias{residuals.dppm}
\title{
Residuals for Fitted Determinantal Point Process Model
}
\description{
Given a determinantal point process model fitted to a point pattern,
compute residuals.
}
\usage{
\method{residuals}{dppm}(object, \dots)
}
\arguments{
\item{object}{
The fitted determinantal point process model (an object of class \code{"dppm"})
for which residuals should be calculated.
}
\item{\dots}{
Arguments passed to \code{\link{residuals.ppm}}.
}
}
\value{
An object of class \code{"msr"}
representing a signed measure or vector-valued measure
(see \code{\link{msr}}). This object can be plotted.
}
\details{
This function extracts the intensity component of the model using
\code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}}
to compute the residuals.
Use \code{\link{plot.msr}} to plot the residuals directly.
}
\seealso{
\code{\link{msr}},
\code{\link{dppm}}
}
\examples{
fit <- dppm(swedishpines ~ x, dppGauss, method="c")
rr <- residuals(fit)
}
\author{
\spatstatAuthors.
}
\keyword{spatial}
\keyword{models}
\keyword{methods}
| /man/residuals.dppm.Rd | no_license | spatstat/spatstat.core | R | false | false | 1,131 | rd | \name{residuals.dppm}
\alias{residuals.dppm}
\title{
Residuals for Fitted Determinantal Point Process Model
}
\description{
Given a determinantal point process model fitted to a point pattern,
compute residuals.
}
\usage{
\method{residuals}{dppm}(object, \dots)
}
\arguments{
\item{object}{
The fitted determinantal point process model (an object of class \code{"dppm"})
for which residuals should be calculated.
}
\item{\dots}{
Arguments passed to \code{\link{residuals.ppm}}.
}
}
\value{
An object of class \code{"msr"}
representing a signed measure or vector-valued measure
(see \code{\link{msr}}). This object can be plotted.
}
\details{
This function extracts the intensity component of the model using
\code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}}
to compute the residuals.
Use \code{\link{plot.msr}} to plot the residuals directly.
}
\seealso{
\code{\link{msr}},
\code{\link{dppm}}
}
\examples{
fit <- dppm(swedishpines ~ x, dppGauss, method="c")
rr <- residuals(fit)
}
\author{
\spatstatAuthors.
}
\keyword{spatial}
\keyword{models}
\keyword{methods}
|
# Build a callback closure that prints boosting-iteration progress.
# NOTE(review): `period` is accepted but never used -- printing follows the
# hard-coded schedule below; confirm whether it should drive the cadence.
cb.print.evaluation <- function (period = 10) {
  callback <- function(env = parent.frame()) {
    # Record the wall-clock start time on the first invocation.
    if (is.null(env$time)){
      env$time = Sys.time()
    }
    i <- env$iteration
    # Always announce the very first iteration.
    if (i == env$begin_iteration){
      cat('Iteration', i ,"\n")
    }
    # Print elapsed time: every iteration for the first few after the start,
    # every 10th before iteration begin+100, every 100th thereafter, and at the end.
    # NOTE(review): scalar condition mixes elementwise `&` with `||`; fine for
    # length-1 values but `&&` would be conventional.
    if ( (i>env$begin_iteration & i<(10+env$begin_iteration)) || (i%%10 == 0 & i<(100+env$begin_iteration)) || (i%%100 == 0) || i == env$end_iteration) {
      cat('Iteration', i , ": Current boosting time =" , round(difftime(Sys.time(),env$time , units="mins"),2) ,"minutes\n")
    }
  }
  # Tag the closure with its call and name, as the boosting framework expects.
  attr(callback, 'call') <- match.call()
  attr(callback, 'name') <- 'cb.print.evaluation'
  callback
} | /R/cb.print.evaluation.r | no_license | cran/twangRDC | R | false | false | 679 | r | cb.print.evaluation <- function (period = 10) {
  # Callback closure that prints boosting-iteration progress.
  callback <- function(env = parent.frame()) {
    # Record the wall-clock start time on the first invocation.
    if (is.null(env$time)){
      env$time = Sys.time()
    }
    i <- env$iteration
    # Always announce the very first iteration.
    if (i == env$begin_iteration){
      cat('Iteration', i ,"\n")
    }
    # Print elapsed time: every iteration for the first few after the start,
    # every 10th before iteration begin+100, every 100th thereafter, and at the end.
    if ( (i>env$begin_iteration & i<(10+env$begin_iteration)) || (i%%10 == 0 & i<(100+env$begin_iteration)) || (i%%100 == 0) || i == env$end_iteration) {
      cat('Iteration', i , ": Current boosting time =" , round(difftime(Sys.time(),env$time , units="mins"),2) ,"minutes\n")
    }
  }
  # Tag the closure with its call and name, as the boosting framework expects.
  attr(callback, 'call') <- match.call()
  attr(callback, 'name') <- 'cb.print.evaluation'
  callback
} |
library(purrr) #Elbow method
library(cluster) #Silhouette method
library(gridExtra) #Silhouette method
library(grid) #Silhouette method
library(NbClust) #Silhouette method
library(factoextra) #Silhouette method
data <- read.csv("Mall_Customers.csv")
#Determine optimal number of clusters
#Intra-cluster Sum of Squares (Elbow Method)
# Total intra-cluster sum of squares for a k-means partition of the customer
# features into k clusters. Used to draw the elbow curve below.
# Reads columns 3-5 of the global `data` -- assumes age/income/spending
# score per the CSV layout; TODO confirm.
iss <- function(k) {
  fit <- kmeans(data[, 3:5], centers = k, iter.max = 100, nstart = 100, algorithm = "Lloyd")
  fit$tot.withinss
}
kValues <- 1:10
issValues <- map_dbl(kValues, iss)
plot(kValues, issValues, type = "b", xlab = "Number of Clusters (K)", ylab = "Total Intra-Clusters")
#4-6 clusters
#Silhouette Method
# Fit k-means (Lloyd, 50 restarts, up to 100 iterations) for every candidate
# cluster count 2..10 so silhouette widths can be compared below.
k2 <- kmeans(data[,3:5], 2, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k3 <- kmeans(data[,3:5], 3, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k4 <- kmeans(data[,3:5], 4, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k5 <- kmeans(data[,3:5], 5, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k6 <- kmeans(data[,3:5], 6, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k7 <- kmeans(data[,3:5], 7, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k8 <- kmeans(data[,3:5], 8, iter.max = 100, nstart = 50, algorithm = "Lloyd")  # fixed: was 85 centers, a typo for 8
k9 <- kmeans(data[,3:5], 9, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k10 <- kmeans(data[,3:5], 10, iter.max = 100, nstart = 50, algorithm = "Lloyd")
#6
par(mfrow = c(3, 3))
plot(silhouette(k2$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k3$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k4$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k5$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k6$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k7$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k8$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k9$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k10$cluster, dist(data[,3:5], "euclidean")))
fviz_nbclust(data[,3:5], kmeans, method = "silhouette")
#6
#Gap Statistic Method
gap <- clusGap(data[, 3:5], FUN = kmeans, nstart = 25, K.max = 10, B = 50)
fviz_gap_stat(gap)
#6
clusterSummary <- prcomp(data[ ,3:5])
summary(clusterSummary)
ggplot(data, aes(x = Annual.Income..k.., y = Spending.Score..1.100.)) + geom_point(stat = "identity", aes(color = as.factor(k6$cluster))) + scale_color_discrete(name = " ", breaks = c(1:6), labels = c("C1", "C2", "C3", "C4", "C5", "C6")) + ggtitle("Segments of Mall Customers", subtitle = "K-means Clustering")
| /customerSegmentation.R | no_license | msyphus/customer-segmentation | R | false | false | 2,462 | r | library(purrr) #Elbow method
library(cluster) #Silhouette method
library(gridExtra) #Silhouette method
library(grid) #Silhouette method
library(NbClust) #Silhouette method
library(factoextra) #Silhouette method
data <- read.csv("Mall_Customers.csv")
#Determine optimal number of clusters
#Intra-cluster Sum of Squares (Elbow Method)
# Total intra-cluster sum of squares for a k-means partition of the customer
# features into k clusters. Used to draw the elbow curve below.
# Reads columns 3-5 of the global `data` -- assumes age/income/spending
# score per the CSV layout; TODO confirm.
iss <- function(k) {
  fit <- kmeans(data[, 3:5], centers = k, iter.max = 100, nstart = 100, algorithm = "Lloyd")
  fit$tot.withinss
}
kValues <- 1:10
issValues <- map_dbl(kValues, iss)
plot(kValues, issValues, type = "b", xlab = "Number of Clusters (K)", ylab = "Total Intra-Clusters")
#4-6 clusters
#Silhouette Method
# Fit k-means (Lloyd, 50 restarts, up to 100 iterations) for every candidate
# cluster count 2..10 so silhouette widths can be compared below.
k2 <- kmeans(data[,3:5], 2, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k3 <- kmeans(data[,3:5], 3, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k4 <- kmeans(data[,3:5], 4, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k5 <- kmeans(data[,3:5], 5, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k6 <- kmeans(data[,3:5], 6, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k7 <- kmeans(data[,3:5], 7, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k8 <- kmeans(data[,3:5], 8, iter.max = 100, nstart = 50, algorithm = "Lloyd")  # fixed: was 85 centers, a typo for 8
k9 <- kmeans(data[,3:5], 9, iter.max = 100, nstart = 50, algorithm = "Lloyd")
k10 <- kmeans(data[,3:5], 10, iter.max = 100, nstart = 50, algorithm = "Lloyd")
#6
par(mfrow = c(3, 3))
plot(silhouette(k2$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k3$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k4$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k5$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k6$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k7$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k8$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k9$cluster, dist(data[,3:5], "euclidean")))
plot(silhouette(k10$cluster, dist(data[,3:5], "euclidean")))
fviz_nbclust(data[,3:5], kmeans, method = "silhouette")
#6
#Gap Statistic Method
gap <- clusGap(data[, 3:5], FUN = kmeans, nstart = 25, K.max = 10, B = 50)
fviz_gap_stat(gap)
#6
clusterSummary <- prcomp(data[ ,3:5])
summary(clusterSummary)
ggplot(data, aes(x = Annual.Income..k.., y = Spending.Score..1.100.)) + geom_point(stat = "identity", aes(color = as.factor(k6$cluster))) + scale_color_discrete(name = " ", breaks = c(1:6), labels = c("C1", "C2", "C3", "C4", "C5", "C6")) + ggtitle("Segments of Mall Customers", subtitle = "K-means Clustering")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write.mzXML.R
\name{write.mzXML}
\alias{write.mzXML}
\title{Write a standard mzXML.}
\usage{
write.mzXML(mzXML, filename, precision = c("32", "64"))
}
\arguments{
\item{mzXML}{The generic mzXML object}
\item{filename}{A mzXML filename to export to.}
\item{precision}{Either '32' or '64' byte.}
}
\value{
Nothing. mzXML object is exported to filename. If it was imported using \code{\link{read.mzXML}} it should contain all previous fields.
}
\description{
\code{write.mzXML} is a copy from the caMassClass package which is no longer actively on CRAN
}
\details{
The main task of \code{\link{read.mzXML}} and write.mzXML functions is to extract and save scan data of mzXML files.
In addition attempt is made to keep all other sections of mzXML file as unparsed XML code, so the data can be extracted latter or saved into new mzXML files.
Those unparsed sections are stored as XML text.
}
| /man/write.mzXML.Rd | no_license | cran/CorrectOverloadedPeaks | R | false | true | 994 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write.mzXML.R
\name{write.mzXML}
\alias{write.mzXML}
\title{Write a standard mzXML.}
\usage{
write.mzXML(mzXML, filename, precision = c("32", "64"))
}
\arguments{
\item{mzXML}{The generic mzXML object}
\item{filename}{A mzXML filename to export to.}
\item{precision}{Either '32' or '64' byte.}
}
\value{
Nothing. mzXML object is exported to filename. If it was imported using \code{\link{read.mzXML}} it should contain all previous fields.
}
\description{
\code{write.mzXML} is a copy from the caMassClass package which is no longer actively on CRAN
}
\details{
The main task of \code{\link{read.mzXML}} and write.mzXML functions is to extract and save scan data of mzXML files.
In addition attempt is made to keep all other sections of mzXML file as unparsed XML code, so the data can be extracted latter or saved into new mzXML files.
Those unparsed sections are stored as XML text.
}
|
# Quoted operator symbols, used when building model formulas programmatically.
plus <- as.symbol("+")
colon <- as.symbol(":")
star <- as.symbol("*")
tilde <- as.symbol("~")
# Score-type variance-component tests; names record the test family.
varCompScoreTests <- c(score = "LinScore", score = "VM03", score = "SS95")
# All supported variance-component tests.
varCompTests <- c(varCompScoreTests, score = "HP01", RLRT = "RLRT") #,'Wald', 'RWD88')
# varCompTestTypes = structure(names(varCompTests), names=varCompTests)
# Supported information-matrix approximations.
informationTypes <- c("AOI", "WAI", "AEI", "OI", "EI")
# Weighting schemes available to the LinScore test.
LinScoreWeightingMethods <- c("EqWt", "InvSTD", "InvSqrtV", "MinVar")
# varCompSpecial=c('ibs','lin0','quad1','intxn2','am')
| /varComp/R/varComp-const.R | no_license | ingted/R-Examples | R | false | false | 492 | r | plus = as.symbol('+')
# Quoted operator symbols, used when building model formulas programmatically.
colon <- as.symbol(":")
star <- as.symbol("*")
tilde <- as.symbol("~")
# Score-type variance-component tests; names record the test family.
varCompScoreTests <- c(score = "LinScore", score = "VM03", score = "SS95")
# All supported variance-component tests.
varCompTests <- c(varCompScoreTests, score = "HP01", RLRT = "RLRT") #,'Wald', 'RWD88')
# varCompTestTypes = structure(names(varCompTests), names=varCompTests)
# Supported information-matrix approximations.
informationTypes <- c("AOI", "WAI", "AEI", "OI", "EI")
# Weighting schemes available to the LinScore test.
LinScoreWeightingMethods <- c("EqWt", "InvSTD", "InvSqrtV", "MinVar")
# varCompSpecial=c('ibs','lin0','quad1','intxn2','am')
|
#Function for creating tokenized DTMs from a corpus using a dictionary
# Build a dictionary-restricted document-term matrix, tokenizing once per
# n-gram size in `ng`, then concatenate the per-size results into a single
# one-row matrix of term weights named by the dictionary terms.
tokenize_corpus <- function(ng, corpus, weighting, dictionary) {
# Word count of each dictionary entry: number of spaces + 1.
dict_wc <- nchar(dictionary) - nchar(gsub(" ",'',dictionary)) + 1
dtm_list <- list()
colname_list <- list()
# NOTE(review): 1:length(ng) misbehaves for empty ng; seq_along(ng) is safer.
for (i in 1:length(ng)) {
# n-gram tokenizer for the current size ng[i] (tm's ngrams/words helpers).
tokenizer <- function(x) {unlist(lapply(ngrams(words(x), ng[i]), paste, collapse = " "), use.names = FALSE)}
dtm <- as.matrix(DocumentTermMatrix(corpus,
control = list(tokenize = tokenizer, weighting = weighting,
dictionary = dictionary)))
# Keep only the dictionary terms whose word count matches this n-gram size.
colnames <- colnames(dtm)[dict_wc == ng[i]]
# NOTE(review): linear indexing of the docs-x-terms matrix only matches the
# column selection above when the corpus has exactly one document -- confirm
# callers always pass a single-document corpus; otherwise use
# dtm[, dict_wc == ng[i], drop = FALSE].
dtm <- t(as.matrix(dtm[dict_wc == ng[i]]))
colname_list[[i]] <- colnames
dtm_list[[i]] <- dtm
}
# Flatten the per-size pieces into one vector, restore the 1-row matrix
# shape, and reattach the term names.
dtm <- unlist(dtm_list)
colnames <- unlist(colname_list)
dtm <- t(dtm)
colnames(dtm) <- colnames
return(dtm)
} | /datascience-master/functions/tokenize_corpus.R | no_license | goughgorski/clusteryourself | R | false | false | 875 | r | #Function for creating tokenized DTMs from a corpus using a dictionary
# Build a dictionary-restricted document-term matrix, tokenizing once per
# n-gram size in `ng`, then concatenate the per-size results into a single
# one-row matrix of term weights named by the dictionary terms.
tokenize_corpus <- function(ng, corpus, weighting, dictionary) {
# Word count of each dictionary entry: number of spaces + 1.
dict_wc <- nchar(dictionary) - nchar(gsub(" ",'',dictionary)) + 1
dtm_list <- list()
colname_list <- list()
# NOTE(review): 1:length(ng) misbehaves for empty ng; seq_along(ng) is safer.
for (i in 1:length(ng)) {
# n-gram tokenizer for the current size ng[i] (tm's ngrams/words helpers).
tokenizer <- function(x) {unlist(lapply(ngrams(words(x), ng[i]), paste, collapse = " "), use.names = FALSE)}
dtm <- as.matrix(DocumentTermMatrix(corpus,
control = list(tokenize = tokenizer, weighting = weighting,
dictionary = dictionary)))
# Keep only the dictionary terms whose word count matches this n-gram size.
colnames <- colnames(dtm)[dict_wc == ng[i]]
# NOTE(review): linear indexing of the docs-x-terms matrix only matches the
# column selection above when the corpus has exactly one document -- confirm
# callers always pass a single-document corpus; otherwise use
# dtm[, dict_wc == ng[i], drop = FALSE].
dtm <- t(as.matrix(dtm[dict_wc == ng[i]]))
colname_list[[i]] <- colnames
dtm_list[[i]] <- dtm
}
# Flatten the per-size pieces into one vector, restore the 1-row matrix
# shape, and reattach the term names.
dtm <- unlist(dtm_list)
colnames <- unlist(colname_list)
dtm <- t(dtm)
colnames(dtm) <- colnames
return(dtm)
}
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.09756868649237e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615833216-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,049 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.09756868649237e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Initialize the compiled HTDP code: forwards to the registered native
# routine 'htdp_htdpinit' in the htdp shared library; result is returned
# invisibly (generated Rcpp wrapper -- do not edit by hand).
.htdpinit <- function() {
invisible(.Call('htdp_htdpinit', PACKAGE = 'htdp'))
}
# Forward xy, t0, t1 and iopt unchanged to the registered native routine
# 'htdp_displace' and return its result. No validation happens here
# (generated Rcpp wrapper). Presumably xy holds coordinates and t0/t1 are
# epochs -- confirm against the C++ signature.
displace <- function(xy, t0, t1, iopt) {
.Call('htdp_displace', PACKAGE = 'htdp', xy, t0, t1, iopt)
}
| /R/RcppExports.R | no_license | cran/htdp | R | false | false | 313 | r | # This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Initialize the compiled HTDP code: forwards to the registered native
# routine 'htdp_htdpinit' in the htdp shared library; result is returned
# invisibly (generated Rcpp wrapper -- do not edit by hand).
.htdpinit <- function() {
invisible(.Call('htdp_htdpinit', PACKAGE = 'htdp'))
}
# Forward xy, t0, t1 and iopt unchanged to the registered native routine
# 'htdp_displace' and return its result. No validation happens here
# (generated Rcpp wrapper). Presumably xy holds coordinates and t0/t1 are
# epochs -- confirm against the C++ signature.
displace <- function(xy, t0, t1, iopt) {
.Call('htdp_displace', PACKAGE = 'htdp', xy, t0, t1, iopt)
}
|
#-------------------------------------------------------------------------------
# Initialization
# NOTE(review): wiping the global environment from inside a script is an
# anti-pattern (it destroys the caller's workspace); kept for compatibility
# with the original workflow.
rm(list = ls(all = TRUE), envir = .GlobalEnv)
# library() fails fast when ProjectTemplate is missing; require() only
# returned FALSE and let the script die later on load.project().
library(ProjectTemplate)
load.project()
# Explicit namespace: enableJIT lives in the base 'compiler' package, which
# is not attached by default.
compiler::enableJIT(3)
#-------------------------------------------------------------------------------
# Configuration
input.file <- 'cache/murally/index.html'
output.file <- 'data/notes.csv'
#-------------------------------------------------------------------------------
# Begin: parse the cached Mural.ly export into the notes CSV
Murally.parse(
  input.file,
  output.file
)
| /src/parse_murally.R | permissive | adrianwebb/r-classifier | R | false | false | 511 | r |
#-------------------------------------------------------------------------------
# Initialization
rm(list = ls(all = TRUE), envir = .GlobalEnv)
require('ProjectTemplate')
load.project()
enableJIT(3)
#-------------------------------------------------------------------------------
# Configuration
input.file <- 'cache/murally/index.html'
output.file <- 'data/notes.csv'
#-------------------------------------------------------------------------------
# Begin
Murally.parse(
input.file,
output.file
)
|
################################################################################
### General utils
#' @name convertToSf
#' @title convertToSf
#' @description convert spdf & df to sf
#' @param spdf spdf
#' @param df df
#' @param spdfid spdfid
#' @param dfid dfid
#' @return an sf object
#' @noRd
convertToSf <- function(spdf, df, spdfid, dfid){
  # Optional attribute join: when a data frame is supplied, left-join it onto
  # the Spatial*DataFrame's attribute table before converting to sf.
  if (!missing(df)){
    # missing IDs: default to the first column of each table
    if (is.null(spdfid)){spdfid <- names(spdf@data)[1]}
    if (is.null(dfid)){dfid <- names(df)[1]}
    # Join (only on df data), work with tibbls: row-match df to the spdf IDs;
    # the first column keeps the spdf IDs, the rest are the matched df rows
    spdf@data <- data.frame(spdf@data[,spdfid],
                            data.frame(df[match(spdf@data[,spdfid],
                                                df[[dfid]]),]))
    # drop features with no match in df
    # NOTE(review): this tests NA in the dfid column specifically -- assumes
    # dfid names a column present after the join; confirm for all inputs.
    spdf <- spdf[!is.na(spdf@data[,dfid]),]
  }
  # convert the sp object to sf
  x <- sf::st_as_sf(spdf)
  return(x)
}
################################################################################
### choro utils
#' @name choro
#' @title choro
#' @description add color gradients to spdf according to data classes
#' @param var vector of values used
#' @param distr vector of classes
#' @param col vector of colors
#' @param nclass number of classes targeted (if null, the Huntsberger method is used)
#' @param method classification method ("sd", "equal", "quantile", "fisher-jenks","q6","geom")
#' @return List: a vector of colors, colors and distr
#' @noRd
choro <- function(var, distr = NULL, col = NULL,
                  nclass = NULL, method = "quantile")
{
  # Either discretize `var` ourselves, or map values onto user-given breaks.
  if (is.null(distr)) {
    distr <- getBreaks(v = var, nclass = nclass, method = method)
    idx <- findInterval(var, distr, all.inside = TRUE)
  } else {
    idx <- findInterval(var, distr, all.inside = FALSE,
                        rightmost.closed = TRUE)
    # findInterval() returns 0 left of the first break; remap those to the
    # last interval index so col[0] is never requested
    idx[idx == 0] <- length(distr)
  }
  # Default palette: one blue per class.
  if (is.null(col)) {
    col <- carto.pal(pal1 = "blue.pal", n1 = (length(distr) - 1))
  }
  return(list(colMap = col[idx], distr = distr, col = col))
}
################################################################################
### typo utils
#' @name checkCol
#' @title checkCol
#' @description check if col length matches modalities length, if no color is
#' provided add default colors
#' @param col vector of colors
#' @param mod vector of modalities
#' @return a vector of colors.
#' @noRd
checkCol <- function(col, mod){
  n_mod <- length(mod)
  if (is.null(col)) {
    # No colours supplied: build a default pastel palette; beyond 20
    # modalities the 20-colour palette is recycled by random sampling.
    if (n_mod <= 20) {
      return(carto.pal(pal1 = "pastel.pal", n1 = n_mod))
    }
    base_pal <- carto.pal(pal1 = "pastel.pal", 20)
    return(sample(x = base_pal, size = n_mod, replace = TRUE))
  }
  # Colours supplied: there must be at least one per modality.
  if (length(col) < n_mod) {
    stop(paste0("'col' length (", length(col),
                ") must fit the number of modalities of the variable (",
                n_mod, ")."),
         call. = FALSE)
  }
  return(col)
}
#' @name checkOrder
#' @title checkOrder
#' @description check if col order match legend.values.order
#' @param legend.values.order legend.values.order
#' @param mod vector of modalities
#' @return a vector of legend.values.order.
#' @noRd
# Validate/normalise the requested legend ordering against the modalities:
# NULL means "keep the natural modality order"; otherwise the ordering must
# contain exactly the modalities of the variable.
checkOrder <- function(legend.values.order, mod){
  if (is.null(legend.values.order)) {
    return(mod)
  }
  m <- match(mod, legend.values.order)
  m <- m[!is.na(m)]
  # `||` (scalar, short-circuiting) instead of vectorized `|`: this is a
  # scalar if() condition.
  if (length(m) != length(mod) || length(mod) != length(legend.values.order)) {
    stop(paste0("'legend.values.order' modalities must fit the modalities of the variable (",
                paste(mod, collapse = ","), ")."),
         call. = FALSE)
  }
  return(legend.values.order)
}
################################################################################
### prop symbols utils
#' @name checkMergeOrder
#' @title checkMergeOrder
#' @description clean, sorted sf object with centroid coordinates from an
#' sf object
#' @param x x
#' @param var var
#' @return an sorted and cleaned sf object with centroid coordinates.
#' @noRd
checkMergeOrder <- function(x = x, var = var){
  # get centroid coords: prepend X/Y centroid columns to the table;
  # of_largest_polygon is enabled when any feature is a MULTIPOLYGON
  x <- cbind(sf::st_coordinates(
    sf::st_centroid(x = sf::st_geometry(x), of_largest_polygon = max(sf::st_is(sf::st_as_sf(x), "MULTIPOLYGON")))), x)
  # remove NAs and 0 values (zero values cannot be sized into a symbol)
  x <- x[!is.na(x = x[[var]]),]
  x <- x[x[[var]]!=0, ]
  # turn to positive values
  x[[var]] <- abs(x[[var]])
  # Order the dots largest-first -- presumably so smaller symbols end up
  # drawn on top of larger ones; confirm in the plotting callers
  x <- x[order(abs(x[[var]]), decreasing = TRUE),]
  return(x)
}
#' @name sizer
#' @title sizer
#' @description get a vector of radii
#' @param dots dots
#' @param inches inches
#' @param var var
#' @param fixmax fixmax
#' @param symbols symbols
#' @return a vector of radii
#' @noRd
sizer <- function(dots, inches, var, fixmax, symbols){
  # Scale each value proportionally to symbol surface (circle/square) or
  # height (bar), anchoring the maximum size `inches` at the value `fixmax`.
  vals <- abs(dots[[var]])
  if (symbols == "circle") {
    smax <- inches * inches * pi
    size <- sqrt((vals * smax / fixmax) / pi)
  } else if (symbols == "square") {
    smax <- inches * inches
    size <- sqrt(vals * smax / fixmax)
  } else if (symbols == "bar") {
    smax <- inches
    size <- vals * smax / fixmax
  }
  return(size)
}
################################################################################
### legend utils
# Resolve a legend position keyword (or explicit c(x, y) pair) into reference
# coordinates within the plotting frame [x1, x2] x [y1, y2].
# delta1/delta2 are margins, legend_xsize/legend_ysize the legend extent.
legpos <- function(pos, x1, x2, y1, y2, delta1, delta2,
                   legend_xsize, legend_ysize){
  # Explicit coordinates: pos = c(x, y) is used as-is.
  if (length(pos) == 2) {
    return(list(xref = pos[1], yref = pos[2]))
  }
  # Anchor coordinates shared by several keywords.
  left_x   <- x1 + delta1
  right_x  <- x2 - 2 * delta1 - legend_xsize
  center_x <- (x1 + x2) / 2 - legend_xsize / 2
  bottom_y <- y1 + delta1
  top_y    <- y2 - 2 * delta1 - legend_ysize
  middle_y <- (y1 + y2) / 2 - legend_ysize / 2 - delta2
  xy <- switch(pos,
               bottomleft = c(left_x, bottom_y),
               # extra vertical room for a two-line annotation under the legend
               bottomleftextra = c(left_x, bottom_y +
                                     graphics::strheight(s = "hp\nhp", cex = 0.6, font = 3)),
               topleft = c(left_x, top_y),
               topright = c(right_x, top_y),
               bottomright = c(right_x, bottom_y),
               left = c(left_x, middle_y),
               right = c(right_x, middle_y),
               top = c(center_x, top_y),
               bottom = c(center_x, bottom_y),
               center = c(center_x, middle_y),
               # previously an unknown keyword failed with the obscure
               # "object 'xref' not found"; fail with a clear message instead
               stop("invalid 'pos' value", call. = FALSE))
  return(list(xref = xy[1], yref = xy[2]))
}
################################################################################
### labelLayer utils
# Rcpp stuff
#' @useDynLib cartography, .registration = TRUE
#' @importFrom Rcpp evalCpp
NULL
# Label placement
#' @name wordlayout
#' @title wordlayout
#' @description wordlayout
#' @param x long
#' @param y lat
#' @param words labels
#' @param cex cex
#' @param xlim xlim
#' @param ylim ylim
#' @param tstep tstep
#' @param rstep rstep
#' @param ... other stuf
#' @return coords
#' @noRd
# Greedy spiral label placement: for each label, start from its anchor and
# walk outwards on a spiral until a bounding box is found that overlaps no
# previously placed box and stays inside xlim/ylim. Needs an open graphics
# device (strwidth/strheight). Returns one box per word: x, y (lower-left
# corner), width, ht.
wordlayout <- function(x, y, words, cex=1,
                       xlim=c(-Inf,Inf), ylim=c(-Inf,Inf),
                       tstep=.1, rstep=.1, ...){
  tails <- "g|j|p|q|y"
  n <- length(words)
  # spiral step sizes are scaled by the spread of the data; guard the
  # degenerate zero-spread case
  sdx <- sd(x,na.rm=TRUE)
  sdy <- sd(y,na.rm=TRUE)
  if(sdx==0)
    sdx <- 1
  if(sdy==0)
    sdy <- 1
  if(length(cex)==1)
    cex <- rep(cex,n)
  # NOTE(review): fixing the seed makes placement deterministic but clobbers
  # the caller's RNG state as a side effect.
  set.seed(999)
  boxes <- list()
  # NOTE(review): 1:length(words) misbehaves for empty input; seq_along() is safer.
  for(i in 1:length(words)){
    r <-0
    theta <- runif(1,0,2*pi)
    x1 <- xo <- x[i]
    y1 <- yo <- y[i]
    # box size = rendered text extent plus 40% of one "R" glyph of padding
    wid <- strwidth(words[i],cex=cex[i],...) + 0.4 *
      strwidth("R", cex=cex[i], ...)
    ht <- strheight(words[i],cex=cex[i],...) + 0.4 *
      strheight("R", cex=cex[i], ...)
    #mind your ps and qs: descenders get 20% extra height
    if(grepl(tails,words[i]))
      ht <- ht + ht*.2
    isOverlaped <- TRUE
    while(isOverlaped){
      # is_overlap() is presumably the compiled helper registered via
      # useDynLib (defined elsewhere in the package) -- confirm
      if(!is_overlap(x1-.5*wid,y1-.5*ht,wid,ht,boxes) &&
         x1-.5*wid>xlim[1] && y1-.5*ht>ylim[1] &&
         x1+.5*wid<xlim[2] && y1+.5*ht<ylim[2]){
        boxes[[length(boxes)+1]] <- c(x1-.5*wid,y1-.5*ht,wid,ht)
        isOverlaped <- FALSE
      }else{
        # advance along the spiral and try again
        theta <- theta+tstep
        r <- r + rstep*tstep/(2*pi)
        x1 <- xo+sdx*r*cos(theta)
        y1 <- yo+sdy*r*sin(theta)
      }
    }
  }
  result <- do.call(rbind,boxes)
  colnames(result) <- c("x","y","width","ht")
  rownames(result) <- words
  result
}
# shadow around the labels
#' @name shadowtext
#' @title shadowtext
#' @description shadowtext
#' @param x lon
#' @param y lat
#' @param labels labels
#' @param col col
#' @param bg bg
#' @param theta number of iteration
#' @param r radius
#' @param ...
#' @noRd
shadowtext <- function(x, y=NULL, labels, col='white', bg='black',
                       theta= seq(0, 2*pi, length.out=50), r=0.1, ... ) {
  # Halo offsets, expressed as a fraction `r` of one 'A' character cell.
  off_x <- r * strwidth('A')
  off_y <- r * strheight('A')
  # Paint the background copies around a ring of angles first...
  for (ang in theta) {
    text(x + cos(ang) * off_x, y + sin(ang) * off_y, labels, col = bg, ...)
  }
  # ...then the foreground label on top.
  text(x, y, labels, col = col, ...)
}
# import stuffs
#' @importFrom graphics image legend lines par plot.new
#' plot.window points polygon rect segments
#' strheight strwidth symbols text title
#' xinch yinch plot
#' @importFrom stats aggregate na.omit quantile runif sd median
#' @importFrom rgeos createSPComment
NULL
| /R/utils.R | no_license | arthurgailes/cartography | R | false | false | 9,347 | r | ################################################################################
### General utils
#' @name convertToSf
#' @title convertToSf
#' @description convert spdf & df to sf
#' @param spdf spdf
#' @param df df
#' @param spdfid spdfid
#' @param dfid dfid
#' @return an sf object
#' @noRd
convertToSf <- function(spdf, df, spdfid, dfid){
  # Optional attribute join: when a data frame is supplied, left-join it onto
  # the Spatial*DataFrame's attribute table before converting to sf.
  if (!missing(df)){
    # missing IDs: default to the first column of each table
    if (is.null(spdfid)){spdfid <- names(spdf@data)[1]}
    if (is.null(dfid)){dfid <- names(df)[1]}
    # Join (only on df data), work with tibbls: row-match df to the spdf IDs;
    # the first column keeps the spdf IDs, the rest are the matched df rows
    spdf@data <- data.frame(spdf@data[,spdfid],
                            data.frame(df[match(spdf@data[,spdfid],
                                                df[[dfid]]),]))
    # drop features with no match in df
    # NOTE(review): this tests NA in the dfid column specifically -- assumes
    # dfid names a column present after the join; confirm for all inputs.
    spdf <- spdf[!is.na(spdf@data[,dfid]),]
  }
  # convert the sp object to sf
  x <- sf::st_as_sf(spdf)
  return(x)
}
################################################################################
### choro utils
#' @name choro
#' @title choro
#' @description add color gradients to spdf according to data classes
#' @param var vector of values used
#' @param distr vector of classes
#' @param col vector of colors
#' @param nclass number of classes targeted (if null, the Huntsberger method is used)
#' @param method classification method ("sd", "equal", "quantile", "fisher-jenks","q6","geom")
#' @return List: a vector of colors, colors and distr
#' @noRd
choro <- function(var, distr = NULL, col = NULL,
                  nclass = NULL, method = "quantile")
{
  # Either discretize `var` ourselves, or map values onto user-given breaks.
  if (is.null(distr)) {
    distr <- getBreaks(v = var, nclass = nclass, method = method)
    idx <- findInterval(var, distr, all.inside = TRUE)
  } else {
    idx <- findInterval(var, distr, all.inside = FALSE,
                        rightmost.closed = TRUE)
    # findInterval() returns 0 left of the first break; remap those to the
    # last interval index so col[0] is never requested
    idx[idx == 0] <- length(distr)
  }
  # Default palette: one blue per class.
  if (is.null(col)) {
    col <- carto.pal(pal1 = "blue.pal", n1 = (length(distr) - 1))
  }
  return(list(colMap = col[idx], distr = distr, col = col))
}
################################################################################
### typo utils
#' @name checkCol
#' @title checkCol
#' @description check if col length matches modalities length, if no color is
#' provided add default colors
#' @param col vector of colors
#' @param mod vector of modalities
#' @return a vector of colors.
#' @noRd
checkCol <- function(col, mod){
  n_mod <- length(mod)
  if (is.null(col)) {
    # No colours supplied: build a default pastel palette; beyond 20
    # modalities the 20-colour palette is recycled by random sampling.
    if (n_mod <= 20) {
      return(carto.pal(pal1 = "pastel.pal", n1 = n_mod))
    }
    base_pal <- carto.pal(pal1 = "pastel.pal", 20)
    return(sample(x = base_pal, size = n_mod, replace = TRUE))
  }
  # Colours supplied: there must be at least one per modality.
  if (length(col) < n_mod) {
    stop(paste0("'col' length (", length(col),
                ") must fit the number of modalities of the variable (",
                n_mod, ")."),
         call. = FALSE)
  }
  return(col)
}
#' @name checkOrder
#' @title checkOrder
#' @description check if col order match legend.values.order
#' @param legend.values.order legend.values.order
#' @param mod vector of modalities
#' @return a vector of legend.values.order.
#' @noRd
# Validate/normalise the requested legend ordering against the modalities:
# NULL means "keep the natural modality order"; otherwise the ordering must
# contain exactly the modalities of the variable.
checkOrder <- function(legend.values.order, mod){
  if (is.null(legend.values.order)) {
    return(mod)
  }
  m <- match(mod, legend.values.order)
  m <- m[!is.na(m)]
  # `||` (scalar, short-circuiting) instead of vectorized `|`: this is a
  # scalar if() condition.
  if (length(m) != length(mod) || length(mod) != length(legend.values.order)) {
    stop(paste0("'legend.values.order' modalities must fit the modalities of the variable (",
                paste(mod, collapse = ","), ")."),
         call. = FALSE)
  }
  return(legend.values.order)
}
################################################################################
### prop symbols utils
#' @name checkMergeOrder
#' @title checkMergeOrder
#' @description clean, sorted sf object with centroid coordinates from an
#' sf object
#' @param x x
#' @param var var
#' @return an sorted and cleaned sf object with centroid coordinates.
#' @noRd
checkMergeOrder <- function(x = x, var = var){
  # get centroid coords: prepend X/Y centroid columns to the table;
  # of_largest_polygon is enabled when any feature is a MULTIPOLYGON
  x <- cbind(sf::st_coordinates(
    sf::st_centroid(x = sf::st_geometry(x), of_largest_polygon = max(sf::st_is(sf::st_as_sf(x), "MULTIPOLYGON")))), x)
  # remove NAs and 0 values (zero values cannot be sized into a symbol)
  x <- x[!is.na(x = x[[var]]),]
  x <- x[x[[var]]!=0, ]
  # turn to positive values
  x[[var]] <- abs(x[[var]])
  # Order the dots largest-first -- presumably so smaller symbols end up
  # drawn on top of larger ones; confirm in the plotting callers
  x <- x[order(abs(x[[var]]), decreasing = TRUE),]
  return(x)
}
#' @name sizer
#' @title sizer
#' @description get a vector of radii
#' @param dots dots
#' @param inches inches
#' @param var var
#' @param fixmax fixmax
#' @param symbols symbols
#' @return a vector of radii
#' @noRd
sizer <- function(dots, inches, var, fixmax, symbols){
  # Scale each value proportionally to symbol surface (circle/square) or
  # height (bar), anchoring the maximum size `inches` at the value `fixmax`.
  vals <- abs(dots[[var]])
  if (symbols == "circle") {
    smax <- inches * inches * pi
    size <- sqrt((vals * smax / fixmax) / pi)
  } else if (symbols == "square") {
    smax <- inches * inches
    size <- sqrt(vals * smax / fixmax)
  } else if (symbols == "bar") {
    smax <- inches
    size <- vals * smax / fixmax
  }
  return(size)
}
################################################################################
### legend utils
# Resolve a legend position keyword (or explicit c(x, y) pair) into reference
# coordinates within the plotting frame [x1, x2] x [y1, y2].
# delta1/delta2 are margins, legend_xsize/legend_ysize the legend extent.
legpos <- function(pos, x1, x2, y1, y2, delta1, delta2,
                   legend_xsize, legend_ysize){
  # Explicit coordinates: pos = c(x, y) is used as-is.
  if (length(pos) == 2) {
    return(list(xref = pos[1], yref = pos[2]))
  }
  # Anchor coordinates shared by several keywords.
  left_x   <- x1 + delta1
  right_x  <- x2 - 2 * delta1 - legend_xsize
  center_x <- (x1 + x2) / 2 - legend_xsize / 2
  bottom_y <- y1 + delta1
  top_y    <- y2 - 2 * delta1 - legend_ysize
  middle_y <- (y1 + y2) / 2 - legend_ysize / 2 - delta2
  xy <- switch(pos,
               bottomleft = c(left_x, bottom_y),
               # extra vertical room for a two-line annotation under the legend
               bottomleftextra = c(left_x, bottom_y +
                                     graphics::strheight(s = "hp\nhp", cex = 0.6, font = 3)),
               topleft = c(left_x, top_y),
               topright = c(right_x, top_y),
               bottomright = c(right_x, bottom_y),
               left = c(left_x, middle_y),
               right = c(right_x, middle_y),
               top = c(center_x, top_y),
               bottom = c(center_x, bottom_y),
               center = c(center_x, middle_y),
               # previously an unknown keyword failed with the obscure
               # "object 'xref' not found"; fail with a clear message instead
               stop("invalid 'pos' value", call. = FALSE))
  return(list(xref = xy[1], yref = xy[2]))
}
################################################################################
### labelLayer utils
# Rcpp stuff
#' @useDynLib cartography, .registration = TRUE
#' @importFrom Rcpp evalCpp
NULL
# Label placement
#' @name wordlayout
#' @title wordlayout
#' @description wordlayout
#' @param x long
#' @param y lat
#' @param words labels
#' @param cex cex
#' @param xlim xlim
#' @param ylim ylim
#' @param tstep tstep
#' @param rstep rstep
#' @param ... other stuf
#' @return coords
#' @noRd
# Greedy spiral label placement: for each label, start from its anchor and
# walk outwards on a spiral until a bounding box is found that overlaps no
# previously placed box and stays inside xlim/ylim. Needs an open graphics
# device (strwidth/strheight). Returns one box per word: x, y (lower-left
# corner), width, ht.
wordlayout <- function(x, y, words, cex=1,
                       xlim=c(-Inf,Inf), ylim=c(-Inf,Inf),
                       tstep=.1, rstep=.1, ...){
  tails <- "g|j|p|q|y"
  n <- length(words)
  # spiral step sizes are scaled by the spread of the data; guard the
  # degenerate zero-spread case
  sdx <- sd(x,na.rm=TRUE)
  sdy <- sd(y,na.rm=TRUE)
  if(sdx==0)
    sdx <- 1
  if(sdy==0)
    sdy <- 1
  if(length(cex)==1)
    cex <- rep(cex,n)
  # NOTE(review): fixing the seed makes placement deterministic but clobbers
  # the caller's RNG state as a side effect.
  set.seed(999)
  boxes <- list()
  # NOTE(review): 1:length(words) misbehaves for empty input; seq_along() is safer.
  for(i in 1:length(words)){
    r <-0
    theta <- runif(1,0,2*pi)
    x1 <- xo <- x[i]
    y1 <- yo <- y[i]
    # box size = rendered text extent plus 40% of one "R" glyph of padding
    wid <- strwidth(words[i],cex=cex[i],...) + 0.4 *
      strwidth("R", cex=cex[i], ...)
    ht <- strheight(words[i],cex=cex[i],...) + 0.4 *
      strheight("R", cex=cex[i], ...)
    #mind your ps and qs: descenders get 20% extra height
    if(grepl(tails,words[i]))
      ht <- ht + ht*.2
    isOverlaped <- TRUE
    while(isOverlaped){
      # is_overlap() is presumably the compiled helper registered via
      # useDynLib (defined elsewhere in the package) -- confirm
      if(!is_overlap(x1-.5*wid,y1-.5*ht,wid,ht,boxes) &&
         x1-.5*wid>xlim[1] && y1-.5*ht>ylim[1] &&
         x1+.5*wid<xlim[2] && y1+.5*ht<ylim[2]){
        boxes[[length(boxes)+1]] <- c(x1-.5*wid,y1-.5*ht,wid,ht)
        isOverlaped <- FALSE
      }else{
        # advance along the spiral and try again
        theta <- theta+tstep
        r <- r + rstep*tstep/(2*pi)
        x1 <- xo+sdx*r*cos(theta)
        y1 <- yo+sdy*r*sin(theta)
      }
    }
  }
  result <- do.call(rbind,boxes)
  colnames(result) <- c("x","y","width","ht")
  rownames(result) <- words
  result
}
# shadow around the labels
#' @name shadowtext
#' @title shadowtext
#' @description shadowtext
#' @param x lon
#' @param y lat
#' @param labels labels
#' @param col col
#' @param bg bg
#' @param theta angles (in radians) at which the background halo copies are drawn
#' @param r halo radius, as a fraction of the size of the letter "A"
#' @param ... further arguments passed on to text
#' @noRd
shadowtext <- function(x, y=NULL, labels, col='white', bg='black',
                       theta= seq(0, 2*pi, length.out=50), r=0.1, ... ) {
  # Draw text with a halo: paint the label in 'bg' at small offsets all
  # around (x, y), then overprint the label itself once in 'col'.
  x_off <- r * strwidth('A')
  y_off <- r * strheight('A')
  for (angle in theta) {
    text(x + cos(angle) * x_off, y + sin(angle) * y_off, labels, col = bg, ...)
  }
  text(x, y, labels, col = col, ...)
}
# import stuffs
#' @importFrom graphics image legend lines par plot.new
#' plot.window points polygon rect segments
#' strheight strwidth symbols text title
#' xinch yinch plot
#' @importFrom stats aggregate na.omit quantile runif sd median
#' @importFrom rgeos createSPComment
NULL
|
# The Main Diving Script
#
# I provide this version of the script for use starting
# Wednesday July 9. I will expand it through the rest of
# this case study. It is a reduced version of what we
# did Monday/Tuesday, but I have removed some things.
###################################
# Special note for Wednesday July 9
#
# If you get to class early and download this file, please do
# not read ahead too far: read only this section. If you have
# extra time, please review things we did in the last two days
# and prepare a question to ask me at the start of class.
#
# Thank you!
#
# See English.R and ToyExamples.R
#
# Please: if you know (from reading my paper) or think you have
# figured out a good way of studying nationalistic bias, please do
# not announce it to the class. This will spoil the experience for
# the other students.
# I want to give you lots of chances today to practice working
# in R. For many of you, this is most important. But I also
# want to provide a real data challenge that is interesting for
# those of you experienced with R. If you think you are good with
# R, I challenge you to seek multiple solutions to the same
# problem. Decide which you like, and why? Explain this to
# someone else. Argue with them. If they are still learning,
# teach them.
#
# Other aspects of the problem are statistical in nature. Something
# may seem easy or obvious. It may be. It may not be. Keep an open
# mind, and challenge your own assumptions. Real data analysis is
# different from pure mathematical statistics.
#
# End special note for Wednesday
###################################
x <- read.csv("Diving2000.csv", as.is=TRUE)
dim(x)
plot(jitter(x$Difficulty), jitter(x$JScore),
col=factor(x$Round),
xlab="Degree of Difficulty",
ylab="Judges' Scores")
# Create the matching variable; recall this will not work
# if the country variables are factors.
x$match <- x$Country == x$JCountry
table(x$match)
# Let's create a variable for use that will be
# convenient to have for efficient exploration:
thisjudge <- "WANG Facheng"
# A series of calculations, perhaps useful, perhaps not:
mean(x$JScore)
mean(x$JScore[x$Judge==thisjudge & x$match])
mean(x$JScore[x$Judge==thisjudge & !x$match])
mean(x$JScore[x$JCountry != "CHN" &
x$Country == "CHN"])
## Challenge: figure out the nationality
## of 'thisjudge' in a general way.
thisjudge <- "WANG Facheng"
thiscountry <- x$JCountry[x$Judge==thisjudge][1] # One way
thiscountry <- unique(x$JCountry[x$Judge==thisjudge]) # Another way
if (length(thiscountry) != 1 ) print("Judge country problem!") # Sanity check
## Challenge: the check, above, is not sufficient to "catch" all
## possible errors (unless you make some specific assumption).
## Why? What could go wrong that we could also try to "catch"?
# How many "matching" dives do we have for thisjudge?
sum(x$Judge==thisjudge & x$match) # One solution
table(x$Judge, x$match)[thisjudge, 2] # Another solution. CONSIDER!
# Can we establish "bias" for a judge? Perhaps this is closer:
t.test(x$JScore[x$Judge==thisjudge & x$match],
x$JScore[x$Judge!=thisjudge & x$Country==thiscountry])
## Challenge: the test above isn't great. Why? Find some aspect
## of the problem relating specifically to Judge Wang and to things
## we discussed in our diving exploration that causes a problem for
## this approach. There may be many problems, of course.
### STOP. Write down one problem on paper (in English) in a very
### short paragraph. Show it to another student. Read theirs.
### Discuss the problem. Discuss the English.
# Jake: Is T-test suitable for this problem?
# Find the relationship between Round and match
table(x$Round, x$match)
## Challenge: identify all dives in the data set where the diver
## is from 'thiscountry' (perhaps China) and the judge is 'thisjudge'
## (Judge Wang in our case). Create a reduced data set for only
## these dives. Create a new data.frame 'y' containing only these
## dives.
#### Your solution should go here:
#### End of your solution. Is it nicely indented and spaced?
#### Did you use <- for assignments? Would I be impressed
#### if I looked at your code?
### FOR THE CHALLENGE IMMEDIATELY ABOVE:
### 'y' should have 22*7 rows for Judge Wang.
### STOP. Without 'y' from above, you can't continue below. ###
# Once we have this reduced data set, y, we might consider
# the following test:
t.test(y$JScore[y$match], y$JScore[!y$match]) # TODO
## CHALLENGE: describe exactly and precisely the two sets of scores
## being compared in the t-test, above. Note that it requires
## considering how 'y' was extracted from 'x' in the earlier challenge.
### STOP. Make very sure you are right, above, before continuing. ###
### Talk with each other! Argue! Debate! ###
## CHALLENGE: How do you feel about this test? Do you like it?
## Is it fair to Judge Wang? Is it effective at uncovering biased
## judging (if present)?
### STOP. Write down your answer on paper (in English) in a very
### short paragraph. Show it to another student. Read theirs.
### Discuss the problem. Discuss the English.
#################################################################
## POSSIBLE CHALLENGE: you may feel that you know how to solve
## this problem of assessing nationalistic bias. Maybe you are
## right. Maybe not. I'm not going to tell you now.
##
## But one question that students often ask is
## whether a t-test is appropriate for this problem. What do you
## think? If you don't like the t-test, propose a better choice.
## Be ready to defend your method.
#################################################################
| /Diving.R | no_license | jakezhaojb/Diving_problem | R | false | false | 5,774 | r | # The Main Diving Script
#
# I provide this version of the script for use starting
# Wednesday July 9. I will expand it through the rest of
# this case study. It is a reduced version of what we
# did Monday/Tuesday, but I have removed some things.
###################################
# Special note for Wednesday July 9
#
# If you get to class early and download this file, please do
# not read ahead too far: read only this section. If you have
# extra time, please review things we did in the last two days
# and prepare a question to ask me at the start of class.
#
# Thank you!
#
# See English.R and ToyExamples.R
#
# Please: if you know (from reading my paper) or think you have
# figured out a good way of studying nationalistic bias, please do
# not announce it to the class. This will spoil the experience for
# the other students.
# I want to give you lots of chances today to practice working
# in R. For many of you, this is most important. But I also
# want to provide a real data challenge that is interesting for
# those of you experienced with R. If you think you are good with
# R, I challenge you to seek multiple solutions to the same
# problem. Decide which you like, and why? Explain this to
# someone else. Argue with them. If they are still learning,
# teach them.
#
# Other aspects of the problem are statistical in nature. Something
# may seem easy or obvious. It may be. It may not be. Keep an open
# mind, and challenge your own assumptions. Real data analysis is
# different from pure mathematical statistics.
#
# End special note for Wednesday
###################################
x <- read.csv("Diving2000.csv", as.is=TRUE)
dim(x)
plot(jitter(x$Difficulty), jitter(x$JScore),
col=factor(x$Round),
xlab="Degree of Difficulty",
ylab="Judges' Scores")
# Create the matching variable; recall this will not work
# if the country variables are factors.
x$match <- x$Country == x$JCountry
table(x$match)
# Let's create a variable for use that will be
# convenient to have for efficient exploration:
thisjudge <- "WANG Facheng"
# A series of calculations, perhaps useful, perhaps not:
mean(x$JScore)
mean(x$JScore[x$Judge==thisjudge & x$match])
mean(x$JScore[x$Judge==thisjudge & !x$match])
mean(x$JScore[x$JCountry != "CHN" &
x$Country == "CHN"])
## Challenge: figure out the nationality
## of 'thisjudge' in a general way.
thisjudge <- "WANG Facheng"
thiscountry <- x$JCountry[x$Judge==thisjudge][1] # One way
thiscountry <- unique(x$JCountry[x$Judge==thisjudge]) # Another way
if (length(thiscountry) != 1 ) print("Judge country problem!") # Sanity check
## Challenge: the check, above, is not sufficient to "catch" all
## possible errors (unless you make some specific assumption).
## Why? What could go wrong that we could also try to "catch"?
# How many "matching" dives do we have for thisjudge?
sum(x$Judge==thisjudge & x$match) # One solution
table(x$Judge, x$match)[thisjudge, 2] # Another solution. CONSIDER!
# Can we establish "bias" for a judge? Perhaps this is closer:
t.test(x$JScore[x$Judge==thisjudge & x$match],
x$JScore[x$Judge!=thisjudge & x$Country==thiscountry])
## Challenge: the test above isn't great. Why? Find some aspect
## of the problem relating specifically to Judge Wang and to things
## we discussed in our diving exploration that causes a problem for
## this approach. There may be many problems, of course.
### STOP. Write down one problem on paper (in English) in a very
### short paragraph. Show it to another student. Read theirs.
### Discuss the problem. Discuss the English.
# Jake: Is T-test suitable for this problem?
# Find the relationship between Round and match
table(x$Round, x$match)
## Challenge: identify all dives in the data set where the diver
## is from 'thiscountry' (perhaps China) and the judge is 'thisjudge'
## (Judge Wang in our case). Create a reduced data set for only
## these dives. Create a new data.frame 'y' containing only these
## dives.
#### Your solution should go here:
#### End of your solution. Is it nicely indented and spaced?
#### Did you use <- for assignments? Would I be impressed
#### if I looked at your code?
### FOR THE CHALLENGE IMMEDIATELY ABOVE:
### 'y' should have 22*7 rows for Judge Wang.
### STOP. Without 'y' from above, you can't continue below. ###
# Once we have this reduced data set, y, we might consider
# the following test:
t.test(y$JScore[y$match], y$JScore[!y$match]) # TODO
## CHALLENGE: describe exactly and precisely the two sets of scores
## being compared in the t-test, above. Note that it requires
## considering how 'y' was extracted from 'x' in the earlier challenge.
### STOP. Make very sure you are right, above, before continuing. ###
### Talk with each other! Argue! Debate! ###
## CHALLENGE: How do you feel about this test? Do you like it?
## Is it fair to Judge Wang? Is it effective at uncovering biased
## judging (if present)?
### STOP. Write down your answer on paper (in English) in a very
### short paragraph. Show it to another student. Read theirs.
### Discuss the problem. Discuss the English.
#################################################################
## POSSIBLE CHALLENGE: you may feel that you know how to solve
## this problem of assessing nationalistic bias. Maybe you are
## right. Maybe not. I'm not going to tell you now.
##
## But one question that students often ask is
## whether a t-test is appropriate for this problem. What do you
## think? If you don't like the t-test, propose a better choice.
## Be ready to defend your method.
#################################################################
|
#' Group input by rows
#'
#' \code{rowwise} is used for the results of \code{\link{do}} when you
#' create list-variables. It is also useful to support arbitrary
#' complex operations that need to be applied to each row.
#'
#' Currently \code{rowwise} grouping only works with data frames. Its
#' main impact is to allow you to work with list-variables in
#' \code{\link{summarise}} and \code{\link{mutate}} without having to
#' use \code{[[1]]}. This makes \code{summarise()} on a rowwise tbl
#' effectively equivalent to plyr's \code{ldply}.
#'
#' @param data Input data frame.
#' @export
#' @examples
#' df <- expand.grid(x = 1:3, y = 3:1)
#' df %>% rowwise() %>% do(i = seq(.$x, .$y))
#' .Last.value %>% summarise(n = length(i))
rowwise <- function(data) {
  # Tag a plain data frame so that dplyr verbs treat every row as its own
  # group. Only data frames are supported.
  stopifnot(is.data.frame(data))
  rowwise_classes <- c("rowwise_df", "tbl_df", "data.frame")
  structure(data, class = rowwise_classes)
}
#' @export
print.rowwise_df <- function(x, ..., n = NULL, width = NULL) {
  # Print a rowwise data frame: a header with the dimensions and the
  # per-row grouping marker, then the body via the truncated-matrix
  # printer. dim_desc() and trunc_mat() are dplyr internals.
  cat("Source: local data frame ", dim_desc(x), "\n", sep = "")
  cat("Groups: <by row>\n")
  cat("\n")
  print(trunc_mat(x, n = n, width = width))
  invisible(x)  # return the input invisibly so print() composes in pipes
}
#' @export
ungroup.rowwise_df <- function(x) {
  # Dropping the extra classes restores a plain data frame.
  structure(x, class = "data.frame")
}
#' @export
as.data.frame.rowwise_df <- function(x, row.names, optional, ...) {
  # row.names/optional are accepted for S3 signature compatibility but are
  # unused: the underlying object already is a data frame, only the class
  # attribute changes.
  structure(x, class = "data.frame")
}
#' @export
group_size.rowwise_df <- function(x) {
  # Every row is its own group, so each group has exactly one element.
  rep(1L, times = nrow(x))
}
#' @export
n_groups.rowwise_df <- function(x) {
  # One group per row.
  dim(x)[[1L]]
}
#' @export
group_by_.rowwise_df <- function(.data, ..., .dots, add = FALSE) {
  # Rowwise grouping cannot be combined with column grouping: warn, strip
  # the rowwise class, then build an ordinary grouped data frame.
  warning("Grouping rowwise data frame strips rowwise nature", call. = FALSE)
  .data <- ungroup(.data)
  # group_by_prepare() resolves the lazy-dots grouping spec (dplyr internal).
  groups <- group_by_prepare(.data, ..., .dots = .dots, add = add)
  grouped_df(groups$data, groups$groups)
}
| /R/rowwise.r | no_license | kbroman/dplyr | R | false | false | 1,728 | r | #' Group input by rows
#'
#' \code{rowwise} is used for the results of \code{\link{do}} when you
#' create list-variables. It is also useful to support arbitrary
#' complex operations that need to be applied to each row.
#'
#' Currently \code{rowwise} grouping only works with data frames. Its
#' main impact is to allow you to work with list-variables in
#' \code{\link{summarise}} and \code{\link{mutate}} without having to
#' use \code{[[1]]}. This makes \code{summarise()} on a rowwise tbl
#' effectively equivalent to plyr's \code{ldply}.
#'
#' @param data Input data frame.
#' @export
#' @examples
#' df <- expand.grid(x = 1:3, y = 3:1)
#' df %>% rowwise() %>% do(i = seq(.$x, .$y))
#' .Last.value %>% summarise(n = length(i))
rowwise <- function(data) {
  # Tag a data frame so that dplyr verbs treat every row as its own group.
  stopifnot(is.data.frame(data))
  structure(data, class = c("rowwise_df", "tbl_df", "data.frame"))
}
#' @export
print.rowwise_df <- function(x, ..., n = NULL, width = NULL) {
  # Header shows the dimensions and the per-row grouping marker; the body
  # is printed via the truncated-matrix printer (dplyr internal).
  cat("Source: local data frame ", dim_desc(x), "\n", sep = "")
  cat("Groups: <by row>\n")
  cat("\n")
  print(trunc_mat(x, n = n, width = width))
  invisible(x)  # return the input so print() composes in pipes
}
#' @export
ungroup.rowwise_df <- function(x) {
  # Dropping the extra classes restores a plain data frame.
  class(x) <- "data.frame"
  x
}
#' @export
as.data.frame.rowwise_df <- function(x, row.names, optional, ...) {
  # row.names/optional are accepted for S3 compatibility but unused: the
  # underlying object already is a data frame, only the class changes.
  class(x) <- "data.frame"
  x
}
#' @export
group_size.rowwise_df <- function(x) {
  # Every row is its own group of exactly one element.
  rep.int(1L, nrow(x))
}
#' @export
n_groups.rowwise_df <- function(x) {
  # One group per row.
  nrow(x)
}
#' @export
group_by_.rowwise_df <- function(.data, ..., .dots, add = FALSE) {
  # Rowwise grouping cannot be combined with column grouping: warn, strip
  # the rowwise class, then build an ordinary grouped data frame
  # (group_by_prepare/grouped_df are dplyr internals).
  warning("Grouping rowwise data frame strips rowwise nature", call. = FALSE)
  .data <- ungroup(.data)
  groups <- group_by_prepare(.data, ..., .dots = .dots, add = add)
  grouped_df(groups$data, groups$groups)
}
|
# Compute Krippendorff's alpha inter-rater agreement for the MTurk
# similarity labels (one column per rater, one row per rated item).
# Fixes vs. the previous revision: removed a leftover browser() debugger
# call and a no-op warnings() call; consistent <- assignment; renamed the
# misspelled 'aplha' variables.
# NOTE(review): the lib.loc and setwd() paths below are absolute paths to
# the original author's machine -- parameterize before reuse.
options(stringsAsFactors = FALSE)
library(lpSolve, lib.loc = "/Users/amita/software/Rpackage")
library(irr, lib.loc = "/Users/amita/software/Rpackage")
# Working directory: project folder holding the MTurk result files.
setwd("~/git/summary_repo/Summary/src/Similarity_Labels/Similarity_Data/gay-rights-debates/MTdata_cluster/Labels_Updated/AllMT_task/")
InputFile <- "Results/ExtDist5_NVA_CoreNLP_AllMT_Reg.csv"
mytable <- read.csv(InputFile, header = TRUE, stringsAsFactors = FALSE)
dim(mytable)
class(mytable)
# Keep only the per-rater label columns (named by worker id).
table.sub2 <- subset(mytable, select = c(Id_A142ZRU284W9O:Id_ASK5ZTC22VRZZ))
dim(table.sub2)
mat <- data.matrix(table.sub2)
# kripp.alpha() expects raters in rows, so transpose the item-by-rater matrix.
alpha_ord <- kripp.alpha(t(mat), "ordinal")
alpha_ord
alpha_nom <- kripp.alpha(t(mat))  # default method is "nominal"
alpha_nom
| /Summary/Rscript/kirpendorrfalpha.R | no_license | amitamisra/Summary_Dialogs | R | false | false | 716 | r | browser()
options( stringsAsFactors=FALSE)
library(lpSolve,lib.loc="/Users/amita/software/Rpackage")
library(irr,lib.loc="/Users/amita/software/Rpackage")
setwd("~/git/summary_repo/Summary/src/Similarity_Labels/Similarity_Data/gay-rights-debates/MTdata_cluster/Labels_Updated/AllMT_task/") #sets R's working directory to near where my files are
InputFile<-"Results/ExtDist5_NVA_CoreNLP_AllMT_Reg.csv"
warnings()
mytable<-read.csv(InputFile,header=TRUE,stringsAsFactors=FALSE)
dim(mytable)
class(mytable)
table.sub2 <- subset(mytable, select = c(Id_A142ZRU284W9O:Id_ASK5ZTC22VRZZ))
dim(table.sub2)
mat=data.matrix(table.sub2)
aplha<-kripp.alpha(t(mat),"ordinal")
aplha
aplha_nom<-kripp.alpha(t(mat))
aplha_nom
|
# *******************************************************************************
# OpenStudio(R), Copyright (c) 2008-2018, Alliance for Sustainable Energy, LLC.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# (3) Neither the name of the copyright holder nor the names of any contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission from the respective party.
#
# (4) Other than as required in clauses (1) and (2), distributions in any form
# of modifications or other derivative works may not use the "OpenStudio"
# trademark, "OS", "os", or any other confusingly similar designation without
# specific prior written permission from Alliance for Sustainable Energy, LLC.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER, THE UNITED STATES
# GOVERNMENT, OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *******************************************************************************
clusterEvalQ(cl,library(rjson))
clusterEvalQ(cl,library(R.utils))
objDim <- length(objfun)
print(paste("objDim:",objDim))
print(paste("UniqueGroups:",uniquegroups))
print(paste("objfun:",objfun))
print(paste("normtype:",normtype))
print(paste("ppower:",ppower))
print(paste("min:",mins))
print(paste("max:",maxes))
print(paste("failed_f:",failed_f))
clusterExport(cl,"objDim")
clusterExport(cl,"normtype")
clusterExport(cl,"ppower")
clusterExport(cl,"uniquegroups")
clusterExport(cl,"failed_f")
clusterExport(cl,"debug_messages")
for (i in 1:ncol(vars)){
vars[,i] <- sort(vars[,i])
}
print(paste("vartypes:",vartypes))
print(paste("varnames:",varnames))
print(paste("vardisplaynames:",vardisplaynames))
print(paste("objnames:",objnames))
# Setup a bunch of variables for the analysis based on passed variables
# From Ruby
analysis_dir <- paste(rails_sim_root_path,'/analysis_',rails_analysis_id,sep='')
ruby_command <- paste('cd ',analysis_dir,' && ',rails_ruby_bin_dir,'/bundle exec ruby ',sep='')
rake_command <- paste('cd ',rails_root_path,' && ',rails_ruby_bin_dir,'/bundle exec rake ',sep='')
if (debug_messages == 1) {
print(paste("analysis_dir: ",analysis_dir))
print(paste("ruby_command: ",ruby_command))
print(paste("rake_command: ",rake_command))
}
varfile <- function(x){
  # Persist the variable names to <analysis_dir>/varnames.json exactly
  # once: an existing file is left untouched (worker nodes all call this,
  # so only the first writer wins). 'analysis_dir' comes from the
  # enclosing script environment.
  target <- paste(analysis_dir, '/varnames.json', sep = '')
  if (file.exists(target)) {
    return(invisible(NULL))
  }
  write.table(x, file = target, quote = FALSE, row.names = FALSE, col.names = FALSE)
}
vardisplayfile <- function(x){
  # Persist the display names to <analysis_dir>/vardisplaynames.json
  # exactly once: an existing file is left untouched (worker nodes all
  # call this). 'analysis_dir' comes from the enclosing script environment.
  target <- paste(analysis_dir, '/vardisplaynames.json', sep = '')
  if (file.exists(target)) {
    return(invisible(NULL))
  }
  write.table(x, file = target, quote = FALSE, row.names = FALSE, col.names = FALSE)
}
# Export local variables for worker nodes
clusterExport(cl,"ruby_command")
clusterExport(cl,"rake_command")
clusterExport(cl,"analysis_dir")
clusterExport(cl,"varfile")
clusterExport(cl,"varnames")
# Export some global variables for worker nodes
clusterExport(cl,"rails_analysis_id")
clusterExport(cl,"rails_sim_root_path")
clusterExport(cl,"rails_ruby_bin_dir")
clusterExport(cl,"rails_mongodb_name")
clusterExport(cl,"rails_mongodb_ip")
clusterExport(cl,"rails_run_filename")
clusterExport(cl,"rails_create_dp_filename")
clusterExport(cl,"rails_root_path")
clusterExport(cl,"rails_host")
clusterExport(cl,"r_scripts_path")
clusterExport(cl,"rails_exit_guideline_14")
clusterEvalQ(cl,varfile(varnames))
clusterExport(cl,"vardisplayfile")
clusterExport(cl,"vardisplaynames")
clusterEvalQ(cl,vardisplayfile(vardisplaynames))
# Export functions for worker nodes
source(paste(r_scripts_path,'create_and_run_datapoint_uniquegroups.R',sep='/'))
clusterExport(cl,"create_and_run_datapoint_uniquegroups")
clusterExport(cl,"check_run_flag")
#f <- function(x){
# tryCatch(create_and_run_datapoint_uniquegroups(x),
# error=function(x){
# obj <- NULL
# for (i in 1:objDim) {
# obj[i] <- failed_f
# }
# print("create_and_run_datapoint_uniquegroups failed")
# return(obj)
# }
# )
#}
f <- function(x){
  # Worker-side wrapper: run one data point and return a "try-error"
  # object instead of propagating the error (silent = TRUE suppresses the
  # message). Failures are detected downstream from the object's class,
  # so the cluster job keeps running even if one data point fails.
  try(create_and_run_datapoint_uniquegroups(x), silent=TRUE)
}
clusterExport(cl,"f")
print(paste("n:",n))
print(paste("M:",M))
if (n <= (4*M^2+1)) {
n <- (4*M^2) + 2
print("n is <= 4*M^2, increasing the value of n")
print(paste("n:",n))
}
temp_list <- list()
if (length(mins) > 0) {
for (i in 1:length(mins))
{
temp_list[[i]] <- list(min= mins[i], max=maxes[i])
}
}
results <- NULL
# if (length(unique(vardisplaynames)) == length(vardisplaynames)) {
# m <- fast99(model=NULL, factors=vardisplaynames, n=n, M=M, q.arg = temp_list)
# } else if (length(unique(varnames)) == length(varnames)) {
# m <- fast99(model=NULL, factors=varnames, n=n, M=M, q.arg = temp_list)
# } else {
m <- fast99(model=NULL, factors=ncol(vars), n=n, M=M, q.arg = temp_list)
#}
m1 <- as.list(data.frame(t(m$X)))
if (debug_messages == 1) {
print(paste("m1:",m1))
}
try(results <- clusterApplyLB(cl, m1, f),silent=FALSE)
result <- as.data.frame(results)
if (debug_messages == 1) {
print(paste("length(objnames):",length(objnames)))
print(paste("nrow(result):",nrow(result)))
print(paste("ncol(result):",ncol(result)))
}
file_names_R <- c("")
file_names_bar_png <- c("")
# Post-process the fast99 results: for each objective, attach the model
# responses to the design, save the sensitivity object, render a
# main-effect/interaction bar chart, and bundle everything into a
# downloadable zip. Fixes vs. the previous revision: removed a stray empty
# argument (", ,") from the barplot() call and switched to seq_len().
if (nrow(result) > 0) {
  for (j in seq_len(nrow(result))) {
    # Copy the design and feed it the j-th objective's responses.
    n <- m
    tell(n, as.numeric(unlist(result[j, ])))
    print(n)
    safe_name <- gsub(" ", "_", objnames[j], fixed = TRUE)
    file_names_R[j] <- paste(analysis_dir, "/m_", safe_name, ".RData", sep = "")
    save(n, file = file_names_R[j])
    file_names_bar_png[j] <- paste(analysis_dir, "/fast99_", safe_name, "_bar.png", sep = "")
    png(file_names_bar_png[j], width = 12, height = 8, units = "in",
        pointsize = 10, res = 200, type = "cairo")
    # Wide bottom margin so long variable names fit under the bars; the
    # device is closed right after plotting, so par() needs no restore.
    op <- par(mar = c(14, 4, 4, 2) + 0.1)
    if (!is.null(n$y)) {
      # Stack first-order indices over the interaction remainder
      # (D1/V and 1 - Dt/V - D1/V from the fast99 decomposition).
      S <- rbind(n$D1 / n$V, 1 - n$Dt / n$V - n$D1 / n$V)
      colnames(S) <- vardisplaynames
      bar.col <- c("white", "grey")
      mp <- barplot(S, ylim = c(0, 1), col = bar.col, xaxt = "n")
      axis(1, at = mp, labels = vardisplaynames, las = 2, cex.axis = 0.9)
      legend("topright", c("main effect", "interactions"), fill = bar.col)
    }
    dev.off()
    # NOTE(review): the zip is rebuilt on every loop iteration; only the
    # final iteration's archive survives. Kept inside the loop to preserve
    # existing behavior.
    file_zip <- c(file_names_R, file_names_bar_png,
                  paste(analysis_dir, "/vardisplaynames.json", sep = ''))
    if (debug_messages == 1) {
      print(paste("file_zip:", file_zip))
    }
    if (!dir.exists(paste(analysis_dir, "/downloads", sep = ''))) {
      dir.create(paste(analysis_dir, "/downloads", sep = ''))
      print(paste("created dir:", analysis_dir, "/downloads", sep = ''))
    }
    zip(zipfile = paste(analysis_dir, "/downloads/fast99_results_", rails_analysis_id, ".zip", sep = ''),
        files = file_zip, flags = "-j")
  }
} else {
  print("Results is null")
}
| /docker/R/lib/fast99.R | permissive | jmarrec/OpenStudio-server | R | false | false | 8,265 | r | # *******************************************************************************
# OpenStudio(R), Copyright (c) 2008-2018, Alliance for Sustainable Energy, LLC.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# (3) Neither the name of the copyright holder nor the names of any contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission from the respective party.
#
# (4) Other than as required in clauses (1) and (2), distributions in any form
# of modifications or other derivative works may not use the "OpenStudio"
# trademark, "OS", "os", or any other confusingly similar designation without
# specific prior written permission from Alliance for Sustainable Energy, LLC.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER, THE UNITED STATES
# GOVERNMENT, OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *******************************************************************************
clusterEvalQ(cl,library(rjson))
clusterEvalQ(cl,library(R.utils))
objDim <- length(objfun)
print(paste("objDim:",objDim))
print(paste("UniqueGroups:",uniquegroups))
print(paste("objfun:",objfun))
print(paste("normtype:",normtype))
print(paste("ppower:",ppower))
print(paste("min:",mins))
print(paste("max:",maxes))
print(paste("failed_f:",failed_f))
clusterExport(cl,"objDim")
clusterExport(cl,"normtype")
clusterExport(cl,"ppower")
clusterExport(cl,"uniquegroups")
clusterExport(cl,"failed_f")
clusterExport(cl,"debug_messages")
for (i in 1:ncol(vars)){
vars[,i] <- sort(vars[,i])
}
print(paste("vartypes:",vartypes))
print(paste("varnames:",varnames))
print(paste("vardisplaynames:",vardisplaynames))
print(paste("objnames:",objnames))
# Setup a bunch of variables for the analysis based on passed variables
# From Ruby
analysis_dir <- paste(rails_sim_root_path,'/analysis_',rails_analysis_id,sep='')
ruby_command <- paste('cd ',analysis_dir,' && ',rails_ruby_bin_dir,'/bundle exec ruby ',sep='')
rake_command <- paste('cd ',rails_root_path,' && ',rails_ruby_bin_dir,'/bundle exec rake ',sep='')
if (debug_messages == 1) {
print(paste("analysis_dir: ",analysis_dir))
print(paste("ruby_command: ",ruby_command))
print(paste("rake_command: ",rake_command))
}
varfile <- function(x){
var_filename <- paste(analysis_dir,'/varnames.json',sep='')
if (!file.exists(var_filename)){
write.table(x, file=var_filename, quote=FALSE,row.names=FALSE,col.names=FALSE)
}
}
vardisplayfile <- function(x){
var_filename <- paste(analysis_dir,'/vardisplaynames.json',sep='')
if (!file.exists(var_filename)){
write.table(x, file=var_filename, quote=FALSE,row.names=FALSE,col.names=FALSE)
}
}
# Export local variables for worker nodes
clusterExport(cl,"ruby_command")
clusterExport(cl,"rake_command")
clusterExport(cl,"analysis_dir")
clusterExport(cl,"varfile")
clusterExport(cl,"varnames")
# Export some global variables for worker nodes
clusterExport(cl,"rails_analysis_id")
clusterExport(cl,"rails_sim_root_path")
clusterExport(cl,"rails_ruby_bin_dir")
clusterExport(cl,"rails_mongodb_name")
clusterExport(cl,"rails_mongodb_ip")
clusterExport(cl,"rails_run_filename")
clusterExport(cl,"rails_create_dp_filename")
clusterExport(cl,"rails_root_path")
clusterExport(cl,"rails_host")
clusterExport(cl,"r_scripts_path")
clusterExport(cl,"rails_exit_guideline_14")
clusterEvalQ(cl,varfile(varnames))
clusterExport(cl,"vardisplayfile")
clusterExport(cl,"vardisplaynames")
clusterEvalQ(cl,vardisplayfile(vardisplaynames))
# Export functions for worker nodes
source(paste(r_scripts_path,'create_and_run_datapoint_uniquegroups.R',sep='/'))
clusterExport(cl,"create_and_run_datapoint_uniquegroups")
clusterExport(cl,"check_run_flag")
#f <- function(x){
# tryCatch(create_and_run_datapoint_uniquegroups(x),
# error=function(x){
# obj <- NULL
# for (i in 1:objDim) {
# obj[i] <- failed_f
# }
# print("create_and_run_datapoint_uniquegroups failed")
# return(obj)
# }
# )
#}
f <- function(x){
try(create_and_run_datapoint_uniquegroups(x), silent=TRUE)
}
# Make the worker wrapper available on every cluster node.
clusterExport(cl,"f")
print(paste("n:",n))
print(paste("M:",M))
# fast99 needs the sample size n to exceed 4*M^2 (sampling constraint on the
# number of harmonics M); bump n when it is too small.
if (n <= (4*M^2+1)) {
  n <- (4*M^2) + 2
  print("n is <= 4*M^2, increasing the value of n")
  print(paste("n:",n))
}
# Per-factor uniform ranges for the fast99 design (q.arg argument).
temp_list <- list()
if (length(mins) > 0) {
  for (i in 1:length(mins))
  {
    temp_list[[i]] <- list(min= mins[i], max=maxes[i])
  }
}
results <- NULL
# if (length(unique(vardisplaynames)) == length(vardisplaynames)) {
#   m <- fast99(model=NULL, factors=vardisplaynames, n=n, M=M, q.arg = temp_list)
# } else if (length(unique(varnames)) == length(varnames)) {
#   m <- fast99(model=NULL, factors=varnames, n=n, M=M, q.arg = temp_list)
# } else {
# Build the extended-FAST design; model=NULL means we evaluate the model
# ourselves (on the cluster) and feed responses back later with tell().
m <- fast99(model=NULL, factors=ncol(vars), n=n, M=M, q.arg = temp_list)
#}
# One list element per design point (the rows of m$X), for clusterApplyLB.
m1 <- as.list(data.frame(t(m$X)))
if (debug_messages == 1) {
  print(paste("m1:",m1))
}
# Evaluate every design point across the cluster, load-balanced.
try(results <- clusterApplyLB(cl, m1, f),silent=FALSE)
result <- as.data.frame(results)
if (debug_messages == 1) {
  print(paste("length(objnames):",length(objnames)))
  print(paste("nrow(result):",nrow(result)))
  print(paste("ncol(result):",ncol(result)))
}
file_names_R <- c("")
file_names_bar_png <- c("")
if (nrow(result) > 0) {
  # One sensitivity analysis (and one stacked bar chart) per output row.
  for (j in 1:nrow(result)){
    n <- m  # NOTE(review): reuses the sample-size variable n as the fast99 object -- confusing but harmless here
    tell(n,as.numeric(unlist(result[j,])))
    print(n)
    file_names_R[j] <- paste(analysis_dir,"/m_",gsub(" ","_",objnames[j], fixed=TRUE),".RData",sep="")
    save(n, file=file_names_R[j])
    file_names_bar_png[j] <- paste(analysis_dir,"/fast99_",gsub(" ","_",objnames[j],fixed=TRUE),"_bar.png",sep="")
    png(file_names_bar_png[j], width=12, height=8, units="in", pointsize=10, res=200, type="cairo")
    op <- par(mar = c(14,4,4,2) + 0.1)
    if (! is.null(n$y)) {
      # First-order index (D1/V) and the interaction remainder
      # (1 - Dt/V - D1/V), stacked per factor.
      S <- rbind(n$D1 / n$V, 1 - n$Dt / n$V - n$D1 / n$V)
      colnames(S) <- vardisplaynames
      bar.col <- c("white","grey")
      mp <- barplot(S, ylim = c(0,1), col = bar.col, , xaxt="n") # NOTE(review): stray empty argument before xaxt -- harmless, should be removed
      axis(1, at=mp, labels=vardisplaynames, las=2, cex.axis=0.9)
      legend("topright", c("main effect", "interactions"), fill = bar.col)
    }
    dev.off()
    #file_zip <- c(file_names_jsons,file_names_R,file_names_bar_png,file_names_bar_sorted_png,file_names_png,file_names_box_png,file_names_box_sorted_png,paste(analysis_dir,"/vardisplaynames.json",sep=''))
    file_zip <- c(file_names_R,file_names_bar_png,paste(analysis_dir,"/vardisplaynames.json",sep=''))
    if (debug_messages == 1) {
      print(paste("file_zip:",file_zip))
    }
    if(!dir.exists(paste(analysis_dir,"/downloads",sep=''))){
      dir.create(paste(analysis_dir,"/downloads",sep=''))
      print(paste("created dir:",analysis_dir,"/downloads",sep=''))
    }
    # Bundle all result files for download; -j strips directory paths.
    zip(zipfile=paste(analysis_dir,"/downloads/fast99_results_",rails_analysis_id,".zip",sep=''),files=file_zip, flags = "-j")
  }
} else {
  print("Results is null")
}
|
#--------------------------------------**--------------------------------------#
# File Name: stat305-hw3-soln.R
# Purpose:   solutions to STAT 305 homework 3 (descriptive statistics)
#--------------------------------------**--------------------------------------#
# Ch. 3 Ex. 12 -- four small samples used for variance/SD practice
x1 <- c( 2, 1, 1, 1, 0, 0, -2, 10, -5, 1)
x2 <- c(-2, -6, -1, -2, -1, -2, -1, -1, -1, -5)
x3 <- c(10, 10, 8, 8, 7, 7, 9, 11)
x4 <- c(-4, -3, -4, -2, -3, -3, -3, -3, -4, -4)
sample_variance <- function(x) (1/(length(x)-1))*sum((x - mean(x))^2)
sample_sd <- function(x) sqrt(sample_variance(x))
sample_sd(x4)
# Ch. 3 Ex 18 -- three samples of ten observations each
h1 <- c(313, 100, 235, 250, 457, 11, 315, 584, 249, 204)
h2 <- c(349, 206, 163, 350, 189, 216, 170, 359, 267, 196)
h3 <- c(289, 279, 142, 334, 192, 339, 87, 185, 262, 194)
# scratch inputs used while developing quantile_function below
x <- h1
p <- 0.5
# Sample quantile Q(p) with linear interpolation, using the textbook
# convention that the i-th order statistic sits at probability (i - 0.5)/n.
#
# Fix: the original always indexed x[i + 1]; for extreme p (i == length(x),
# or i == 0 for very small p) that element is NA, so e.g.
# quantile_function(0.99, x) returned NA. We now clamp to the smallest /
# largest order statistic in those cases.
#
# Args:
#   p: probability in [0, 1]
#   x: numeric sample (any order)
# Returns: the interpolated sample quantile.
quantile_function <- function(p, x){
  x <- sort(x)            # order statistics x_(1) <= ... <= x_(n)
  n <- length(x)
  pos <- n * p + 0.5      # fractional index of Q(p)
  i <- floor(pos)
  if (i < 1) return(x[1])   # below the first order statistic
  if (i >= n) return(x[n])  # at/above the last: nothing to interpolate to
  qp <- x[i] + (pos - i) * (x[i + 1] - x[i])
  return(qp)
}
quantile_function(0.62, h1)
quantile_function(0.50, h3)  # sample median of h3
quantile_function(0.75, h3)  # upper quartile of h3
# samples 2-5 are sample1 scaled by 2, 4, 8 and 16; the sample variance
# should scale by the square of each factor
sample1 = c(-1.05, -1.0, -0.5, 0.15, 0.6, 0.65, 0.7, 1.25)
sample2 = c(-2.1, -2.0, -1.0, 0.3, 1.2, 1.3, 1.4, 2.5)
sample3 = c(-4.2, -4.0, -2.0, 0.6, 2.4, 2.6, 2.8, 5.0)
sample4 = c(-8.4, -8.0, -4.0, 1.2, 4.8, 5.2, 5.6, 10.0)
sample5 = c(-16.8, -16.0, -8.0, 2.4, 9.6, 10.4, 11.2, 20.0)
sample_variance(sample1)
sample_variance(sample2)
sample_variance(sample3)
sample_variance(sample4)
sample_variance(sample5)
hist(h2)
hist(h3)
| /materials/hw/hw3/stat305-hw3-soln.R | no_license | HaniehBaktash/stat305.github.io | R | false | false | 1,434 | r | #--------------------------------------**--------------------------------------#
# File Name: stat305-hw3-soln.R
# Purpose:
#--------------------------------------**--------------------------------------#
# Ch. 3 Ex. 12
x1 <- c( 2, 1, 1, 1, 0, 0, -2, 10, -5, 1)
x2 <- c(-2, -6, -1, -2, -1, -2, -1, -1, -1, -5)
x3 <- c(10, 10, 8, 8, 7, 7, 9, 11)
x4 <- c(-4, -3, -4, -2, -3, -3, -3, -3, -4, -4)
sample_variance <- function(x) (1/(length(x)-1))*sum((x - mean(x))^2)
sample_sd <- function(x) sqrt(sample_variance(x))
sample_sd(x4)
# Ch. 3 Ex 18
h1 <- c(313, 100, 235, 250, 457, 11, 315, 584, 249, 204)
h2 <- c(349, 206, 163, 350, 189, 216, 170, 359, 267, 196)
h3 <- c(289, 279, 142, 334, 192, 339, 87, 185, 262, 194)
x <- h1
p <- 0.5
# Sample quantile Q(p) with linear interpolation, using the textbook
# convention that the i-th order statistic sits at probability (i - 0.5)/n.
#
# Fix: the original always indexed x[i + 1]; for extreme p (i == length(x),
# or i == 0 for very small p) that element is NA, so e.g.
# quantile_function(0.99, x) returned NA. We now clamp to the smallest /
# largest order statistic in those cases.
#
# Args:
#   p: probability in [0, 1]
#   x: numeric sample (any order)
# Returns: the interpolated sample quantile.
quantile_function <- function(p, x){
  x <- sort(x)            # order statistics x_(1) <= ... <= x_(n)
  n <- length(x)
  pos <- n * p + 0.5      # fractional index of Q(p)
  i <- floor(pos)
  if (i < 1) return(x[1])   # below the first order statistic
  if (i >= n) return(x[n])  # at/above the last: nothing to interpolate to
  qp <- x[i] + (pos - i) * (x[i + 1] - x[i])
  return(qp)
}
quantile_function(0.62, h1)
quantile_function(0.50, h3)
quantile_function(0.75, h3)
sample1 = c(-1.05, -1.0, -0.5, 0.15, 0.6, 0.65, 0.7, 1.25)
sample2 = c(-2.1, -2.0, -1.0, 0.3, 1.2, 1.3, 1.4, 2.5)
sample3 = c(-4.2, -4.0, -2.0, 0.6, 2.4, 2.6, 2.8, 5.0)
sample4 = c(-8.4, -8.0, -4.0, 1.2, 4.8, 5.2, 5.6, 10.0)
sample5 = c(-16.8, -16.0, -8.0, 2.4, 9.6, 10.4, 11.2, 20.0)
sample_variance(sample1)
sample_variance(sample2)
sample_variance(sample3)
sample_variance(sample4)
sample_variance(sample5)
hist(h2)
hist(h3)
|
# Generate `day` days of synthetic monitoring records for one simulated
# patient (demo data for the triage dashboard).  Demographics (sex, age,
# underlying disease) are drawn once and repeated for every day; vitals,
# symptoms and mental-state flags are redrawn daily from a 4-level severity
# code (0 = normal ... 3 = critical, probabilities 60/20/10/10%).
# Returns a DT::datatable of the generated data frame (one row per day).
generate = function(day){
  # Sex: 'M' or 'F', fixed for the patient
  S = rep(sample(c('M','F'),1),day)
  # Age: 20-85, fixed for the patient
  A = rep(sample(20:85, 1),day)
  # Underlying disease (comorbidity) flag, fixed for the patient
  D = rep(sample(c(TRUE, FALSE), 1),day)
  ## Body temperature (deg C): map each day's severity code to a range.
  ## NOTE(review): `T` shadows base R's TRUE alias -- confirm intentional.
  T = sample(c(0,1,2,3), day, replace = TRUE, prob = c(0.6,0.2,0.1,0.1))
  T = sapply(T, function(i){
    if(i==3){ # critical: <= 35.0 (hypothermia)
      return(sample(seq(34,35,0.1), 1))
    }
    else if(i==2){
      high = sample(c(TRUE,FALSE),1)
      if(high){ # high: >= 39.1
        return( sample(seq(39.1,39.5,0.1),1))
      }
      else{ # low: 35.1 ~ 36
        return( sample(seq(35.1,36,0.1), 1) )
      }
    } else if( i==1){ # fever: 38.1 ~ 39.1
      return( sample(seq(38.1,39.1,0.1),1) )
    }
    return(sample(seq(36.1,38.1,0.1),1)) # 0: normal range
  })
  ## Cardiopulmonary function: per day, one severity code for each of four
  ## vital signs (HB = breathing, O = oxygen saturation, BC = respiratory
  ## rate, P = pulse); the day's overall severity caps how many signs
  ## reach that level.
  CF = sample(c(0,1,2,3), day, replace = TRUE, prob = c(0.6,0.2,0.1,0.1))
  CF = sapply(CF, function(i){
    if(i==0){ # all four signs normal
      return(c(0,0,0,0))
    }
    if(i==1){
      cnt = sample(1:4,1) # how many signs at severity 1
      return( sample(c(rep(1,cnt), rep(0,4-cnt))) )
    }
    if(i==2){
      cnt = sample(1:4,1) # how many signs at severity 2; the rest drawn from 0-1
      return( sample( c(rep(2,cnt), rep(sample(c(0,1),size = 4-cnt, replace = TRUE))) ) )
    }
    if(i==3){
      cnt = sample(1:4,1) # how many signs at severity 3; the rest drawn from 0-2
      return( sample( c(rep(3,cnt), rep(sample(c(0,1,2),size = 4-cnt, replace = TRUE))) ) )
    }
  })
  rownames(CF) = c('HB', 'O', 'BC', 'P')
  # Breathing difficulty: map severity to 7 daily symptom flags
  HB = sapply(CF['HB',], function(i){
    if(i <= 1 ){ # severity 0 or 1: no symptoms
      return(rep(FALSE,7))
    }
    if(i==2){ # only the first (mild dyspnea) flag set
      return(c(TRUE, rep(FALSE,6)))
    }
    if(i==3){ # mild flag random, plus 1-6 of the remaining flags
      cnt = sample(1:6,1)
      return( c(sample(c(TRUE,FALSE),1), sample(c(rep(TRUE, cnt), rep(FALSE, 6-cnt))) ) )
    }
  })
  # Flag meanings (recovered from garbled Korean labels -- TODO confirm):
  # dyspnea, severe dyspnea, chest pain, fainting, fever/ache/cough,
  # blue lips, worsening dyspnea
  rownames(HB) = c('HB', 'DHB', 'CP', 'F', 'C', 'BL', 'HHB')
  # Oxygen saturation (%)
  O = sapply(CF['O',], function(i){
    if(i<=1){ # normal: 95.1 ~ 100
      return(sample(seq(95.1,100,0.1),1))
    }
    if(i==2){ # reduced: 93.1 ~ 95
      return(sample(seq(93.1,95,0.1),1))
    }
    if(i==3){ # critical: 92 ~ 93
      return(sample(seq(92,93,0.1),1))
    }
  })
  # Respiratory rate (breaths per minute)
  BC = sapply(CF['BC',], function(i){
    if(i<=1){ # normal: 12-20
      return(sample(12:20,1))
    }
    if(i==2){ # abnormal: high 21-24 or low 9-11
      high = sample(c(TRUE, FALSE), 1)
      if(high){
        return(sample(21:24,1))
      }
      return(sample(9:11,1))
    }
    if(i==3){ # critical: >=25 or <=8
      high = sample(c(TRUE, FALSE), 1)
      if(high){
        return(sample(25:30,1))
      }
      return(sample(5:8,1))
    }
  })
  # Pulse (beats per minute)
  P = sapply(CF['P',], function(i){
    if(i <= 1){ # normal: 51-100
      return(sample(51:100,1))
    }
    if(i==2){ # abnormal: high 101-110 or low 41-50
      high = sample(c(TRUE, FALSE),1)
      if(high){
        return(sample(101:110,1))
      }
      return(sample(41:50,1))
    }
    if(i==3){ # critical: high 111-130 or low 30-40
      high = sample(c(TRUE, FALSE),1)
      if(high){
        return(sample(111:130,1))
      }
      return(sample(30:40,1))
    }
  })
  # NOTE(review): this unconditionally overwrites the severity-driven pulse
  # computed just above with a uniform 90-120 draw -- confirm which of the
  # two is intended.
  P = sample(90:120, day, replace = TRUE)
  ## Consciousness: TRUE = alert; ~5% of days flag a loss of consciousness
  ## (code 3 maps to FALSE)
  CO = sample(c(0,3), size = day, replace = TRUE, prob = c(0.95,0.05))
  CO = sapply(CO, function(i){
    if(i==3){
      return(FALSE)
    }
    return(TRUE)
  })
  ## Mental state: daily severity decides how many of 9 flags are set
  M = sample(c(0,1,2,3), day, replace = TRUE, prob = c(0.6,0.2,0.1,0.1))
  M = sapply(M, function(i){
    if(i==0){ # no flags set
      return(rep(FALSE, 9))
    }
    if(i==1){ # 1-3 flags set
      cnt = sample(1:3,1)
    }
    else if(i>=2){ # 4-9 flags set
      cnt = sample(4:9,1)
    }
    return(sample( c(rep(TRUE, cnt), rep(FALSE, 9-cnt))) )
  })
  # Flag meanings (recovered from garbled Korean labels -- TODO confirm):
  # palpitations, sweating, trembling, choking, chest discomfort,
  # abdominal discomfort, dizziness, sensory abnormality, fear/panic
  rownames(M) = c('PO', 'PE', 'TR', 'CH', 'CC', 'AC', 'W', 'SA', 'FE')
  res = data.frame(S, A, D, T, t(HB), O, BC, P, CO, t(M))
  # NOTE(review): datatable() requires the DT package to be attached by the
  # caller; this is the function's return value.
  datatable(res, rownames = FALSE)
}
| /generateSample.R | permissive | shinykorea/corona-triage | R | false | false | 3,847 | r | generate = function(day){
# ์ฑ๋ณ
S = rep(sample(c('M','F'),1),day)
# ๋์ด
A = rep(sample(20:85, 1),day)
# ๊ธฐ์ ์ง๋ณ
D = rep(sample(c(TRUE, FALSE), 1),day)
## ์ฒด์จ
T = sample(c(0,1,2,3), day, replace = TRUE, prob = c(0.6,0.2,0.1,0.1))
T = sapply(T, function(i){
if(i==3){ # <= 35.0
return(sample(seq(34,35,0.1), 1))
}
else if(i==2){
high = sample(c(TRUE,FALSE),1)
if(high){ # high, 39.1 >=
return( sample(seq(39.1,39.5,0.1),1))
}
else{ # low, 35.1 ~ 36
return( sample(seq(35.1,36,0.1), 1) )
}
} else if( i==1){
return( sample(seq(38.1,39.1,0.1),1) )
}
return(sample(seq(36.1,38.1,0.1),1)) # 0
})
## ์ฌํ๊ธฐ๋ฅ
CF = sample(c(0,1,2,3), day, replace = TRUE, prob = c(0.6,0.2,0.1,0.1))
CF = sapply(CF, function(i){
if(i==0){ # ALL Should 0
return(c(0,0,0,0))
}
if(i==1){
cnt = sample(1:4,1) # cnt
return( sample(c(rep(1,cnt), rep(0,4-cnt))) )
}
if(i==2){
cnt = sample(1:4,1) # 2 cnt
return( sample( c(rep(2,cnt), rep(sample(c(0,1),size = 4-cnt, replace = TRUE))) ) )
}
if(i==3){
cnt = sample(1:4,1) # 3 cnt
return( sample( c(rep(3,cnt), rep(sample(c(0,1,2),size = 4-cnt, replace = TRUE))) ) )
}
})
rownames(CF) = c('HB', 'O', 'BC', 'P')
# ํธํก๊ณค๋
HB = sapply(CF['HB',], function(i){
if(i <= 1 ){ # 0 or 1
return(rep(FALSE,7))
}
if(i==2){
return(c(TRUE, rep(FALSE,6)))
}
if(i==3){
cnt = sample(1:6,1)
return( c(sample(c(TRUE,FALSE),1), sample(c(rep(TRUE, cnt), rep(FALSE, 6-cnt))) ) )
}
})
# ํธํก๊ณค๋ํธ์, ์ฌํ ํธํก๊ณค๋, ๊ฐ์ดํต์ฆ, ์ค์ , ๊ณ ์ด-๋ชธ์ด-๊ธฐ์นจ, ํ๋์
์ , ํธํก๊ณค๋ ์ฌํ
rownames(HB) = c('HB', 'DHB', 'CP', 'F', 'C', 'BL', 'HHB')
# ์ฐ์ํฌํ๋
O = sapply(CF['O',], function(i){
if(i<=1){ # 95.1 ~ 100
return(sample(seq(95.1,100,0.1),1))
}
if(i==2){ # 93.1 ~ 95
return(sample(seq(93.1,95,0.1),1))
}
if(i==3){ # 92 ~ 93
return(sample(seq(92,93,0.1),1))
}
})
# ํธํก์
BC = sapply(CF['BC',], function(i){
if(i<=1){
return(sample(12:20,1))
}
if(i==2){
high = sample(c(TRUE, FALSE), 1)
if(high){
return(sample(21:24,1))
}
return(sample(9:11,1))
}
if(i==3){ # >=25, <=8
high = sample(c(TRUE, FALSE), 1)
if(high){
return(sample(25:30,1))
}
return(sample(5:8,1))
}
})
# ๋งฅ๋ฐ
P = sapply(CF['P',], function(i){
if(i <= 1){
return(sample(51:100,1))
}
if(i==2){
high = sample(c(TRUE, FALSE),1)
if(high){
return(sample(101:110,1))
}
return(sample(41:50,1))
}
if(i==3){
high = sample(c(TRUE, FALSE),1)
if(high){
return(sample(111:130,1))
}
return(sample(30:40,1))
}
})
P = sample(90:120, day, replace = TRUE)
## ์์์์ค
CO = sample(c(0,3), size = day, replace = TRUE, prob = c(0.95,0.05))
CO = sapply(CO, function(i){
if(i==3){
return(FALSE)
}
return(TRUE)
})
## ์ฌ๋ฆฌ์ํ
M = sample(c(0,1,2,3), day, replace = TRUE, prob = c(0.6,0.2,0.1,0.1))
# ๋๊ทผ๊ฑฐ๋ฆผ, ๋ฐํ, ๋ชธ๋จ๋ฆผ, ์ง์, ๊ฐ์ด๋ถํธ, ๋ณต๋ถ๋ถํธ, ์ด์ง๋ฌ์, ๊ฐ๊ฐ์ด์, ๋๋ ค์-๊ณตํฌ
M = sapply(M, function(i){
if(i==0){ # 0 TRUE
return(rep(FALSE, 9))
}
if(i==1){ # 1 ~ 3 TRUE
cnt = sample(1:3,1)
}
else if(i>=2){
cnt = sample(4:9,1)
}
return(sample( c(rep(TRUE, cnt), rep(FALSE, 9-cnt))) )
})
rownames(M) = c('PO', 'PE', 'TR', 'CH', 'CC', 'AC', 'W', 'SA', 'FE')
res = data.frame(S, A, D, T, t(HB), O, BC, P, CO, t(M))
datatable(res, rownames = FALSE)
}
|
#' Plots multiple b-scored normalised platemaps
#'
#' Transforms numerical values using the b-score normalisation process to
#' account for row and column effects. Uses well and plate labels to plot the
#' normalised values in the form of microtitre plates. Works for 96, 384 and
#' 1536 well plates.
#'
#' @param data Numerical values to be plotted
#' @param well Vector of well identifiers e.g "A01"
#' @param plate Number of wells in complete plate (96, 384 or 1536)
#' @param plate_id Vector of plate identifiers e.g "Plate_1"
#' @return ggplot plot
#'
#' @import ggplot2
#' @export
#'
#' @examples
#' df01 <- data.frame(well = num_to_well(1:96),
#' vals = rnorm(96),
#' plate = 1)
#'
#' df02 <- data.frame(well = num_to_well(1:96),
#' vals = rnorm(96),
#' plate = 2)
#'
#' df <- rbind(df01, df02)
#'
#' b_grid(data = df$vals,
#' well = df$well,
#' plate_id = df$plate,
#' plate = 96)
b_grid <- function(data, well, plate_id, plate = 96) {
    stopifnot(is.vector(data))
    # assemble well / plate annotations for every value
    full_map <- plate_map_grid(data, well, plate_id)
    # force to factor so split() produces exactly one group per plate
    full_map$plate_label <- as.factor(full_map$plate_label)
    # b-score (median polish) each plate independently
    per_plate <- lapply(
        split(full_map, full_map$plate_label),
        function(one_plate) med_smooth(one_plate, plate = plate)
    )
    # recombine, keeping the plate identifier as a column
    smoothed <- list_to_dataframe(per_plate, col_name = "plate_label")
    raw_grid(data = smoothed$residual,
             well = smoothed$well,
             plate_id = smoothed$plate_label)
}
#' Converts list to a dataframe in a sensible way
#'
#' Given a list of dataframes with the same columns, this function will row bind
#' them together, and if passed a \code{col_name} argument, will produce a
#' column containing their original element name.
#'
#' @param l list of dataframes to be converted into single dataframe
#' @param col_name (optional) name of column to put element names under
#'
#' @return dataframe (a zero-row, zero-column data.frame for an empty list)
#'
list_to_dataframe <- function(l, col_name = NULL) {
    if (!is.list(l)) {
        # deparse() renders the caller's expression robustly (plain
        # paste() on a complex call would yield a multi-element message)
        stop(deparse(substitute(l)), " needs to be a list", call. = FALSE)
    }
    # Fix: for an empty list, do.call(rbind, list()) yields NULL and the
    # subsequent rownames<- assignment errored; return an empty data.frame.
    if (length(l) == 0L) {
        return(data.frame())
    }
    # if col_name is a string, create a new column from the element names
    # (a nameless list has NULL names, so the loop is a no-op, as before)
    if (!is.null(col_name)) {
        for (name in names(l)) {
            l[[name]][col_name] <- name
        }
    }
    # row-bind everything and discard the composite row names rbind builds
    out_df <- do.call(rbind, l)
    rownames(out_df) <- NULL
    return(out_df)
}
| /R/b_grid.R | no_license | jayhesselberth/platetools | R | false | false | 2,745 | r | #' Plots multiple b-scored normalised platemaps
#'
#' Transforms numerical values using the b-score normalisation process to
#' account for row and column effects. Uses well and plate labels to plot the
#' normalised values in the form of microtitre plates. Works for 96, 384 and
#' 1536 well plates.
#'
#' @param data Numerical values to be plotted
#' @param well Vector of well identifiers e.g "A01"
#' @param plate Number of wells in complete plate (96, 384 or 1536)
#' @param plate_id Vector of plate identifiers e.g "Plate_1"
#' @return ggplot plot
#'
#' @import ggplot2
#' @export
#'
#' @examples
#' df01 <- data.frame(well = num_to_well(1:96),
#' vals = rnorm(96),
#' plate = 1)
#'
#' df02 <- data.frame(well = num_to_well(1:96),
#' vals = rnorm(96),
#' plate = 2)
#'
#' df <- rbind(df01, df02)
#'
#' b_grid(data = df$vals,
#' well = df$well,
#' plate_id = df$plate,
#' plate = 96)
b_grid <- function(data, well, plate_id, plate = 96) {
stopifnot(is.vector(data))
# need to group_by plate_id, median polish, then return data.frame
# that can be passed to ggplot and use raw_grid
platemap <- plate_map_grid(data, well, plate_id)
# force to factor
platemap$plate_label <- as.factor(platemap$plate_label)
# split by plate_id
platemap_split <- split(platemap, platemap$plate_label)
# apply med_smooth to each dataframe, split by plate_id
med_smooth_list <- lapply(platemap_split, function(x){
med_smooth(x, plate = plate)
})
# list to dataframe
med_smooth_df <- list_to_dataframe(med_smooth_list,
col_name = "plate_label")
raw_grid(data = med_smooth_df$residual,
well = med_smooth_df$well,
plate_id = med_smooth_df$plate_label)
}
#' Converts list to a dataframe in a sensible way
#'
#' Given a list of dataframes with the same columns, this function will row bind
#' them together, and if passed a \code{col_name} arguement, will produce a
#' column containing their original element name
#'
#' @param l list of dataframes to be converted into single dataframe
#' @param col_name (optional) name of column to put element names under
#'
#' @return dataframe
#'
list_to_dataframe <- function(l, col_name = NULL) {
    if (!is.null(col_name) && FALSE) invisible(NULL) # (no-op; see notes below)
    if (!is.list(l)) {
        # deparse() renders the caller's expression robustly (plain
        # paste() on a complex call would yield a multi-element message)
        stop(deparse(substitute(l)), " needs to be a list", call. = FALSE)
    }
    # Fix: for an empty list, do.call(rbind, list()) yields NULL and the
    # subsequent rownames<- assignment errored; return an empty data.frame.
    if (length(l) == 0L) {
        return(data.frame())
    }
    # if col_name is a string, create a new column from the element names
    if (!is.null(col_name)) {
        for (name in names(l)) {
            l[[name]][col_name] <- name
        }
    }
    # row-bind everything and discard the composite row names rbind builds
    out_df <- do.call(rbind, l)
    rownames(out_df) <- NULL
    return(out_df)
}
|
#' Convert an arsenal::tableby object to a kable
#'
#' Renders a tableby summary as a kableExtra table. Rows of the summary
#' with no blank cells are the indented per-level value rows; every other
#' row is a variable header and is rendered in bold.
#'
#' @param tableby tableby object
#' @param label_translation A named list (or vector) where the name is the label in the output to be replaced in the pretty rendering by the character string value for the named element of the list, e.g., list(age = "Age(Years)", meansd = "Mean(SD)")
#' @param caption Title/caption for the table, defaulting to NULL (no title).
#' @param ref_label The table reference label
#' @return a kable object
#' @export
#'
tableby_to_kable <- function(tableby, label_translation, caption, ref_label) {
  # `summary_df` (renamed from `table`, which shadowed base::table)
  summary_df <-
    tableby %>%
    summary(labelTranslations = label_translation,
            text = TRUE) %>%
    as.data.frame()

  # Row numbers of the fully-populated (value) rows. Fix: passing extra
  # arguments through across(...) dots is deprecated since dplyr 1.1 --
  # use a purrr-style lambda instead.
  na_rows <-
    summary_df %>%
    janitor::clean_names() %>%
    tibble::rownames_to_column() %>%
    dplyr::mutate(dplyr::across(dplyr::everything(), ~ dplyr::na_if(.x, ""))) %>%
    tidyr::drop_na() %>%
    dplyr::pull(rowname)

  # TRUE for every row that is NOT a value row, i.e. the variable headers.
  bolds <- summary_df %>%
    janitor::clean_names() %>%
    tibble::rownames_to_column() %>%
    dplyr::select(rowname) %>%
    dplyr::mutate(bold = rowname %nin% na_rows) %>%
    dplyr::pull(bold)

  summary_df %>%
    kableExtra::kbl(booktabs = TRUE,
                    linesep = "",
                    caption = caption,
                    label = ref_label) %>%
    kableExtra::column_spec(1, bold = bolds)
}
| /R/tableby_to_kable.R | permissive | farhadsalimi/registryr | R | false | false | 1,524 | r | #' Convert an arsenal::tableby object to a kable
#'
#' Create a funnel plot from a dataframe containing at least two columns (i.e numerator and denominator)
#' It is currently working only with binary outcome, continuous outcomes will be supported in future
#'
#' @param tableby tableby object
#' @param label_translation A named list (or vector) where the name is the label in the output to be replaced in the pretty rendering by the character string value for the named element of the list, e.g., list(age = "Age(Years)", meansd = "Mean(SD)")
#' @param caption Title/caption for the table, defaulting to NULL (no title).
#'@param ref_label The table reference label
#' @return a kable object
#' @export
#'
#' @examples
#'
tableby_to_kable <- function(tableby, label_translation, caption, ref_label) {
table <-
tableby %>%
summary(labelTranslations = label_translation,
text = TRUE) %>%
as.data.frame()
na_rows <-
table %>%
janitor::clean_names() %>%
tibble::rownames_to_column() %>%
dplyr::mutate(dplyr::across(dplyr::everything(), dplyr::na_if, "")) %>%
tidyr::drop_na() %>%
dplyr::pull(rowname)
bolds <- table %>%
janitor::clean_names() %>%
tibble::rownames_to_column() %>%
dplyr::select(rowname) %>%
dplyr::mutate(bold = rowname %nin% na_rows) %>%
dplyr::pull(bold)
table %>%
kableExtra::kbl(booktabs = TRUE,
linesep = "",
caption = caption,
label = ref_label) %>%
kableExtra::column_spec(1, bold = bolds)
}
|
#' @title A `drake`-plan-like pipeline archetype
#' @description Simplify target specification in pipelines.
#' @details Allows targets with just targets and commands
#' to be written in the pipeline as `target = command` instead of
#' `tar_target(target, command)`. Also supports ordinary
#' target objects if they are unnamed.
#' `tar_plan(x = 1, y = 2, tar_target(z, 3), tar_render(r, "r.Rmd"))`
#' is equivalent to
#' `tar_pipeline(tar_target(x, 1), tar_target(y, 2), tar_target(z, 3), tar_render(r, "r.Rmd"))`. # nolint
#' @export
#' @return A `targets::tar_pipeline()` object.
#' @param ... Named and unnamed targets. All named targets must follow
#' the `drake`-plan-like `target = command` syntax, and all unnamed
#' arguments must be explicit calls to create target objects,
#' e.g. `tar_target()`, target archetypes like [tar_render()], or similar.
#' @examples
#' if (identical(Sys.getenv("TARCHETYPES_LONG_EXAMPLES"), "true")) {
#' targets::tar_dir({
#' lines <- c(
#' "---",
#' "title: report",
#' "output_format: html_document",
#' "---",
#' "",
#' "```{r}",
#' "targets::tar_read(data)",
#' "```"
#' )
#' writeLines(lines, "report.Rmd")
#' targets::tar_script({
#' library(tarchetypes)
#' tar_plan(
#' data = data.frame(x = seq_len(26), y = sample.int(26)),
#' means = colMeans(data),
#' tar_render(report, "report.Rmd")
#' )
#' })
#' targets::tar_make()
#' })
#' }
tar_plan <- function(...) {
  # Capture the unevaluated `...` expressions so that named
  # `target = command` pairs can be rewritten into tar_target() calls
  # before anything is evaluated.
  commands <- tar_plan_parse(match.call(expand.dots = FALSE)$...)
  # Evaluate each (possibly rewritten) call in the user's configured
  # target environment, yielding a list of target objects.
  targets <- lapply(commands, eval, envir = targets::tar_option_get("envir"))
  targets::tar_pipeline(targets)
}
# Rewrite the named `target = command` entries of a captured argument list
# into tar_target() calls; unnamed entries (explicit target objects) pass
# through untouched.
tar_plan_parse <- function(commands) {
  labels <- names(commands) %||% rep("", length(commands))
  has_label <- !is.na(labels) & nzchar(labels)
  commands[has_label] <- tar_plan_parse_named(commands[has_label])
  commands
}
# Turn every `name = command` entry into an unevaluated tar_target() call.
tar_plan_parse_named <- function(commands) {
  lapply(
    names(commands),
    function(name) tar_plan_parse_command(name, commands)
  )
}
# Build the call targets::tar_target(<name>, <command>) without evaluating
# the captured command expression.
tar_plan_parse_command <- function(name, commands) {
  substitute(
    targets::tar_target(name, command),
    env = list(name = rlang::sym(name), command = commands[[name]])
  )
}
| /R/tar_plan.R | permissive | limnoliver/tarchetypes | R | false | false | 2,162 | r | #' @title A `drake`-plan-like pipeline archetype
#' @description Simplify target specification in pipelines.
#' @details Allows targets with just targets and commands
#' to be written in the pipeline as `target = command` instead of
#' `tar_target(target, command)`. Also supports ordinary
#' target objects if they are unnamed.
#' `tar_plan(x = 1, y = 2, tar_target(z, 3), tar_render(r, "r.Rmd"))`
#' is equivalent to
#' `tar_pipeline(tar_target(x, 1), tar_target(y, 2), tar_target(z, 3), tar_render(r, "r.Rmd"))`. # nolint
#' @export
#' @return A `targets::tar_pipeline()` object.
#' @param ... Named and unnamed targets. All named targets must follow
#' the `drake`-plan-like `target = command` syntax, and all unnamed
#' arguments must be explicit calls to create target objects,
#' e.g. `tar_target()`, target archetypes like [tar_render()], or similar.
#' @examples
#' if (identical(Sys.getenv("TARCHETYPES_LONG_EXAMPLES"), "true")) {
#' targets::tar_dir({
#' lines <- c(
#' "---",
#' "title: report",
#' "output_format: html_document",
#' "---",
#' "",
#' "```{r}",
#' "targets::tar_read(data)",
#' "```"
#' )
#' writeLines(lines, "report.Rmd")
#' targets::tar_script({
#' library(tarchetypes)
#' tar_plan(
#' data = data.frame(x = seq_len(26), y = sample.int(26)),
#' means = colMeans(data),
#' tar_render(report, "report.Rmd")
#' )
#' })
#' targets::tar_make()
#' })
#' }
tar_plan <- function(...) {
commands <- tar_plan_parse(match.call(expand.dots = FALSE)$...)
targets <- lapply(commands, eval, envir = targets::tar_option_get("envir"))
targets::tar_pipeline(targets)
}
tar_plan_parse <- function(commands) {
names <- names(commands) %||% rep("", length(commands))
is_named <- !is.na(names) & nzchar(names)
commands[is_named] <- tar_plan_parse_named(commands[is_named])
commands
}
tar_plan_parse_named <- function(commands) {
lapply(names(commands), tar_plan_parse_command, commands = commands)
}
tar_plan_parse_command <- function(name, commands) {
env <- list(name = rlang::sym(name), command = commands[[name]])
substitute(targets::tar_target(name, command), env = env)
}
|
#the code to create the third plot -- three energy sub-meter series overlaid
#set working directory and read in data
#will need to adjust WD to wherever cloned directory is
setwd("C:/Users/wbradley/Desktop/datasciencecoursera/ExData_Plotting1")
# Read only a 2880-row window (2 days of per-minute readings) starting at
# row offset 66637, relabelling the columns; "?" marks missing values.
# NOTE(review): the skip offset presumably lands on the target dates --
# verify against the raw file.
plotdata <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", skip = 66637, nrows = 2880,
                       col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power","Voltage",
                                     "Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"), na.strings = "?")
# Combine the separate date and time columns into one timestamp.
plotdata$DateTime <- strptime(paste(plotdata$Date, plotdata$Time), format = "%d/%m/%Y %H:%M:%S")
png(filename = "plot3.png")
# Black line for sub-meter 1, then overlay sub-meters 2 (red) and 3 (blue).
with(plotdata, plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(plotdata, lines(DateTime, Sub_metering_2, col = "Red"))
with(plotdata, lines(DateTime, Sub_metering_3, col = "Blue"))
legend("topright", lty = 1,lwd = 2, legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), col = c("Black","Red","Blue"))
dev.off()
| /plot3.R | no_license | WJHBradley/ExData_Plotting1 | R | false | false | 1,115 | r | #the code to create the third plot
#set working directory and read in data
#will need to adjust WD to wherever cloned directory is
setwd("C:/Users/wbradley/Desktop/datasciencecoursera/ExData_Plotting1")
#decided to read in only relevant rows, and relabel columns
plotdata <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", skip = 66637, nrows = 2880,
col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power","Voltage",
"Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"), na.strings = "?")
plotdata$DateTime <- strptime(paste(plotdata$Date, plotdata$Time), format = "%d/%m/%Y %H:%M:%S")
png(filename = "plot3.png")
with(plotdata, plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(plotdata, lines(DateTime, Sub_metering_2, col = "Red"))
with(plotdata, lines(DateTime, Sub_metering_3, col = "Blue"))
legend("topright", lty = 1,lwd = 2, legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), col = c("Black","Red","Blue"))
dev.off()
|
##### this version attempts to optimize the code in any manner possible...
# Exhaustive best-subset regression over 18 candidate predictors of a
# movie's Lifetime box-office earnings: fit every non-empty subset
# (2^18 - 1 = 262,143 models), record each R-squared, pick the best, then
# hand-tune reduced models and build a lifetime-earnings prediction via an
# auxiliary model of search volume.
#clear the workspace of any residual data
# NOTE(review): rm(list=ls()) wipes the user's whole session; prefer
# running the script in a fresh R process.
rm(list=ls())
boxOfficeData<-read.csv("./data/boxOfficeData_2014.csv", header=TRUE, sep=",")
# NOTE(review): attach() is discouraged -- column names become ambiguous
# globals; prefer boxOfficeData$col or with().
attach(boxOfficeData)
cols<-colnames(boxOfficeData)
#read in the list of representative columns - these cols are a subset of the
## set of explanatory variables (42 total) - these 18 variables have been
## chosen as most relevant for the Lifetime earnings..
sampleSet<-as.vector(read.csv("./data/sample set.csv",header=TRUE,sep=","))
# Results accumulator: one row per fitted model, starting with a single
# placeholder row of zeros.
sampleModels<-matrix(ncol=2,data=rep(0))
colnames(sampleModels)<-c("R-squared","model")
#############using combinations of the 18 variables to arrive at the "best" model
## based on the respective R-squared values of the generated models.
i<-1
j<-1
k<-1
totalParams<-nrow(sampleSet)
numCombs<-0
cou<-0
modCou<-0
for (i in 1:totalParams)
{
  combi <- combn(totalParams,i)
  numCombs<-ncol(combi)
  # Pre-extend the accumulator by one zero row per model in this batch.
  # NOTE(review): growing a matrix with rbind in a loop is O(n^2);
  # preallocating all 2^totalParams rows up front would be much faster.
  for (l in 1:numCombs)
  {
    sampleModels<-rbind(sampleModels,c(rep(0)))
  }
  for (j in 1:numCombs)
  {
    modCou<-modCou+1
    #here, each column of combi is a set of expl vars for the model
    # the number of vars being the num of rows of combi...
    str1<-""
    for (k in 1:nrow(combi))
    {
      cou<-cou+1
      str1<-paste(str1,sampleSet[combi[k,j],],sep="+")
      otpt<-paste("cou=",cou,"i=",i,"j=",j,"k=",k,"data=",str1,sep=" ")
      print(otpt)
    }#k
    # Fit Lifetime ~ <subset> with no intercept (-1).
    fmla<-as.formula(paste("Lifetime~",str1,"-1"))
    sampleModel<-lm(fmla)
    sampleSummary<-summary(sampleModel)
    # NOTE(review): storing a string into column 2 coerces the entire
    # matrix to character, so the R-squared column is held as text.
    sampleModels[modCou,1]<-sampleSummary$r.squared
    sampleModels[modCou,2]<-toString(sampleSummary$terms)
  }#j
}#i
write.table(sampleModels,file="./data/residuals.csv",append=TRUE,row.names = FALSE,sep=",",qmethod="double")
dim(sampleModels)
#[1] 262144 2
indexBstMod<-which.max(sampleModels[,1])
#[1] 262143
bestModel<-sampleModels[indexBstMod,2]
######moving thro' combinations complete - the following code uses the recommended
## models and related deviants to arrive at the most useful model for
## ROI determination and earning prediction
#manually inspect the "bestModel" variable and construct required formula string
bstModFrmla<-"Lifetime ~ +mvs + holiday1 + Genre1 + sequel + larm1 + larf1 + dirr1 + tv_weeks + tv_spots + tv_channels + tv_grps + tv_spend + yt_trailerlikes + yt_moviesonglikes + fb_pagelikes + fb_pageposts + tw_faves + search_mvkywd - 1"
bestModel<-lm(bstModFrmla)
summary(bestModel)
# Reduced model keeping only the strongest predictors.
goodModFrmla<-"Lifetime ~ larm1 + larf1 + dirr1 + tv_channels + tw_faves + search_mvkywd - 1"
goodModel<-lm(goodModFrmla)
summary(goodModel)
# NOTE(review): this formula string is identical to bstModFrmla above --
# confirm whether a different variant was intended.
anoGoodModFrmla<-"Lifetime ~ +mvs + holiday1 + Genre1 + sequel + larm1 + larf1 + dirr1 + tv_weeks + tv_spots + tv_channels + tv_grps + tv_spend + yt_trailerlikes + yt_moviesonglikes + fb_pagelikes + fb_pageposts + tw_faves + search_mvkywd - 1"
anoGoodModel<-lm(anoGoodModFrmla)
summary(anoGoodModel)
yetAnoGoodModel<-lm(Lifetime~larm1+search_mvkywd-1)
summary(yetAnoGoodModel)
##conclusion - use "good model"
#summary(goodModel)
####### supplementary models
##determine what drives search_mvkywd - since search is of significant importance
## in predicting/ influencing Lifetime earnings - attempting to understand what
## "actionable" variables are driving search itself..
supMod<-lm(search_mvkywd~tw_follws+rad6_comm+rad6_news+rad6_blgs+rad6_vds+rad6_twsentiP+rad6_twsentiN-1)
summary(supMod)
anoSupMod<-lm(search_mvkywd~yt_trailerviews+fb_pagelikes+tw_follws+tw_faves+fg+fb_pageposts-1)
summary(anoSupMod)
##model for prediction - due to lack of data for search mv keyword
# Predict search volume from its drivers, then plug that prediction into
# the reduced earnings model.
coefs<-coef(anoSupMod)
searchVol<-coefs[1]*yt_trailerviews+coefs[2]*fb_pagelikes+coefs[3]*tw_follws+coefs[4]*tw_faves+coefs[5]*fg+coefs[6]*fb_pageposts
coefs<-coef(goodModel)
lifetimeEarnings<-coefs[1]*larm1+coefs[2]*larf1+coefs[3]*dirr1+coefs[4]*tv_channels+coefs[5]*tw_faves+coefs[6]*searchVol
| /Applied Econometrics/ASSIGNMENT 1 - go bollywood/latest/bollywood/movieEarningPredictionv2.1.R | no_license | snimkar0924/PGCDS | R | false | false | 3,905 | r | ##### this version attempts to optimize the code in any manner possible...
#clear the workspace of any residual data
rm(list=ls())
boxOfficeData<-read.csv("./data/boxOfficeData_2014.csv", header=TRUE, sep=",")
attach(boxOfficeData)
cols<-colnames(boxOfficeData)
#read in the list of representative columns - these cols are a subset of the
## set of explanatory variables (42 total) - these 18 variables have been
## chosen as most relevant for the Lifetime earnings..
sampleSet<-as.vector(read.csv("./data/sample set.csv",header=TRUE,sep=","))
sampleModels<-matrix(ncol=2,data=rep(0))
colnames(sampleModels)<-c("R-squared","model")
#############using combinations of the 18 variables to arrive at the "best" model
## based on the respective R-squared values of the generated models.
i<-1
j<-1
k<-1
totalParams<-nrow(sampleSet)
numCombs<-0
cou<-0
modCou<-0
for (i in 1:totalParams)
{
combi <- combn(totalParams,i)
numCombs<-ncol(combi)
for (l in 1:numCombs)
{
sampleModels<-rbind(sampleModels,c(rep(0)))
}
for (j in 1:numCombs)
{
modCou<-modCou+1
#here, each column of combi is a set of expl vars for the model
# the number of vars being the num of rows of combi...
str1<-""
for (k in 1:nrow(combi))
{
cou<-cou+1
str1<-paste(str1,sampleSet[combi[k,j],],sep="+")
otpt<-paste("cou=",cou,"i=",i,"j=",j,"k=",k,"data=",str1,sep=" ")
print(otpt)
}#k
fmla<-as.formula(paste("Lifetime~",str1,"-1"))
sampleModel<-lm(fmla)
sampleSummary<-summary(sampleModel)
sampleModels[modCou,1]<-sampleSummary$r.squared
sampleModels[modCou,2]<-toString(sampleSummary$terms)
}#j
}#i
# Persist the R^2 / formula table for every variable combination tried above,
# then pick the "best" model and compare it against a few hand-chosen ones.
# NOTE(review): append=TRUE means re-running the script keeps stacking rows
# onto residuals.csv — confirm that is intended.
write.table(sampleModels,file="./data/residuals.csv",append=TRUE,row.names = FALSE,sep=",",qmethod="double")
dim(sampleModels)
#[1] 262144 2
# Index of the combination with the highest raw R^2.
# NOTE(review): raw R^2 is monotone in the number of predictors, so this will
# almost always select the largest model (262143 = all terms); adjusted R^2,
# AIC or BIC would be a fairer criterion — TODO confirm with the author.
indexBstMod<-which.max(sampleModels[,1])
#[1] 262143
# Formula string of the top-R^2 model (column 2 holds the term string).
# NOTE: this character value is overwritten by the fitted lm object below.
bestModel<-sampleModels[indexBstMod,2]
######moving thro' combinations complete - the following code uses the recommended
## models and related deviants to arrive at the most useful model for
## ROI determination and earning prediction
#manually inspect the "bestModel" variable and construct required formula string
# All lm() calls below pass a character formula (coerced via formula()) and
# rely on the predictor vectors being present in the global environment
# (no data= argument); "- 1" suppresses the intercept in every model.
bstModFrmla<-"Lifetime ~ +mvs + holiday1 + Genre1 + sequel + larm1 + larf1 + dirr1 + tv_weeks + tv_spots + tv_channels + tv_grps + tv_spend + yt_trailerlikes + yt_moviesonglikes + fb_pagelikes + fb_pageposts + tw_faves + search_mvkywd - 1"
bestModel<-lm(bstModFrmla)
summary(bestModel)
# A smaller candidate model with only six predictors.
goodModFrmla<-"Lifetime ~ larm1 + larf1 + dirr1 + tv_channels + tw_faves + search_mvkywd - 1"
goodModel<-lm(goodModFrmla)
summary(goodModel)
# NOTE(review): anoGoodModFrmla is byte-identical to bstModFrmla, so
# anoGoodModel refits the same model — possibly a leftover from editing.
anoGoodModFrmla<-"Lifetime ~ +mvs + holiday1 + Genre1 + sequel + larm1 + larf1 + dirr1 + tv_weeks + tv_spots + tv_channels + tv_grps + tv_spend + yt_trailerlikes + yt_moviesonglikes + fb_pagelikes + fb_pageposts + tw_faves + search_mvkywd - 1"
anoGoodModel<-lm(anoGoodModFrmla)
summary(anoGoodModel)
# Minimal two-predictor model for comparison.
yetAnoGoodModel<-lm(Lifetime~larm1+search_mvkywd-1)
summary(yetAnoGoodModel)
##conclusion - use "good model"
#summary(goodModel)
####### supplementary models
##determine what drives search_mvkywd - since search is of significant importance
## in predicting/ influencing Lifetime earnings - attempting to understand what
## "actionable" variables are driving search itself..
supMod<-lm(search_mvkywd~tw_follws+rad6_comm+rad6_news+rad6_blgs+rad6_vds+rad6_twsentiP+rad6_twsentiN-1)
summary(supMod)
anoSupMod<-lm(search_mvkywd~yt_trailerviews+fb_pagelikes+tw_follws+tw_faves+fg+fb_pageposts-1)
summary(anoSupMod)
##model for prediction - due to lack of data for search mv keyword
# Predict search volume from the second supplementary model's coefficients.
# NOTE(review): the coefs[1]..coefs[6] order must match the formula's term
# order exactly; safer would be coefs["yt_trailerviews"] etc.
coefs<-coef(anoSupMod)
searchVol<-coefs[1]*yt_trailerviews+coefs[2]*fb_pagelikes+coefs[3]*tw_follws+coefs[4]*tw_faves+coefs[5]*fg+coefs[6]*fb_pageposts
# coefs is reused (overwritten) here for the earnings prediction from goodModel.
coefs<-coef(goodModel)
lifetimeEarnings<-coefs[1]*larm1+coefs[2]*larf1+coefs[3]*dirr1+coefs[4]*tv_channels+coefs[5]*tw_faves+coefs[6]*searchVol
|
# Keep ELR lab results whose authorizing facility looks like an urgent-care
# site, then bucket each record into a named urgent-care chain.
# Matching is done on the lower-cased facility name, so every pattern below
# must stay lower-case.
labs <- elr_linelist %>%
  filter(str_detect(str_to_lower(auth_facility), "urgent care|urgentcare|afc|gohealth|physicianone|prohealth uc|physician one|carewell|alliance uc|uc-#")) %>%
  mutate(
    auth_facility = str_to_lower(auth_facility),
    # case_when replaces the original 10-deep nested ifelse(): conditions are
    # evaluated top-to-bottom and the first match wins, exactly matching the
    # original nesting order; anything unmatched falls through to "Other UC".
    urgentcaregroup = case_when(
      str_detect(auth_facility, "afc")                        ~ "AFC",
      str_detect(auth_facility, "gohealth")                   ~ "GoHealth",
      str_detect(auth_facility, "physicianone|physician one") ~ "PhysicianOne",
      str_detect(auth_facility, "carewell")                   ~ "Carewell",
      str_detect(auth_facility, "docs|doc's")                 ~ "DOCS",
      str_detect(auth_facility, "kathy's|kathys")             ~ "Kathy's",
      str_detect(auth_facility, "priority")                   ~ "Priority",
      str_detect(auth_facility, "stony creek")                ~ "Stony Creek",
      str_detect(auth_facility, "velocity")                   ~ "Velocity",
      str_detect(auth_facility, "westport")                   ~ "Westport",
      TRUE                                                    ~ "Other UC"
    )
  )
# Cross-tab of performing lab vs authorizing facility, and per-chain counts.
auths <- count(labs, lab_facility, auth_facility)
ucs <- count(labs, urgentcaregroup)
ucs2 <- count(labs, urgentcaregroup, lab_facility) | /urgentcares.R | no_license | HuanW1/SDE | R | false | false | 1,683 | r | labs <- elr_linelist %>%
filter(str_detect(str_to_lower(auth_facility), "urgent care|urgentcare|afc|gohealth|physicianone|prohealth uc|physician one|carewell|alliance uc|uc-#")) %>%
mutate (auth_facility = str_to_lower(auth_facility),
urgentcaregroup = ifelse(str_detect(auth_facility, "afc"), "AFC",
ifelse(str_detect(auth_facility, "gohealth"), "GoHealth",
ifelse(str_detect(auth_facility, "physicianone|physician one"), "PhysicianOne",
ifelse(str_detect(auth_facility, "carewell"), "Carewell",
ifelse(str_detect(auth_facility, "docs|doc's"), "DOCS",
ifelse(str_detect(auth_facility, "kathy's|kathys"), "Kathy's",
ifelse(str_detect(auth_facility, "priority"), "Priority",
ifelse(str_detect(auth_facility, "stony creek"), "Stony Creek",
ifelse(str_detect(auth_facility, "velocity"), "Velocity",
ifelse(str_detect(auth_facility,"westport"), "Westport", "Other UC")
))))))))))
auths <- count(labs, lab_facility, auth_facility)
ucs <- count(labs, urgentcaregroup)
ucs2 <- count(labs, urgentcaregroup, lab_facility) |
#' Build topic-model covariates from clinical notes (OMOP CDM NOTE table)
#'
#' Custom covariate builder in the FeatureExtraction style. It pulls
#' NOTE_TEXT for the cohort subjects, cleans the text (HTML tags/entities,
#' lower-casing, removal of stray Korean jamo and non-word characters),
#' optionally tokenizes into n-grams, removes words shared by every note
#' with the same NOTE_TITLE, applies frequency and dictionary filters, and
#' finally emits either raw term counts (useTextToVec) or per-note LDA
#' topic proportions (useTopicModeling). When
#' covariateSettings$buildTopicModeling is TRUE, an LDA model is first
#' fitted on a comparison cohort and saved to topicModelExportRds.
#'
#' @param connection        DatabaseConnector connection to the CDM server.
#' @param oracleTempSchema  Not referenced in this function; kept for API
#'   compatibility with FeatureExtraction covariate builders.
#' @param cdmDatabaseSchema Schema holding NOTE and CONCEPT_ANCESTOR.
#' @param cohortTable       Cohort table name (default "cohort").
#' @param cohortId          Cohort definition id; -1 selects all rows.
#' @param cdmVersion        CDM version string; not referenced below.
#' @param rowIdField        Column used as the covariate row id.
#' @param covariateSettings Settings list; fields read here include
#'   buildTopicModeling, ComparisonCohortSchema/Table/Id, noteConceptId,
#'   sampleSize, nGram, buildTopidModelMinFrac/MaxFrac (note the "Topid"
#'   typo in the field names — kept because callers use them), useDictionary,
#'   limitedMedicalTermOnlyLanguage, useTextToVec, useTopicModeling,
#'   optimalTopicValue, numberOfTopics, topicModelExportRds,
#'   existingTopicModel.
#' @param aggregated        Must be FALSE; aggregation is not supported.
#' @return A list of class "covariateData" (covariates, covariateRef,
#'   analysisRef, metaData) with ffdf-backed tables.
#' @export
getTopicFromNoteSettings <- function(connection,
                                     oracleTempSchema = NULL,
                                     cdmDatabaseSchema,
                                     cohortTable = "cohort",
                                     cohortId = -1,
                                     cdmVersion = "5",
                                     rowIdField = "subject_id",
                                     covariateSettings,
                                     aggregated = FALSE){
  writeLines('Constructing TopicFromNote')
  # ---- Stage 0 (optional): fit an LDA model on a comparison cohort ----
  #buildTopicModeling
  if(covariateSettings$buildTopicModeling == TRUE){
    ComparisonCohortId = covariateSettings$ComparisonCohortId
    # Notes are joined to the comparison cohort on person id and cohort start
    # date; NOTE_TYPE is restricted to descendants of noteConceptId.
    sql <- paste(
      'SELECT',
      '{@sampleSize != -1} ? {TOP @sampleSize}',
      " subject_id AS row_id,",
      'n.NOTE_TEXT AS covariate_id,',
      '1 AS covariate_value,',
      'n.NOTE_TITLE AS note_title',
      'FROM @cdm_database_schema.NOTE n',
      'JOIN',
      '{@cohort_schema != ""} ? {@cohort_schema.}@cohort_table c',
      'ON n.person_id = c.subject_id',
      'AND n.NOTE_DATE = c.COHORT_START_DATE',
      'WHERE NOTE_TYPE_CONCEPT_ID = (SELECT DESCENDANT_CONCEPT_ID FROM @cdm_database_schema.CONCEPT_ANCESTOR WHERE ANCESTOR_CONCEPT_ID IN (@note_concept_id) )',
      '{@cohort_id != -1} ? {AND cohort_definition_id = @cohort_id}',
      ';'
    )
    sql <- SqlRender::renderSql(sql,
                                cohort_schema = covariateSettings$ComparisonCohortSchema,
                                cohort_table = covariateSettings$ComparisonCohortTable,
                                cohort_id = ComparisonCohortId,
                                note_concept_id = covariateSettings$noteConceptId,
                                sampleSize=covariateSettings$sampleSize,
                                cdm_database_schema = cdmDatabaseSchema)$sql
    sql <- SqlRender::translateSql(sql, targetDialect = attr(connection, "dbms"))$sql
    # Retrieve the covariate:
    rawCovariates <- DatabaseConnector::querySql.ffdf(connection, sql)
    colnames(rawCovariates)<-SqlRender::snakeCaseToCamelCase(colnames(rawCovariates))
    ########################
    # Collapse the ff character vector into an in-memory list (character data
    # cannot live in an ff vector), cleaning each note on the way.
    #ff in list #because Characters can not be inserted into the ff package.
    # NOTE(review): the split string literal in the first entity-gsub below
    # spans two lines — it matches an embedded CR/LF (apparently a garbled
    # '&amp;#13;' HTML entity); kept verbatim.
    rawcovariateId <- ff::ffapply(x[i1:i2],X= rawCovariates$covariateId, RETURN=TRUE, CFUN="list",
                                  AFUN = notePreprocessing <- function(covariateId){
                                    covariateId <- gsub('<[^<>]*>',' ',covariateId) #Remove Tag
                                    #Remove html special characters
                                    covariateId <- gsub('
', " ", covariateId)
                                    covariateId <- gsub('<', " ", covariateId)
                                    covariateId <- gsub('>', " ", covariateId)
                                    covariateId <- gsub('&', " ", covariateId)
                                    covariateId <- gsub('"', " ", covariateId)
                                    #lower
                                    covariateId<- tolower(covariateId)
                                    #remove hangle typo
                                    covariateId <- gsub('[\u314f-\u3163]*','',covariateId)
                                    covariateId <- gsub('[\u3131-\u314E]*','',covariateId)
                                    #if add other language, add unicode
                                    covariateId <- gsub('[^\uac00-\ud7a3a-zA-Z]',' ',covariateId)
                                    #The spacing is only once ## vector
                                    covariateId <- stringr::str_replace_all(covariateId,"[[:space:]]{1,}"," ")
                                    # sub() (not gsub) strips only the first space, i.e. a leading one.
                                    covariateId <- sub(' ','',covariateId)
                                    return(covariateId)
                                  })
    names(rawcovariateId) <- 'note'
    rawcovariateId <- rawcovariateId$'note'
    # ---- Tokenize: n-grams or plain whitespace split ----
    #nGram
    if(covariateSettings$nGram > 1L){
      rawcovariateId <- lapply(rawcovariateId, function(x) RWeka::NGramTokenizer(x,RWeka::Weka_control(min=1,max=covariateSettings$nGram)))
      rawcovariateId <- lapply(rawcovariateId, function(x) sub(' ','',x))
      rawcovariateId <- lapply(rawcovariateId, unique)
    }
    else{
      rawcovariateId <- strsplit(rawcovariateId,' ')
    }
    # Words present in EVERY note sharing a NOTE_TITLE are treated as template
    # boilerplate and removed from each note of that title group.
    #Remove common words by NOTE_TITLE
    noteTitleDf <- data.frame('word' = levels(rawCovariates$noteTitle), 'levels' = seq(levels(rawCovariates$noteTitle)),stringsAsFactors = F)
    detailsType <- dplyr::left_join(data.frame('word' = as.vector(rawCovariates[['noteTitle']][]),stringsAsFactors = F),noteTitleDf,by ='word')
    names(rawcovariateId) <- detailsType$levels
    CommonWord <- list()
    # NOTE(review): grep(i, names(...)) does substring matching, so title
    # group 1 also matches groups 10, 11, ... — consider exact matching
    # (e.g. names(rawcovariateId) == i). TODO confirm intended behavior.
    for(i in sort(as.numeric(unique(names(rawcovariateId))))){
      CommonWord[[i]]<- Reduce(intersect, rawcovariateId[grep(i,names(rawcovariateId))])
    }
    for(i in 1:length(rawcovariateId)){
      rawcovariateId[[i]] <- setdiff(unique(rawcovariateId[[i]]),unlist(CommonWord[as.numeric(names(rawcovariateId[i]))]))
    }
    # ---- Document-frequency filter (bounds given as fractions of vocabulary) ----
    #Frequency
    if( (covariateSettings$buildTopidModelMinFrac != 0) | (covariateSettings$buildTopidModelMaxFrac != 1)){
      #unique
      #rawcovariateId <- lapply(rawcovariateId, unique)
      MinValue <- as.integer(length(unique(unlist(rawcovariateId))) * covariateSettings$buildTopidModelMinFrac)
      MaxValue <- as.integer(length(unique(unlist(rawcovariateId))) * covariateSettings$buildTopidModelMaxFrac)
      wordFreq <- data.frame(table(unlist(rawcovariateId)),stringsAsFactors = F)
      MoreThanMinWord <- wordFreq[wordFreq$Freq >= MinValue,]
      MoreThanMinLessThanMaxWord <- MoreThanMinWord[MoreThanMinWord$Freq<=MaxValue,]
      MoreThanMinLessThanMaxWordVec<-as.vector(MoreThanMinLessThanMaxWord[,1])
      #unique
      rawcovariateId<-lapply(rawcovariateId, function(x) intersect(x, MoreThanMinLessThanMaxWordVec))
    }
    else{
      #unique
      rawcovariateId <- lapply(rawcovariateId, unique)
    }
    # ---- Optional medical-dictionary restriction ----
    #dictionary
    if(covariateSettings$useDictionary == TRUE){
      # dictionaryForLanguage() is a package-internal helper — presumably
      # returning a term list per language; verify against the package source.
      dictionary <- lapply(covariateSettings$limitedMedicalTermOnlyLanguage, function(x) dictionaryForLanguage(x))
      dictionary <- unlist(rapply(dictionary, as.character, classes="factor", how="replace"))
      ## Extract up to words containing dictionary words
      # dictionary <- paste(dictionary,collapse = '|')
      # rawcovariateId <-lapply(rawcovariateId, function(x) grep(dictionary,x,value = T))
      ##################################################
      ##Extraction of words after reorganization with words existing in the certificate
      SimplifyDictionary <- lapply(rawcovariateId, function(x) intersect(unique(x),dictionary))
      SimplifyDictionary <- lapply(SimplifyDictionary, function(x) paste(x,collapse = '|'))
      for(i in 1:length(rawcovariateId)){
        rawcovariateId[[i]] <- grep(SimplifyDictionary[[i]],rawcovariateId[[i]],value=T)
      }
      #################################################################################
      #PreProcessing End, word List -> docs
      rawcovariateId <- lapply(rawcovariateId, function(x) paste(x,collapse =' '))
    }
    else{
      #PreProcessing End, word List -> docs
      rawcovariateId <- lapply(rawcovariateId, function(x) paste(x,collapse =' '))
    }
    # ---- Build the document-term matrix and fit the LDA model ----
    #make Corpus
    my_docs <- tm::VectorSource(rawcovariateId)
    my_corpus <- tm::VCorpus(my_docs)
    DTM <- tm::DocumentTermMatrix(my_corpus)
    Encoding(DTM$dimnames$Terms) = 'UTF-8'
    wordList <- data.frame('word' = colnames(DTM),'num' = rep(1:ncol(DTM)))
    covariates <- data.frame('rowId' = DTM$i,'covariateId' = DTM$j,'covariateValue' = DTM$v)
    data <- Matrix::sparseMatrix(i=covariates$rowId,
                                 j=covariates$covariateId,
                                 x=covariates$covariateValue, #add 0.1 to avoid to treated as binary values
                                 dims=c(max(covariates$rowId), max(covariates$covariateId))) # edit this to max(map$newIds)
    # Covariate ids are the word index prefixed with 9999 to avoid clashing
    # with standard FeatureExtraction analysis ids.
    colnames(data) <- as.numeric(paste0(9999,wordList$num))
    if(covariateSettings$optimalTopicValue == TRUE){
      # Grid-search k in {2,12,...,102} by log-likelihood using topicmodels,
      # then refit the chosen k with text2vec.
      topicLange <- seq(2,102, by=10)
      best.model <- lapply(topicLange, function(k){topicmodels::LDA(data, k)})
      best.model.logLik <- as.data.frame(as.matrix(lapply(best.model, topicmodels::logLik)))
      best.model.logLik.df <- data.frame(topics=topicLange, LL=as.numeric(as.matrix(best.model.logLik)))
      numberOfTopics = best.model.logLik.df[which.max(best.model.logLik.df$LL),]$topics
      # NOTE(review): detaching a package from inside a function mutates the
      # caller's search path — a surprising global side effect.
      detach("package:topicmodels", unload=TRUE)
      lda_model = text2vec::LDA$new(n_topics = numberOfTopics, doc_topic_prior = 0.1, topic_word_prior = 0.01)
    }
    else if(covariateSettings$optimalTopicValue == FALSE){
      numberOfTopics = covariateSettings$numberOfTopics
      lda_model = text2vec::LDA$new(n_topics = numberOfTopics, doc_topic_prior = 0.1, topic_word_prior = 0.01) # covariateSettings$numberOfTopics -> optimal
    }
    doc_topic_distr = lda_model$fit_transform(x = data, n_iter = 1000,
                                              convergence_tol = 0.001, n_check_convergence = 25,
                                              progressbar = FALSE)
    if (aggregated)
      stop("Aggregation not supported")
    # Persist the fitted model plus the vocabulary needed to re-apply it.
    result <- list(topicModel = lda_model,
                   topicDistr = doc_topic_distr,
                   wordList = wordList,
                   nGramSetting = covariateSettings$nGram,
                   numberOfTopics = numberOfTopics
    )
    saveRDS(result,covariateSettings$topicModelExportRds)
    message(paste('your topic model is saved at',covariateSettings$topicModelExportRds))
    # Load the just-saved model into the (local copy of) covariateSettings so
    # the main extraction stage below reuses it.
    covariateSettings$existingTopicModel <- readRDS(covariateSettings$topicModelExportRds)
  }
  # ---- Stage 1: pull and preprocess notes for the target cohort ----
  #SQL query should be revised to extract only the latest record
  #SQL to construct the covariate:
  sql <- paste(
    'SELECT',
    '{@sampleSize != -1} ? {TOP @sampleSize}',
    " @row_id_field AS row_id,",
    'n.NOTE_TEXT AS covariate_id,',
    '1 AS covariate_value,',
    'NOTE_TITLE AS note_title',
    'FROM @cdm_database_schema.NOTE n',
    'JOIN @cohort_table c',
    'ON n.person_id = c.subject_id',
    'AND n.NOTE_DATE = c.COHORT_START_DATE',
    'WHERE NOTE_TYPE_CONCEPT_ID = (SELECT DESCENDANT_CONCEPT_ID FROM @cdm_database_schema.CONCEPT_ANCESTOR WHERE ANCESTOR_CONCEPT_ID IN (@note_concept_id) )',
    '{@cohort_id != -1} ? {AND cohort_definition_id = @cohort_id}'
  )
  sql <- SqlRender::renderSql(sql,
                              cohort_table = cohortTable,
                              cohort_id = cohortId,
                              note_concept_id = covariateSettings$noteConceptId,
                              row_id_field = rowIdField,
                              sampleSize=covariateSettings$sampleSize,
                              cdm_database_schema = cdmDatabaseSchema)$sql
  sql <- SqlRender::translateSql(sql, targetDialect = attr(connection, "dbms"))$sql
  # Retrieve the covariate:
  rawCovariates <- DatabaseConnector::querySql.ffdf(connection, sql)
  colnames(rawCovariates)<-SqlRender::snakeCaseToCamelCase(colnames(rawCovariates))
  ########################
  # Same text-cleaning pipeline as in stage 0 (kept duplicated; a shared
  # helper would be a candidate refactor).
  #ff in list #because Characters can not be inserted into the ff package.
  rawcovariateId <- ff::ffapply(x[i1:i2],X= rawCovariates$covariateId, RETURN=TRUE, CFUN="list",
                                AFUN = notePreprocessing <- function(covariateId){
                                  covariateId <- gsub('<[^<>]*>',' ',covariateId) #Remove Tag
                                  #Remove html special characters
                                  covariateId <- gsub('
', " ", covariateId)
                                  covariateId <- gsub('<', " ", covariateId)
                                  covariateId <- gsub('>', " ", covariateId)
                                  covariateId <- gsub('&', " ", covariateId)
                                  covariateId <- gsub('"', " ", covariateId)
                                  #lower
                                  covariateId<- tolower(covariateId)
                                  #remove hangle typo
                                  covariateId <- gsub('[\u314f-\u3163]*','',covariateId)
                                  covariateId <- gsub('[\u3131-\u314E]*','',covariateId)
                                  #if add other language, add unicode
                                  covariateId <- gsub('[^\uac00-\ud7a3a-zA-Z]',' ',covariateId)
                                  #The spacing is only once ## vector
                                  covariateId <- stringr::str_replace_all(covariateId,"[[:space:]]{1,}"," ")
                                  covariateId <- sub(' ','',covariateId)
                                  return(covariateId)
                                })
  names(rawcovariateId) <- 'note'
  rawcovariateId <- rawcovariateId$'note'
  #Only word (not Word order) # Currently, only parts that do not care about word order are implemented.
  #Word Spacing -> nGram -> Use unique words by each diagnosis -> Frequency -> Dictionary -> List to Docs -> Corpus
  #nGram
  if(covariateSettings$nGram > 1L){
    rawcovariateId <- lapply(rawcovariateId, function(x) RWeka::NGramTokenizer(x,RWeka::Weka_control(min=1,max=covariateSettings$nGram)))
    rawcovariateId <- lapply(rawcovariateId, function(x) sub(' ','',x))
    rawcovariateId <- lapply(rawcovariateId, unique)
  }
  else{
    rawcovariateId <- strsplit(rawcovariateId,' ')
  }
  # Boilerplate removal per NOTE_TITLE group (same caveat as stage 0: grep()
  # substring-matches the group index).
  #Remove common words by NOTE_TITLE
  noteTitleDf <- data.frame('word' = levels(rawCovariates$noteTitle), 'levels' = seq(levels(rawCovariates$noteTitle)),stringsAsFactors = F)
  detailsType <- dplyr::left_join(data.frame('word' = as.vector(rawCovariates[['noteTitle']][]),stringsAsFactors = F),noteTitleDf,by ='word')
  names(rawcovariateId) <- detailsType$levels
  CommonWord <- list()
  for(i in sort(as.numeric(unique(names(rawcovariateId))))){
    CommonWord[[i]]<- Reduce(intersect, rawcovariateId[grep(i,names(rawcovariateId))])
  }
  for(i in 1:length(rawcovariateId)){
    rawcovariateId[[i]] <- setdiff(unique(rawcovariateId[[i]]),unlist(CommonWord[as.numeric(names(rawcovariateId[i]))]))
  }
  #Frequency
  if( (covariateSettings$buildTopidModelMinFrac != 0) | (covariateSettings$buildTopidModelMaxFrac != 1)){
    #unique
    #rawcovariateId <- lapply(rawcovariateId, unique)
    MinValue <- as.integer(length(unique(unlist(rawcovariateId))) * covariateSettings$buildTopidModelMinFrac)
    MaxValue <- as.integer(length(unique(unlist(rawcovariateId))) * covariateSettings$buildTopidModelMaxFrac)
    wordFreq <- data.frame(table(unlist(rawcovariateId)),stringsAsFactors = F)
    MoreThanMinWord <- wordFreq[wordFreq$Freq >= MinValue,]
    MoreThanMinLessThanMaxWord <- MoreThanMinWord[MoreThanMinWord$Freq<=MaxValue,]
    MoreThanMinLessThanMaxWordVec<-as.vector(MoreThanMinLessThanMaxWord[,1])
    #unique
    rawcovariateId<-lapply(rawcovariateId, function(x) intersect(x, MoreThanMinLessThanMaxWordVec))
  }
  else{
    #unique
    rawcovariateId <- lapply(rawcovariateId, unique)
  }
  #dictionary
  if(covariateSettings$useDictionary == TRUE){
    dictionary <- lapply(covariateSettings$limitedMedicalTermOnlyLanguage, function(x) dictionaryForLanguage(x))
    dictionary <- unlist(rapply(dictionary, as.character, classes="factor", how="replace"))
    ## Extract up to words containing dictionary words
    # dictionary <- paste(dictionary,collapse = '|')
    # rawcovariateId <-lapply(rawcovariateId, function(x) grep(dictionary,x,value = T))
    ##################################################
    ##Extraction of words after reorganization with words existing in the certificate
    SimplifyDictionary <- lapply(rawcovariateId, function(x) intersect(unique(x),dictionary))
    SimplifyDictionary <- lapply(SimplifyDictionary, function(x) paste(x,collapse = '|'))
    for(i in 1:length(rawcovariateId)){
      rawcovariateId[[i]] <- grep(SimplifyDictionary[[i]],rawcovariateId[[i]],value=T)
    }
    #################################################################################
    #PreProcessing End, word List -> docs
    rawcovariateId <- lapply(rawcovariateId, function(x) paste(x,collapse =' '))
  }
  else{
    #PreProcessing End, word List -> docs
    rawcovariateId <- lapply(rawcovariateId, function(x) paste(x,collapse =' '))
  }
  # ---- Stage 2: DTM, then term-count and/or topic covariates ----
  #make Corpus
  my_docs <- tm::VectorSource(rawcovariateId)
  my_corpus <- tm::VCorpus(my_docs)
  DTM <- tm::DocumentTermMatrix(my_corpus)
  Encoding(DTM$dimnames$Terms) = 'UTF-8'
  wordList <- data.frame('word' = colnames(DTM),'num' = rep(1:ncol(DTM)))
  covariates <- data.frame('rowId' = DTM$i,'covariateId' = DTM$j,'covariateValue' = DTM$v)
  if(covariateSettings$useTextToVec == TRUE){
    ##Text2Vec
    # Raw term-count covariates: ids are 9999-prefixed word indices.
    covariates$covariateId <- as.numeric(paste0(9999,DTM$j))
    covariates<-ff::as.ffdf(covariates)
    covariateRef <- data.frame(covariateId = as.numeric(paste0(9999,wordList$num)),
                               covariateName = paste0("NOTE-",wordList$word),
                               analysisId = 0,
                               conceptId = 0)
    covariateRef <- ff::as.ffdf(covariateRef)
  }
  # NOTE(review): if useTextToVec and useTopicModeling are both TRUE, the
  # branch below reads `covariates` after it was converted to an ffdf and
  # then overwrites it — confirm the two flags are meant to be exclusive.
  if(covariateSettings$useTopicModeling == TRUE){
    if(is.null(covariateSettings$existingTopicModel)){
      # Fit a fresh LDA model on this cohort's DTM.
      data <- Matrix::sparseMatrix(i=covariates$rowId,
                                   j=covariates$covariateId,
                                   x=covariates$covariateValue, #add 0.1 to avoid to treated as binary values
                                   dims=c(max(covariates$rowId), max(covariates$covariateId))) # edit this to max(map$newIds)
      colnames(data) <- as.numeric(paste0(9999,wordList$num))
      if(covariateSettings$optimalTopicValue == TRUE){
        topicLange <- seq(2,102, by=10)
        best.model <- lapply(topicLange, function(k){topicmodels::LDA(data, k)})
        best.model.logLik <- as.data.frame(as.matrix(lapply(best.model, topicmodels::logLik)))
        best.model.logLik.df <- data.frame(topics=topicLange, LL=as.numeric(as.matrix(best.model.logLik)))
        numberOfTopics = best.model.logLik.df[which.max(best.model.logLik.df$LL),]$topics
        detach("package:topicmodels", unload=TRUE)
        lda_model = text2vec::LDA$new(n_topics = numberOfTopics, doc_topic_prior = 0.1, topic_word_prior = 0.01)
      }
      else if(covariateSettings$optimalTopicValue == FALSE){
        numberOfTopics = covariateSettings$numberOfTopics
        lda_model = text2vec::LDA$new(n_topics = numberOfTopics, doc_topic_prior = 0.1, topic_word_prior = 0.01) # covariateSettings$numberOfTopics -> optimal
      }
    }
    if (!is.null(covariateSettings$existingTopicModel)) {
      # Re-apply a previously fitted model: map this cohort's words onto the
      # stored vocabulary (unmatched words are dropped by merge()).
      covariates$covariateId <- dplyr::left_join(data.frame('num'=DTM$j),wordList,by ='num')$word
      mergedCov<-merge(covariates,covariateSettings$existingTopicModel$wordList,by.x="covariateId",by.y = "word")
      covariateIdInt<-as.numeric(mergedCov$num)
      if(nrow(mergedCov) == 0){
        stop('buildTopicModel And your covariate Word None match')
      }
      data <- Matrix::sparseMatrix(i=mergedCov$rowId,
                                   j=covariateIdInt,
                                   x=mergedCov$covariateValue, #add 0.1 to avoid to treated as binary values
                                   dims=c(max(mergedCov$rowId), max(nrow(covariateSettings$existingTopicModel$wordList)))) # edit this to max(map$newIds)
      colnames(data) <- as.numeric(paste0(9999, rep(1:nrow(covariateSettings$existingTopicModel$wordList))))
      lda_model = covariateSettings$existingTopicModel$topicModel
    }
    # NOTE(review): fit_transform also updates word-topic counts when an
    # existing model is supplied — verify a transform-only call is not wanted.
    doc_topic_distr = lda_model$fit_transform(x = data, n_iter = 1000,
                                              convergence_tol = 0.001, n_check_convergence = 25,
                                              progressbar = FALSE)
    # Reshape the document x topic matrix to long (rowId, covariateId, value)
    # form; topic covariate ids are 9999-prefixed topic indices.
    doc_topic_distr_df <- data.frame(doc_topic_distr)
    covariateIds<-as.numeric(paste0(9999,as.numeric(1:length(doc_topic_distr_df))))
    colnames(doc_topic_distr_df)<-covariateIds
    doc_topic_distr_df$rowId<- seq(max(covariates$rowId))
    covariates<-reshape2::melt(doc_topic_distr_df,id.var = "rowId",
                               variable.name="covariateId",
                               value.name = "covariateValue")
    covariates$covariateId<-as.numeric(as.character(covariates$covariateId))
    # Drop zero-probability rows before converting to ffdf.
    covariates<-covariates[covariates$covariateValue!=0,]
    rownames(covariates) <- rep(1:nrow(covariates))
    covariates<-ff::as.ffdf(covariates)
    ##need to remove 0
    covariateRef <- data.frame(covariateId = covariateIds,
                               covariateName = paste0("Topic",covariateIds),
                               analysisId = 0,
                               conceptId = 0)
    covariateRef <- ff::as.ffdf(covariateRef)
  }
  # Construct analysis reference:
  analysisRef <- data.frame(analysisId = 0,
                            analysisName = "Features from Note",
                            domainId = "Note",
                            startDay = 0,
                            endDay = 0,
                            isBinary = "N",
                            missingMeansZero = "Y")
  analysisRef <- ff::as.ffdf(analysisRef)
  #}
  if (aggregated)
    stop("Aggregation not supported")
  # Assemble the FeatureExtraction-compatible result object.
  metaData <- list(sql = sql, call = match.call())
  result <- list(covariates = covariates,
                 covariateRef = covariateRef,
                 analysisRef = analysisRef,
                 metaData = metaData)
  class(result) <- "covariateData"
  return(result)
}
| /R/getTopicFromNoteSettings.R | permissive | ABMI/noteCovariateExtraction | R | false | false | 23,668 | r | #' Custom createCoveriate Settings
#'
#' Builds custom covariate settings from clinical note text (topic-model covariates).
#' @param connection,oracleTempSchema,cdmDatabaseSchema,cohortTable,cohortId,cdmVersion,rowIdField,covariateSettings,aggregated
#' @keywords createCovariateSetting
#' @examples
#' getTopicFromNoteSettings()
#' @export
getTopicFromNoteSettings <- function(connection,
oracleTempSchema = NULL,
cdmDatabaseSchema,
cohortTable = "cohort",
cohortId = -1,
cdmVersion = "5",
rowIdField = "subject_id",
covariateSettings,
aggregated = FALSE){
writeLines('Constructing TopicFromNote')
#buildTopicModeling
if(covariateSettings$buildTopicModeling == TRUE){
ComparisonCohortId = covariateSettings$ComparisonCohortId
sql <- paste(
'SELECT',
'{@sampleSize != -1} ? {TOP @sampleSize}',
" subject_id AS row_id,",
'n.NOTE_TEXT AS covariate_id,',
'1 AS covariate_value,',
'n.NOTE_TITLE AS note_title',
'FROM @cdm_database_schema.NOTE n',
'JOIN',
'{@cohort_schema != ""} ? {@cohort_schema.}@cohort_table c',
'ON n.person_id = c.subject_id',
'AND n.NOTE_DATE = c.COHORT_START_DATE',
'WHERE NOTE_TYPE_CONCEPT_ID = (SELECT DESCENDANT_CONCEPT_ID FROM @cdm_database_schema.CONCEPT_ANCESTOR WHERE ANCESTOR_CONCEPT_ID IN (@note_concept_id) )',
'{@cohort_id != -1} ? {AND cohort_definition_id = @cohort_id}',
';'
)
sql <- SqlRender::renderSql(sql,
cohort_schema = covariateSettings$ComparisonCohortSchema,
cohort_table = covariateSettings$ComparisonCohortTable,
cohort_id = ComparisonCohortId,
note_concept_id = covariateSettings$noteConceptId,
sampleSize=covariateSettings$sampleSize,
cdm_database_schema = cdmDatabaseSchema)$sql
sql <- SqlRender::translateSql(sql, targetDialect = attr(connection, "dbms"))$sql
# Retrieve the covariate:
rawCovariates <- DatabaseConnector::querySql.ffdf(connection, sql)
colnames(rawCovariates)<-SqlRender::snakeCaseToCamelCase(colnames(rawCovariates))
########################
#ff in list #because Characters can not be inserted into the ff package.
rawcovariateId <- ff::ffapply(x[i1:i2],X= rawCovariates$covariateId, RETURN=TRUE, CFUN="list",
AFUN = notePreprocessing <- function(covariateId){
covariateId <- gsub('<[^<>]*>',' ',covariateId) #Remove Tag
#Remove html special characters
covariateId <- gsub('
', " ", covariateId)
covariateId <- gsub('<', " ", covariateId)
covariateId <- gsub('>', " ", covariateId)
covariateId <- gsub('&', " ", covariateId)
covariateId <- gsub('"', " ", covariateId)
#lower
covariateId<- tolower(covariateId)
#remove hangle typo
covariateId <- gsub('[\u314f-\u3163]*','',covariateId)
covariateId <- gsub('[\u3131-\u314E]*','',covariateId)
#if add other language, add unicode
covariateId <- gsub('[^\uac00-\ud7a3a-zA-Z]',' ',covariateId)
#The spacing is only once ## vector
covariateId <- stringr::str_replace_all(covariateId,"[[:space:]]{1,}"," ")
covariateId <- sub(' ','',covariateId)
return(covariateId)
})
names(rawcovariateId) <- 'note'
rawcovariateId <- rawcovariateId$'note'
#nGram
if(covariateSettings$nGram > 1L){
rawcovariateId <- lapply(rawcovariateId, function(x) RWeka::NGramTokenizer(x,RWeka::Weka_control(min=1,max=covariateSettings$nGram)))
rawcovariateId <- lapply(rawcovariateId, function(x) sub(' ','',x))
rawcovariateId <- lapply(rawcovariateId, unique)
}
else{
rawcovariateId <- strsplit(rawcovariateId,' ')
}
#Remove common words by NOTE_TITLE
noteTitleDf <- data.frame('word' = levels(rawCovariates$noteTitle), 'levels' = seq(levels(rawCovariates$noteTitle)),stringsAsFactors = F)
detailsType <- dplyr::left_join(data.frame('word' = as.vector(rawCovariates[['noteTitle']][]),stringsAsFactors = F),noteTitleDf,by ='word')
names(rawcovariateId) <- detailsType$levels
CommonWord <- list()
for(i in sort(as.numeric(unique(names(rawcovariateId))))){
CommonWord[[i]]<- Reduce(intersect, rawcovariateId[grep(i,names(rawcovariateId))])
}
for(i in 1:length(rawcovariateId)){
rawcovariateId[[i]] <- setdiff(unique(rawcovariateId[[i]]),unlist(CommonWord[as.numeric(names(rawcovariateId[i]))]))
}
#Frequency
if( (covariateSettings$buildTopidModelMinFrac != 0) | (covariateSettings$buildTopidModelMaxFrac != 1)){
#unique
#rawcovariateId <- lapply(rawcovariateId, unique)
MinValue <- as.integer(length(unique(unlist(rawcovariateId))) * covariateSettings$buildTopidModelMinFrac)
MaxValue <- as.integer(length(unique(unlist(rawcovariateId))) * covariateSettings$buildTopidModelMaxFrac)
wordFreq <- data.frame(table(unlist(rawcovariateId)),stringsAsFactors = F)
MoreThanMinWord <- wordFreq[wordFreq$Freq >= MinValue,]
MoreThanMinLessThanMaxWord <- MoreThanMinWord[MoreThanMinWord$Freq<=MaxValue,]
MoreThanMinLessThanMaxWordVec<-as.vector(MoreThanMinLessThanMaxWord[,1])
#unique
rawcovariateId<-lapply(rawcovariateId, function(x) intersect(x, MoreThanMinLessThanMaxWordVec))
}
else{
#unique
rawcovariateId <- lapply(rawcovariateId, unique)
}
#dictionary
if(covariateSettings$useDictionary == TRUE){
dictionary <- lapply(covariateSettings$limitedMedicalTermOnlyLanguage, function(x) dictionaryForLanguage(x))
dictionary <- unlist(rapply(dictionary, as.character, classes="factor", how="replace"))
## Extract up to words containing dictionary words
# dictionary <- paste(dictionary,collapse = '|')
# rawcovariateId <-lapply(rawcovariateId, function(x) grep(dictionary,x,value = T))
##################################################
##Extraction of words after reorganization with words existing in the certificate
SimplifyDictionary <- lapply(rawcovariateId, function(x) intersect(unique(x),dictionary))
SimplifyDictionary <- lapply(SimplifyDictionary, function(x) paste(x,collapse = '|'))
for(i in 1:length(rawcovariateId)){
rawcovariateId[[i]] <- grep(SimplifyDictionary[[i]],rawcovariateId[[i]],value=T)
}
#################################################################################
#PreProcessing End, word List -> docs
rawcovariateId <- lapply(rawcovariateId, function(x) paste(x,collapse =' '))
}
else{
#PreProcessing End, word List -> docs
rawcovariateId <- lapply(rawcovariateId, function(x) paste(x,collapse =' '))
}
#make Corpus
my_docs <- tm::VectorSource(rawcovariateId)
my_corpus <- tm::VCorpus(my_docs)
DTM <- tm::DocumentTermMatrix(my_corpus)
Encoding(DTM$dimnames$Terms) = 'UTF-8'
wordList <- data.frame('word' = colnames(DTM),'num' = rep(1:ncol(DTM)))
covariates <- data.frame('rowId' = DTM$i,'covariateId' = DTM$j,'covariateValue' = DTM$v)
data <- Matrix::sparseMatrix(i=covariates$rowId,
j=covariates$covariateId,
x=covariates$covariateValue, #add 0.1 to avoid to treated as binary values
dims=c(max(covariates$rowId), max(covariates$covariateId))) # edit this to max(map$newIds)
colnames(data) <- as.numeric(paste0(9999,wordList$num))
if(covariateSettings$optimalTopicValue == TRUE){
topicLange <- seq(2,102, by=10)
best.model <- lapply(topicLange, function(k){topicmodels::LDA(data, k)})
best.model.logLik <- as.data.frame(as.matrix(lapply(best.model, topicmodels::logLik)))
best.model.logLik.df <- data.frame(topics=topicLange, LL=as.numeric(as.matrix(best.model.logLik)))
numberOfTopics = best.model.logLik.df[which.max(best.model.logLik.df$LL),]$topics
detach("package:topicmodels", unload=TRUE)
lda_model = text2vec::LDA$new(n_topics = numberOfTopics, doc_topic_prior = 0.1, topic_word_prior = 0.01)
}
else if(covariateSettings$optimalTopicValue == FALSE){
numberOfTopics = covariateSettings$numberOfTopics
lda_model = text2vec::LDA$new(n_topics = numberOfTopics, doc_topic_prior = 0.1, topic_word_prior = 0.01) # covariateSettings$numberOfTopics -> optimal
}
doc_topic_distr = lda_model$fit_transform(x = data, n_iter = 1000,
convergence_tol = 0.001, n_check_convergence = 25,
progressbar = FALSE)
if (aggregated)
stop("Aggregation not supported")
result <- list(topicModel = lda_model,
topicDistr = doc_topic_distr,
wordList = wordList,
nGramSetting = covariateSettings$nGram,
numberOfTopics = numberOfTopics
)
saveRDS(result,covariateSettings$topicModelExportRds)
message(paste('your topic model is saved at',covariateSettings$topicModelExportRds))
covariateSettings$existingTopicModel <- readRDS(covariateSettings$topicModelExportRds)
}
#SQL query should be revised to extract only the latest record
#SQL to construct the covariate:
sql <- paste(
'SELECT',
'{@sampleSize != -1} ? {TOP @sampleSize}',
" @row_id_field AS row_id,",
'n.NOTE_TEXT AS covariate_id,',
'1 AS covariate_value,',
'NOTE_TITLE AS note_title',
'FROM @cdm_database_schema.NOTE n',
'JOIN @cohort_table c',
'ON n.person_id = c.subject_id',
'AND n.NOTE_DATE = c.COHORT_START_DATE',
'WHERE NOTE_TYPE_CONCEPT_ID = (SELECT DESCENDANT_CONCEPT_ID FROM @cdm_database_schema.CONCEPT_ANCESTOR WHERE ANCESTOR_CONCEPT_ID IN (@note_concept_id) )',
'{@cohort_id != -1} ? {AND cohort_definition_id = @cohort_id}'
)
sql <- SqlRender::renderSql(sql,
cohort_table = cohortTable,
cohort_id = cohortId,
note_concept_id = covariateSettings$noteConceptId,
row_id_field = rowIdField,
sampleSize=covariateSettings$sampleSize,
cdm_database_schema = cdmDatabaseSchema)$sql
sql <- SqlRender::translateSql(sql, targetDialect = attr(connection, "dbms"))$sql
# Retrieve the covariate:
rawCovariates <- DatabaseConnector::querySql.ffdf(connection, sql)
colnames(rawCovariates)<-SqlRender::snakeCaseToCamelCase(colnames(rawCovariates))
########################
#ff in list #because Characters can not be inserted into the ff package.
rawcovariateId <- ff::ffapply(x[i1:i2],X= rawCovariates$covariateId, RETURN=TRUE, CFUN="list",
AFUN = notePreprocessing <- function(covariateId){
covariateId <- gsub('<[^<>]*>',' ',covariateId) #Remove Tag
#Remove html special characters
covariateId <- gsub('
', " ", covariateId)
covariateId <- gsub('<', " ", covariateId)
covariateId <- gsub('>', " ", covariateId)
covariateId <- gsub('&', " ", covariateId)
covariateId <- gsub('"', " ", covariateId)
#lower
covariateId<- tolower(covariateId)
#remove hangle typo
covariateId <- gsub('[\u314f-\u3163]*','',covariateId)
covariateId <- gsub('[\u3131-\u314E]*','',covariateId)
#if add other language, add unicode
covariateId <- gsub('[^\uac00-\ud7a3a-zA-Z]',' ',covariateId)
#The spacing is only once ## vector
covariateId <- stringr::str_replace_all(covariateId,"[[:space:]]{1,}"," ")
covariateId <- sub(' ','',covariateId)
return(covariateId)
})
names(rawcovariateId) <- 'note'
rawcovariateId <- rawcovariateId$'note'
#Only word (not Word order) # Currently, only parts that do not care about word order are implemented.
#Word Spacing -> nGram -> Use unique words by each diagnosis -> Frequency -> Dictionary -> List to Docs -> Corpus
#nGram
if(covariateSettings$nGram > 1L){
rawcovariateId <- lapply(rawcovariateId, function(x) RWeka::NGramTokenizer(x,RWeka::Weka_control(min=1,max=covariateSettings$nGram)))
rawcovariateId <- lapply(rawcovariateId, function(x) sub(' ','',x))
rawcovariateId <- lapply(rawcovariateId, unique)
}
else{
rawcovariateId <- strsplit(rawcovariateId,' ')
}
#Remove common words by NOTE_TITLE
noteTitleDf <- data.frame('word' = levels(rawCovariates$noteTitle), 'levels' = seq(levels(rawCovariates$noteTitle)),stringsAsFactors = F)
detailsType <- dplyr::left_join(data.frame('word' = as.vector(rawCovariates[['noteTitle']][]),stringsAsFactors = F),noteTitleDf,by ='word')
names(rawcovariateId) <- detailsType$levels
CommonWord <- list()
for(i in sort(as.numeric(unique(names(rawcovariateId))))){
CommonWord[[i]]<- Reduce(intersect, rawcovariateId[grep(i,names(rawcovariateId))])
}
for(i in 1:length(rawcovariateId)){
rawcovariateId[[i]] <- setdiff(unique(rawcovariateId[[i]]),unlist(CommonWord[as.numeric(names(rawcovariateId[i]))]))
}
#Frequency
if( (covariateSettings$buildTopidModelMinFrac != 0) | (covariateSettings$buildTopidModelMaxFrac != 1)){
#unique
#rawcovariateId <- lapply(rawcovariateId, unique)
MinValue <- as.integer(length(unique(unlist(rawcovariateId))) * covariateSettings$buildTopidModelMinFrac)
MaxValue <- as.integer(length(unique(unlist(rawcovariateId))) * covariateSettings$buildTopidModelMaxFrac)
wordFreq <- data.frame(table(unlist(rawcovariateId)),stringsAsFactors = F)
MoreThanMinWord <- wordFreq[wordFreq$Freq >= MinValue,]
MoreThanMinLessThanMaxWord <- MoreThanMinWord[MoreThanMinWord$Freq<=MaxValue,]
MoreThanMinLessThanMaxWordVec<-as.vector(MoreThanMinLessThanMaxWord[,1])
#unique
rawcovariateId<-lapply(rawcovariateId, function(x) intersect(x, MoreThanMinLessThanMaxWordVec))
}
else{
#unique
rawcovariateId <- lapply(rawcovariateId, unique)
}
#dictionary
if(covariateSettings$useDictionary == TRUE){
dictionary <- lapply(covariateSettings$limitedMedicalTermOnlyLanguage, function(x) dictionaryForLanguage(x))
dictionary <- unlist(rapply(dictionary, as.character, classes="factor", how="replace"))
## Extract up to words containing dictionary words
# dictionary <- paste(dictionary,collapse = '|')
# rawcovariateId <-lapply(rawcovariateId, function(x) grep(dictionary,x,value = T))
##################################################
##Extraction of words after reorganization with words existing in the certificate
SimplifyDictionary <- lapply(rawcovariateId, function(x) intersect(unique(x),dictionary))
SimplifyDictionary <- lapply(SimplifyDictionary, function(x) paste(x,collapse = '|'))
for(i in 1:length(rawcovariateId)){
rawcovariateId[[i]] <- grep(SimplifyDictionary[[i]],rawcovariateId[[i]],value=T)
}
#################################################################################
#PreProcessing End, word List -> docs
rawcovariateId <- lapply(rawcovariateId, function(x) paste(x,collapse =' '))
}
else{
#PreProcessing End, word List -> docs
rawcovariateId <- lapply(rawcovariateId, function(x) paste(x,collapse =' '))
}
#make Corpus
my_docs <- tm::VectorSource(rawcovariateId)
my_corpus <- tm::VCorpus(my_docs)
DTM <- tm::DocumentTermMatrix(my_corpus)
Encoding(DTM$dimnames$Terms) = 'UTF-8'
wordList <- data.frame('word' = colnames(DTM),'num' = rep(1:ncol(DTM)))
covariates <- data.frame('rowId' = DTM$i,'covariateId' = DTM$j,'covariateValue' = DTM$v)
if(covariateSettings$useTextToVec == TRUE){
##Text2Vec
covariates$covariateId <- as.numeric(paste0(9999,DTM$j))
covariates<-ff::as.ffdf(covariates)
covariateRef <- data.frame(covariateId = as.numeric(paste0(9999,wordList$num)),
covariateName = paste0("NOTE-",wordList$word),
analysisId = 0,
conceptId = 0)
covariateRef <- ff::as.ffdf(covariateRef)
}
if(covariateSettings$useTopicModeling == TRUE){
if(is.null(covariateSettings$existingTopicModel)){
data <- Matrix::sparseMatrix(i=covariates$rowId,
j=covariates$covariateId,
x=covariates$covariateValue, #add 0.1 to avoid to treated as binary values
dims=c(max(covariates$rowId), max(covariates$covariateId))) # edit this to max(map$newIds)
colnames(data) <- as.numeric(paste0(9999,wordList$num))
if(covariateSettings$optimalTopicValue == TRUE){
topicLange <- seq(2,102, by=10)
best.model <- lapply(topicLange, function(k){topicmodels::LDA(data, k)})
best.model.logLik <- as.data.frame(as.matrix(lapply(best.model, topicmodels::logLik)))
best.model.logLik.df <- data.frame(topics=topicLange, LL=as.numeric(as.matrix(best.model.logLik)))
numberOfTopics = best.model.logLik.df[which.max(best.model.logLik.df$LL),]$topics
detach("package:topicmodels", unload=TRUE)
lda_model = text2vec::LDA$new(n_topics = numberOfTopics, doc_topic_prior = 0.1, topic_word_prior = 0.01)
}
else if(covariateSettings$optimalTopicValue == FALSE){
numberOfTopics = covariateSettings$numberOfTopics
lda_model = text2vec::LDA$new(n_topics = numberOfTopics, doc_topic_prior = 0.1, topic_word_prior = 0.01) # covariateSettings$numberOfTopics -> optimal
}
}
if (!is.null(covariateSettings$existingTopicModel)) {
covariates$covariateId <- dplyr::left_join(data.frame('num'=DTM$j),wordList,by ='num')$word
mergedCov<-merge(covariates,covariateSettings$existingTopicModel$wordList,by.x="covariateId",by.y = "word")
covariateIdInt<-as.numeric(mergedCov$num)
if(nrow(mergedCov) == 0){
stop('buildTopicModel And your covariate Word None match')
}
data <- Matrix::sparseMatrix(i=mergedCov$rowId,
j=covariateIdInt,
x=mergedCov$covariateValue, #add 0.1 to avoid to treated as binary values
dims=c(max(mergedCov$rowId), max(nrow(covariateSettings$existingTopicModel$wordList)))) # edit this to max(map$newIds)
colnames(data) <- as.numeric(paste0(9999, rep(1:nrow(covariateSettings$existingTopicModel$wordList))))
lda_model = covariateSettings$existingTopicModel$topicModel
}
doc_topic_distr = lda_model$fit_transform(x = data, n_iter = 1000,
convergence_tol = 0.001, n_check_convergence = 25,
progressbar = FALSE)
doc_topic_distr_df <- data.frame(doc_topic_distr)
covariateIds<-as.numeric(paste0(9999,as.numeric(1:length(doc_topic_distr_df))))
colnames(doc_topic_distr_df)<-covariateIds
doc_topic_distr_df$rowId<- seq(max(covariates$rowId))
covariates<-reshape2::melt(doc_topic_distr_df,id.var = "rowId",
variable.name="covariateId",
value.name = "covariateValue")
covariates$covariateId<-as.numeric(as.character(covariates$covariateId))
covariates<-covariates[covariates$covariateValue!=0,]
rownames(covariates) <- rep(1:nrow(covariates))
covariates<-ff::as.ffdf(covariates)
##need to remove 0
covariateRef <- data.frame(covariateId = covariateIds,
covariateName = paste0("Topic",covariateIds),
analysisId = 0,
conceptId = 0)
covariateRef <- ff::as.ffdf(covariateRef)
}
# Construct analysis reference:
analysisRef <- data.frame(analysisId = 0,
analysisName = "Features from Note",
domainId = "Note",
startDay = 0,
endDay = 0,
isBinary = "N",
missingMeansZero = "Y")
analysisRef <- ff::as.ffdf(analysisRef)
#}
if (aggregated)
stop("Aggregation not supported")
# Construct analysis reference:
metaData <- list(sql = sql, call = match.call())
result <- list(covariates = covariates,
covariateRef = covariateRef,
analysisRef = analysisRef,
metaData = metaData)
class(result) <- "covariateData"
return(result)
}
|
# HackerRank-style Naive Bayes classifier.
# Reads CSV rows from stdin (the first line is a header/record count and is
# dropped). Each remaining line has 18 comma-separated fields; column 18 is
# the class label, and the sentinel "-1" marks unlabeled rows that must be
# classified. One numeric class level is printed per unlabeled row.
library(e1071)

# Read everything from stdin and drop the first line.
x <- suppressWarnings(readLines(file("stdin")))
x <- x[-1]

# Split each line on "," and reshape into an 18-column character matrix,
# one row per input line. (Fixed: the original passed a stray "," that was
# silently consumed as unlist()'s `recursive` argument.)
x <- matrix(unlist(strsplit(x, ",")), byrow = TRUE, ncol = 18)

# Rows labeled "-1" form the test set; everything else is training data.
# Note: -1 is coerced to "-1" for the character comparison below.
dfTest <- matrix(x[x[, 18] == "-1", ], ncol = 18)
x <- x[x[, 18] != -1, ]

# Fit a Naive Bayes model predicting column V18 from all other columns.
# A small Laplace correction avoids zero probabilities for feature values
# unseen in the training data.
data_df <- as.data.frame(x)
model <- naiveBayes(V18 ~ ., data = data_df, laplace = 0.1)

# Predict each unlabeled row's class and print its numeric factor level,
# one per line (cat's default separator puts a space before the newline,
# matching the expected judge output).
result <- predict(model, as.data.frame(dfTest), type = "class")
for (ch in result)
cat(as.numeric(ch), "\n")
| /Programming Assignment 7/NiaveBayesClassifier.R | no_license | ShrashtiSinghal/Data-Mining | R | false | false | 823 | r | require(e1071)
# Naive Bayes classifier over a 17-feature CSV with the class in column 18;
# rows labeled "-1" are the unlabeled test set to be classified.
# Enter your code here. Read input from STDIN. Print output to STDOUT
x <- suppressWarnings(readLines(file("stdin")));
# Drop the first input line (header / record count).
x <- x[-1];
# Split each CSV line and reshape into an 18-column character matrix.
# NOTE(review): the second argument "," is silently consumed as unlist()'s
# `recursive` parameter -- almost certainly unintended, though the result
# matches the default behavior here.
x <- matrix(unlist(strsplit(x,","),","),byrow=TRUE,ncol=18);
#append(x,matrix(x[x[,18] == "-1",],ncol=18))
#x <- rbind(x, c("bass",0,0,1,0,0,1,1,1,1,0,0,1,0,1,0,0,-1))
#print(x)
#print(subset(x, x[,18] == -1));
#print(matrix(x[x[,18] == "-1",],ncol=18));
# Rows whose label is "-1" are the ones to classify.
dfTest <- matrix(x[x[,18] == "-1",],ncol=18)
#dfTest <- split(mydata,mydata$V18);
#print(dfTest);
# Keep only labeled rows for training (-1 is coerced to "-1" here).
x <- x[x[,18]!=-1,]
#print(x)
data_df=as.data.frame(x)
#print(data_df)
# Fit Naive Bayes predicting V18 from all other columns; Laplace smoothing
# guards against zero probabilities for unseen feature values.
model <- naiveBayes(V18 ~ ., data = data_df, laplace = 0.1)
#print(as.data.frame(t(dfTest)))
result <- predict(model, as.data.frame(dfTest),type = c("class"))
#print(result)
# Print the numeric factor level of each prediction, one per line.
for(ch in result)
cat(as.numeric(ch),"\n")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/events.R
\name{read_isruc_metadata}
\alias{read_isruc_metadata}
\title{Read ISRUC-Sleep dataset records metadata.}
\usage{
read_isruc_metadata(dir)
}
\arguments{
\item{dir}{ISRUC dataset directory.}
}
\value{
A dataframe containing records metadata.
}
\description{
Read all the subgroups XLSX metadata files and concatenates the dataframes into one.
}
| /man/read_isruc_metadata.Rd | permissive | Jun-Lizst/sleepr | R | false | true | 431 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/events.R
\name{read_isruc_metadata}
\alias{read_isruc_metadata}
\title{Read ISRUC-Sleep dataset records metadata.}
\usage{
read_isruc_metadata(dir)
}
\arguments{
\item{dir}{ISRUC dataset directory.}
}
\value{
A dataframe containing records metadata.
}
\description{
Read all the subgroups XLSX metadata files and concatenates the dataframes into one.
}
|
#' Read dates and forcing from a filename
#'
#' Extracts the "YYYYMMDDHHMM-YYYYMMDDHHMM" timestamp block assumed to sit
#' just before the 3-character file extension and expands it into an hourly
#' POSIXct sequence, tagged with the forcing identifier taken from the
#' leading part of the filename.
#'
#' @param dir character scalar: path/name of a raw file. Assumed layout is
#'   "<forcing>-...-<from>-<to>.<ext>" with a 25-character timestamp block at
#'   positions nchar-27 .. nchar-3 -- TODO confirm against real file names.
#'
#' @return a data.table with columns `date` (hourly POSIXct sequence) and
#'   `forcing` (label recycled from the filename prefix).
#' @export
#'
#' @examples
#' # create_dates_forcing("pr-ECEARTH-190001010000-190012312300.nc")
create_dates_forcing <- function(dir){
  # Split the "from-to" timestamp block into its two YYYYMMDDHHMM halves.
  # (Fixed: the body previously read `dates`, `fls[i]` and `n`, none of
  # which exist inside this function, and called sapply() without a FUN.)
  Dates <- strsplit(substr(dir, start = nchar(dir) - 27, stop = nchar(dir) - 3), '-')
  Date_ID <- data.table::data.table(
    date = seq(as.POSIXct(Dates[[1]][1], format = "%Y%m%d%H%M"),
               as.POSIXct(Dates[[1]][2], format = "%Y%m%d%H%M"), by = 'hour'),
    # Forcing label: first "-"-separated token of the filename once the
    # timestamp block and extension are stripped (TODO confirm which token
    # is actually wanted).
    forcing = strsplit(substr(dir, start = 1, stop = nchar(dir) - 29), split = "-")[[1]][1])
  Date_ID
}
| /R/dates_and_forcing.R | no_license | hanel/advanced.r.stuff | R | false | false | 627 | r | #' Reading dates and focing from filename
#'
#' @param dir dir with raw files
#'
#' @return a database with Date(POSIX) and forcing from filenames
#' @export create_dates_forcing
#'
#' @examples
create_dates_forcing <- function(dir){
  # Split the trailing "YYYYMMDDHHMM-YYYYMMDDHHMM" timestamp block (positions
  # nchar-27 .. nchar-3, just before a 3-character extension) into its two
  # halves. (Fixed: the body previously read `dates`, `fls[i]` and `n`, none
  # of which exist inside this function, and called sapply() without a FUN.)
  Dates <- strsplit(substr(dir, start = nchar(dir) - 27, stop = nchar(dir) - 3), '-')
  Date_ID <- data.table::data.table(
    date = seq(as.POSIXct(Dates[[1]][1], format = "%Y%m%d%H%M"),
               as.POSIXct(Dates[[1]][2], format = "%Y%m%d%H%M"), by = 'hour'),
    # Forcing label: first "-"-separated token of the filename once the
    # timestamp block and extension are stripped (TODO confirm which token
    # is actually wanted).
    forcing = strsplit(substr(dir, start = 1, stop = nchar(dir) - 29), split = "-")[[1]][1])
  # Return the table visibly (an assignment as the last expression returns
  # its value invisibly).
  Date_ID
}
|
\name{center_data}
\alias{center_data}
\title{Centers the observations in a matrix by their respective class sample means}
\usage{
center_data(x, y)
}
\arguments{
\item{x}{matrix containing the training data. The rows
are the sample observations, and the columns are the
features.}
\item{y}{vector of class labels for each training
observation}
}
\value{
matrix with each observation centered by its corresponding
class sample mean
}
\description{
Centers the observations in a matrix by their respective
class sample means
}
| /man/center_data.Rd | no_license | wfudong/sparsediscrim | R | false | false | 544 | rd | \name{center_data}
\alias{center_data}
\title{Centers the observations in a matrix by their respective class sample means}
\usage{
center_data(x, y)
}
\arguments{
\item{x}{matrix containing the training data. The rows
are the sample observations, and the columns are the
features.}
\item{y}{vector of class labels for each training
observation}
}
\value{
matrix with observations centered by its corresponding
class sample mean
}
\description{
Centers the observations in a matrix by their respective
class sample means
}
|
# Finding Andy's Data
# Jazmyn Winzer
# jiwinzer@email.arizona.com
# 2020/3/23
library(tidyverse)

# Load the raw butterfly observations and preview the columns.
eBut <- read_csv("eBut Data.csv")
glimpse(eBut)

# Keep only the rows recorded by Andrew Hogan, then save them out.
andy_obs <- filter(eBut, Observer == "Andrew Hogan")
write.csv(andy_obs, "eBut_ah.csv")
| /Saving Andy's Data.R | no_license | jazwin26/Tohono-Chul- | R | false | false | 249 | r | #Finding Andy's Data
#Jazmyn Winzer
#jiwinzer@email.arizona.com
#2020/3/23
library(tidyverse)
# Load the raw butterfly observations and preview the columns.
eBut <- read_csv("eBut Data.csv")
glimpse(eBut)
# Keep only the rows recorded by Andrew Hogan and save them out.
eBut.AH = eBut %>% filter(Observer == "Andrew Hogan")
write.csv(eBut.AH, "eBut_ah.csv")
|
library(tidyverse)
library(janitor)

# This data contains monthly temperature deviations in °C from the respective
# average temperature in a region during 1991-2020. The data from before 1991
# initially used another reference time period, but values were re-calculated
# later on. Note: the variable us49 excludes Hawaii, apparently since "its land
# area is less than that of a satellite grid square, so it would have virtually
# no impact on the overall national results."

# Read in data for lower troposphere (last accessed Aug-04-21). Row number 513
# is where the garbage starts, hence the limit to 512 rows.
x <- read_table(read_lines("https://www.nsstc.uah.edu/data/msu/v6.0/tmt/uahncdc_mt_6.0.txt", n_max = 512)) %>%
  clean_names() %>%
  rename(month = mo,
         "globe_land" = land,
         "globe_ocean" = ocean,
         "nh_land" = land_1,
         "nh_ocean" = ocean_1,
         "sh_land" = land_2,
         "sh_ocean" = ocean_2,
         "tropics" = trpcs,
         "tropics_land" = land_3,
         "tropics_ocean" = ocean_3,
         "north_ext" = no_ext, # ext stands for extratropics. Should we keep it that way?
         "north_ext_land" = land_4,
         "north_ext_ocean" = ocean_4,
         "south_ext" = so_ext,
         "south_ext_land" = land_5,
         "south_ext_ocean" = ocean_5,
         "north_pole" = no_pol,
         "north_pole_land" = land_6,
         "north_pole_ocean" = ocean_6,
         "south_pole" = so_pol,
         "south_pole_land" = land_7,
         "south_pole_ocean" = ocean_7,
         "australia" = aust)

# I don't think variables should be doubles if they only contain values that
# contain integers. This makes people think that something might be wrong with
# this data.
# BUG FIX: the original used `x %<>% mutate(...)`, but `%<>%` lives in
# magrittr and is NOT attached by library(tidyverse), so the script errored.
x <- x %>%
  mutate(year = as.integer(year),
         month = as.integer(month))

# Save.
temperature <- x
usethis::use_data(temperature, overwrite = TRUE)
| /data-raw/make_temperature.R | permissive | tcweiss/primer.data | R | false | false | 1,988 | r |
library(tidyverse)
library(janitor)

# This data contains monthly temperature deviations in °C from the respective
# average temperature in a region during 1991-2020. The data from before 1991
# initially used another reference time period, but values were re-calculated
# later on. Note: the variable us49 excludes Hawaii, apparently since "its land
# area is less than that of a satellite grid square, so it would have virtually
# no impact on the overall national results."

# Read in data for lower troposphere (last accessed Aug-04-21). Row number 513
# is where the garbage starts, hence the limit to 512 rows.
x <- read_table(read_lines("https://www.nsstc.uah.edu/data/msu/v6.0/tmt/uahncdc_mt_6.0.txt", n_max = 512)) %>%
  clean_names() %>%
  rename(month = mo,
         "globe_land" = land,
         "globe_ocean" = ocean,
         "nh_land" = land_1,
         "nh_ocean" = ocean_1,
         "sh_land" = land_2,
         "sh_ocean" = ocean_2,
         "tropics" = trpcs,
         "tropics_land" = land_3,
         "tropics_ocean" = ocean_3,
         "north_ext" = no_ext, # ext stands for extratropics. Should we keep it that way?
         "north_ext_land" = land_4,
         "north_ext_ocean" = ocean_4,
         "south_ext" = so_ext,
         "south_ext_land" = land_5,
         "south_ext_ocean" = ocean_5,
         "north_pole" = no_pol,
         "north_pole_land" = land_6,
         "north_pole_ocean" = ocean_6,
         "south_pole" = so_pol,
         "south_pole_land" = land_7,
         "south_pole_ocean" = ocean_7,
         "australia" = aust)

# I don't think variables should be doubles if they only contain values that
# contain integers. This makes people think that something might be wrong with
# this data.
# BUG FIX: the original used `x %<>% mutate(...)`, but `%<>%` lives in
# magrittr and is NOT attached by library(tidyverse), so the script errored.
x <- x %>%
  mutate(year = as.integer(year),
         month = as.integer(month))

# Save.
temperature <- x
usethis::use_data(temperature, overwrite = TRUE)
|
###### INC118-123
# For each dataset INC118..INC123: read "INC<t>.0.dat" (tab-separated, with
# ITERNUM and PRTIME-in-ms columns), keep the first `numSamples` iterations,
# write a PRTIME histogram to "<t>_sec_pt_hist.eps", and collect per-bin
# counts into one combined text table.
# NOTE(review): `fileName` evaluates to "inc118-120_hist_bin_count.txt"
# (task_len+2) even though tasks run to 123 -- looks like a copy-paste
# leftover; confirm the intended output name.
numSamples <- 1000
task_len <- 118
fileName <- paste("inc",task_len,"-",task_len+2,"_hist_bin_count.txt",sep="")
fraction <- 0
count <- -1
for (task_len in c(118,119,120,121,122,123)){
count <- count+1
fraction <- 0
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
INC_label <- paste(task_len,".",fraction,sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
print(dataFileName)
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
# nbins is computed but not used; hist() gets explicit breaks instead.
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
# Round the y-axis limit up to the next hundred.
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Build a task-length column the same length as the bin counts.
# NOTE(review): `tl[c:1] <- task_len` rewrites elements c..1 on every pass;
# the net effect is simply a vector filled with task_len.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
tl[c:1] <- task_len
}
y <- cbind(tl,floor(h$mids),h$counts)
y
# First task writes the header row; later tasks append data rows.
if(task_len == 118){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
}
###### INC60~INC61
# Fractional tasks between INC60 and INC61: `fraction` is the decimal part in
# thousandths (125 = .125, 25 = .25, 5 = .5, ...). Each pass reads
# "INC60.<fraction>.dat", plots a histogram, and appends per-bin counts to
# one combined table.
numSamples <- 1000
task_len <- 60
fileName <- paste("inc",task_len,"-",task_len+1,"_hist_bin_count.txt",sep="")
for (fraction in c(0,125,25,375,5,625,75,875)){
INC_label <- paste(task_len,".",fraction,sep="")
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
# Pad the fractional part to three digits for the EPS filename
# (25 -> 250, 5 -> 500).
if(fraction == 25 || fraction == 75){
adj_fraction <- fraction*10
histFileName <- paste(task_len,"_",adj_fraction,"_sec_pt_hist.eps",sep="")
}else if (fraction == 5){
adj_fraction <- fraction*100
histFileName <- paste(task_len,"_",adj_fraction,"_sec_pt_hist.eps",sep="")
}else{
histFileName <- paste(task_len,"_",fraction,"_sec_pt_hist.eps",sep="")
}
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
# y-axis limit pinned to 600 for comparable plots (the computed value is
# overwritten).
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Convert task_len + encoded fraction back to the real INC number for the
# output table; the c..1 assignment just fills the whole vector.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
if(fraction == 25 || fraction == 75){
tl[c:1] <- task_len+fraction/100
}else if (fraction == 5){
tl[c:1] <- task_len+fraction/10
}else if (fraction == 0 || fraction == 1000){
tl[c:1] <- task_len
}
else{
tl[c:1] <- task_len+fraction/1000
}
}
y <- cbind(tl,floor(h$mids),h$counts)
y
# First pass (fraction 0) writes the header; later passes append rows.
if(fraction == 0){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
}
###### INC8192
# Single dataset INC8192: only 300 samples and a coarser 5 ms bin width.
# Reads "INC8192.0.dat", plots the PRTIME histogram, and writes the per-bin
# counts (with header, since task_len == 8192 always holds) to its own file.
numSamples <- 300
task_len <- 8192
fileName <- paste("inc",task_len,"_hist_bin_count.txt",sep="")
fraction <- 0
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
INC_label <- paste(task_len,".",fraction,sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
print(dataFileName)
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=5
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Fill a task-length column the same length as the bin counts (the c..1
# assignment just overwrites the whole vector with task_len).
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
tl[c:1] <- task_len
}
y <- cbind(tl,floor(h$mids),h$counts)
y
if(task_len == 8192){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
###### INC80
# Single dataset INC80: reads "INC80.0.dat", plots the PRTIME histogram with
# 1 ms bins, and writes the per-bin counts (with header, since
# task_len == 80 always holds) to its own file.
numSamples <- 1000
task_len <- 80
fileName <- paste("inc",task_len,"_hist_bin_count.txt",sep="")
fraction <- 0
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
INC_label <- paste(task_len,".",fraction,sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
print(dataFileName)
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Fill a task-length column the same length as the bin counts.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
tl[c:1] <- task_len
}
y <- cbind(tl,floor(h$mids),h$counts)
y
if(task_len == 80){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
###### INC93-103
# For each dataset INC93..INC103: read "INC<t>.0.dat", plot the PRTIME
# histogram to "<t>_sec_pt_hist.eps", and append per-bin counts to one
# combined "inc93-103_hist_bin_count.txt" table (header written on the
# first task only).
numSamples <- 1000
task_len <- 93
fileName <- paste("inc",task_len,"-",task_len+10,"_hist_bin_count.txt",sep="")
fraction <- 0
count <- -1
for (task_len in c(93,94,95,96,97,98,99,100,101,102,103)){
count <- count+1
fraction <- 0
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
INC_label <- paste(task_len,".",fraction,sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
print(dataFileName)
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Fill a task-length column the same length as the bin counts.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
tl[c:1] <- task_len
}
y <- cbind(tl,floor(h$mids),h$counts)
y
if(task_len == 93){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
}
###### INC78-80
# Quarter-step tasks from 78 to 80. Every 4th loop index (count %% 4 == 0)
# is a whole-number task and reads "INC<t>.0.dat"; fractional tasks read
# "INC<t>.dat" (e.g. "INC78.25.dat") and cycle fraction through
# .25/.5/.75 for the EPS filename.
numSamples <- 1000
task_len <- 78
fileName <- paste("inc",task_len,"-",task_len+2,"_hist_bin_count.txt",sep="")
fraction <- 0
count <- -1
for (task_len in c(78,78.25,78.5,78.75,79,79.25,79.5,79.75,80)){
count <- count+1
if(count %% 4 == 0){
fraction <- 0
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
INC_label <- paste(task_len,".",fraction,sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
}else{
fraction <- fraction+0.25
if(fraction == 1){
fraction <- 0
}
dataFileName <- paste("INC",task_len,".dat",sep="")
INC_label <- paste(task_len,sep="")
histFileName <- paste(task_len,"_",fraction,"_sec_pt_hist.eps",sep="")
}
print(dataFileName)
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Fill a task-length column the same length as the bin counts; fractional
# task_len values carry the fraction directly.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
tl[c:1] <- task_len
}
y <- cbind(tl,floor(h$mids),h$counts)
y
# Only the first task (78) writes the header.
if(task_len == 78){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
}
###### INC60-64
# Half-step tasks from 60 to 64. Even loop indices (count %% 2 == 0) are
# whole-number tasks reading "INC<t>.0.dat"; odd indices are the .5 tasks
# reading "INC<t>.dat" (e.g. "INC60.5.dat").
numSamples <- 1000
task_len <- 60
fileName <- paste("inc",task_len,"-",task_len+4,"_hist_bin_count.txt",sep="")
fraction <- 0
count <- -1
for (task_len in c(60,60.5,61,61.5,62,62.5,63,63.5,64)){
count <- count+1
if(count %% 2 == 0){
fraction <- 0
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
INC_label <- paste(task_len,".",fraction,sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
}else{
fraction <- 0.5
dataFileName <- paste("INC",task_len,".dat",sep="")
INC_label <- paste(task_len,sep="")
histFileName <- paste(task_len,"_",fraction,"_sec_pt_hist.eps",sep="")
}
#print(dataFileName)
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Fill a task-length column the same length as the bin counts.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
tl[c:1] <- task_len
}
y <- cbind(tl,floor(h$mids),h$counts)
y
# Only the first task (60) writes the header.
if(task_len == 60){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
}
###### INC224
# Single dataset INC224, run through a one-element loop. Note the different
# data-file naming here: "224_sec.dat" instead of "INC224.0.dat". The y-axis
# limit is pinned to 150, and the fraction if-chain below is vestigial since
# fraction is always 0 in this section.
numSamples <- 1000
task_len <- 224
fileName <- paste("inc",task_len,"_hist_bin_count.txt",sep="")
for (task_len in c(224)){
fraction <- 0
INC_label <- paste(task_len,sep="")
dataFileName <- paste(task_len,"_sec.dat",sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
# y-axis limit pinned to 150 (the computed value is overwritten).
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <- 150
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Fill a task-length column the same length as the bin counts; with
# fraction fixed at 0 only the task_len branch ever runs.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
if(fraction == 25 || fraction == 75){
tl[c:1] <- task_len+fraction/100
}else if (fraction == 5){
tl[c:1] <- task_len+fraction/10
}else if (fraction == 0 || fraction == 1000){
tl[c:1] <- task_len
}
else{
tl[c:1] <- task_len+fraction/1000
}
}
y <- cbind(tl,floor(h$mids),h$counts)
y
if(fraction == 0){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
}
###### INC60,62
# Two datasets, INC60 and INC62, each read from "INC<t>.0.dat".
# NOTE(review): `fraction` stays 0 for both iterations, so the
# `if(fraction == 0)` branch at the bottom is always taken and the second
# pass (INC62) REWRITES the output file, discarding INC60's rows. Keying
# the header write on `task_len == 60` was probably intended.
numSamples <- 1000
task_len <- 60
fileName <- paste("inc",task_len,"-",task_len+2,"_hist_bin_count.txt",sep="")
for (task_len in c(60,62)){
fraction <- 0
INC_label <- paste(task_len,sep="")
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
histFileName <- paste(task_len,"_",fraction,"_sec_pt_hist.eps",sep="")
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Fill a task-length column the same length as the bin counts; with
# fraction fixed at 0 only the task_len branch ever runs.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
if(fraction == 25 || fraction == 75){
tl[c:1] <- task_len+fraction/100
}else if (fraction == 5){
tl[c:1] <- task_len+fraction/10
}else if (fraction == 0 || fraction == 1000){
tl[c:1] <- task_len
}
else{
tl[c:1] <- task_len+fraction/1000
}
}
y <- cbind(tl,floor(h$mids),h$counts)
y
if(fraction == 0){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
}
###### INC21~22
# Fractional tasks between INC21 and INC22. fraction encodes the decimal
# part in thousandths; the sentinel 1000 bumps task_len to 22. Whole-number
# tasks (fraction 0 or 1000) read "<t>_sec.dat"; fractional tasks read
# "INC21.<fraction>.dat". The header row is written only on the first pass
# (fraction == 0); all later passes append.
numSamples <- 1000
task_len <- 21
fileName <- paste("inc",task_len,"-",task_len+1,"_hist_bin_count.txt",sep="")
for (fraction in c(0,125,25,375,5,625,75,875,1000)){
if(fraction == 0){
INC_label <- paste(task_len,sep="")
dataFileName <- paste(task_len,"_sec.dat",sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
}
else if(fraction == 1000){
task_len <- task_len+1
INC_label <- paste(task_len,sep="")
dataFileName <- paste(task_len,"_sec.dat",sep="")
histFileName <- paste(task_len,"_sec_pt_hist.eps",sep="")
}
else{
INC_label <- paste(task_len,".",fraction,sep="")
dataFileName <- paste("INC",task_len,".",fraction,".dat",sep="")
# Pad the fractional part to three digits for the EPS filename
# (25 -> 250, 5 -> 500).
if(fraction == 25 || fraction == 75){
adj_fraction <- fraction*10
histFileName <- paste(task_len,"_",adj_fraction,"_sec_pt_hist.eps",sep="")
}else if (fraction == 5){
adj_fraction <- fraction*100
histFileName <- paste(task_len,"_",adj_fraction,"_sec_pt_hist.eps",sep="")
}else{
histFileName <- paste(task_len,"_",fraction,"_sec_pt_hist.eps",sep="")
}
}
x = read.csv(file=dataFileName,head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize=1
# 2-sigma clipping bounds; the actual clipping below is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 600
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green",
main=paste("PT frequency on INC",INC_label,sep=""),
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""), xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(min(h$breaks),max(h$breaks)-1,binsize))
dev.off()
# Convert task_len + encoded fraction back to the real INC number for the
# output table; the c..1 assignment just fills the whole vector.
tl <- h$counts
c <- 0
for (v in tl) {
c <- c+1
if(fraction == 25 || fraction == 75){
tl[c:1] <- task_len+fraction/100
}else if (fraction == 5){
tl[c:1] <- task_len+fraction/10
}else if (fraction == 0 || fraction == 1000){
tl[c:1] <- task_len
}
else{
tl[c:1] <- task_len+fraction/1000
}
}
y <- cbind(tl,floor(h$mids),h$counts)
y
if(fraction == 0){
write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
}else{
write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
}
###### INC20~21
# Same pipeline as the INC21~22 section, for INC values between 20 and 21.
numSamples <- 1000
task_len <- 20
fileName <- paste("inc", task_len, "-", task_len + 1, "_hist_bin_count.txt", sep = "")
# fraction digits: 0/1000 = integer endpoints, 5 = .5, 25/75 = hundredths,
# the rest are thousandths.
for (fraction in c(0, 125, 25, 375, 5, 625, 75, 875, 1000)) {
  if (fraction == 0) {
    INC_label <- paste(task_len, sep = "")
    dataFileName <- paste(task_len, "_sec.dat", sep = "")
    histFileName <- paste(task_len, "_sec_pt_hist.eps", sep = "")
  } else if (fraction == 1000) {
    # Last iteration: move to the upper integer endpoint.
    task_len <- task_len + 1
    INC_label <- paste(task_len, sep = "")
    dataFileName <- paste(task_len, "_sec.dat", sep = "")
    histFileName <- paste(task_len, "_sec_pt_hist.eps", sep = "")
  } else {
    INC_label <- paste(task_len, ".", fraction, sep = "")
    dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
    # Zero-pad the fraction to three digits for the plot file name.
    if (fraction == 25 || fraction == 75) {
      adj_fraction <- fraction * 10
      histFileName <- paste(task_len, "_", adj_fraction, "_sec_pt_hist.eps", sep = "")
    } else if (fraction == 5) {
      adj_fraction <- fraction * 100
      histFileName <- paste(task_len, "_", adj_fraction, "_sec_pt_hist.eps", sep = "")
    } else {
      histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist.eps", sep = "")
    }
  }
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Fixed y-axis limit chosen manually for this pair of INC values.
  ymax <- 600
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste("PT frequency on INC", INC_label, sep = ""),
       sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Numeric INC value for this data set, repeated for every bin.
  inc_value <- if (fraction == 25 || fraction == 75) {
    task_len + fraction / 100
  } else if (fraction == 5) {
    task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    task_len
  } else {
    task_len + fraction / 1000
  }
  tl <- rep(inc_value, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  if (fraction == 0) {
    # First write creates the summary file with a header row.
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC19~20
# Same pipeline as the sections above, for INC values between 19 and 20.
numSamples <- 1000
task_len <- 19
fileName <- paste("inc", task_len, "-", task_len + 1, "_hist_bin_count.txt", sep = "")
for (fraction in c(0, 125, 25, 375, 5, 625, 75, 875, 1000)) {
  if (fraction == 0) {
    INC_label <- paste(task_len, sep = "")
    dataFileName <- paste(task_len, "_sec.dat", sep = "")
    histFileName <- paste(task_len, "_sec_pt_hist.eps", sep = "")
  } else if (fraction == 1000) {
    # Last iteration: move to the upper integer endpoint.
    task_len <- task_len + 1
    INC_label <- paste(task_len, sep = "")
    dataFileName <- paste(task_len, "_sec.dat", sep = "")
    histFileName <- paste(task_len, "_sec_pt_hist.eps", sep = "")
  } else {
    INC_label <- paste(task_len, ".", fraction, sep = "")
    dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
    # Zero-pad the fraction to three digits for the plot file name.
    if (fraction == 25 || fraction == 75) {
      adj_fraction <- fraction * 10
      histFileName <- paste(task_len, "_", adj_fraction, "_sec_pt_hist.eps", sep = "")
    } else if (fraction == 5) {
      adj_fraction <- fraction * 100
      histFileName <- paste(task_len, "_", adj_fraction, "_sec_pt_hist.eps", sep = "")
    } else {
      histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist.eps", sep = "")
    }
  }
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Fixed y-axis limit (the original computed a rounded limit, then
  # immediately overwrote it with 600).
  ymax <- 600
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste("PT frequency on INC", INC_label, sep = ""),
       sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Numeric INC value for this data set, repeated for every bin.
  inc_value <- if (fraction == 25 || fraction == 75) {
    task_len + fraction / 100
  } else if (fraction == 5) {
    task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    task_len
  } else {
    task_len + fraction / 1000
  }
  tl <- rep(inc_value, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  if (fraction == 0) {
    # First write creates the summary file with a header row.
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
# NOTE(review): this is a verbatim re-run of the INC19~20 section above —
# it regenerates the same EPS plots and overwrites the same summary file.
# Kept for behavioral fidelity; confirm whether it is intentional.
numSamples <- 1000
task_len <- 19
fileName <- paste("inc", task_len, "-", task_len + 1, "_hist_bin_count.txt", sep = "")
for (fraction in c(0, 125, 25, 375, 5, 625, 75, 875, 1000)) {
  if (fraction == 0) {
    INC_label <- paste(task_len, sep = "")
    dataFileName <- paste(task_len, "_sec.dat", sep = "")
    histFileName <- paste(task_len, "_sec_pt_hist.eps", sep = "")
  } else if (fraction == 1000) {
    # Last iteration: move to the upper integer endpoint.
    task_len <- task_len + 1
    INC_label <- paste(task_len, sep = "")
    dataFileName <- paste(task_len, "_sec.dat", sep = "")
    histFileName <- paste(task_len, "_sec_pt_hist.eps", sep = "")
  } else {
    INC_label <- paste(task_len, ".", fraction, sep = "")
    dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
    # Zero-pad the fraction to three digits for the plot file name.
    if (fraction == 25 || fraction == 75) {
      adj_fraction <- fraction * 10
      histFileName <- paste(task_len, "_", adj_fraction, "_sec_pt_hist.eps", sep = "")
    } else if (fraction == 5) {
      adj_fraction <- fraction * 100
      histFileName <- paste(task_len, "_", adj_fraction, "_sec_pt_hist.eps", sep = "")
    } else {
      histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist.eps", sep = "")
    }
  }
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Fixed y-axis limit chosen manually for this pair of INC values.
  ymax <- 600
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste("PT frequency on INC", INC_label, sep = ""),
       sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Numeric INC value for this data set, repeated for every bin.
  inc_value <- if (fraction == 25 || fraction == 75) {
    task_len + fraction / 100
  } else if (fraction == 5) {
    task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    task_len
  } else {
    task_len + fraction / 1000
  }
  tl <- rep(inc_value, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  if (fraction == 0) {
    # First write creates the summary file with a header row.
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC48,64 --- run1
# PT histograms for INC48 and INC64 (first measurement run); plots only,
# the bin-count export is disabled below.
numSamples <- 1000
task_len <- 19  # overwritten immediately by the loop below
# fileName is kept: later sections append their bin counts to this file.
fileName <- paste("INC48_64_hist1.txt", sep = "")
for (task_len in c(48, 64)) {
  fraction <- 0
  dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist1.eps", sep = "")
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  # The original computed this histogram twice on identical inputs;
  # computed once here.
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Bin-count export disabled for this run:
  # y <- cbind(rep(task_len, length(h$counts)), floor(h$mids), h$counts)
  # write.table(y, file = fileName, append = (task_len != 48), ...)
  if (task_len == 48) {
    # Round the y-axis limit up to the next hundred.
    ymax <- ceiling(max(h$counts) / 100) * 100
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste("2017-06-08: PT frequency on INC", task_len, sep = ""),
         sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  } else {
    # Fixed y-axis limit for the INC64 plot.
    ymax <- 250
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste("2017-03-09: PT frequency on INC", task_len, sep = ""),
         sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  }
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
}
###INC25
# INC25 through INC31: seven identical copy-pasted stanzas collapsed into
# one loop. Each iteration reads one integer-second data set and appends
# its histogram bin counts to fileName (inherited from the section above —
# presumably "INC48_64_hist1.txt"; TODO confirm that is intentional).
fraction <- 0
for (task_len in 25:31) {
  dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist.eps", sep = "")
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Nothing is drawn on the postscript device; the original never closed
  # it either (device leak), so close it here to finalize the EPS file.
  dev.off()
  # INC number repeated for every bin (replaces the tl[c:1] rewrite loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  write.table(y, file = fileName, append = TRUE, row.names = FALSE,
              col.names = FALSE, sep = "\t")
}
###### INC48,64 --- run1
# NOTE(review): re-run of the INC48/INC64 run1 plotting above — regenerates
# the same EPS files; confirm whether intentional.
numSamples <- 1000
# fileName is kept for parity with the original script state.
fileName <- paste("INC48_64_hist1.txt", sep = "")
for (task_len in c(48, 64)) {
  fraction <- 0
  dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist1.eps", sep = "")
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  # The original computed this histogram twice; computed once here.
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Bin-count export disabled for this run:
  # y <- cbind(rep(task_len, length(h$counts)), floor(h$mids), h$counts)
  # write.table(y, file = fileName, append = (task_len != 48), ...)
  if (task_len == 48) {
    ymax <- ceiling(max(h$counts) / 100) * 100
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste("2017-06-08: PT frequency on INC", task_len, sep = ""),
         sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  } else {
    ymax <- 250
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste("2017-03-09: PT frequency on INC", task_len, sep = ""),
         sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  }
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
}
###### INC48,60 --- run2
# PT histograms for INC48 and INC64, second measurement run (".dat2"
# inputs, "hist2" plot names); bin-count export disabled.
numSamples <- 1000
fileName <- paste("INC48_64_hist2.txt", sep = "")
for (task_len in c(48, 64)) {
  fraction <- 0
  dataFileName <- paste("INC", task_len, ".", fraction, ".dat2", sep = "")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist2.eps", sep = "")
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  # The original computed this histogram twice; computed once here.
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Bin-count export disabled for this run:
  # y <- cbind(rep(task_len, length(h$counts)), floor(h$mids), h$counts)
  # write.table(y, file = fileName, append = (task_len != 48), ...)
  if (task_len == 48) {
    ymax <- ceiling(max(h$counts) / 100) * 100
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste("2019-03-29: PT frequency on INC", task_len, sep = ""),
         sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  } else {
    ymax <- 250
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste("2019-03-30: PT frequency on INC", task_len, sep = ""),
         sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  }
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
}
###INC50
# Even INC values 50..62: write histogram bin counts to a fresh summary
# file (header row on the first INC, append afterwards).
numSamples <- 1000
fileName <- paste("interm_inc_hist_bin_count2.txt", sep = "")
for (task_len in c(50, 52, 54, 56, 58, 60, 62)) {
  fraction <- 0
  dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist.eps", sep = "")
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Nothing is drawn on the postscript device; the original leaked it —
  # close it here to finalize the EPS file.
  dev.off()
  # INC number repeated for every bin.
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  if (task_len == 50) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###INC65-71
# INC values 65..71: append histogram bin counts to fileName (inherited
# from the INC50 section above).
for (task_len in c(65, 66, 67, 68, 69, 70, 71)) {
  fraction <- 0
  dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist.eps", sep = "")
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Nothing is drawn on the postscript device; the original leaked it —
  # close it here to finalize the EPS file.
  dev.off()
  # INC number repeated for every bin; always append (the header was
  # written by the preceding section).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  write.table(y, file = fileName, append = TRUE, row.names = FALSE,
              col.names = FALSE, sep = "\t")
}
# Fractional INC values between 21 and 22 (no integer endpoints):
# export histogram bin counts to a third summary file.
numSamples <- 1000
task_len <- 21
# Hoisted out of the loop (the original re-assigned it every iteration).
fileName <- paste("interm_inc_hist_bin_count3.txt", sep = "")
for (fraction in c(125, 25, 375, 5, 625, 75, 875)) {
  dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist.eps", sep = "")
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # Nothing is drawn on the postscript device; the original leaked it —
  # close it here to finalize the EPS file.
  dev.off()
  # Numeric INC value: 25/75 are hundredths, 5 is tenths, the rest
  # are thousandths.
  inc_value <- if (fraction == 25 || fraction == 75) {
    task_len + fraction / 100
  } else if (fraction == 5) {
    task_len + fraction / 10
  } else {
    task_len + fraction / 1000
  }
  tl <- rep(inc_value, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  if (fraction == 125) {
    # First fraction creates the file with a header row.
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
######
# PT histograms for the power-of-two INC values (30 samples each).
# Fourteen near-identical copy-pasted stanzas collapsed into one
# data-driven loop. Each spec row: INC number, histogram bin size (ms),
# and the manually chosen fixed y-axis limit from the original script.
hist_specs <- list(
  c(1, 2, 20), c(2, 2, 25), c(4, 2, 25), c(8, 2, 15),
  c(16, 2, 20), c(32, 2, 10), c(64, 2, 10), c(128, 4, 15),
  c(256, 4, 15), c(512, 10, 20), c(1024, 10, 20), c(2048, 10, 15),
  c(4096, 10, 10), c(8192, 10, 8)
)
numSamples <- 30
for (spec in hist_specs) {
  inc <- spec[1]
  binsize <- spec[2]
  ymax <- spec[3]
  x <- read.csv(file = paste(inc, "_sec.dat", sep = ""), header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  if (inc == 8192) {
    # The original rounded PRTIME to one decimal only for this data set.
    x$PRTIME <- round(x$PRTIME, 1)
  }
  setEPS()
  postscript(paste(inc, "_sec_pt_hist.eps", sep = ""))
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste("PT frequency on INC", inc, sep = ""),
       sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  # These stanzas label ticks with bin midpoints (unlike the earlier
  # sections, which use bin breaks).
  axis(side = 1, at = h$mids, labels = seq(min(h$mids), max(h$mids), binsize))
  dev.off()
}
numSamples <- 30
x = read.csv(file="16384_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("16384_sec_pt_hist.eps")
binsize=20
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <- 10
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green", main='PT frequency on INC16384',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids)),binsize))
dev.off()
###############################################################################
## INC16: split the 1000 iterations into five 200-iteration batches and draw
## one UNFILTERED PT histogram per batch (16_sec_pt_hist0_<i>.eps).
## x1..x5 are kept as globals because the following section re-filters and
## re-plots them.  The originals' unused x<i>_up / x<i>_dn bounds (their
## subset lines were commented out) are not computed here.
numSamples <- 1000
x <- read.csv(file = "16_sec.dat", header = TRUE, sep = "\t")
x1 <- subset(x, x$ITERNUM <= 200)
x2 <- subset(x, x$ITERNUM > 200 & x$ITERNUM <= 400)
x3 <- subset(x, x$ITERNUM > 400 & x$ITERNUM <= 600)
x4 <- subset(x, x$ITERNUM > 600 & x$ITERNUM <= 800)
x5 <- subset(x, x$ITERNUM > 800 & x$ITERNUM <= 1000)
binsize <- 1
ymax <- 80  # fixed y-limit used by all five original plots
batches <- list(x1, x2, x3, x4, x5)
for (i in seq_along(batches)) {
  b <- batches[[i]]
  setEPS()
  postscript(paste0("16_sec_pt_hist0_", i, ".eps"))
  xmin <- min(b$PRTIME)
  xmax <- max(b$PRTIME)
  h <- hist(b$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + 1, 1), plot = FALSE)
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax),
       col = "green",
       main = paste0("PT frequency on INC16-", i),
       sub = paste0("(n=", nrow(b), ", bin_size=", binsize, "ms)"),
       xlab = "PT (ms)", ylab = expression("Frequency"))
  axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
  dev.off()
}
###########
## Same five INC16 batches, but with a +/- 2*sd outlier filter applied before
## plotting (16_sec_pt_hist<i>.eps).  The filtered data frames are written
## back to the globals x1..x5 via assign(), matching the original script's
## explicit reassignments.
binsize <- 1
ymax <- 80
for (i in seq_len(5)) {
  nm <- paste0("x", i)
  b <- get(nm)
  up <- mean(b$PRTIME) + 2 * sd(b$PRTIME)
  dn <- mean(b$PRTIME) - 2 * sd(b$PRTIME)
  b <- subset(b, b$PRTIME >= dn & b$PRTIME <= up)
  assign(nm, b)  # keep the filtered batch visible globally, as before
  setEPS()
  postscript(paste0("16_sec_pt_hist", i, ".eps"))
  xmin <- min(b$PRTIME)
  xmax <- max(b$PRTIME)
  h <- hist(b$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + 1, 1), plot = FALSE)
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax),
       col = "green",
       main = paste0("PT frequency on INC16-", i),
       sub = paste0("(n=", nrow(b), ", bin_size=", binsize, "ms)"),
       xlab = "PT (ms)", ylab = expression("Frequency"))
  axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
  dev.off()
}
### INC1: PT histogram over the first 30 iterations of 1_sec.dat, no outlier
### filter (the original's +/- 2*sd bounds were computed but the subset line
### was commented out, so they are omitted here).  Output: 1_sec_pt_hist.eps.
numSamples <- 30
x <- read.csv(file = "1_sec.dat", header = TRUE, sep = "\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("1_sec_pt_hist.eps")
binsize <- 1
xmin <- min(x$PRTIME)
xmax <- max(x$PRTIME)
h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + 1, 1), plot = FALSE)
# Round the tallest bin up to the next multiple of 100 for the y-limit.
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
     main = "PT frequency on INC1",
     sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
     xlab = "PT (ms)", ylab = expression("Frequency"))
axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
dev.off()
#########################################
### INC9: PT histogram for up to 1000 iterations of INC9_sec.dat, with the
### +/- 2*sd outlier filter ACTIVE (unlike most other blocks in this script).
### Output: 9_sec_pt_hist.eps.
numSamples <- 1000
x <- read.csv(file = "INC9_sec.dat", header = TRUE, sep = "\t")
x <- subset(x, x$ITERNUM <= numSamples)
x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
setEPS()
postscript("9_sec_pt_hist.eps")
binsize <- 1
xmin <- min(x$PRTIME)
xmax <- max(x$PRTIME)
h <- hist(x$PRTIME, right = FALSE, breaks = seq(xmin, xmax + 1, 1), plot = FALSE)
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
     main = "PT frequency on INC9",
     sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
     xlab = "PT (ms)", ylab = expression("Frequency"))
axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
dev.off()
### sodb9: INC1 then INC16 histograms with the per-run coefficient of
### variation (sd / task length) printed to the console.
### BUG FIX: the original INC16 block never read 16_sec.dat -- it re-plotted
### the INC1 data under the INC16 title/filename.  The missing read + subset
### are added here (copy-paste omission; every sibling block reads its file).
numSamples <- 1000
x <- read.csv(file = "1_sec.dat", header = TRUE, sep = "\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("1_sec_pt_hist.eps")
binsize <- 1
xmin <- min(x$PRTIME)
xmax <- max(x$PRTIME)
task_len <- 1000  # task length in ms, used only for the CV report below
print(sd(x$PRTIME) / task_len)
h <- hist(x$PRTIME, right = FALSE,
          breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
     main = "PT frequency on INC1",
     sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
     xlab = "PT (ms)", ylab = expression("Frequency"))
axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
dev.off()

x <- read.csv(file = "16_sec.dat", header = TRUE, sep = "\t")  # added (see BUG FIX note)
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("16_sec_pt_hist.eps")
binsize <- 1
xmin <- min(x$PRTIME)
xmax <- max(x$PRTIME)
task_len <- 16000
print(sd(x$PRTIME) / task_len)
h <- hist(x$PRTIME, right = FALSE,
          breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, ylim = c(0, ymax), xaxt = "n", freq = TRUE, col = "green",
     main = "PT frequency on INC16",
     sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
     xlab = "PT (ms)", ylab = expression("Frequency"))
axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
dev.off()
### sodb9 / sodb10 / sodb12: PT histograms for runs 1 .. 16384.
## Fifteen copy-pasted blocks collapsed into one configurable helper.  Each
## run differed only in: file prefix, bin size, fixed-vs-computed y-limit,
## whether ITERNUM was capped at numSamples, a list of excluded (outlier)
## iterations, small xmin/xmax fudges, axis-label style, and the plot title.
##
## prefix         - reads "<prefix>_sec.dat", writes "<prefix>_sec_pt_hist.eps"
## task_len       - divisor for the sd(PRTIME)/task_len console report
## binsize        - histogram bin width (ms)
## ymax           - fixed y-limit; NULL -> max count rounded up to next 100
## subset_samples - cap ITERNUM at num_samples (later runs skipped this)
## exclude_iters  - ITERNUM values dropped as known outliers
## xmin_adj/xmax_adj - additive fudges applied to the break range (8192/16384)
## ceil_labels    - TRUE -> labels at ceiling()-ed bin mids, step = binsize;
##                  FALSE -> labels seq(xmin, xmax, 1) at bin mids
## main_title     - NULL -> "PT frequency on INC<prefix>" ("" for last two runs)
plot_pt_hist_run <- function(prefix, task_len, binsize, ymax = NULL,
                             subset_samples = TRUE, num_samples = 1000,
                             exclude_iters = NULL,
                             xmin_adj = 0, xmax_adj = 0,
                             ceil_labels = FALSE, main_title = NULL) {
  x <- read.csv(file = paste0(prefix, "_sec.dat"), header = TRUE, sep = "\t")
  if (subset_samples) {
    x <- subset(x, x$ITERNUM <= num_samples)
  }
  if (!is.null(exclude_iters)) {
    x <- subset(x, !(x$ITERNUM %in% exclude_iters))
  }
  setEPS()
  postscript(paste0(prefix, "_sec_pt_hist.eps"))
  xmin <- min(x$PRTIME) + xmin_adj
  xmax <- max(x$PRTIME) + xmax_adj
  print(sd(x$PRTIME) / task_len)  # CV-style console report, as in the originals
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  if (is.null(ymax)) {
    ymax <- ceiling(max(h$counts) / 100) * 100
  }
  if (is.null(main_title)) {
    main_title <- paste0("PT frequency on INC", prefix)
  }
  plot(h, ylim = c(0, ymax), xaxt = "n", freq = TRUE, col = "green",
       main = main_title,
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = "PT (ms)", ylab = expression("Frequency"))
  if (ceil_labels) {
    axis(side = 1,
         at = seq(min(h$mids), max(h$mids) + binsize, binsize),
         labels = seq(ceiling(min(h$mids)), ceiling(max(h$mids)) + binsize,
                      binsize))
  } else {
    axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
  }
  dev.off()
}

numSamples <- 1000
plot_pt_hist_run("1",  task_len = 1000,  binsize = 1)
plot_pt_hist_run("2",  task_len = 2000,  binsize = 1)
plot_pt_hist_run("4",  task_len = 4000,  binsize = 1)
plot_pt_hist_run("8",  task_len = 8000,  binsize = 1, exclude_iters = 27)
plot_pt_hist_run("16", task_len = 16000, binsize = 1)
plot_pt_hist_run("32", task_len = 32000, binsize = 1, exclude_iters = 433)
plot_pt_hist_run("64", task_len = 64000, binsize = 1, ymax = 250)
plot_pt_hist_run("128", task_len = 128000, binsize = 2, ymax = 100,
                 exclude_iters = 26, ceil_labels = TRUE)
## From 256 up the originals stopped capping ITERNUM at numSamples.
plot_pt_hist_run("256", task_len = 256000, binsize = 2, ymax = 80,
                 subset_samples = FALSE, exclude_iters = 199,
                 ceil_labels = TRUE)
plot_pt_hist_run("512", task_len = 512000, binsize = 5, ymax = 120,
                 subset_samples = FALSE, exclude_iters = c(114, 186, 285),
                 ceil_labels = TRUE)
plot_pt_hist_run("1024", task_len = 1024000, binsize = 5, ymax = 100,
                 subset_samples = FALSE,
                 exclude_iters = c(17, 101, 184, 268), ceil_labels = TRUE)
### run on sodb10
plot_pt_hist_run("2048", task_len = 2048000, binsize = 5, ymax = 80,
                 subset_samples = FALSE, ceil_labels = TRUE)
### run on sodb12
plot_pt_hist_run("4096", task_len = 4096000, binsize = 5, ymax = 50,
                 subset_samples = FALSE,
                 exclude_iters = c(5, 26, 47, 68, 89, 110, 131, 152, 173,
                                   195, 215, 236, 258, 278, 299),
                 ceil_labels = TRUE)
## NOTE(review): the original used task_len 8192 / 16384 here instead of the
## millisecond-scale 8192000 / 16384000 used everywhere else; preserved as-is
## because it only affects the printed CV -- confirm which unit was intended.
plot_pt_hist_run("8192", task_len = 8192, binsize = 5, ymax = 40,
                 subset_samples = FALSE, xmin_adj = -2, xmax_adj = 1,
                 ceil_labels = TRUE, main_title = "")
plot_pt_hist_run("16384", task_len = 16384, binsize = 10, ymax = 20,
                 subset_samples = FALSE, xmin_adj = -7, xmax_adj = 5,
                 ceil_labels = TRUE, main_title = "")
# ---- dataset-row separator (was invalid R): source file
# /pt_characterization/protocol/intermediate_data/pt_hist_sodb9.r
# (no_license, yksuh-azcs/papers, R, 76,980 bytes) ----
###### INC118-123: one PT histogram per task length, plus a combined
###### bin-count table appended run by run.
## Cleanups vs. the original: the unused `count` counter, the dead
## x_up/x_dn/nbins computations, and the index-juggling `tl[c:1]` loop
## (replaced by rep()) are gone.
numSamples <- 1000
task_len <- 118
## NOTE(review): the file name says 118-120 (task_len + 2) although the loop
## covers 118-123; kept as-is because the name is observable output.
fileName <- paste0("inc", task_len, "-", task_len + 2, "_hist_bin_count.txt")
for (task_len in c(118, 119, 120, 121, 122, 123)) {
  fraction <- 0
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  INC_label <- paste0(task_len, ".", fraction)
  histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  print(dataFileName)
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- ceiling(max(h$counts) / 100) * 100
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax),
       col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = "PT (ms)", ylab = expression("Frequency"))
  axis(side = 1, at = h$mids,
       labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  ## One table row per bin: (INC number, bin number, count).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  if (task_len == 118) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c("INC Number", "Bin Number", "Counts"),
                sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC60-INC61: fractional task lengths 60.0 .. 60.875 in 1/8-ms steps.
## `fraction` encodes the decimal part with the leading "0." stripped
## (25 means .25, 5 means .5, 125 means .125).  The original decoded it with
## an if/else ladder in two places; both collapse to
## as.numeric(paste0("0.", fraction)) -- all values are exact dyadic
## fractions, so frac_val * 1000 reproduces the 0/125/250/.../875 file-name
## suffixes exactly.  The dead fraction == 1000 branch is gone.
numSamples <- 1000
task_len <- 60
fileName <- paste0("inc", task_len, "-", task_len + 1, "_hist_bin_count.txt")
for (fraction in c(0, 125, 25, 375, 5, 625, 75, 875)) {
  frac_val <- as.numeric(paste0("0.", fraction))
  INC_label <- paste0(task_len, ".", fraction)
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  histFileName <- paste0(task_len, "_", frac_val * 1000, "_sec_pt_hist.eps")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- 600  # fixed y-limit shared by all eight plots (original override)
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax),
       col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = "PT (ms)", ylab = expression("Frequency"))
  axis(side = 1, at = h$mids,
       labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  ## Table rows: (fractional INC number, bin number, count).
  tl <- rep(task_len + frac_val, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  if (fraction == 0) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c("INC Number", "Bin Number", "Counts"),
                sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC8192: single histogram (first 300 iterations) plus its bin-count
###### table.  Dead code removed (unused x_up/x_dn/nbins, the tl[c:1] loop);
###### task_len is fixed at 8192 here, so the original's
###### `if (task_len == 8192)` always took the create-with-header branch.
numSamples <- 300
task_len <- 8192
fileName <- paste0("inc", task_len, "_hist_bin_count.txt")
fraction <- 0
dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
INC_label <- paste0(task_len, ".", fraction)
histFileName <- paste0(task_len, "_sec_pt_hist.eps")
print(dataFileName)
x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize <- 5
xmin <- min(x$PRTIME)
xmax <- max(x$PRTIME)
h <- hist(x$PRTIME, right = FALSE,
          breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax),
     col = "green",
     main = paste0("PT frequency on INC", INC_label),
     sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
     xlab = "PT (ms)", ylab = expression("Frequency"))
axis(side = 1, at = h$mids,
     labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
dev.off()
tl <- rep(task_len, length(h$counts))
y <- cbind(tl, floor(h$mids), h$counts)
y
write.table(y, file = fileName, row.names = FALSE,
            col.names = c("INC Number", "Bin Number", "Counts"), sep = "\t")
###### INC80: single histogram (first 1000 iterations) plus its bin-count
###### table.  Same cleanups as the INC8192 block: dead x_up/x_dn/nbins and
###### the tl[c:1] loop removed; the always-true `if (task_len == 80)` guard
###### around the create-with-header write is dropped.
numSamples <- 1000
task_len <- 80
fileName <- paste0("inc", task_len, "_hist_bin_count.txt")
fraction <- 0
dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
INC_label <- paste0(task_len, ".", fraction)
histFileName <- paste0(task_len, "_sec_pt_hist.eps")
print(dataFileName)
x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript(histFileName)
binsize <- 1
xmin <- min(x$PRTIME)
xmax <- max(x$PRTIME)
h <- hist(x$PRTIME, right = FALSE,
          breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax),
     col = "green",
     main = paste0("PT frequency on INC", INC_label),
     sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
     xlab = "PT (ms)", ylab = expression("Frequency"))
axis(side = 1, at = h$mids,
     labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
dev.off()
tl <- rep(task_len, length(h$counts))
y <- cbind(tl, floor(h$mids), h$counts)
y
write.table(y, file = fileName, row.names = FALSE,
            col.names = c("INC Number", "Bin Number", "Counts"), sep = "\t")
###### INC93-103 -- one PRTIME histogram per integer INC from 93 to 103
numSamples <- 1000
task_len <- 93
fileName <- paste0("inc", task_len, "-", task_len + 10, "_hist_bin_count.txt")
fraction <- 0
# (The original kept a `count` loop counter here that was never read; removed.)
for (task_len in 93:103) {
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  INC_label <- paste0(task_len, ".", fraction)
  histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  print(dataFileName)
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  #ymax <- 600
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (task_len == 93) {
    # First INC writes the file fresh with a header row; the rest append.
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC78-80 -- quarter-step INCs (78, 78.25, ..., 80)
numSamples <- 1000
task_len <- 78
fileName <- paste0("inc", task_len, "-", task_len + 2, "_hist_bin_count.txt")
fraction <- 0
count <- -1
for (task_len in c(78, 78.25, 78.5, 78.75, 79, 79.25, 79.5, 79.75, 80)) {
  count <- count + 1
  if (count %% 4 == 0) {
    # Integer INC: data file is named "INC<n>.0.dat".
    fraction <- 0
    dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
    INC_label <- paste0(task_len, ".", fraction)
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else {
    # Fractional INC: task_len already carries the fraction; `fraction`
    # cycles 0.25 -> 0.5 -> 0.75 -> 0 and only feeds the EPS file name.
    fraction <- fraction + 0.25
    if (fraction == 1) {
      fraction <- 0
    }
    dataFileName <- paste0("INC", task_len, ".dat")
    INC_label <- paste0(task_len)
    histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
  }
  print(dataFileName)
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  #ymax <- 600
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (task_len == 78) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC60-64 -- half-step INCs (60, 60.5, ..., 64)
numSamples <- 1000
task_len <- 60
fileName <- paste0("inc", task_len, "-", task_len + 4, "_hist_bin_count.txt")
fraction <- 0
count <- -1
for (task_len in c(60, 60.5, 61, 61.5, 62, 62.5, 63, 63.5, 64)) {
  count <- count + 1
  if (count %% 2 == 0) {
    # Integer INC: data file is named "INC<n>.0.dat".
    fraction <- 0
    dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
    INC_label <- paste0(task_len, ".", fraction)
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else {
    # Half-step INC: task_len already carries the fraction.
    fraction <- 0.5
    dataFileName <- paste0("INC", task_len, ".dat")
    INC_label <- paste0(task_len)
    histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
  }
  #print(dataFileName)
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  #ymax <- 600
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (task_len == 60) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC224 -- single INC read from "<n>_sec.dat" naming scheme
numSamples <- 1000
task_len <- 224
fileName <- paste0("inc", task_len, "_hist_bin_count.txt")
for (task_len in c(224)) {
  fraction <- 0
  INC_label <- paste0(task_len)
  dataFileName <- paste0(task_len, "_sec.dat")
  histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  ymax <- 150                           # fixed y-limit override for this plot
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Reconstruct the numeric INC value from the encoded fraction (25 -> .25,
  # 75 -> .75, 5 -> .5, 0/1000 -> integer, otherwise thousandths), then label
  # every bin with it.  Vectorized; replaces the original O(n^2) loop.
  if (fraction == 25 || fraction == 75) {
    inc_val <- task_len + fraction / 100
  } else if (fraction == 5) {
    inc_val <- task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    inc_val <- task_len
  } else {
    inc_val <- task_len + fraction / 1000
  }
  tl <- rep(inc_val, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (fraction == 0) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC60,62 -- two integer INCs
numSamples <- 1000
task_len <- 60
fileName <- paste0("inc", task_len, "-", task_len + 2, "_hist_bin_count.txt")
for (task_len in c(60, 62)) {
  fraction <- 0
  INC_label <- paste0(task_len)
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  #ymax <- 600
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Reconstruct the numeric INC value from the encoded fraction; with
  # fraction fixed at 0 this is just task_len.  Vectorized (was O(n^2)).
  if (fraction == 25 || fraction == 75) {
    inc_val <- task_len + fraction / 100
  } else if (fraction == 5) {
    inc_val <- task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    inc_val <- task_len
  } else {
    inc_val <- task_len + fraction / 1000
  }
  tl <- rep(inc_val, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  # NOTE(review): fraction is always 0 here, so BOTH loop iterations take the
  # header (non-append) branch and the INC62 write overwrites the INC60 one.
  # This probably should test `task_len == 60`; left as-is to preserve the
  # original behavior.
  if (fraction == 0) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC21~22 -- eighth-step INCs between 21 and 22
# `fraction` encodes the decimal digits of the INC value: 125 -> .125,
# 25 -> .25, 375 -> .375, 5 -> .5, ... ; 0 and 1000 mark the two integer
# endpoints (1000 also bumps task_len to the next integer).
numSamples <- 1000
task_len <- 21
fileName <- paste0("inc", task_len, "-", task_len + 1, "_hist_bin_count.txt")
for (fraction in c(0, 125, 25, 375, 5, 625, 75, 875, 1000)) {
  if (fraction == 0) {
    INC_label <- paste0(task_len)
    dataFileName <- paste0(task_len, "_sec.dat")
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else if (fraction == 1000) {
    task_len <- task_len + 1
    INC_label <- paste0(task_len)
    dataFileName <- paste0(task_len, "_sec.dat")
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else {
    INC_label <- paste0(task_len, ".", fraction)
    dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
    # Pad short fraction codes to three digits for the EPS file name.
    if (fraction == 25 || fraction == 75) {
      adj_fraction <- fraction * 10     # 25 -> 250, 75 -> 750
      histFileName <- paste0(task_len, "_", adj_fraction, "_sec_pt_hist.eps")
    } else if (fraction == 5) {
      adj_fraction <- fraction * 100    # 5 -> 500
      histFileName <- paste0(task_len, "_", adj_fraction, "_sec_pt_hist.eps")
    } else {
      histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
    }
  }
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  #ymax <- 600
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Reconstruct the numeric INC value from the fraction code and label every
  # bin with it.  Vectorized; replaces the original O(n^2) loop.
  if (fraction == 25 || fraction == 75) {
    inc_val <- task_len + fraction / 100
  } else if (fraction == 5) {
    inc_val <- task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    inc_val <- task_len
  } else {
    inc_val <- task_len + fraction / 1000
  }
  tl <- rep(inc_val, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (fraction == 0) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC20~21 -- eighth-step INCs between 20 and 21
# Same fraction encoding as the 21~22 section: 125 -> .125, 25 -> .25, etc.;
# 0 and 1000 mark the integer endpoints (1000 also bumps task_len).
numSamples <- 1000
task_len <- 20
fileName <- paste0("inc", task_len, "-", task_len + 1, "_hist_bin_count.txt")
for (fraction in c(0, 125, 25, 375, 5, 625, 75, 875, 1000)) {
  if (fraction == 0) {
    INC_label <- paste0(task_len)
    dataFileName <- paste0(task_len, "_sec.dat")
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else if (fraction == 1000) {
    task_len <- task_len + 1
    INC_label <- paste0(task_len)
    dataFileName <- paste0(task_len, "_sec.dat")
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else {
    INC_label <- paste0(task_len, ".", fraction)
    dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
    # Pad short fraction codes to three digits for the EPS file name.
    if (fraction == 25 || fraction == 75) {
      adj_fraction <- fraction * 10
      histFileName <- paste0(task_len, "_", adj_fraction, "_sec_pt_hist.eps")
    } else if (fraction == 5) {
      adj_fraction <- fraction * 100
      histFileName <- paste0(task_len, "_", adj_fraction, "_sec_pt_hist.eps")
    } else {
      histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
    }
  }
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  #ymax <- ceiling(ymax/100)*100
  ymax <- 600                           # fixed y-limit for this series
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Reconstruct the numeric INC value and label every bin with it.
  # Vectorized; replaces the original O(n^2) loop.
  if (fraction == 25 || fraction == 75) {
    inc_val <- task_len + fraction / 100
  } else if (fraction == 5) {
    inc_val <- task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    inc_val <- task_len
  } else {
    inc_val <- task_len + fraction / 1000
  }
  tl <- rep(inc_val, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (fraction == 0) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC19~20 -- eighth-step INCs between 19 and 20
# Same fraction encoding as the sections above.
numSamples <- 1000
task_len <- 19
fileName <- paste0("inc", task_len, "-", task_len + 1, "_hist_bin_count.txt")
for (fraction in c(0, 125, 25, 375, 5, 625, 75, 875, 1000)) {
  if (fraction == 0) {
    INC_label <- paste0(task_len)
    dataFileName <- paste0(task_len, "_sec.dat")
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else if (fraction == 1000) {
    task_len <- task_len + 1
    INC_label <- paste0(task_len)
    dataFileName <- paste0(task_len, "_sec.dat")
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else {
    INC_label <- paste0(task_len, ".", fraction)
    dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
    # Pad short fraction codes to three digits for the EPS file name.
    if (fraction == 25 || fraction == 75) {
      adj_fraction <- fraction * 10
      histFileName <- paste0(task_len, "_", adj_fraction, "_sec_pt_hist.eps")
    } else if (fraction == 5) {
      adj_fraction <- fraction * 100
      histFileName <- paste0(task_len, "_", adj_fraction, "_sec_pt_hist.eps")
    } else {
      histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
    }
  }
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  ymax <- 600                           # fixed y-limit override for this series
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Reconstruct the numeric INC value and label every bin with it.
  # Vectorized; replaces the original O(n^2) loop.
  if (fraction == 25 || fraction == 75) {
    inc_val <- task_len + fraction / 100
  } else if (fraction == 5) {
    inc_val <- task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    inc_val <- task_len
  } else {
    inc_val <- task_len + fraction / 1000
  }
  tl <- rep(inc_val, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (fraction == 0) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
# Second, identical run of the INC19~20 sweep (regenerates the same EPS files
# and rewrites inc19-20_hist_bin_count.txt from scratch).
numSamples <- 1000
task_len <- 19
fileName <- paste0("inc", task_len, "-", task_len + 1, "_hist_bin_count.txt")
for (fraction in c(0, 125, 25, 375, 5, 625, 75, 875, 1000)) {
  if (fraction == 0) {
    INC_label <- paste0(task_len)
    dataFileName <- paste0(task_len, "_sec.dat")
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else if (fraction == 1000) {
    task_len <- task_len + 1
    INC_label <- paste0(task_len)
    dataFileName <- paste0(task_len, "_sec.dat")
    histFileName <- paste0(task_len, "_sec_pt_hist.eps")
  } else {
    INC_label <- paste0(task_len, ".", fraction)
    dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
    # Pad short fraction codes to three digits for the EPS file name.
    if (fraction == 25 || fraction == 75) {
      adj_fraction <- fraction * 10
      histFileName <- paste0(task_len, "_", adj_fraction, "_sec_pt_hist.eps")
    } else if (fraction == 5) {
      adj_fraction <- fraction * 100
      histFileName <- paste0(task_len, "_", adj_fraction, "_sec_pt_hist.eps")
    } else {
      histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
    }
  }
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  ymax <- 600                           # fixed y-limit override for this series
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
       main = paste0("PT frequency on INC", INC_label),
       sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
  # Reconstruct the numeric INC value and label every bin with it.
  # Vectorized; replaces the original O(n^2) loop.
  if (fraction == 25 || fraction == 75) {
    inc_val <- task_len + fraction / 100
  } else if (fraction == 5) {
    inc_val <- task_len + fraction / 10
  } else if (fraction == 0 || fraction == 1000) {
    inc_val <- task_len
  } else {
    inc_val <- task_len + fraction / 1000
  }
  tl <- rep(inc_val, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (fraction == 0) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###### INC48,64 --- run1 -- date-labelled comparison plots for INC48 and INC64
# (Bin-count table output is disabled; only the EPS plots are produced.)
numSamples <- 1000
fileName <- paste0("INC48_64_hist1.txt")
for (task_len in c(48, 64)) {
  fraction <- 0
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist1.eps")
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  #if(task_len == 48){
  #write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  #}else{
  # write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
  #}
  # (The original recomputed xmax, nbins and h identically at this point;
  # x was unchanged in between, so the recomputation has been dropped.)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  if (task_len == 48) {
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste0("2017-06-08: PT frequency on INC", task_len),
         sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  } else {
    ymax <- 250                         # fixed y-limit for the INC64 plot
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste0("2017-03-09: PT frequency on INC", task_len),
         sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  }
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
}
###INC25-31 -- append bin counts for INC25..INC31 to `fileName`
# Uses `numSamples` and `fileName` left over from the preceding section.
# The original copy-pasted this block seven times, bumping task_len each
# time; it also opened a postscript device per INC without ever closing it
# (nothing is drawn on it -- hist() is called with plot = FALSE), so the
# loop below closes each device with dev.off().
fraction <- 0
for (task_len in 25:31) {
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  dev.off()                             # close the EPS device (leaked before)
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  write.table(y, file = fileName, append = TRUE, row.names = FALSE,
              col.names = FALSE, sep = "\t")
}
###### INC48,64 --- run1 (repeat) -- same comparison plots as the earlier
# INC48,64 run1 section; table output remains disabled.
numSamples <- 1000
fileName <- paste0("INC48_64_hist1.txt")
for (task_len in c(48, 64)) {
  fraction <- 0
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist1.eps")
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  #if(task_len == 48){
  # write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  #}else{
  # write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
  #}
  # (The original recomputed xmax, nbins and h identically here; dropped.)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  if (task_len == 48) {
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste0("2017-06-08: PT frequency on INC", task_len),
         sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  } else {
    ymax <- 250                         # fixed y-limit for the INC64 plot
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste0("2017-03-09: PT frequency on INC", task_len),
         sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  }
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
}
###### INC48,64 --- run2 -- second measurement run (".dat2" input files,
# "hist2" EPS names, 2019 dates); table output remains disabled.
numSamples <- 1000
fileName <- paste0("INC48_64_hist2.txt")
for (task_len in c(48, 64)) {
  fraction <- 0
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat2")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist2.eps")
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  #if(task_len == 48){
  # write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  #}else{
  # write.table(y, file=fileName, append = TRUE, row.names = FALSE, col.names = FALSE, sep = "\t")
  #}
  # (The original recomputed xmax, nbins and h identically here; dropped.)
  ymax <- max(h$counts)
  ymax <- ceiling(ymax / 100) * 100
  if (task_len == 48) {
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste0("2019-03-29: PT frequency on INC", task_len),
         sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  } else {
    ymax <- 250                         # fixed y-limit for the INC64 plot
    plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax), col = "green",
         main = paste0("2019-03-30: PT frequency on INC", task_len),
         sub = paste0("(n=", nrow(x), ", bin_size=", binsize, "ms)"),
         xlab = 'PT (ms)', ylab = expression('Frequency'))
  }
  axis(side = 1, at = h$mids, labels = seq(min(h$breaks), max(h$breaks) - 1, binsize))
  dev.off()
}
###INC50-62 -- bin counts for even INCs 50..62 (header row written at 50)
# No plot is drawn (hist() uses plot = FALSE); the original opened a
# postscript device per INC and never closed it, so dev.off() is added.
numSamples <- 1000
fileName <- paste0("interm_inc_hist_bin_count2.txt")
for (task_len in c(50, 52, 54, 56, 58, 60, 62)) {
  fraction <- 0
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  dev.off()                             # close the EPS device (leaked before)
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  if (task_len == 50) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
}
###INC65-71 -- append bin counts for INCs 65..71
# Relies on `numSamples` and `fileName` set by the preceding INC50 section
# (i.e. appends to interm_inc_hist_bin_count2.txt).  As above, the original
# leaked one postscript device per INC; dev.off() added.
for (task_len in 65:71) {
  fraction <- 0
  dataFileName <- paste0("INC", task_len, ".", fraction, ".dat")
  x <- read.csv(file = dataFileName, header = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste0(task_len, "_", fraction, "_sec_pt_hist.eps")
  postscript(histFileName)
  binsize <- 1
  # Outlier bounds at mean +/- 2 sd; the filter itself is currently disabled.
  x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
  x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
  #x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  nbins <- ceiling((xmax - xmin) / binsize)
  h <- hist(x$PRTIME, right = FALSE,
            breaks = seq(xmin, xmax + binsize, binsize), plot = FALSE)
  dev.off()                             # close the EPS device (leaked before)
  # One task_len label per bin (replaces the original O(n^2) assignment loop).
  tl <- rep(task_len, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  y
  #if(task_len == 50){
  #write.table(y, file=fileName, row.names = FALSE, col.names = c('INC Number', 'Bin Number', 'Counts'), sep = "\t")
  #}else{
  write.table(y, file = fileName, append = TRUE, row.names = FALSE,
              col.names = FALSE, sep = "\t")
  #}
}
# Fractional task lengths around 21 ms: data files are INC21.<fraction>.dat
# where <fraction> encodes the decimal part without its leading dot
# (125 -> .125, 25 -> .25, 5 -> .5, ...). The decoded value 21.<fraction> is
# written as the INC number in interm_inc_hist_bin_count3.txt.
numSamples <- 1000
task_len <- 21
fileName <- "interm_inc_hist_bin_count3.txt"
for (fraction in c(125, 25, 375, 5, 625, 75, 875)) {
  dataFileName <- paste("INC", task_len, ".", fraction, ".dat", sep = "")
  x <- read.csv(file = dataFileName, head = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= numSamples)
  setEPS()
  histFileName <- paste(task_len, "_", fraction, "_sec_pt_hist.eps", sep = "")
  postscript(histFileName)
  binsize <- 1
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  # Left-closed 1 ms bins; counts only (plot = F).
  h <- hist(x$PRTIME, right = F, breaks = seq(xmin, xmax + binsize, binsize),
            plot = F)
  # Decode the file-name fraction suffix into a decimal fraction: two-digit
  # suffixes divide by 100, one-digit by 10, three-digit by 1000.
  if (fraction == 25 || fraction == 75) {
    inc_value <- task_len + fraction / 100
  } else if (fraction == 5) {
    inc_value <- task_len + fraction / 10
  } else {
    inc_value <- task_len + fraction / 1000
  }
  # One INC value per bin (replaces the original O(n^2) fill loop that also
  # shadowed base::c and re-decoded the fraction once per bin).
  tl <- rep(inc_value, length(h$counts))
  y <- cbind(tl, floor(h$mids), h$counts)
  if (fraction == 125) {
    write.table(y, file = fileName, row.names = FALSE,
                col.names = c('INC Number', 'Bin Number', 'Counts'),
                sep = "\t")
  } else {
    write.table(y, file = fileName, append = TRUE, row.names = FALSE,
                col.names = FALSE, sep = "\t")
  }
  # Close the EPS device opened above (the original leaked one per iteration).
  dev.off()
}
######
# The fifteen fixed-task-length PT histogram sections below all followed the
# same recipe, copy-pasted with small tweaks; plot_pt_hist() factors that
# recipe out once.
#
# dat_file:    tab-separated input with ITERNUM and PRTIME columns
# eps_file:    output EPS path
# inc_label:   plot title suffix, e.g. "INC1"
# binsize:     histogram bin width in ms
# ymax:        fixed y-axis upper limit (the originals computed a rounded
#              max(h$counts) and then immediately overrode it with a
#              hand-picked constant; only the constant survives here)
# num_samples: keep only iterations with ITERNUM <= num_samples
# round_pt:    round PRTIME to 1 decimal before binning (INC8192 only)
# ceil_labels: ceiling() the axis-label endpoints (INC16384 only)
plot_pt_hist <- function(dat_file, eps_file, inc_label, binsize, ymax,
                         num_samples = 30, round_pt = FALSE,
                         ceil_labels = FALSE) {
  x <- read.csv(file = dat_file, head = TRUE, sep = "\t")
  x <- subset(x, x$ITERNUM <= num_samples)
  if (round_pt) {
    x$PRTIME <- round(x$PRTIME, 1)
  }
  setEPS()
  postscript(eps_file)
  xmin <- min(x$PRTIME)
  xmax <- max(x$PRTIME)
  # Left-closed bins of width binsize; counts only (plot = F).
  h <- hist(x$PRTIME, right = F, breaks = seq(xmin, xmax + binsize, binsize),
            plot = F)
  plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax),
       col = "green", main = paste("PT frequency on ", inc_label, sep = ""),
       sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  lab_min <- min(h$mids)
  lab_max <- max(h$mids)
  if (ceil_labels) {
    lab_min <- ceiling(lab_min)
    lab_max <- ceiling(lab_max)
  }
  axis(side = 1, at = h$mids, labels = seq(lab_min, lab_max, binsize))
  dev.off()
}
# One call per task length; binsize and the hand-tuned ymax are per-plot.
plot_pt_hist("1_sec.dat",     "1_sec_pt_hist.eps",     "INC1",     2, 20)
plot_pt_hist("2_sec.dat",     "2_sec_pt_hist.eps",     "INC2",     2, 25)
plot_pt_hist("4_sec.dat",     "4_sec_pt_hist.eps",     "INC4",     2, 25)
plot_pt_hist("8_sec.dat",     "8_sec_pt_hist.eps",     "INC8",     2, 15)
plot_pt_hist("16_sec.dat",    "16_sec_pt_hist.eps",    "INC16",    2, 20)
plot_pt_hist("32_sec.dat",    "32_sec_pt_hist.eps",    "INC32",    2, 10)
plot_pt_hist("64_sec.dat",    "64_sec_pt_hist.eps",    "INC64",    2, 10)
plot_pt_hist("128_sec.dat",   "128_sec_pt_hist.eps",   "INC128",   4, 15)
plot_pt_hist("256_sec.dat",   "256_sec_pt_hist.eps",   "INC256",   4, 15)
plot_pt_hist("512_sec.dat",   "512_sec_pt_hist.eps",   "INC512",  10, 20)
plot_pt_hist("1024_sec.dat",  "1024_sec_pt_hist.eps",  "INC1024", 10, 20)
plot_pt_hist("2048_sec.dat",  "2048_sec_pt_hist.eps",  "INC2048", 10, 15)
plot_pt_hist("4096_sec.dat",  "4096_sec_pt_hist.eps",  "INC4096", 10, 10)
plot_pt_hist("8192_sec.dat",  "8192_sec_pt_hist.eps",  "INC8192", 10, 8,
             round_pt = TRUE)
plot_pt_hist("16384_sec.dat", "16384_sec_pt_hist.eps", "INC16384", 20, 10,
             ceil_labels = TRUE)
###############################################################################################
# 16_sec.dat is split into five consecutive 200-iteration slices; each slice
# is histogrammed twice: once untrimmed (16_sec_pt_hist0_<k>.eps) and once
# trimmed to mean +/- 2*sd (16_sec_pt_hist<k>.eps). The ten copy-pasted
# sections this replaces differed only in slice and output name.
numSamples <- 1000
x <- read.csv(file = "16_sec.dat", head = TRUE, sep = "\t")
# Plot the PT histogram of one slice (1 ms left-closed bins, y limit 80).
# xk:       slice data frame with ITERNUM / PRTIME columns
# eps_file: output EPS path
# slice_id: 1..5, used in the plot title ("INC16-<slice_id>")
# trim:     drop PT values outside mean +/- 2*sd before binning
plot_inc16_slice <- function(xk, eps_file, slice_id, trim = FALSE) {
  up <- mean(xk$PRTIME) + 2 * sd(xk$PRTIME)
  dn <- mean(xk$PRTIME) - 2 * sd(xk$PRTIME)
  if (trim) {
    xk <- subset(xk, xk$PRTIME >= dn & xk$PRTIME <= up)
  }
  setEPS()
  postscript(eps_file)
  binsize <- 1
  xmin <- min(xk$PRTIME)
  xmax <- max(xk$PRTIME)
  h <- hist(xk$PRTIME, right = F, breaks = seq(xmin, xmax + 1, 1), plot = F)
  # Fixed y-axis limit matching every original slice plot.
  ymax <- 80
  plot(h, xaxt = 'n', axes = TRUE, freq = TRUE, ylim = c(0, ymax),
       col = "green",
       main = paste("PT frequency on INC16-", slice_id, sep = ""),
       sub = paste("(n=", nrow(xk), ", bin_size=", binsize, "ms)", sep = ""),
       xlab = 'PT (ms)', ylab = expression('Frequency'))
  axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
  dev.off()
}
# Five consecutive 200-iteration windows.
slices <- list(
  subset(x, x$ITERNUM <= 200),
  subset(x, x$ITERNUM > 200 & x$ITERNUM <= 400),
  subset(x, x$ITERNUM > 400 & x$ITERNUM <= 600),
  subset(x, x$ITERNUM > 600 & x$ITERNUM <= 800),
  subset(x, x$ITERNUM > 800 & x$ITERNUM <= 1000)
)
# Untrimmed histograms.
for (k in seq_along(slices)) {
  plot_inc16_slice(slices[[k]],
                   paste("16_sec_pt_hist0_", k, ".eps", sep = ""), k)
}
# Trimmed (mean +/- 2*sd) histograms.
for (k in seq_along(slices)) {
  plot_inc16_slice(slices[[k]],
                   paste("16_sec_pt_hist", k, ".eps", sep = ""), k,
                   trim = TRUE)
}
### INC1
# PT histogram for INC1: first 30 iterations of 1_sec.dat, 1 ms bins.
numSamples <- 30
x <- read.csv(file = "1_sec.dat", head = TRUE, sep = "\t")
x <- subset(x, x$ITERNUM <= numSamples)
# Outlier bounds (mean +/- 2*sd); the trimming step itself is disabled.
x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
#x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up);
setEPS()
postscript("1_sec_pt_hist.eps")
binsize <- 1
xmin <- min(x$PRTIME)
xmax <- max(x$PRTIME)
nbins <- ceiling((xmax - xmin) / binsize)
# Left-closed 1 ms bins; counts only (plot = F).
h <- hist(x$PRTIME, right = F, breaks = seq(xmin, xmax + 1, 1), plot = F)
# Echo sample size and bin summary to the console.
print(nrow(x))
print(h)
# Round the tallest bin up to the next hundred for the y-axis limit.
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, xaxt = 'n', axes = TRUE, freq = TRUE, ylim = c(0, ymax),
     col = "green", main = 'PT frequency on INC1',
     sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
     xlab = 'PT (ms)', ylab = expression('Frequency'))
axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
dev.off()
#########################################
### INC9
# PT histogram for INC9: first 1000 iterations of INC9_sec.dat, with values
# outside mean +/- 2*sd removed before binning (trimming is active here,
# unlike most other sections).
numSamples <- 1000
x <- read.csv(file = "INC9_sec.dat", head = TRUE, sep = "\t")
x <- subset(x, x$ITERNUM <= numSamples)
x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
x <- subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
setEPS()
postscript("9_sec_pt_hist.eps")
binsize <- 1
xmin <- min(x$PRTIME)
xmax <- max(x$PRTIME)
nbins <- ceiling((xmax - xmin) / binsize)
# Left-closed 1 ms bins; counts only (plot = F).
h <- hist(x$PRTIME, right = F, breaks = seq(xmin, xmax + 1, 1), plot = F)
# Echo sample size and bin summary to the console.
print(nrow(x))
print(h)
# Round the tallest bin up to the next hundred for the y-axis limit.
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, xaxt = 'n', axes = TRUE, freq = TRUE, ylim = c(0, ymax),
     col = "green", main = 'PT frequency on INC9',
     sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
     xlab = 'PT (ms)', ylab = expression('Frequency'))
axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
dev.off()
### sodb9
# PT histogram for INC1: first 1000 iterations of 1_sec.dat; also echoes the
# PT standard deviation as a fraction of the nominal 1000 ms task length.
numSamples <- 1000
x <- read.csv(file = "1_sec.dat", head = TRUE, sep = "\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("1_sec_pt_hist.eps")
binsize <- 1
# Outlier bounds (mean +/- 2*sd); the trimming step itself is disabled.
x_up <- mean(x$PRTIME) + 2 * sd(x$PRTIME)
x_dn <- mean(x$PRTIME) - 2 * sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <- min(x$PRTIME)
print(xmin)
xmax <- max(x$PRTIME)
print(xmax)
task_len <- 1000
# Relative jitter: sd of PT over the nominal task length.
print(sd(x$PRTIME) / task_len)
nbins <- ceiling((xmax - xmin) / binsize)
# Left-closed 1 ms bins; counts only (plot = F).
h <- hist(x$PRTIME, right = F, breaks = seq(xmin, xmax + binsize, binsize),
          plot = F)
# Round the tallest bin up to the next hundred for the y-axis limit.
ymax <- ceiling(max(h$counts) / 100) * 100
plot(h, xaxt = "n", axes = TRUE, freq = TRUE, ylim = c(0, ymax),
     col = "green", main = 'PT frequency on INC1',
     sub = paste("(n=", nrow(x), ", bin_size=", binsize, "ms)", sep = ""),
     xlab = 'PT (ms)', ylab = expression('Frequency'))
axis(side = 1, at = h$mids, labels = seq(xmin, xmax, 1))
dev.off()
# NOTE(review): no read.csv in this section — it plots whatever `x` currently
# holds (the 1_sec.dat data loaded by the previous section) even though the
# title and task_len below say INC16. A read of 16_sec.dat looks like it is
# missing here — confirm intent before relying on the output.
setEPS()
postscript("16_sec_pt_hist.eps")
#x <- subset(x, x$ITERNUM != 686 & x$ITERNUM != 700)
# Outlier bounds (mean +/- 2*sd); the trimming step itself is commented out.
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=1
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 16000
# Auto-prints the PT standard deviation relative to the nominal task length.
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
# Left-closed bins of width binsize; counts only (plot=F).
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
# y-axis limit: tallest bin rounded up to the next hundred.
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 100
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC16',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(xmin,xmax,1))
dev.off()
### sodb9
numSamples <- 1000
x = read.csv(file="1_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("1_sec_pt_hist.eps")
binsize=1
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 1000
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
plot(h, xaxt="n",axes = TRUE,freq=TRUE,ylim=c(0,ymax), col="green", main='PT frequency on INC1',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(xmin,xmax,1))
dev.off()
x = read.csv(file="2_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("2_sec_pt_hist.eps")
binsize=1
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 2000
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 400
plot(h, xaxt="n",freq=TRUE,ylim=c(0,ymax), col="green", main='PT frequency on INC2',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(xmin,xmax,1))
dev.off()
x = read.csv(file="4_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("4_sec_pt_hist.eps")
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 200
xmin <-min(x$PRTIME)
xmax <-max(x$PRTIME)
task_len <- 4000
sd(x$PRTIME)/task_len
binsize=1
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC4',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(xmin,xmax,1))
dev.off()
x = read.csv(file="8_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("8_sec_pt_hist.eps")
x <- subset(x, x$ITERNUM != 27)
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=1
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 8000
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 100
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC8',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(xmin,xmax,1))
dev.off()
x = read.csv(file="16_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("16_sec_pt_hist.eps")
#x <- subset(x, x$ITERNUM != 686 & x$ITERNUM != 700)
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=1
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 16000
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 100
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC16',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(xmin,xmax,1))
dev.off()
x = read.csv(file="32_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("32_sec_pt_hist.eps")
x <- subset(x, x$ITERNUM != 433)
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=1
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 32000
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
#ymax <- 100
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC32',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(xmin,xmax,1))
dev.off()
x = read.csv(file="64_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("64_sec_pt_hist.eps")
#x <- subset(x, x$ITERNUM != 85 & x$ITERNUM != 89 & x$ITERNUM != 308 & x$ITERNUM != 312 & x$ITERNUM != 437 & x$ITERNUM != 437 & x$ITERNUM != 531 & x$ITERNUM != 535 & x$ITERNUM != 754 & x$ITERNUM != 758 & x$ITERNUM != 977 & x$ITERNUM != 981)
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=1
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 64000
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <- 250
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC64',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=h$mids,labels=seq(xmin,xmax,1))
dev.off()
x = read.csv(file="128_sec.dat",head=TRUE,sep="\t")
x <- subset(x, x$ITERNUM <= numSamples)
setEPS()
postscript("128_sec_pt_hist.eps")
x <- subset(x, x$ITERNUM != 26)
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
xmax <- max(x$PRTIME)
xmin <- min(x$PRTIME)
binsize=2
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 128000
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <- 100
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC128',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=seq(min(h$mids),max(h$mids)+binsize,binsize),labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids))+binsize,binsize))
dev.off()
x = read.csv(file="256_sec.dat",head=TRUE,sep="\t")
setEPS()
postscript("256_sec_pt_hist.eps")
x <- subset(x, x$ITERNUM != 199)
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=2
xmin <-min(x$PRTIME)
xmin
xmax <-max(x$PRTIME)
xmax
task_len <- 256000
sd(x$PRTIME)/task_len
nbins <- ceiling((xmax-xmin) / binsize)
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <- 80
xmin <-min(x$PRTIME)
xmax <-max(x$PRTIME)
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC256',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
axis(side=1,at=seq(min(h$mids),max(h$mids)+binsize,binsize),labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids))+binsize,binsize))
dev.off()
# --- INC512: EPS histogram of per-iteration processing time (PRTIME, in ms) ---
x = read.csv(file="512_sec.dat",head=TRUE,sep="\t")   # NOTE(review): `head` partially matches `header`
setEPS()
postscript("512_sec_pt_hist.eps")
# Exclude iterations 114/186/285 -- presumably known outlier runs; confirm with data source.
x <- subset(x, x$ITERNUM != 114 & x$ITERNUM != 186 & x$ITERNUM != 285)
# +/- 2 SD band around mean PT; the filter using it is commented out (kept for reference).
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=5   # histogram bin width in ms
xmin <-min(x$PRTIME)
xmin   # echoed to console
xmax <-max(x$PRTIME)
xmax   # echoed to console
task_len <- 512000   # task length in ms (512 s)
sd(x$PRTIME)/task_len   # echoed: PT standard deviation relative to task length
nbins <- ceiling((xmax-xmin) / binsize)   # NOTE(review): computed but never used
# Pre-compute histogram counts (left-closed bins) without plotting.
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
# Data-driven ymax is immediately overridden with a fixed axis limit.
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <-120
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC512',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
# Custom x axis: one tick per bin midpoint, with integer labels.
axis(side=1,at=seq(min(h$mids),max(h$mids)+binsize,binsize),labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids))+binsize,binsize))
dev.off()
# --- INC1024: EPS histogram of per-iteration processing time (PRTIME, in ms) ---
x = read.csv(file="1024_sec.dat",head=TRUE,sep="\t")   # NOTE(review): `head` partially matches `header`
setEPS()
postscript("1024_sec_pt_hist.eps")
# Exclude iterations 17/101/184/268 -- presumably known outlier runs; confirm with data source.
x <- subset(x, x$ITERNUM != 17 & x$ITERNUM != 101 & x$ITERNUM != 184 & x$ITERNUM != 268)
# +/- 2 SD band around mean PT; the filter using it is commented out (kept for reference).
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=5   # histogram bin width in ms
xmin <-min(x$PRTIME)
xmin   # echoed to console
xmax <-max(x$PRTIME)
xmax   # echoed to console
task_len <- 1024000   # task length in ms (1024 s)
sd(x$PRTIME)/task_len   # echoed: PT standard deviation relative to task length
nbins <- ceiling((xmax-xmin) / binsize)   # NOTE(review): computed but never used
# Pre-compute histogram counts (left-closed bins) without plotting.
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
# Data-driven ymax is immediately overridden with a fixed axis limit.
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <- 100
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC1024',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
# Custom x axis: one tick per bin midpoint, with integer labels.
axis(side=1,at=seq(min(h$mids),max(h$mids)+binsize,binsize),labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids))+binsize,binsize))
dev.off()
### run on sodb10
# --- INC2048: EPS histogram of per-iteration processing time (PRTIME, in ms) ---
# No ITERNUM exclusions for this dataset (unlike the other sections).
x = read.csv(file="2048_sec.dat",head=TRUE,sep="\t")   # NOTE(review): `head` partially matches `header`
setEPS()
postscript("2048_sec_pt_hist.eps")
# +/- 2 SD band around mean PT; the filter using it is commented out (kept for reference).
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=5   # histogram bin width in ms
xmin <-min(x$PRTIME)
xmin   # echoed to console
xmax <-max(x$PRTIME)
xmax   # echoed to console
task_len <- 2048000   # task length in ms (2048 s)
sd(x$PRTIME)/task_len   # echoed: PT standard deviation relative to task length
nbins <- ceiling((xmax-xmin) / binsize)   # NOTE(review): computed but never used
# Pre-compute histogram counts (left-closed bins) without plotting.
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
# Data-driven ymax is immediately overridden with a fixed axis limit.
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <- 80
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC2048',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
# Custom x axis: one tick per bin midpoint, with integer labels.
axis(side=1,at=seq(min(h$mids),max(h$mids)+binsize,binsize),labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids))+binsize,binsize))
dev.off()
### run on sodb12
# --- INC4096: EPS histogram of per-iteration processing time (PRTIME, in ms) ---
x = read.csv(file="4096_sec.dat",head=TRUE,sep="\t")   # NOTE(review): `head` partially matches `header`
setEPS()
postscript("4096_sec_pt_hist.eps")
# Exclude a long, roughly evenly spaced list of iterations (every ~21st) --
# presumably periodic outlier runs; confirm with data source.
x <- subset(x, x$ITERNUM != 5 & x$ITERNUM != 26 & x$ITERNUM != 47 & x$ITERNUM != 68 & x$ITERNUM != 89 & x$ITERNUM != 110 & x$ITERNUM != 131 & x$ITERNUM != 152 & x$ITERNUM != 173 & x$ITERNUM != 195 & x$ITERNUM != 215 & x$ITERNUM != 236 & x$ITERNUM != 258 & x$ITERNUM != 278 & x$ITERNUM != 299)
# +/- 2 SD band around mean PT; the filter using it is commented out (kept for reference).
x_up = mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn = mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize=5   # histogram bin width in ms
xmin <-min(x$PRTIME)
xmin   # echoed to console
xmax <-max(x$PRTIME)
xmax   # echoed to console
task_len <- 4096000   # task length in ms (4096 s)
sd(x$PRTIME)/task_len   # echoed: PT standard deviation relative to task length
# Pre-compute histogram counts (left-closed bins) without plotting.
h = hist(x$PRTIME, right=F, breaks=seq(xmin,xmax+binsize,binsize),plot=F)
# Data-driven ymax is immediately overridden with a fixed axis limit.
ymax <- max(h$counts)
ymax <- ceiling(ymax/100)*100
ymax <-50
plot(h, ylim=c(0,ymax), xaxt="n",freq=TRUE,col="green", main='PT frequency on INC4096',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
# Custom x axis: one tick per bin midpoint, with integer labels.
axis(side=1,at=seq(min(h$mids),max(h$mids)+binsize,binsize),labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids))+binsize,binsize))
dev.off()
# --- INC8192: EPS histogram of per-iteration processing time (PRTIME, in ms) ---
# Reads tab-separated timing data and writes the histogram to an EPS file.
x <- read.csv(file="8192_sec.dat", header=TRUE, sep="\t")
setEPS()
postscript("8192_sec_pt_hist.eps")
# +/- 2 SD band around mean PT; the outlier filter using it is commented out (kept for reference).
x_up <- mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn <- mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
binsize <- 5                 # histogram bin width in ms
xmin <- min(x$PRTIME) - 2    # pad the range so the extreme bins are not clipped
xmax <- max(x$PRTIME) + 1
# Task length in ms.  BUG FIX: every other section uses <seconds>*1000
# (128 -> 128000, ..., 4096 -> 4096000); this one said 8192, which made
# sd(PRTIME)/task_len 1000x too large.
task_len <- 8192000
sd(x$PRTIME)/task_len        # echoed: PT standard deviation relative to task length
# Pre-compute histogram counts (left-closed bins) without plotting.
h <- hist(x$PRTIME, right=FALSE, breaks=seq(xmin, xmax+binsize, binsize), plot=FALSE)
ymax <- 40                   # fixed y-axis limit chosen for this plot
plot(h, ylim=c(0,ymax), xaxt="n", freq=TRUE, col="green", main='',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
# Custom x axis: one tick per bin midpoint, with integer labels.
axis(side=1,at=seq(min(h$mids),max(h$mids)+binsize,binsize),labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids))+binsize,binsize))
dev.off()
# --- INC16384: EPS histogram of per-iteration processing time (PRTIME, in ms) ---
# Reads tab-separated timing data and writes the histogram to an EPS file.
x <- read.csv(file="16384_sec.dat", header=TRUE, sep="\t")
setEPS()
postscript("16384_sec_pt_hist.eps")
# +/- 2 SD band around mean PT; the outlier filter using it is commented out (kept for reference).
x_up <- mean(x$PRTIME) + 2*sd(x$PRTIME)
x_dn <- mean(x$PRTIME) - 2*sd(x$PRTIME)
#x = subset(x, x$PRTIME >= x_dn & x$PRTIME <= x_up)
# Task length in ms.  BUG FIX: the other sections use <seconds>*1000
# (128 -> 128000, ..., 4096 -> 4096000); this one said 16384 -- and assigned
# it twice -- which made sd(PRTIME)/task_len 1000x too large.
task_len <- 16384000
sd(x$PRTIME)/task_len        # echoed: PT standard deviation relative to task length
binsize <- 10                # histogram bin width in ms
xmin <- min(x$PRTIME) - 7    # pad the range so the extreme bins are not clipped
xmax <- max(x$PRTIME) + 5
# Pre-compute histogram counts (left-closed bins) without plotting.
h <- hist(x$PRTIME, right=FALSE, breaks=seq(xmin, xmax+binsize, binsize), plot=FALSE)
ymax <- 20                   # fixed y-axis limit chosen for this plot
plot(h, ylim=c(0,ymax), xaxt="n", freq=TRUE, col="green", main='',
sub=paste("(n=",nrow(x),", bin_size=",binsize,"ms)",sep=""),
xlab='PT (ms)', ylab=expression('Frequency'))
# Custom x axis: one tick per bin midpoint, with integer labels.
axis(side=1,at=seq(min(h$mids),max(h$mids)+binsize,binsize),labels=seq(ceiling(min(h$mids)),ceiling(max(h$mids))+binsize,binsize))
dev.off()
|
head(Brain)
| /inst/snippets/Exploration9.1.3.R | no_license | rpruim/ISIwithR | R | false | false | 13 | r | head(Brain)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ger_indices.R
\name{ger_GRS}
\alias{ger_GRS}
\title{Germinated Seed Number}
\usage{
ger_GRS(evalName, data)
}
\arguments{
\item{evalName}{Prefix of the names of the periods of evaluation.}
\item{data}{The name of the data frame containing the data.}
}
\value{
Number of germinated seeds
}
\description{
This function calculates the number of germinated seeds.
}
\examples{
library(GerminaR)
dt <- prosopis
grs <- ger_GRS(evalName = "D", data = dt)
grs
}
| /man/ger_GRS.Rd | no_license | cran/GerminaR | R | false | true | 558 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ger_indices.R
\name{ger_GRS}
\alias{ger_GRS}
\title{Germinated Seed Number}
\usage{
ger_GRS(evalName, data)
}
\arguments{
\item{evalName}{Prefix of the names of the periods of evaluation.}
\item{data}{The name of the data frame containing the data.}
}
\value{
Number of germinated seeds
}
\description{
This function calculates the number of germinated seeds.
}
\examples{
library(GerminaR)
dt <- prosopis
grs <- ger_GRS(evalName = "D", data = dt)
grs
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdal_grid.R
\name{gdal_grid}
\alias{gdal_grid}
\title{gdal_grid}
\usage{
gdal_grid(
src_datasource,
dst_filename,
ot,
of,
txe,
tye,
outsize,
a_srs,
zfield,
z_increase,
z_multiply,
a,
spat,
clipsrc,
clipsrcsql,
clipsrclayer,
clipsrcwhere,
l,
where,
sql,
co,
q,
config,
output_Raster = FALSE,
ignore.full_scan = TRUE,
verbose = FALSE
)
}
\arguments{
\item{src_datasource}{Character. Any OGR supported readable datasource.}
\item{dst_filename}{Character. The GDAL supported output file.}
\item{ot}{Character. "type". For the output bands to be of the indicated data type.}
\item{of}{Character. "format". Select the output format. The default is GeoTIFF (GTiff). Use the short format name.}
\item{txe}{Numeric. c(xmin,xmax). Set georeferenced X extents of output file to be created.}
\item{tye}{Numeric. c(ymin,ymax). Set georeferenced Y extents of output file to be created.}
\item{outsize}{Numeric. c(xsize,ysize). Set the size of the output file in pixels and lines.}
\item{a_srs}{Character. "srs_def". Override the projection for the output file. The srs_def may be any of the usual GDAL/OGR forms, complete WKT, PROJ.4, EPSG:n or a file containing the WKT.}
\item{zfield}{Character. "field_name". Identifies an attribute field on the features to be used to get a Z value from. This value overrides Z value read from feature geometry record (naturally, if you have a Z value in geometry, otherwise you have no choice and should specify a field name containing Z value).}
\item{z_increase}{Numeric. increase_value. Addition to the attribute field on the features to be used to get a Z value from. The addition should be the same unit as Z value. The result value will be Z value + Z increase value. The default value is 0.}
\item{z_multiply}{Numeric. multiply_value. This is multiplication ratio for Z field. This can be used for shift from e.g. foot to meters or from elevation to deep. The result value will be (Z value + Z increase value) * Z multiply value. The default value is 1.}
\item{a}{Character. [algorithm[:parameter1=value1][:parameter2=value2]...] Set the interpolation algorithm or data metric name and (optionally) its parameters. See INTERPOLATION ALGORITHMS and DATA METRICS sections for further discussion of available options.}
\item{spat}{Numeric. c(xmin,ymin,xmax,ymax). Adds a spatial filter to select only features contained within the bounding box described by (xmin, ymin) - (xmax, ymax).}
\item{clipsrc}{Numeric or Character. c(xmin,ymin,xmax,ymax)|WKT|datasource|spat_extent. Adds a spatial filter to select only features contained within the specified bounding box (expressed in source SRS), WKT geometry (POLYGON or MULTIPOLYGON), from a datasource or to the spatial extent of the -spat option if you use the spat_extent keyword. When specifying a datasource, you will generally want to use it in combination of the -clipsrclayer, -clipsrcwhere or -clipsrcsql options.}
\item{clipsrcsql}{Character. Select desired geometries using an SQL query instead.}
\item{clipsrclayer}{Character. "layername". Select the named layer from the source clip datasource.}
\item{clipsrcwhere}{Character. "expression". Restrict desired geometries based on attribute query.}
\item{l}{Character. "layername". Indicates the layer(s) from the datasource that will be used for input features. May be specified multiple times, but at least one layer name or a -sql option must be specified.}
\item{where}{Character. "expression". An optional SQL WHERE style query expression to be applied to select features to process from the input layer(s).}
\item{sql}{Character. "select_statement". An SQL statement to be evaluated against the datasource to produce a virtual layer of features to be processed.}
\item{co}{Character. "NAME=VALUE". Passes a creation option to the output format driver. Multiple -co options may be listed. See format specific documentation for legal creation options for each format.}
\item{q}{Logical. Suppress progress monitor and other non-error output.}
\item{config}{Character. Sets runtime configuration options for GDAL. See https://trac.osgeo.org/gdal/wiki/ConfigOptions for more information.}
\item{output_Raster}{Logical. Return output dst_filename as a RasterBrick?}
\item{ignore.full_scan}{Logical. If FALSE, perform a brute-force scan if other installs are not found. Default is TRUE.}
\item{verbose}{Logical. Enable verbose execution? Default is FALSE.}
}
\value{
NULL or if(output_Raster), a RasterBrick.
}
\description{
R wrapper for gdal_grid: creates regular grid from the scattered data
}
\details{
This is an R wrapper for the 'gdal_grid' function that is part of the
Geospatial Data Abstraction Library (GDAL). It follows the parameter naming
conventions of the original function, with some modifications to allow for more R-like
parameters. For all parameters, the user can use a single character string following,
precisely, the gdal_contour format (\url{http://www.gdal.org/gdal_grid.html}), or,
in some cases, can use R vectors to achieve the same end.
INTERPOLATION ALGORITHMS
There are a number of interpolation algorithms to choose from.
\itemize{
\item{invdist
Inverse distance to a power. This is default algorithm. It has following parameters:
\itemize{
\item{power:
Weighting power (default 2.0).
}
\item{
smoothing:
Smoothing parameter (default 0.0).
}
\item{
radius1:
The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{
radius2:
The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{
angle:
Angle of search ellipse rotation in degrees (counter clockwise, default 0.0).
}
\item{
max_points:
Maximum number of data points to use. Do not search for more points than this number. This is only used if search ellipse is set (both radii are non-zero). Zero means that all found points should be used. Default is 0.
}
\item{
min_points:
Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. This is only used if search ellipse is set (both radii are non-zero). Default is 0.
}
\item{
nodata:
NODATA marker to fill empty points (default 0.0).
}
}
}
\item{invdistnn
(Since GDAL 2.1) Inverse distance to a power with nearest neighbor searching, ideal when max_points is used. It has following parameters:
\itemize{
\item{power:
Weighting power (default 2.0).
}
\item{radius:
The radius of the search circle, which should be non-zero. Default is 1.0.
}
\item{max_points:
Maximum number of data points to use. Do not search for more points than this number. Found points will be ranked from nearest to furthest distance when weighting. Default is 12.
}
\item{min_points:
Minimum number of data points to use. If less amount of points found the grid node is considered empty and will be filled with NODATA marker. Default is 0.
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
}
}
}
\item{average
Moving average algorithm. It has following parameters:
\itemize{
\item{radius1:
The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{radius2:
The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{angle:
Angle of search ellipse rotation in degrees (counter clockwise, default 0.0).
}
\item{min_points:
Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. Default is 0.
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
Note, that it is essential to set search ellipse for moving average method. It is a window that will be averaged when computing grid nodes values.
}
}
}
\item{nearest
Nearest neighbor algorithm. It has following parameters:
\itemize{
\item{radius1:
The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{radius2:
The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{angle:
Angle of search ellipse rotation in degrees (counter clockwise, default 0.0).
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
}
}
}
\item{linear
(Since GDAL 2.1) Linear interpolation algorithm.
The Linear method performs linear interpolation by computing a Delaunay triangulation of the point cloud, finding which triangle of the triangulation the point falls in, and doing linear interpolation from its barycentric coordinates within the triangle. If the point is not in any triangle, depending on the radius, the algorithm will use the value of the nearest point or the nodata value.
It has following parameters:
\itemize{
\item{radius:
In case the point to be interpolated does not fit into a triangle of the Delaunay triangulation, use that maximum distance to search a nearest neighbour, or use nodata otherwise. If set to -1, the search distance is infinite. If set to 0, nodata value will be always used. Default is -1.
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
}
}
}
}
DATA METRICS
Besides the interpolation functionality gdal_grid can be used to compute some data metrics using the specified window and output grid geometry. These metrics are:
\itemize{
\item{minimum:
Minimum value found in grid node search ellipse.
}
\item{maximum:
Maximum value found in grid node search ellipse.
}
\item{range:
A difference between the minimum and maximum values found in grid node search ellipse.
}
\item{count:
A number of data points found in grid node search ellipse.
}
\item{average_distance:
An average distance between the grid node (center of the search ellipse) and all of the data points found in grid node search ellipse.
}
\item{average_distance_pts:
An average distance between the data points found in grid node search ellipse. The distance between each pair of points within ellipse is calculated and average of all distances is set as a grid node value.
}
}
All the metrics have the same set of options:
\itemize{
\item{radius1:
The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{radius2:
The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{angle:
Angle of search ellipse rotation in degrees (counter clockwise, default 0.0).
}
\item{min_points:
Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. This is only used if search ellipse is set (both radii are non-zero). Default is 0.
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
}
}
This function assumes the user has a working GDAL on their system. If the
"gdalUtils_gdalPath" option has been set (usually by gdal_setInstallation),
the GDAL found in that path will be used. If nothing is found, gdal_setInstallation
will be executed to attempt to find a working GDAL that has the right drivers
as specified with the "of" (output format) parameter.
The user can choose to (optionally) return a RasterBrick of the output file (assuming
raster/rgdal supports the particular output format).
}
\examples{
# We'll pre-check to make sure there is a valid GDAL install
# and that raster and rgdal are also installed.
# Note this isn't strictly neccessary, as executing the function will
# force a search for a valid GDAL install.
gdal_setInstallation()
valid_install <- !is.null(getOption("gdalUtils_gdalPath"))
if(require(raster) && valid_install)
{
# Create a properly formatted CSV:
temporary_dir <- tempdir()
tempfname_base <- file.path(temporary_dir,"dem")
tempfname_csv <- paste(tempfname_base,".csv",sep="")
pts <- data.frame(
Easting=c(86943.4,87124.3,86962.4,87077.6),
Northing=c(891957,892075,892321,891995),
Elevation=c(139.13,135.01,182.04,135.01)
)
write.csv(pts,file=tempfname_csv,row.names=FALSE)
# Now make a matching VRT file
tempfname_vrt <- paste(tempfname_base,".vrt",sep="")
vrt_header <- c(
'<OGRVRTDataSource>',
'\t<OGRVRTLayer name="dem">',
'\t<SrcDataSource>dem.csv</SrcDataSource>',
'\t<GeometryType>wkbPoint</GeometryType>',
'\t<GeometryField encoding="PointFromColumns" x="Easting" y="Northing" z="Elevation"/>',
'\t</OGRVRTLayer>',
'\t</OGRVRTDataSource>'
)
vrt_filecon <- file(tempfname_vrt,"w")
writeLines(vrt_header,con=vrt_filecon)
close(vrt_filecon)
tempfname_tif <- paste(tempfname_base,".tiff",sep="")
# Now run gdal_grid:
setMinMax(gdal_grid(src_datasource=tempfname_vrt,
dst_filename=tempfname_tif,a="invdist:power=2.0:smoothing=1.0",
txe=c(85000,89000),tye=c(894000,890000),outsize=c(400,400),
of="GTiff",ot="Float64",l="dem",output_Raster=TRUE))
}
}
\references{
\url{http://www.gdal.org/gdal_grid.html}
}
\author{
Jonathan A. Greenberg (\email{gdalUtils@estarcion.net}) (wrapper) and Frank Warmerdam (GDAL lead developer).
}
| /man/gdal_grid.Rd | no_license | cran/gdalUtils | R | false | true | 13,376 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdal_grid.R
\name{gdal_grid}
\alias{gdal_grid}
\title{gdal_grid}
\usage{
gdal_grid(
src_datasource,
dst_filename,
ot,
of,
txe,
tye,
outsize,
a_srs,
zfield,
z_increase,
z_multiply,
a,
spat,
clipsrc,
clipsrcsql,
clipsrclayer,
clipsrcwhere,
l,
where,
sql,
co,
q,
config,
output_Raster = FALSE,
ignore.full_scan = TRUE,
verbose = FALSE
)
}
\arguments{
\item{src_datasource}{Character. Any OGR supported readable datasource.}
\item{dst_filename}{Character. The GDAL supported output file.}
\item{ot}{Character. "type". For the output bands to be of the indicated data type.}
\item{of}{Character. "format". Select the output format. The default is GeoTIFF (GTiff). Use the short format name.}
\item{txe}{Numeric. c(xmin,xmax). Set georeferenced X extents of output file to be created.}
\item{tye}{Numeric. c(ymin,ymax). Set georeferenced Y extents of output file to be created.}
\item{outsize}{Numeric. c(xsize,ysize). Set the size of the output file in pixels and lines.}
\item{a_srs}{Character. "srs_def". Override the projection for the output file. The srs_def may be any of the usual GDAL/OGR forms, complete WKT, PROJ.4, EPSG:n or a file containing the WKT.}
\item{zfield}{Character. "field_name". Identifies an attribute field on the features to be used to get a Z value from. This value overrides Z value read from feature geometry record (naturally, if you have a Z value in geometry, otherwise you have no choice and should specify a field name containing Z value).}
\item{z_increase}{Numeric. increase_value. Addition to the attribute field on the features to be used to get a Z value from. The addition should be the same unit as Z value. The result value will be Z value + Z increase value. The default value is 0.}
\item{z_multiply}{Numeric. multiply_value. This is multiplication ratio for Z field. This can be used for shift from e.g. foot to meters or from elevation to deep. The result value will be (Z value + Z increase value) * Z multiply value. The default value is 1.}
\item{a}{Character. [algorithm[:parameter1=value1][:parameter2=value2]...] Set the interpolation algorithm or data metric name and (optionally) its parameters. See INTERPOLATION ALGORITHMS and DATA METRICS sections for further discussion of available options.}
\item{spat}{Numeric. c(xmin,ymin,xmax,ymax). Adds a spatial filter to select only features contained within the bounding box described by (xmin, ymin) - (xmax, ymax).}
\item{clipsrc}{Numeric or Character. c(xmin,ymin,xmax,ymax)|WKT|datasource|spat_extent. Adds a spatial filter to select only features contained within the specified bounding box (expressed in source SRS), WKT geometry (POLYGON or MULTIPOLYGON), from a datasource or to the spatial extent of the -spat option if you use the spat_extent keyword. When specifying a datasource, you will generally want to use it in combination of the -clipsrclayer, -clipsrcwhere or -clipsrcsql options.}
\item{clipsrcsql}{Character. Select desired geometries using an SQL query instead.}
\item{clipsrclayer}{Character. "layername". Select the named layer from the source clip datasource.}
\item{clipsrcwhere}{Character. "expression". Restrict desired geometries based on attribute query.}
\item{l}{Character. "layername". Indicates the layer(s) from the datasource that will be used for input features. May be specified multiple times, but at least one layer name or a -sql option must be specified.}
\item{where}{Character. "expression". An optional SQL WHERE style query expression to be applied to select features to process from the input layer(s).}
\item{sql}{Character. "select_statement". An SQL statement to be evaluated against the datasource to produce a virtual layer of features to be processed.}
\item{co}{Character. "NAME=VALUE". Passes a creation option to the output format driver. Multiple -co options may be listed. See format specific documentation for legal creation options for each format.}
\item{q}{Logical. Suppress progress monitor and other non-error output.}
\item{config}{Character. Sets runtime configuration options for GDAL. See https://trac.osgeo.org/gdal/wiki/ConfigOptions for more information.}
\item{output_Raster}{Logical. Return output dst_filename as a RasterBrick?}
\item{ignore.full_scan}{Logical. If FALSE, perform a brute-force scan if other installs are not found. Default is TRUE.}
\item{verbose}{Logical. Enable verbose execution? Default is FALSE.}
}
\value{
NULL or if(output_Raster), a RasterBrick.
}
\description{
R wrapper for gdal_grid: creates regular grid from the scattered data
}
\details{
This is an R wrapper for the 'gdal_grid' function that is part of the
Geospatial Data Abstraction Library (GDAL). It follows the parameter naming
conventions of the original function, with some modifications to allow for more R-like
parameters. For all parameters, the user can use a single character string following,
precisely, the gdal_contour format (\url{http://www.gdal.org/gdal_grid.html}), or,
in some cases, can use R vectors to achieve the same end.
INTERPOLATION ALGORITHMS
There are a number of interpolation algorithms to choose from.
\itemize{
\item{invdist
Inverse distance to a power. This is default algorithm. It has following parameters:
\itemize{
\item{power:
Weighting power (default 2.0).
}
\item{
smoothing:
Smoothing parameter (default 0.0).
}
\item{
radius1:
The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{
radius2:
The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{
angle:
Angle of search ellipse rotation in degrees (counter clockwise, default 0.0).
}
\item{
max_points:
Maximum number of data points to use. Do not search for more points than this number. This is only used if search ellipse is set (both radii are non-zero). Zero means that all found points should be used. Default is 0.
}
\item{
min_points:
Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. This is only used if search ellipse is set (both radii are non-zero). Default is 0.
}
\item{
nodata:
NODATA marker to fill empty points (default 0.0).
}
}
}
\item{invdistnn
(Since GDAL 2.1) Inverse distance to a power with nearest neighbor searching, ideal when max_points is used. It has following parameters:
\itemize{
\item{power:
Weighting power (default 2.0).
}
\item{radius:
The radius of the search circle, which should be non-zero. Default is 1.0.
}
\item{max_points:
Maximum number of data points to use. Do not search for more points than this number. Found points will be ranked from nearest to furthest distance when weighting. Default is 12.
}
\item{min_points:
Minimum number of data points to use. If less amount of points found the grid node is considered empty and will be filled with NODATA marker. Default is 0.
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
}
}
}
\item{average
Moving average algorithm. It has following parameters:
\itemize{
\item{radius1:
The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{radius2:
The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{angle:
Angle of search ellipse rotation in degrees (counter clockwise, default 0.0).
}
\item{min_points:
Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. Default is 0.
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
Note, that it is essential to set search ellipse for moving average method. It is a window that will be averaged when computing grid nodes values.
}
}
}
\item{nearest
Nearest neighbor algorithm. It has following parameters:
\itemize{
\item{radius1:
The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{radius2:
The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{angle:
Angle of search ellipse rotation in degrees (counter clockwise, default 0.0).
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
}
}
}
\item{linear
(Since GDAL 2.1) Linear interpolation algorithm.
The Linear method performs linear interpolation by computing a Delaunay triangulation of the point cloud, finding which triangle of the triangulation the point falls in, and doing linear interpolation from its barycentric coordinates within the triangle. If the point is not in any triangle, depending on the radius, the algorithm will use the value of the nearest point or the nodata value.
It has following parameters:
\itemize{
\item{radius:
In case the point to be interpolated does not fit into a triangle of the Delaunay triangulation, use that maximum distance to search a nearest neighbour, or use nodata otherwise. If set to -1, the search distance is infinite. If set to 0, nodata value will be always used. Default is -1.
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
}
}
}
}
DATA METRICS
Besides the interpolation functionality gdal_grid can be used to compute some data metrics using the specified window and output grid geometry. These metrics are:
\itemize{
\item{minimum:
Minimum value found in grid node search ellipse.
}
\item{maximum:
Maximum value found in grid node search ellipse.
}
\item{range:
A difference between the minimum and maximum values found in grid node search ellipse.
}
\item{count:
A number of data points found in grid node search ellipse.
}
\item{average_distance:
An average distance between the grid node (center of the search ellipse) and all of the data points found in grid node search ellipse.
}
\item{average_distance_pts:
An average distance between the data points found in grid node search ellipse. The distance between each pair of points within ellipse is calculated and average of all distances is set as a grid node value.
}
}
All the metrics have the same set of options:
\itemize{
\item{radius1:
The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{radius2:
The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0.
}
\item{angle:
Angle of search ellipse rotation in degrees (counter clockwise, default 0.0).
}
\item{min_points:
Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. This is only used if search ellipse is set (both radii are non-zero). Default is 0.
}
\item{nodata:
NODATA marker to fill empty points (default 0.0).
}
}
This function assumes the user has a working GDAL on their system. If the
"gdalUtils_gdalPath" option has been set (usually by gdal_setInstallation),
the GDAL found in that path will be used. If nothing is found, gdal_setInstallation
will be executed to attempt to find a working GDAL that has the right drivers
as specified with the "of" (output format) parameter.
The user can choose to (optionally) return a RasterBrick of the output file (assuming
raster/rgdal supports the particular output format).
}
\examples{
# We'll pre-check to make sure there is a valid GDAL install
# and that raster and rgdal are also installed.
# Note this isn't strictly neccessary, as executing the function will
# force a search for a valid GDAL install.
gdal_setInstallation()
valid_install <- !is.null(getOption("gdalUtils_gdalPath"))
if(require(raster) && valid_install)
{
# Create a properly formatted CSV:
temporary_dir <- tempdir()
tempfname_base <- file.path(temporary_dir,"dem")
tempfname_csv <- paste(tempfname_base,".csv",sep="")
pts <- data.frame(
Easting=c(86943.4,87124.3,86962.4,87077.6),
Northing=c(891957,892075,892321,891995),
Elevation=c(139.13,135.01,182.04,135.01)
)
write.csv(pts,file=tempfname_csv,row.names=FALSE)
# Now make a matching VRT file
tempfname_vrt <- paste(tempfname_base,".vrt",sep="")
vrt_header <- c(
'<OGRVRTDataSource>',
'\t<OGRVRTLayer name="dem">',
'\t<SrcDataSource>dem.csv</SrcDataSource>',
'\t<GeometryType>wkbPoint</GeometryType>',
'\t<GeometryField encoding="PointFromColumns" x="Easting" y="Northing" z="Elevation"/>',
'\t</OGRVRTLayer>',
'\t</OGRVRTDataSource>'
)
vrt_filecon <- file(tempfname_vrt,"w")
writeLines(vrt_header,con=vrt_filecon)
close(vrt_filecon)
tempfname_tif <- paste(tempfname_base,".tiff",sep="")
# Now run gdal_grid:
setMinMax(gdal_grid(src_datasource=tempfname_vrt,
dst_filename=tempfname_tif,a="invdist:power=2.0:smoothing=1.0",
txe=c(85000,89000),tye=c(894000,890000),outsize=c(400,400),
of="GTiff",ot="Float64",l="dem",output_Raster=TRUE))
}
}
\references{
\url{http://www.gdal.org/gdal_grid.html}
}
\author{
Jonathan A. Greenberg (\email{gdalUtils@estarcion.net}) (wrapper) and Frank Warmerdam (GDAL lead developer).
}
|
# Swaggy Jenkins
#
# Jenkins API clients generated from Swagger / Open API specification
#
# OpenAPI spec version: 1.0.0
# Contact: blah@cliffano.com
# Generated by: https://openapi-generator.tech
#' HudsonMasterComputermonitorData Class
#'
#' @field hudson.node_monitors.SwapSpaceMonitor
#' @field hudson.node_monitors.TemporarySpaceMonitor
#' @field hudson.node_monitors.DiskSpaceMonitor
#' @field hudson.node_monitors.ArchitectureMonitor
#' @field hudson.node_monitors.ResponseTimeMonitor
#' @field hudson.node_monitors.ClockMonitor
#' @field _class
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Model for the `monitorData` object attached to a Jenkins master computer in
# the remote-access API (generated from the Swagger / OpenAPI spec). Field
# names mirror the JSON keys verbatim -- dots and a leading underscore --
# hence the backticks throughout.
HudsonMasterComputermonitorData <- R6::R6Class(
  'HudsonMasterComputermonitorData',
  public = list(
    # Nested monitor models (R6 objects), NULL when absent.
    `hudson.node_monitors.SwapSpaceMonitor` = NULL,
    `hudson.node_monitors.TemporarySpaceMonitor` = NULL,
    `hudson.node_monitors.DiskSpaceMonitor` = NULL,
    # Plain string field.
    `hudson.node_monitors.ArchitectureMonitor` = NULL,
    `hudson.node_monitors.ResponseTimeMonitor` = NULL,
    `hudson.node_monitors.ClockMonitor` = NULL,
    # Java class name of the serialized object (plain string).
    `_class` = NULL,
    # Every field is optional; each supplied value is type-checked before
    # being stored (R6 objects for the nested monitors, length-1 character
    # vectors for the two string fields).
    initialize = function(`hudson.node_monitors.SwapSpaceMonitor`, `hudson.node_monitors.TemporarySpaceMonitor`, `hudson.node_monitors.DiskSpaceMonitor`, `hudson.node_monitors.ArchitectureMonitor`, `hudson.node_monitors.ResponseTimeMonitor`, `hudson.node_monitors.ClockMonitor`, `_class`){
      if (!missing(`hudson.node_monitors.SwapSpaceMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.SwapSpaceMonitor`))
        self$`hudson.node_monitors.SwapSpaceMonitor` <- `hudson.node_monitors.SwapSpaceMonitor`
      }
      if (!missing(`hudson.node_monitors.TemporarySpaceMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.TemporarySpaceMonitor`))
        self$`hudson.node_monitors.TemporarySpaceMonitor` <- `hudson.node_monitors.TemporarySpaceMonitor`
      }
      if (!missing(`hudson.node_monitors.DiskSpaceMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.DiskSpaceMonitor`))
        self$`hudson.node_monitors.DiskSpaceMonitor` <- `hudson.node_monitors.DiskSpaceMonitor`
      }
      if (!missing(`hudson.node_monitors.ArchitectureMonitor`)) {
        stopifnot(is.character(`hudson.node_monitors.ArchitectureMonitor`), length(`hudson.node_monitors.ArchitectureMonitor`) == 1)
        self$`hudson.node_monitors.ArchitectureMonitor` <- `hudson.node_monitors.ArchitectureMonitor`
      }
      if (!missing(`hudson.node_monitors.ResponseTimeMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.ResponseTimeMonitor`))
        self$`hudson.node_monitors.ResponseTimeMonitor` <- `hudson.node_monitors.ResponseTimeMonitor`
      }
      if (!missing(`hudson.node_monitors.ClockMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.ClockMonitor`))
        self$`hudson.node_monitors.ClockMonitor` <- `hudson.node_monitors.ClockMonitor`
      }
      if (!missing(`_class`)) {
        stopifnot(is.character(`_class`), length(`_class`) == 1)
        self$`_class` <- `_class`
      }
    },
    # Convert to a plain named list; nested models are converted recursively
    # via their own toJSON() and NULL fields are omitted.
    toJSON = function() {
      HudsonMasterComputermonitorDataObject <- list()
      if (!is.null(self$`hudson.node_monitors.SwapSpaceMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.SwapSpaceMonitor']] <- self$`hudson.node_monitors.SwapSpaceMonitor`$toJSON()
      }
      if (!is.null(self$`hudson.node_monitors.TemporarySpaceMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.TemporarySpaceMonitor']] <- self$`hudson.node_monitors.TemporarySpaceMonitor`$toJSON()
      }
      if (!is.null(self$`hudson.node_monitors.DiskSpaceMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.DiskSpaceMonitor']] <- self$`hudson.node_monitors.DiskSpaceMonitor`$toJSON()
      }
      if (!is.null(self$`hudson.node_monitors.ArchitectureMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.ArchitectureMonitor']] <- self$`hudson.node_monitors.ArchitectureMonitor`
      }
      if (!is.null(self$`hudson.node_monitors.ResponseTimeMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.ResponseTimeMonitor']] <- self$`hudson.node_monitors.ResponseTimeMonitor`$toJSON()
      }
      if (!is.null(self$`hudson.node_monitors.ClockMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.ClockMonitor']] <- self$`hudson.node_monitors.ClockMonitor`$toJSON()
      }
      if (!is.null(self$`_class`)) {
        HudsonMasterComputermonitorDataObject[['_class']] <- self$`_class`
      }
      HudsonMasterComputermonitorDataObject
    },
    # Populate this instance from a JSON string. Nested objects are rebuilt
    # by round-tripping the parsed sub-list through jsonlite::toJSON() and
    # each nested model's fromJSON(). Keys absent from the input leave the
    # corresponding field untouched.
    fromJSON = function(HudsonMasterComputermonitorDataJson) {
      HudsonMasterComputermonitorDataObject <- jsonlite::fromJSON(HudsonMasterComputermonitorDataJson)
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.SwapSpaceMonitor`)) {
        hudson.node_monitors.SwapSpaceMonitorObject <- SwapSpaceMonitorMemoryUsage2$new()
        hudson.node_monitors.SwapSpaceMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.SwapSpaceMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.SwapSpaceMonitor` <- hudson.node_monitors.SwapSpaceMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.TemporarySpaceMonitor`)) {
        hudson.node_monitors.TemporarySpaceMonitorObject <- DiskSpaceMonitorDescriptorDiskSpace$new()
        hudson.node_monitors.TemporarySpaceMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.TemporarySpaceMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.TemporarySpaceMonitor` <- hudson.node_monitors.TemporarySpaceMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.DiskSpaceMonitor`)) {
        hudson.node_monitors.DiskSpaceMonitorObject <- DiskSpaceMonitorDescriptorDiskSpace$new()
        hudson.node_monitors.DiskSpaceMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.DiskSpaceMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.DiskSpaceMonitor` <- hudson.node_monitors.DiskSpaceMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ArchitectureMonitor`)) {
        self$`hudson.node_monitors.ArchitectureMonitor` <- HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ArchitectureMonitor`
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ResponseTimeMonitor`)) {
        hudson.node_monitors.ResponseTimeMonitorObject <- ResponseTimeMonitorData$new()
        hudson.node_monitors.ResponseTimeMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.ResponseTimeMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.ResponseTimeMonitor` <- hudson.node_monitors.ResponseTimeMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ClockMonitor`)) {
        hudson.node_monitors.ClockMonitorObject <- ClockDifference$new()
        hudson.node_monitors.ClockMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.ClockMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.ClockMonitor` <- hudson.node_monitors.ClockMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`_class`)) {
        self$`_class` <- HudsonMasterComputermonitorDataObject$`_class`
      }
    },
    # Serialize to a JSON string.
    # FIX: the two plain-string fields are now wrapped in quotes; the
    # generated template emitted '"_class": %s', which produced invalid JSON
    # for any string value.
    # NOTE(review): the nested $toJSON() calls return R lists, which
    # sprintf("%s", ...) does not render as JSON text -- confirm against the
    # current openapi-generator template before relying on this method for
    # nested objects.
    toJSONString = function() {
      sprintf(
        '{
           "hudson.node_monitors.SwapSpaceMonitor": %s,
           "hudson.node_monitors.TemporarySpaceMonitor": %s,
           "hudson.node_monitors.DiskSpaceMonitor": %s,
           "hudson.node_monitors.ArchitectureMonitor": "%s",
           "hudson.node_monitors.ResponseTimeMonitor": %s,
           "hudson.node_monitors.ClockMonitor": %s,
           "_class": "%s"
        }',
        self$`hudson.node_monitors.SwapSpaceMonitor`$toJSON(),
        self$`hudson.node_monitors.TemporarySpaceMonitor`$toJSON(),
        self$`hudson.node_monitors.DiskSpaceMonitor`$toJSON(),
        self$`hudson.node_monitors.ArchitectureMonitor`,
        self$`hudson.node_monitors.ResponseTimeMonitor`$toJSON(),
        self$`hudson.node_monitors.ClockMonitor`$toJSON(),
        self$`_class`
      )
    },
    # Populate this instance from a JSON string, overwriting every field.
    # NOTE(review): unlike fromJSON() above, this assigns the *return value*
    # of the nested fromJSON() calls (their last evaluated expression) rather
    # than the freshly built model objects -- looks fragile; verify against
    # the generator before changing behavior.
    fromJSONString = function(HudsonMasterComputermonitorDataJson) {
      HudsonMasterComputermonitorDataObject <- jsonlite::fromJSON(HudsonMasterComputermonitorDataJson)
      SwapSpaceMonitorMemoryUsage2Object <- SwapSpaceMonitorMemoryUsage2$new()
      self$`hudson.node_monitors.SwapSpaceMonitor` <- SwapSpaceMonitorMemoryUsage2Object$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.SwapSpaceMonitor, auto_unbox = TRUE))
      DiskSpaceMonitorDescriptorDiskSpaceObject <- DiskSpaceMonitorDescriptorDiskSpace$new()
      self$`hudson.node_monitors.TemporarySpaceMonitor` <- DiskSpaceMonitorDescriptorDiskSpaceObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.TemporarySpaceMonitor, auto_unbox = TRUE))
      DiskSpaceMonitorDescriptorDiskSpaceObject <- DiskSpaceMonitorDescriptorDiskSpace$new()
      self$`hudson.node_monitors.DiskSpaceMonitor` <- DiskSpaceMonitorDescriptorDiskSpaceObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.DiskSpaceMonitor, auto_unbox = TRUE))
      self$`hudson.node_monitors.ArchitectureMonitor` <- HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ArchitectureMonitor`
      ResponseTimeMonitorDataObject <- ResponseTimeMonitorData$new()
      self$`hudson.node_monitors.ResponseTimeMonitor` <- ResponseTimeMonitorDataObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.ResponseTimeMonitor, auto_unbox = TRUE))
      ClockDifferenceObject <- ClockDifference$new()
      self$`hudson.node_monitors.ClockMonitor` <- ClockDifferenceObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.ClockMonitor, auto_unbox = TRUE))
      self$`_class` <- HudsonMasterComputermonitorDataObject$`_class`
    }
  )
)
| /clients/r/generated/R/HudsonMasterComputermonitorData.r | permissive | rahulyhg/swaggy-jenkins | R | false | false | 10,285 | r | # Swaggy Jenkins
#
# Jenkins API clients generated from Swagger / Open API specification
#
# OpenAPI spec version: 1.0.0
# Contact: blah@cliffano.com
# Generated by: https://openapi-generator.tech
#' HudsonMasterComputermonitorData Class
#'
#' @field hudson.node_monitors.SwapSpaceMonitor
#' @field hudson.node_monitors.TemporarySpaceMonitor
#' @field hudson.node_monitors.DiskSpaceMonitor
#' @field hudson.node_monitors.ArchitectureMonitor
#' @field hudson.node_monitors.ResponseTimeMonitor
#' @field hudson.node_monitors.ClockMonitor
#' @field _class
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Model for the `monitorData` object attached to a Jenkins master computer in
# the remote-access API (generated from the Swagger / OpenAPI spec). Field
# names mirror the JSON keys verbatim -- dots and a leading underscore --
# hence the backticks throughout.
HudsonMasterComputermonitorData <- R6::R6Class(
  'HudsonMasterComputermonitorData',
  public = list(
    # Nested monitor models (R6 objects), NULL when absent.
    `hudson.node_monitors.SwapSpaceMonitor` = NULL,
    `hudson.node_monitors.TemporarySpaceMonitor` = NULL,
    `hudson.node_monitors.DiskSpaceMonitor` = NULL,
    # Plain string field.
    `hudson.node_monitors.ArchitectureMonitor` = NULL,
    `hudson.node_monitors.ResponseTimeMonitor` = NULL,
    `hudson.node_monitors.ClockMonitor` = NULL,
    # Java class name of the serialized object (plain string).
    `_class` = NULL,
    # Every field is optional; each supplied value is type-checked before
    # being stored (R6 objects for the nested monitors, length-1 character
    # vectors for the two string fields).
    initialize = function(`hudson.node_monitors.SwapSpaceMonitor`, `hudson.node_monitors.TemporarySpaceMonitor`, `hudson.node_monitors.DiskSpaceMonitor`, `hudson.node_monitors.ArchitectureMonitor`, `hudson.node_monitors.ResponseTimeMonitor`, `hudson.node_monitors.ClockMonitor`, `_class`){
      if (!missing(`hudson.node_monitors.SwapSpaceMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.SwapSpaceMonitor`))
        self$`hudson.node_monitors.SwapSpaceMonitor` <- `hudson.node_monitors.SwapSpaceMonitor`
      }
      if (!missing(`hudson.node_monitors.TemporarySpaceMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.TemporarySpaceMonitor`))
        self$`hudson.node_monitors.TemporarySpaceMonitor` <- `hudson.node_monitors.TemporarySpaceMonitor`
      }
      if (!missing(`hudson.node_monitors.DiskSpaceMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.DiskSpaceMonitor`))
        self$`hudson.node_monitors.DiskSpaceMonitor` <- `hudson.node_monitors.DiskSpaceMonitor`
      }
      if (!missing(`hudson.node_monitors.ArchitectureMonitor`)) {
        stopifnot(is.character(`hudson.node_monitors.ArchitectureMonitor`), length(`hudson.node_monitors.ArchitectureMonitor`) == 1)
        self$`hudson.node_monitors.ArchitectureMonitor` <- `hudson.node_monitors.ArchitectureMonitor`
      }
      if (!missing(`hudson.node_monitors.ResponseTimeMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.ResponseTimeMonitor`))
        self$`hudson.node_monitors.ResponseTimeMonitor` <- `hudson.node_monitors.ResponseTimeMonitor`
      }
      if (!missing(`hudson.node_monitors.ClockMonitor`)) {
        stopifnot(R6::is.R6(`hudson.node_monitors.ClockMonitor`))
        self$`hudson.node_monitors.ClockMonitor` <- `hudson.node_monitors.ClockMonitor`
      }
      if (!missing(`_class`)) {
        stopifnot(is.character(`_class`), length(`_class`) == 1)
        self$`_class` <- `_class`
      }
    },
    # Convert to a plain named list; nested models are converted recursively
    # via their own toJSON() and NULL fields are omitted.
    toJSON = function() {
      HudsonMasterComputermonitorDataObject <- list()
      if (!is.null(self$`hudson.node_monitors.SwapSpaceMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.SwapSpaceMonitor']] <- self$`hudson.node_monitors.SwapSpaceMonitor`$toJSON()
      }
      if (!is.null(self$`hudson.node_monitors.TemporarySpaceMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.TemporarySpaceMonitor']] <- self$`hudson.node_monitors.TemporarySpaceMonitor`$toJSON()
      }
      if (!is.null(self$`hudson.node_monitors.DiskSpaceMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.DiskSpaceMonitor']] <- self$`hudson.node_monitors.DiskSpaceMonitor`$toJSON()
      }
      if (!is.null(self$`hudson.node_monitors.ArchitectureMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.ArchitectureMonitor']] <- self$`hudson.node_monitors.ArchitectureMonitor`
      }
      if (!is.null(self$`hudson.node_monitors.ResponseTimeMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.ResponseTimeMonitor']] <- self$`hudson.node_monitors.ResponseTimeMonitor`$toJSON()
      }
      if (!is.null(self$`hudson.node_monitors.ClockMonitor`)) {
        HudsonMasterComputermonitorDataObject[['hudson.node_monitors.ClockMonitor']] <- self$`hudson.node_monitors.ClockMonitor`$toJSON()
      }
      if (!is.null(self$`_class`)) {
        HudsonMasterComputermonitorDataObject[['_class']] <- self$`_class`
      }
      HudsonMasterComputermonitorDataObject
    },
    # Populate this instance from a JSON string. Nested objects are rebuilt
    # by round-tripping the parsed sub-list through jsonlite::toJSON() and
    # each nested model's fromJSON(). Keys absent from the input leave the
    # corresponding field untouched.
    fromJSON = function(HudsonMasterComputermonitorDataJson) {
      HudsonMasterComputermonitorDataObject <- jsonlite::fromJSON(HudsonMasterComputermonitorDataJson)
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.SwapSpaceMonitor`)) {
        hudson.node_monitors.SwapSpaceMonitorObject <- SwapSpaceMonitorMemoryUsage2$new()
        hudson.node_monitors.SwapSpaceMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.SwapSpaceMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.SwapSpaceMonitor` <- hudson.node_monitors.SwapSpaceMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.TemporarySpaceMonitor`)) {
        hudson.node_monitors.TemporarySpaceMonitorObject <- DiskSpaceMonitorDescriptorDiskSpace$new()
        hudson.node_monitors.TemporarySpaceMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.TemporarySpaceMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.TemporarySpaceMonitor` <- hudson.node_monitors.TemporarySpaceMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.DiskSpaceMonitor`)) {
        hudson.node_monitors.DiskSpaceMonitorObject <- DiskSpaceMonitorDescriptorDiskSpace$new()
        hudson.node_monitors.DiskSpaceMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.DiskSpaceMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.DiskSpaceMonitor` <- hudson.node_monitors.DiskSpaceMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ArchitectureMonitor`)) {
        self$`hudson.node_monitors.ArchitectureMonitor` <- HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ArchitectureMonitor`
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ResponseTimeMonitor`)) {
        hudson.node_monitors.ResponseTimeMonitorObject <- ResponseTimeMonitorData$new()
        hudson.node_monitors.ResponseTimeMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.ResponseTimeMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.ResponseTimeMonitor` <- hudson.node_monitors.ResponseTimeMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ClockMonitor`)) {
        hudson.node_monitors.ClockMonitorObject <- ClockDifference$new()
        hudson.node_monitors.ClockMonitorObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.ClockMonitor, auto_unbox = TRUE))
        self$`hudson.node_monitors.ClockMonitor` <- hudson.node_monitors.ClockMonitorObject
      }
      if (!is.null(HudsonMasterComputermonitorDataObject$`_class`)) {
        self$`_class` <- HudsonMasterComputermonitorDataObject$`_class`
      }
    },
    # Serialize to a JSON string.
    # FIX: the two plain-string fields are now wrapped in quotes; the
    # generated template emitted '"_class": %s', which produced invalid JSON
    # for any string value.
    # NOTE(review): the nested $toJSON() calls return R lists, which
    # sprintf("%s", ...) does not render as JSON text -- confirm against the
    # current openapi-generator template before relying on this method for
    # nested objects.
    toJSONString = function() {
      sprintf(
        '{
           "hudson.node_monitors.SwapSpaceMonitor": %s,
           "hudson.node_monitors.TemporarySpaceMonitor": %s,
           "hudson.node_monitors.DiskSpaceMonitor": %s,
           "hudson.node_monitors.ArchitectureMonitor": "%s",
           "hudson.node_monitors.ResponseTimeMonitor": %s,
           "hudson.node_monitors.ClockMonitor": %s,
           "_class": "%s"
        }',
        self$`hudson.node_monitors.SwapSpaceMonitor`$toJSON(),
        self$`hudson.node_monitors.TemporarySpaceMonitor`$toJSON(),
        self$`hudson.node_monitors.DiskSpaceMonitor`$toJSON(),
        self$`hudson.node_monitors.ArchitectureMonitor`,
        self$`hudson.node_monitors.ResponseTimeMonitor`$toJSON(),
        self$`hudson.node_monitors.ClockMonitor`$toJSON(),
        self$`_class`
      )
    },
    # Populate this instance from a JSON string, overwriting every field.
    # NOTE(review): unlike fromJSON() above, this assigns the *return value*
    # of the nested fromJSON() calls (their last evaluated expression) rather
    # than the freshly built model objects -- looks fragile; verify against
    # the generator before changing behavior.
    fromJSONString = function(HudsonMasterComputermonitorDataJson) {
      HudsonMasterComputermonitorDataObject <- jsonlite::fromJSON(HudsonMasterComputermonitorDataJson)
      SwapSpaceMonitorMemoryUsage2Object <- SwapSpaceMonitorMemoryUsage2$new()
      self$`hudson.node_monitors.SwapSpaceMonitor` <- SwapSpaceMonitorMemoryUsage2Object$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.SwapSpaceMonitor, auto_unbox = TRUE))
      DiskSpaceMonitorDescriptorDiskSpaceObject <- DiskSpaceMonitorDescriptorDiskSpace$new()
      self$`hudson.node_monitors.TemporarySpaceMonitor` <- DiskSpaceMonitorDescriptorDiskSpaceObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.TemporarySpaceMonitor, auto_unbox = TRUE))
      DiskSpaceMonitorDescriptorDiskSpaceObject <- DiskSpaceMonitorDescriptorDiskSpace$new()
      self$`hudson.node_monitors.DiskSpaceMonitor` <- DiskSpaceMonitorDescriptorDiskSpaceObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.DiskSpaceMonitor, auto_unbox = TRUE))
      self$`hudson.node_monitors.ArchitectureMonitor` <- HudsonMasterComputermonitorDataObject$`hudson.node_monitors.ArchitectureMonitor`
      ResponseTimeMonitorDataObject <- ResponseTimeMonitorData$new()
      self$`hudson.node_monitors.ResponseTimeMonitor` <- ResponseTimeMonitorDataObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.ResponseTimeMonitor, auto_unbox = TRUE))
      ClockDifferenceObject <- ClockDifference$new()
      self$`hudson.node_monitors.ClockMonitor` <- ClockDifferenceObject$fromJSON(jsonlite::toJSON(HudsonMasterComputermonitorDataObject$hudson.node_monitors.ClockMonitor, auto_unbox = TRUE))
      self$`_class` <- HudsonMasterComputermonitorDataObject$`_class`
    }
  )
)
|
# ____ _
# / ___| / \ Genetic Algorithm
# | | _ / _ \ for
# | |_| |/ ___ \ Subset Selection
# \____/_/ \_\
#
# GA-driven feature (wavelength) subset selection, after:
# <https://www.jstatsoft.org/article/view/v053i04/v53i04.pdf>
#
# NOTE(review): this script relies on objects and packages made available
# elsewhere (train.infrared, train.Y, generateID, GLOBALS, caret, GA,
# doParallel, memoise) -- it is not self-contained.
# Target variables present in the training data; one is selected per run.
labels <- c("Ca","P","pH","Sand","SOC")
label <- labels[1]
# SVM cost per label (currently the same 1e4 for all), indexed by label name.
cost <- c(1e0,1e0,1e0,1e0,1e0)*1e4
names(cost) <- labels
USE_SUGGESTIONS <- FALSE # check to see whether there are past suggestions?
EXECUTION_ID <- generateID()
#########
# Setup #
#########
# Square plotting region for the GA monitor plot.
par(pty="s")
# Copy the data
X = train.infrared
y = train.Y[,label]
# Setup the computational nuances of the model training phase
fitControl <- trainControl(
    ## k-fold CV
    method="cv",
    number=8,
    # NOTE(review): caret documents `seeds` as a *list* of length
    # (number of resamples + 1); an integer vector may not seed the folds
    # as intended -- confirm against the caret docs.
    seeds=(803):(803+8+1),
    allowParallel=TRUE,
    returnData=FALSE) # saves memory
# Parallel Computing
availableCores = detectCores()
cl <- makeCluster(availableCores, type="PSOCK", outfile="")
registerDoParallel(cl)
#######################
# Set Genetic Nuances #
#######################
# Choose the genetic operators for binary chromosomes: the index into each
# menu picks tournament selection, uniform crossover and random-allele
# mutation respectively.
gaControl("binary"=list(selection=c("gabin_lrSelection","gabin_rwSelection","gareal_sigmaSelection","gabin_tourSelection")[4],
                        crossover=c("gabin_spCrossover","gabin_uCrossover")[2],
                        mutation=c("gabin_raMutation")[1]))
# Record the chosen operators so they can be written to the results CSV and
# used in figure/folder names below.
parameters = data.frame(selection=gaControl("binary")$selection,
                        crossover=gaControl("binary")$crossover,
                        mutation=gaControl("binary")$mutation,
                        stringsAsFactors=FALSE)
# Strategy Parameter Setting
#' De Jong's strategy parameters for online (offline) performance are:
#' popSize = 30(80)
#' pcrossover = 0.95(0.45)
#' pmutation = 0.01(0.01)
parameters$maxgen = 100
parameters$popSize = max(8*2,availableCores)
parameters$pcrossover = 0.95
parameters$pmutation = 0.1
####################
# Fitness Function #
####################
# GA fitness for one candidate feature subset.
#
# string: binary chromosome; bit i == 1 keeps column i of the global spectra
#         matrix `X`.
# value:  negative mean cross-validated RMSE of an SVM trained on the kept
#         columns (ga() maximizes fitness, so smaller RMSE => fitter).
# Depends on globals defined above: X, y, fitControl, cost, label, libSVM.
fitness <- function(string){
  # Columns switched on by this chromosome.
  keep <- which(string == 1)
  # k-fold cross-validated SVM restricted to the selected predictors.
  # (The unused nrow(X) and the redundant local copy of `y` from the
  # original were dropped; train() sees exactly the same arguments.)
  mdl <- train(x = X[, keep], y = y,
               method = libSVM(),
               trControl = fitControl,
               #preProc = c("center", "scale")
               tuneGrid = expand.grid(cost = cost[[label]]))
  # Negate: ga() maximizes and we want to minimize RMSE.
  -mean(mdl$resample$RMSE)
}# fitness
###############
# Suggestions #
###############
# Seed the initial population with structured chromosomes: for each prime
# window W, build a mask that samples the spectrum at columns 1, 1+W, 1+2W, ...
prime_numbers <- c(2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97)
suggestions <- matrix(0, nrow = length(prime_numbers), ncol = ncol(X))
# seq_along() is safe even for an empty vector, unlike 1:length(x) which
# would iterate over c(1, 0).
for (i in seq_along(prime_numbers)) {
  suggestions[i, seq(from = 1, to = ncol(X), by = prime_numbers[i])] <- 1
}
# Optionally append the best chromosome recorded by a previous execution.
if (USE_SUGGESTIONS) {
  # check if a previously recorded individual exists for this label
  the_most_fitted_individual <- getSelectedFeatures(label)
  # append the most fitted individual
  if (!is.null(the_most_fitted_individual)) {
    suggestions <- rbind(suggestions, data.matrix(the_most_fitted_individual))
  }
}
###############
# Memoization #
###############
#' In certain circumstances, particularly with binary GAs, memoization can be
#' used to speed up calculations by using cached results. This is easily
#' obtained using the memoise package.
# Identical chromosomes resurface across generations; cache their fitness.
mfitness <- memoise(fitness)
###################
# Start Evolution #
###################
startTime = Sys.time()
# Maximize the (memoised) fitness over binary chromosomes of length ncol(X),
# seeding the population with the structured `suggestions` built above.
GA <- ga(type="binary", fitness=mfitness,
         popSize=parameters$popSize,
         pcrossover=parameters$pcrossover,
         pmutation=parameters$pmutation,
         maxiter=parameters$maxgen, #run=100,
         names=colnames(X),
         nBits=ncol(X),
         suggestions=suggestions,
         parallel=TRUE,
         min=100, max=100, # not relevant in the case of type="binary"
         monitor=plot,
         seed=2047)
finishTime = Sys.time()
forget(mfitness) # clear cache
stopCluster(cl) # shut down the cluster
###########
# Summary #
###########
summary(GA)
# Wall-clock duration of the search, in whole minutes (recorded in the CSV).
timeDiff = round(as.numeric(finishTime-startTime, units="mins"),0)
###############
# Save Visual #
###############
# Figures are grouped in a folder named after the operator configuration,
# e.g. "figures/(gabin_tourSelection)(gabin_uCrossover)(gabin_raMutation)".
folder_name <- sprintf("(%s)(%s)(%s)",
                       parameters$selection,
                       parameters$crossover,
                       parameters$mutation)
folder_path <- file.path(getwd(), "figures", folder_name)
dir.create(folder_path, recursive = TRUE, showWarnings = FALSE)
# The file name encodes the target label, the strategy parameters and the
# unique execution id of this run.
image_info <- sprintf("(%s)(popSize=%s)(pcrossover=%s)(pmutation=%s)",
                      label, parameters$popSize,
                      parameters$pcrossover, parameters$pmutation)
image_path <- file.path(folder_path, paste0(image_info, "(", EXECUTION_ID, ")", ".png"))
# Copy the on-screen GA monitor plot to a PNG device and close it.
dev.copy(png, image_path)
dev.off()
#######################
# Export the solution #
#######################
# Chromosome of the most fitted individual.
# FIX: GA@solution stores one solution per ROW (ties yield several rows), so
# a multi-solution result must be reduced with solution[1, ]; the original
# solution[, 1] picked the first *gene* of every solution instead.
solution <- drop(GA@solution)
if (!is.null(dim(solution))) solution <- solution[1, ]
# Best (maximized) fitness value, i.e. the negated cross-validated RMSE.
fitnessValue <- GA@fitnessValue[1]
# One record per execution: run metadata + strategy parameters + chromosome.
solution <- data.frame(executionID=EXECUTION_ID, executionDate=as.character(Sys.Date()), executionTimeInMinutes=timeDiff,
                       label=label, fitnessValue=fitnessValue,
                       as.vector(parameters),
                       t(solution),
                       stringsAsFactors=FALSE)
# Destination CSV accumulating results across executions.
destfile <- file.path(getwd(), GLOBALS[["FEATURE_SELECTION_FOLDER"]], paste0(GLOBALS[["FEATURE_SELECTION_VIA_GA"]],'.csv'))
# Append this run's record to the accumulated results file (schemas must
# match for rbind), or create a fresh file on first use.
if(file.exists(destfile)){
    temp = read.csv(destfile, stringsAsFactors=FALSE)
    temp = rbind(temp,solution)
    write.csv(temp, destfile, row.names=FALSE)
} else {
    write.csv(solution, destfile, row.names=FALSE)
} | /procedures/feature_selection_via_genetic_algorithm.R | no_license | data-science-competitions/Kaggle-Africa-Soil-Property-Prediction-Challenge | R | false | false | 5,892 | r | # ____ _
# / ___| / \ Genetic Algorithm
# | | _ / _ \ for
# | |_| |/ ___ \ Subset Selection
# \____/_/ \_\
#
# GA-driven feature (wavelength) subset selection, after:
# <https://www.jstatsoft.org/article/view/v053i04/v53i04.pdf>
#
# NOTE(review): this section is a verbatim duplicate of the script above
# (dataset text column); it relies on objects and packages made available
# elsewhere (train.infrared, train.Y, generateID, GLOBALS, caret, GA,
# doParallel, memoise).
# Target variables present in the training data; one is selected per run.
labels <- c("Ca","P","pH","Sand","SOC")
label <- labels[1]
# SVM cost per label (currently the same 1e4 for all), indexed by label name.
cost <- c(1e0,1e0,1e0,1e0,1e0)*1e4
names(cost) <- labels
USE_SUGGESTIONS <- FALSE # check to see whether there are past suggestions?
EXECUTION_ID <- generateID()
#########
# Setup #
#########
# Square plotting region for the GA monitor plot.
par(pty="s")
# Copy the data
X = train.infrared
y = train.Y[,label]
# Setup the computational nuances of the model training phase
fitControl <- trainControl(
    ## k-fold CV
    method="cv",
    number=8,
    # NOTE(review): caret documents `seeds` as a *list* of length
    # (number of resamples + 1); an integer vector may not seed the folds
    # as intended -- confirm against the caret docs.
    seeds=(803):(803+8+1),
    allowParallel=TRUE,
    returnData=FALSE) # saves memory
# Parallel Computing
availableCores = detectCores()
cl <- makeCluster(availableCores, type="PSOCK", outfile="")
registerDoParallel(cl)
#######################
# Set Genetic Nuances #
#######################
# Choose the genetic operators for binary chromosomes: the index into each
# menu picks tournament selection, uniform crossover and random-allele
# mutation respectively.
gaControl("binary"=list(selection=c("gabin_lrSelection","gabin_rwSelection","gareal_sigmaSelection","gabin_tourSelection")[4],
                        crossover=c("gabin_spCrossover","gabin_uCrossover")[2],
                        mutation=c("gabin_raMutation")[1]))
# Record the chosen operators so they can be written to the results CSV and
# used in figure/folder names below.
parameters = data.frame(selection=gaControl("binary")$selection,
                        crossover=gaControl("binary")$crossover,
                        mutation=gaControl("binary")$mutation,
                        stringsAsFactors=FALSE)
# Strategy Parameter Setting
#' De Jong's strategy parameters for online (offline) performance are:
#' popSize = 30(80)
#' pcrossover = 0.95(0.45)
#' pmutation = 0.01(0.01)
parameters$maxgen = 100
parameters$popSize = max(8*2,availableCores)
parameters$pcrossover = 0.95
parameters$pmutation = 0.1
####################
# Fitness Function #
####################
# GA fitness for one candidate feature subset.
#
# string: binary chromosome; bit i == 1 keeps column i of the global spectra
#         matrix `X`.
# value:  negative mean cross-validated RMSE of an SVM trained on the kept
#         columns (ga() maximizes fitness, so smaller RMSE => fitter).
# Depends on globals defined above: X, y, fitControl, cost, label, libSVM.
fitness <- function(string){
  # Columns switched on by this chromosome.
  keep <- which(string == 1)
  # k-fold cross-validated SVM restricted to the selected predictors.
  # (The unused nrow(X) and the redundant local copy of `y` from the
  # original were dropped; train() sees exactly the same arguments.)
  mdl <- train(x = X[, keep], y = y,
               method = libSVM(),
               trControl = fitControl,
               #preProc = c("center", "scale")
               tuneGrid = expand.grid(cost = cost[[label]]))
  # Negate: ga() maximizes and we want to minimize RMSE.
  -mean(mdl$resample$RMSE)
}# fitness
###############
# Suggestions #
###############
# Seed the initial population with structured chromosomes: for each prime
# window W, build a mask that samples the spectrum at columns 1, 1+W, 1+2W, ...
prime_numbers <- c(2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97)
suggestions <- matrix(0, nrow = length(prime_numbers), ncol = ncol(X))
# seq_along() is safe even for an empty vector, unlike 1:length(x) which
# would iterate over c(1, 0).
for (i in seq_along(prime_numbers)) {
  suggestions[i, seq(from = 1, to = ncol(X), by = prime_numbers[i])] <- 1
}
# Optionally append the best chromosome recorded by a previous execution.
if (USE_SUGGESTIONS) {
  # check if a previously recorded individual exists for this label
  the_most_fitted_individual <- getSelectedFeatures(label)
  # append the most fitted individual
  if (!is.null(the_most_fitted_individual)) {
    suggestions <- rbind(suggestions, data.matrix(the_most_fitted_individual))
  }
}
###############
# Memoization #
###############
#' In certain circumstances, particularly with binary GAs, memoization can be
#' used to speed up calculations by using cached results. This is easily
#' obtained using the memoise package.
# Identical chromosomes resurface across generations; cache their fitness.
mfitness <- memoise(fitness)
###################
# Start Evolution #
###################
startTime = Sys.time()
# Maximize the (memoised) fitness over binary chromosomes of length ncol(X),
# seeding the population with the structured `suggestions` built above.
GA <- ga(type="binary", fitness=mfitness,
         popSize=parameters$popSize,
         pcrossover=parameters$pcrossover,
         pmutation=parameters$pmutation,
         maxiter=parameters$maxgen, #run=100,
         names=colnames(X),
         nBits=ncol(X),
         suggestions=suggestions,
         parallel=TRUE,
         min=100, max=100, # not relevant in the case of type="binary"
         monitor=plot,
         seed=2047)
finishTime = Sys.time()
forget(mfitness) # clear cache
stopCluster(cl) # shut down the cluster
###########
# Summary #
###########
summary(GA)
# Wall-clock duration of the search, in whole minutes (recorded in the CSV).
timeDiff = round(as.numeric(finishTime-startTime, units="mins"),0)
###############
# Save Visual #
###############
# Figures are grouped in a folder named after the operator configuration,
# e.g. "figures/(gabin_tourSelection)(gabin_uCrossover)(gabin_raMutation)".
folder_name <- sprintf("(%s)(%s)(%s)",
                       parameters$selection,
                       parameters$crossover,
                       parameters$mutation)
folder_path <- file.path(getwd(), "figures", folder_name)
dir.create(folder_path, recursive = TRUE, showWarnings = FALSE)
# The file name encodes the target label, the strategy parameters and the
# unique execution id of this run.
image_info <- sprintf("(%s)(popSize=%s)(pcrossover=%s)(pmutation=%s)",
                      label, parameters$popSize,
                      parameters$pcrossover, parameters$pmutation)
image_path <- file.path(folder_path, paste0(image_info, "(", EXECUTION_ID, ")", ".png"))
# Copy the on-screen GA monitor plot to a PNG device and close it.
dev.copy(png, image_path)
dev.off()
#######################
# Export the solution #
#######################
# Chromosome of the most fitted individual.
# FIX: GA@solution stores one solution per ROW (ties yield several rows), so
# a multi-solution result must be reduced with solution[1, ]; the original
# solution[, 1] picked the first *gene* of every solution instead.
solution <- drop(GA@solution)
if (!is.null(dim(solution))) solution <- solution[1, ]
# Best (maximized) fitness value, i.e. the negated cross-validated RMSE.
fitnessValue <- GA@fitnessValue[1]
# One record per execution: run metadata + strategy parameters + chromosome.
solution <- data.frame(executionID=EXECUTION_ID, executionDate=as.character(Sys.Date()), executionTimeInMinutes=timeDiff,
                       label=label, fitnessValue=fitnessValue,
                       as.vector(parameters),
                       t(solution),
                       stringsAsFactors=FALSE)
# Destination CSV accumulating results across executions.
destfile <- file.path(getwd(), GLOBALS[["FEATURE_SELECTION_FOLDER"]], paste0(GLOBALS[["FEATURE_SELECTION_VIA_GA"]],'.csv'))
# Append this run's record to the accumulated results file (schemas must
# match for rbind), or create a fresh file on first use.
if(file.exists(destfile)){
    temp = read.csv(destfile, stringsAsFactors=FALSE)
    temp = rbind(temp,solution)
    write.csv(temp, destfile, row.names=FALSE)
} else {
    write.csv(solution, destfile, row.names=FALSE)
} |
#' Render / Convert PDF
#'
#' High quality conversion of pdf page(s) to png, jpeg or tiff format, or render into a
#' raw bitmap array for further processing in R.
#'
#' @export
#' @name rendering
#' @rdname pdf_render_page
#' @param pdf file path or raw vector with pdf data
#' @param page which page to render
#' @param numeric convert raw output to (0-1) real values
#' @param dpi resolution (dots per inch) to render
#' @param antialias enable antialiasing. Must be `"text"` or `"draw"` or `TRUE` (both)
#' or `FALSE` (neither).
#' @param opw owner password
#' @param upw user password
#' @family pdftools
#' @aliases render
#' @examples # Rendering should be supported on all platforms now
#' # convert few pages to png
#' file.copy(file.path(Sys.getenv("R_DOC_DIR"), "NEWS.pdf"), "news.pdf")
#' pdf_convert("news.pdf", pages = 1:3)
#'
#' # render into raw bitmap
#' bitmap <- pdf_render_page("news.pdf")
#'
#' # save to bitmap formats
#' png::writePNG(bitmap, "page.png")
#' webp::write_webp(bitmap, "page.webp")
#'
#' # Higher quality
#' bitmap <- pdf_render_page("news.pdf", page = 1, dpi = 300)
#' png::writePNG(bitmap, "page.png")
#'
#' # slightly more efficient
#' bitmap_raw <- pdf_render_page("news.pdf", numeric = FALSE)
#' webp::write_webp(bitmap_raw, "page.webp")
#'
#' # Cleanup
#' unlink(c('news.pdf', 'news_1.png', 'news_2.png', 'news_3.png',
#' 'page.jpeg', 'page.png', 'page.webp'))
pdf_render_page <- function(pdf, page = 1, dpi = 72, numeric = FALSE, antialias = TRUE, opw = "", upw = "") {
  # "draw" toggles graphics antialiasing, "text" toggles glyph antialiasing;
  # TRUE / FALSE switches both on / off.
  aa_draw <- isTRUE(antialias) || isTRUE(antialias == "draw")
  aa_text <- isTRUE(antialias) || isTRUE(antialias == "text")
  bitmap <- poppler_render_page(loadfile(pdf), page, dpi, opw, upw, aa_draw, aa_text)
  if (identical(dim(bitmap)[1], 4L)) {
    # Reshuffle ARGB channel order into RGBA.
    bitmap <- bitmap[c(3, 2, 1, 4), , , drop = FALSE]
  }
  if (!isTRUE(numeric)) {
    class(bitmap) <- c("bitmap", "rgba")
    return(bitmap)
  }
  # Scale raw bytes to [0, 1] reals, keeping the dimensions, then transpose
  # the array axes.
  bitmap <- structure(as.numeric(bitmap) / 255, dim = dim(bitmap))
  aperm(bitmap)
}
#' @export
#' @rdname pdf_render_page
#' @param format string with output format such as `"png"` or `"jpeg"`. Must be equal
#' to one of `poppler_config()$supported_image_formats`.
#' @param pages vector with one-based page numbers to render. `NULL` means all pages.
#' @param filenames vector of equal length to `pages` with output filenames. May also be
#' a format string which is expanded using `pages` and `format` respectively.
#' @param verbose print some progress info to stdout
pdf_convert <- function(pdf, format = "png", pages = NULL, filenames = NULL, dpi = 72,
                        antialias = TRUE, opw = "", upw = "", verbose = TRUE){
  config <- poppler_config()
  # FIX: user-facing message read "You version of libppoppler" (two typos).
  if(!config$can_render || !length(config$supported_image_formats))
    stop("Your version of libpoppler does not support rendering")
  format <- match.arg(format, poppler_config()$supported_image_formats)
  # Default to converting every page of the document.
  if(is.null(pages))
    pages <- seq_len(pdf_info(pdf, opw = opw, upw = upw)$pages)
  if(!is.numeric(pages) || !length(pages))
    stop("Argument 'pages' must be a one-indexed vector of page numbers")
  if(length(filenames) < 2){
    # Derive output names. Plain if/else replaces the scalar ifelse(): the
    # condition is always length one, and ifelse() is meant for vectors.
    input <- if (is.raw(pdf)) 'output' else sub(".pdf", "", basename(pdf), fixed = TRUE)
    filenames <- if (length(filenames)) {
      # Single template string, expanded with the page number and format.
      sprintf(filenames, pages, format)
    } else {
      sprintf("%s_%d.%s", input, pages, format)
    }
  }
  if(length(filenames) != length(pages))
    stop("Length of 'filenames' must be one or equal to 'pages'")
  antialiasing <- isTRUE(antialias) || isTRUE(antialias == "draw")
  text_antialiasing <- isTRUE(antialias) || isTRUE(antialias == "text")
  poppler_convert(loadfile(pdf), format, pages, filenames, dpi, opw, upw, antialiasing, text_antialiasing, verbose)
}
#' @export
#' @rdname pdf_render_page
# Thin public accessor over the compiled-in poppler build information.
poppler_config <- function() get_poppler_config()
| /R/render.R | permissive | ropensci/pdftools | R | false | false | 3,850 | r | #' Render / Convert PDF
#'
#' High quality conversion of pdf page(s) to png, jpeg or tiff format, or render into a
#' raw bitmap array for further processing in R.
#'
#' @export
#' @name rendering
#' @rdname pdf_render_page
#' @param pdf file path or raw vector with pdf data
#' @param page which page to render
#' @param numeric convert raw output to (0-1) real values
#' @param dpi resolution (dots per inch) to render
#' @param antialias enable antialiasing. Must be `"text"` or `"draw"` or `TRUE` (both)
#' or `FALSE` (neither).
#' @param opw owner password
#' @param upw user password
#' @family pdftools
#' @aliases render
#' @examples # Rendering should be supported on all platforms now
#' # convert few pages to png
#' file.copy(file.path(Sys.getenv("R_DOC_DIR"), "NEWS.pdf"), "news.pdf")
#' pdf_convert("news.pdf", pages = 1:3)
#'
#' # render into raw bitmap
#' bitmap <- pdf_render_page("news.pdf")
#'
#' # save to bitmap formats
#' png::writePNG(bitmap, "page.png")
#' webp::write_webp(bitmap, "page.webp")
#'
#' # Higher quality
#' bitmap <- pdf_render_page("news.pdf", page = 1, dpi = 300)
#' png::writePNG(bitmap, "page.png")
#'
#' # slightly more efficient
#' bitmap_raw <- pdf_render_page("news.pdf", numeric = FALSE)
#' webp::write_webp(bitmap_raw, "page.webp")
#'
#' # Cleanup
#' unlink(c('news.pdf', 'news_1.png', 'news_2.png', 'news_3.png',
#' 'page.jpeg', 'page.png', 'page.webp'))
# Render one pdf page into a raw bitmap array (see roxygen block above).
# 'antialias' may be TRUE (both), FALSE (neither), "draw" or "text".
pdf_render_page <- function(pdf, page = 1, dpi = 72, numeric = FALSE, antialias = TRUE, opw = "", upw = "") {
  draw_aa <- isTRUE(antialias) || isTRUE(antialias == "draw")
  text_aa <- isTRUE(antialias) || isTRUE(antialias == "text")
  bitmap <- poppler_render_page(loadfile(pdf), page, dpi, opw, upw, draw_aa, text_aa)
  # A 4-channel render comes back as ARGB; reorder the channel axis to RGBA.
  if (identical(dim(bitmap)[1], 4L)) {
    bitmap <- bitmap[c(3, 2, 1, 4), , , drop = FALSE]
  }
  if (!isTRUE(numeric)) {
    class(bitmap) <- c("bitmap", "rgba")
    return(bitmap)
  }
  # Rescale raw bytes to [0, 1] reals and reverse the axis order via aperm().
  scaled <- structure(as.numeric(bitmap) / 255, dim = dim(bitmap))
  aperm(scaled)
}
#' @export
#' @rdname pdf_render_page
#' @param format string with output format such as `"png"` or `"jpeg"`. Must be equal
#' to one of `poppler_config()$supported_image_formats`.
#' @param pages vector with one-based page numbers to render. `NULL` means all pages.
#' @param filenames vector of equal length to `pages` with output filenames. May also be
#' a format string which is expanded using `pages` and `format` respectively.
#' @param verbose print some progress info to stdout
pdf_convert <- function(pdf, format = "png", pages = NULL, filenames = NULL, dpi = 72,
                        antialias = TRUE, opw = "", upw = "", verbose = TRUE){
  # Render pdf pages to image files on disk (see roxygen block above for
  # parameter docs). Returns the result of poppler_convert() — presumably the
  # vector of written filenames; TODO confirm against the C++ binding.
  config <- poppler_config()
  if(!config$can_render || !length(config$supported_image_formats))
    # Fix: message read "You version of libppoppler" (two typos).
    stop("Your version of libpoppler does not support rendering")
  # Reuse 'config' instead of calling poppler_config() a second time.
  format <- match.arg(format, config$supported_image_formats)
  # NULL means "all pages"; page count comes from the document metadata.
  if(is.null(pages))
    pages <- seq_len(pdf_info(pdf, opw = opw, upw = upw)$pages)
  if(!is.numeric(pages) || !length(pages))
    stop("Argument 'pages' must be a one-indexed vector of page numbers")
  if(length(filenames) < 2){
    # Derive output names from the input name. Fix: strip only a *trailing*
    # ".pdf" extension; the old sub(".pdf", fixed = TRUE) form removed the
    # first ".pdf" occurrence anywhere in the name (e.g. "my.pdfs.pdf").
    input <- ifelse(is.raw(pdf), 'output', sub("\\.pdf$", "", basename(pdf)))
    filenames <- if (length(filenames)) {
      # a single 'filenames' entry is treated as a format string, e.g. "p_%d.%s"
      sprintf(filenames, pages, format)
    } else {
      sprintf("%s_%d.%s", input, pages, format)
    }
  }
  if(length(filenames) != length(pages))
    stop("Length of 'filenames' must be one or equal to 'pages'")
  # 'antialias' may be TRUE (both), FALSE (neither), "draw" or "text".
  antialiasing <- isTRUE(antialias) || isTRUE(antialias == "draw")
  text_antialiasing <- isTRUE(antialias) || isTRUE(antialias == "text")
  poppler_convert(loadfile(pdf), format, pages, filenames, dpi, opw, upw, antialiasing, text_antialiasing, verbose)
}
#' @export
#' @rdname pdf_render_page
# Thin public accessor over the compiled-in poppler build information.
poppler_config <- function() get_poppler_config()
|
##' @include semiParametric.R survivalModels.R
NULL
##' Method to calculate model restricted means
##' @name calcModelRmst
##' @rdname calcModelRmst-methods
##' @param object (SurvivalModel) A survival model - note there cannot be
##' any covariates and armAsFactor must be FALSE
##' @param ... additional arguments for this generic
##' @return (data.frame or FlexTable)
##' @export
# S4 generic; the SurvivalModel method below supplies the implementation.
setGeneric("calcModelRmst", function(object, ...) standardGeneric("calcModelRmst"))
##' @name calcModelRmst
##' @aliases calcModelRmst,SurvivalModel-method
##' @rdname calcModelRmst-methods
##' @param model (character) The name of the model for which to calculate the restricted mean
##' @param times (nuermic vector) times to calculate the restricted mean
##' @param class ('data.frame' or "FlexTable' (default)) type of output required
##' @param digits (numeric default=3) if outputting a FlexTable then the number of digits
##' to round the entries to
##' @export
# Restricted mean survival time (RMST) per arm for one fitted parametric model:
# RMST(t) = integral_0^t S(u) du with S the fitted survival function.
# Output is either a data.frame or a ReporteRs FlexTable, per 'class'.
setMethod("calcModelRmst", "SurvivalModel",
  function(object, model, times, class=c("data.frame","FlexTable")[2], digits=3, ...){
    # -- validation ----------------------------------------------------------
    if(object@armAsFactor){
      stop("Cannot calculate restricted means if armAsFactor is TRUE")
    }
    if(length(object@covariates)!=0){
      stop("Cannot calculate restricted means if covariates fitted in model")
    }
    if(any(!is.numeric(times) | times < 0)){
      stop("Times must be numeric and non-negative")
    }
    # Fix: the message was missing the closing quote after 'FlexTable'.
    if(length(class) != 1 || !class %in% c("data.frame","FlexTable")){
      stop("Invalid class argument, should be 'data.frame' or 'FlexTable'")
    }
    if(length(digits)!=1 || !is.numeric(digits) || !digits > 0 || is.infinite(digits) ||
       is.na(digits)){
      stop("Invalid digits argument")
    }
    if(length(model)!=1 || !model %in% names(object@models)){
      stop("Invalid model argument must be one of ",
           paste(names(object@models),collapse=", "))
    }
    # -- one vector of restricted means (over 'times') per arm ---------------
    rmsts <- lapply(object@models[[model]],function(oneModel){
      # cumulative distribution function of the fitted distribution
      tempF <- oneModel$dfns$p
      # spline fits additionally need the knots passed through to dfns$p
      args <- list()
      if(!is.null(oneModel$knots)){
        args$knots <- oneModel$knots
      }
      # survival function S(x) = 1 - F(x) at the fitted parameter estimates
      survFn <- function(x){
        1 - do.call("tempF", c(args, list(q=x), oneModel$res[,"est"]))
      }
      # calculate restricted means (an optimization would be to not handle
      # times independently but calculate [0,t1], [t1,t2], ... and sum up)
      vapply(times, function(time){
        tryCatch(
          integrate(survFn, 0, time)$value,
          # Fix: must be NA_real_; a logical NA fails vapply's
          # FUN.VALUE = numeric(1) type check when integration errors.
          error=function(cond) NA_real_
        )
      },
      FUN.VALUE = numeric(1))
    })
    rmsts <- as.data.frame(do.call("rbind",rmsts))
    colnames(rmsts) <- NULL
    # with exactly two arms, append an arm2 - arm1 difference row
    if(nrow(rmsts)==2){
      rmsts <- rbind(rmsts,difference=as.numeric(rmsts[2,])-as.numeric(rmsts[1,]))
    }
    # first row holds the evaluation times
    rmsts <- rbind(time=times, rmsts)
    if(class=="data.frame") return(rmsts)
    # -- FlexTable output -----------------------------------------------------
    numRows <- nrow(rmsts)
    numCols <- 1+ncol(rmsts)   # extra leading column for the row labels
    # Fix: dropped the duplicated 'MyFTable <- MyFTable <-' assignment.
    MyFTable <- FlexTable(numrow=numRows,numcol=numCols,
                          body.par.props=parProperties(text.align="right"),
                          header.text.props = textProperties(font.weight = "bold"),
                          body.cell.props = cellProperties(padding.right=1))
    # no interior borders; heavy rules at the top/bottom and under the times row
    MyFTable[1:numRows,1:numCols,side='bottom'] <- borderProperties(width=0)
    MyFTable[1:numRows,1:numCols,side='left'] <- borderProperties(width=0)
    MyFTable[1:numRows,1:numCols,side='top'] <- borderProperties(width=0)
    MyFTable[1:numRows,1:numCols,side='right'] <- borderProperties(width=0)
    MyFTable[numRows,1:numCols,side='bottom'] <- borderProperties(width=3)
    MyFTable[1,1:numCols,side='top'] <- borderProperties(width=3)
    MyFTable[2,1:numCols,side='top'] <- borderProperties(width=3)
    # rounded body values, raw times on the first row, labels in column 1
    MyFTable[2:numRows,2:numCols] <- round(rmsts[2:numRows,], digits=digits)
    MyFTable[1,2:numCols] <- times
    MyFTable[1:numRows, 1] <- rownames(rmsts)
    # header row naming the fitted distribution
    hR <- FlexRow(paste(getDistributionDisplayNames(model),"\nrestricted means"),
                  colspan = numCols,
                  par.properties=parProperties(text.align="center",padding=1),
                  cell.properties = cellProperties(border.width = 0),
                  text.properties = textProperties(font.weight = "bold"))
    MyFTable <- addHeaderRow(MyFTable,hR)
    MyFTable
  }
)
##' Method to calculate RMST on subset of data contained in a
##' SemiParametricModel object
##' @name calcRmst
##' @rdname calcRmst-methods
##' @param object (SemiParametricModel) The object which was created when
##' fitting the Cox model
##' @param ... additional parameters needed for specific instances of this
##' generic
##' @return (rmst object) contains list of RMST values, differences and call or
##' a FlexTable for output into a word document (depending on the class variable)
##' @export
# S4 generic; the SemiParametricModel method below supplies the implementation.
setGeneric("calcRmst", function(object, ...) standardGeneric("calcRmst"))
##' @name calcRmst
##' @aliases calcRmst,SemiParametricModel-method
##' @rdname calcRmst-methods
##' @param class ('rmst' or "FlexTable' (default)) type of output required
##' @param digits (numeric default=3) if outputting a FlexTable then the number of digits
##' to round the entries to
##' @export
# RMST comparison between two arms via the rmst() helper, returned either as
# the raw 'rmst' result object or formatted as a ReporteRs FlexTable.
setMethod("calcRmst", "SemiParametricModel", function(object,class=c("rmst","FlexTable")[2], digits=3, ...){
  # Fix: the message was missing the closing quote after 'FlexTable'.
  if(length(class) != 1 || !class %in% c("rmst","FlexTable")){
    stop("Invalid class argument, should be 'rmst' or 'FlexTable'")
  }
  # validate 'digits' the same way calcModelRmst does (was previously unchecked)
  if(length(digits)!=1 || !is.numeric(digits) || !digits > 0 || is.infinite(digits) ||
     is.na(digits)){
    stop("Invalid digits argument")
  }
  if(isSingleArm(object)){
    stop("Cannot calculate rmst for a single arm trial")
  }
  # Create formula for Kaplan-Meier estimator; after the single-arm check
  # above, armAsFactor is necessarily TRUE here.
  formulaToFit <- survivalFormula(armAsFactor=!isSingleArm(object),
                                  covariates=character(0),
                                  timeCol = object@endPointDef[["timeCol"]],
                                  censorCol = object@endPointDef[["censorCol"]])
  # Call RMST
  result <- rmst(formula = formulaToFit,
                 data = object@survData@subject.data, ...)
  if(class=="rmst"){
    return(result)
  }
  # create FlexTable (rmst() only works with two arms, so the table size is
  # fixed: two arm rows + a difference row; label column + 5 statistics)
  numRows <- 3
  numCols <- 6
  MyFTable <- FlexTable(numrow=numRows,numcol=numCols,
                        body.par.props=parProperties(text.align="right"),
                        header.text.props = textProperties(font.weight = "bold"),
                        body.cell.props = cellProperties(padding.right = 1))
  # data: the difference row gets all 5 statistics, arm rows get RMST/SE/CIs
  MyFTable[3,2:6] <- round(result$diff, digits)
  MyFTable[1:2,2:5] <- round(result$RMST[,2:5],digits)
  # first column: arm names plus the difference label, bold and left-aligned
  MyFTable[1:3,1] <- c(as.character(getArmNames(object@survData)),"Difference")
  MyFTable[1:numRows,1] <- parProperties(text.align="left")
  MyFTable[1:numRows,1] <- textProperties(font.weight = "bold")
  # header row
  hR <- FlexRow(c("Arm","RMST", "SE","Lower CI", "Upper CI", "p-value"),
                par.properties=parProperties(text.align="left"),
                cell.properties =cellProperties(padding.right = 1),
                text.properties = textProperties(font.weight = "bold"))
  MyFTable <- addHeaderRow(MyFTable,hR)
  MyFTable
})
##' @include semiParametric.R survivalModels.R
NULL
##' Method to calculate model restricted means
##' @name calcModelRmst
##' @rdname calcModelRmst-methods
##' @param object (SurvivalModel) A survival model - note there cannot be
##' any covariates and armAsFactor must be FALSE
##' @param ... additional arguments for this generic
##' @return (data.frame or FlexTable)
##' @export
# S4 generic; the SurvivalModel method below supplies the implementation.
setGeneric("calcModelRmst", function(object, ...) standardGeneric("calcModelRmst"))
##' @name calcModelRmst
##' @aliases calcModelRmst,SurvivalModel-method
##' @rdname calcModelRmst-methods
##' @param model (character) The name of the model for which to calculate the restricted mean
##' @param times (nuermic vector) times to calculate the restricted mean
##' @param class ('data.frame' or "FlexTable' (default)) type of output required
##' @param digits (numeric default=3) if outputting a FlexTable then the number of digits
##' to round the entries to
##' @export
# RMST per arm for one fitted parametric model: RMST(t) = integral_0^t S(u) du.
# Output is either a data.frame or a ReporteRs FlexTable, per 'class'.
setMethod("calcModelRmst", "SurvivalModel",
function(object, model, times, class=c("data.frame","FlexTable")[2], digits=3, ...){
#validation
if(object@armAsFactor){
stop("Cannot calculate restricted means if armAsFactor is TRUE")
}
if(length(object@covariates)!=0){
stop("Cannot calculate restricted means if covariates fitted in model")
}
if(any(!is.numeric(times) | times < 0)){
stop("Times must be numeric and non-negative")
}
if(length(class) != 1 || !class %in% c("data.frame","FlexTable")){
# NOTE(review): the message below is missing the closing quote after 'FlexTable'.
stop("Invalid class argument, should be 'data.frame' or 'FlexTable")
}
if(length(digits)!=1 || !is.numeric(digits) || !digits > 0 || is.infinite(digits) ||
is.na(digits)){
stop("Invalid digits argument")
}
if(length(model)!=1 || !model %in% names(object@models)){
stop("Invalid model argument must be one of ",
paste(names(object@models),collapse=", "))
}
#for each arm
rmsts <- lapply(object@models[[model]],function(oneModel){
#get the cdf function
tempF <- oneModel$dfns$p
#if spline need to add the knots argument for the dfns$p function to work
args <- list()
if(!is.null(oneModel$knots)){
args$knots <- oneModel$knots
}
#survival function S(x) = 1 - F(x) at the fitted parameter estimates
survFn <- function(x){
1 - do.call("tempF", c(args, list(q=x), oneModel$res[,"est"]))
}
#calculate restricted means (an optimization would be to
#not handle times independently but calculate [0,t1], [t1, t2], ... and
#sum them up as needed)
# NOTE(review): the error handler returns a logical NA, which fails
# vapply's FUN.VALUE = numeric(1) type check; should be NA_real_.
vapply(times, function(time){
tryCatch(
integrate(survFn, 0, time)$value,
error=function(cond) NA
)
},
FUN.VALUE = numeric(1))
})
rmsts <- as.data.frame(do.call("rbind",rmsts))
colnames(rmsts) <- NULL
#if two arms add a difference row (arm2 - arm1)
if(nrow(rmsts)==2){
rmsts <- rbind(rmsts,difference=as.numeric(rmsts[2,])-as.numeric(rmsts[1,]) )
}
#Add row of times
rmsts <- rbind(time=times, rmsts)
if(class=="data.frame") return(rmsts)
#create FlexTable (extra first column holds the row labels)
numRows <- nrow(rmsts)
numCols <- 1+ncol(rmsts)
# NOTE(review): duplicated assignment 'MyFTable <- MyFTable <-' is redundant.
MyFTable <- MyFTable <- FlexTable(numrow=numRows,numcol=numCols,
body.par.props=parProperties(text.align="right"),
header.text.props = textProperties(font.weight = "bold"),
body.cell.props = cellProperties(padding.right=1))
#Set borders: none inside, heavy rules top/bottom and under the times row
MyFTable[1:numRows,1:numCols,side='bottom'] <- borderProperties(width=0)
MyFTable[1:numRows,1:numCols,side='left'] <- borderProperties(width=0)
MyFTable[1:numRows,1:numCols,side='top'] <- borderProperties(width=0)
MyFTable[1:numRows,1:numCols,side='right'] <- borderProperties(width=0)
MyFTable[numRows,1:numCols,side='bottom'] <- borderProperties(width=3)
MyFTable[1,1:numCols,side='top'] <- borderProperties(width=3)
MyFTable[2,1:numCols,side='top'] <- borderProperties(width=3)
#Add in data to table: rounded body, raw times first row, labels in column 1
MyFTable[2:numRows,2:numCols] <- round(rmsts[2:numRows,], digits=digits)
MyFTable[1,2:numCols] <- times
MyFTable[1:numRows, 1] <- rownames(rmsts)
#Add header denoting which distribution
hR <- FlexRow(paste(getDistributionDisplayNames(model),"\nrestricted means"),
colspan = numCols,
par.properties=parProperties(text.align="center",padding=1),
cell.properties = cellProperties(border.width = 0),
text.properties = textProperties(font.weight = "bold"))
MyFTable <- addHeaderRow(MyFTable,hR)
MyFTable
}
)
##' Method to calculate RMST on subset of data contained in a
##' SemiParametricModel object
##' @name calcRmst
##' @rdname calcRmst-methods
##' @param object (SemiParametricModel) The object which was created when
##' fitting the Cox model
##' @param ... additional parameters needed for specific instances of this
##' generic
##' @return (rmst object) contains list of RMST values, differences and call or
##' a FlexTable for output into a word document (depending on the class variable)
##' @export
# S4 generic; the SemiParametricModel method below supplies the implementation.
setGeneric("calcRmst", function(object, ...) standardGeneric("calcRmst"))
##' @name calcRmst
##' @aliases calcRmst,SemiParametricModel-method
##' @rdname calcRmst-methods
##' @param class ('rmst' or "FlexTable' (default)) type of output required
##' @param digits (numeric default=3) if outputting a FlexTable then the number of digits
##' to round the entries to
##' @export
# RMST comparison between two arms via rmst(), returned either as the raw
# 'rmst' result object or formatted as a ReporteRs FlexTable.
setMethod("calcRmst", "SemiParametricModel", function(object,class=c("rmst","FlexTable")[2], digits=3, ...){
if(length(class) != 1 || !class %in% c("rmst","FlexTable")){
# NOTE(review): the message below is missing the closing quote after 'FlexTable'.
stop("Invalid class argument, should be 'rmst' or 'FlexTable")
}
if(isSingleArm(object)){
stop("Cannot calculate rmst for a single arm trial")
}
# Create formula for Kaplan-Meier estimator; after the single-arm check
# above, armAsFactor is necessarily TRUE here.
formulaToFit <- survivalFormula(armAsFactor=!isSingleArm(object),
covariates=character(0),
timeCol = object@endPointDef[["timeCol"]],
censorCol = object@endPointDef[["censorCol"]])
# Call RMST
result <- rmst(formula = formulaToFit,
data = object@survData@subject.data, ...)
if(class=="rmst"){
return(result)
}
#create FlexTable (note rmst function only works with two arms so size of table is fixed)
numRows <- 3
numCols <- 6
MyFTable <- FlexTable(numrow=numRows,numcol=numCols,
body.par.props=parProperties(text.align="right"),
header.text.props = textProperties(font.weight = "bold"),
body.cell.props = cellProperties(padding.right = 1))
#Add data: difference row gets all 5 statistics, arm rows get RMST/SE/CIs
MyFTable[3,2:6] <- round(result$diff, digits)
MyFTable[1:2,2:5] <- round(result$RMST[,2:5],digits)
#Add 1st column (the arm names), bold and left-aligned
MyFTable[1:3,1] <- c(as.character(getArmNames(object@survData)),"Difference")
MyFTable[1:numRows,1] <- parProperties(text.align="left")
MyFTable[1:numRows,1] <- textProperties(font.weight = "bold")
#Add header
hR <- FlexRow(c("Arm","RMST", "SE","Lower CI", "Upper CI", "p-value"),
par.properties=parProperties(text.align="left"),
cell.properties =cellProperties(padding.right = 1),
text.properties = textProperties(font.weight = "bold"))
MyFTable <- addHeaderRow(MyFTable,hR)
MyFTable
}) |
library(testthat)
source("functions.R")
expect_equal(add_nums(2, 2), 4)
actual_size <- nrow(random_half_split(iris))
expect_equal(actual_size, 75)
| /test.R | no_license | codememe00/testing_cl_r | R | false | false | 151 | r |
library(testthat)
source("functions.R")
expect_equal(add_nums(2, 2), 4)
actual_size <- nrow(random_half_split(iris))
expect_equal(actual_size, 75)
|
testlist <- list(ends = c(-1125300777L, 765849512L, -1760772615L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) | /IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609860342-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 729 | r | testlist <- list(ends = c(-1125300777L, 765849512L, -1760772615L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) |
require(MASS)
# Helper function to update beta distribution parameters
# Conjugate Beta update: add observed success/failure counts to the prior
# parameters (a, b). Returns the posterior parameters as c(alpha, beta).
update_beta <- function(a, b, successes, failures) {
  c(a + successes, b + failures)
}
# Helper function to sample correlation coefficients given a population correlation
# Draw 'samples' sample correlation coefficients, each computed from 'd'
# bivariate normal observations with population correlation 'rho'.
sample_from_cor <- function(rho, d=10, samples=100) {
  covariance <- matrix(c(1, rho, rho, 1), ncol = 2)
  vapply(
    seq_len(samples),
    function(i) cor(mvrnorm(d, mu = c(0, 0), Sigma = covariance, empirical = FALSE))[2, 1],
    numeric(1)
  )
}
# Modeling the probability that for a given population correlation rho (d data points), the sample correlation lies above the threshold
# This function determines the beta parameters for that model
# Estimate Beta parameters for P(sample correlation > threshold | rho):
# counts of threshold exceedances (a) and non-exceedances (b) over
# 'precision_sampling' simulated sample correlations of 'precision_cor' points.
fit_beta <- function(rho, threshold, precision_cor=10, precision_sampling=100) {
  draws <- sample_from_cor(rho = rho, d = precision_cor, samples = precision_sampling)
  exceed <- length(which(draws > threshold))
  c(exceed, precision_sampling - exceed)
}
# Update the beta parameters given a new measured correlation
# Update Beta prior counts (a, b) with fresh simulated evidence for a measured
# correlation 'rho': successes are simulated sample correlations (of
# 'precision_cor' points each) that exceed 'threshold'.
# Bug fix: the failure count used the undefined variable 'precision'; it must
# be 'precision_sampling', the number of simulated draws.
update_beta_prior <- function(a, b, rho, threshold, precision_cor=10, precision_sampling=100) {
  cor_dist <- sample_from_cor(rho=rho, d=precision_cor, samples=precision_sampling)
  successes <- length(which(cor_dist > threshold))
  failures <- precision_sampling - successes
  new_params <- update_beta(a=a, b=b, successes=successes, failures=failures)
  return(new_params)
}
# Given threshold rho, generate a transformation table mapping probability of being above threshold to correlation coefficient
# Is this just tan(prob) / arctanh(prob), with a scaling factor to account for num_samples??
# TODO: this can be parallelized
# Build a lookup table mapping P(sample correlation > threshold) to the
# population correlation rho, over rho in [-1, 1] in steps of 'stepsize'.
# Each probability is estimated from 'precision' simulated sample
# correlations of 'num_samples' points each.
# Bug fixes: sample_from_cor() has no 'size' argument (the original calls
# errored with "unused argument"); the correct argument is 'samples'. A
# duplicated, discarded simulation call was removed, and the progress bar is
# now closed when done.
probability_to_rho_table <- function(threshold, stepsize=0.01, num_samples=10, precision=10000, show_progress=TRUE) {
  rhos <- seq(from=-1, to=1, by=stepsize)
  num_rows <- length(rhos)
  p2r_table <- data.frame(rho=numeric(num_rows), prob=numeric(num_rows))
  if (show_progress) {
    pb <- txtProgressBar(min=0, max=num_rows, style = 3)
  }
  for (i in seq_along(rhos)) {
    rho <- rhos[i]
    # fraction of simulated sample correlations exceeding the threshold
    draws <- sample_from_cor(rho, d = num_samples, samples = precision)
    probability <- length(which(draws > threshold))/precision
    p2r_table$rho[i] <- rho
    p2r_table$prob[i] <- probability
    if (show_progress) {
      setTxtProgressBar(pb, i)
    }
  }
  if (show_progress) {
    close(pb)
  }
  return(p2r_table)
}
# Transform probability of connection above threshold, to a correlation coefficient
# THIS IS WHAT SHOULD BE DONE
# Map a Beta(a, b) posterior over the exceedance probability back to
# correlation values via the precomputed probability -> rho table
# ('p2r.table', as produced by probability_to_rho_table). Returns either the
# whole vector of looked-up rho draws (retval = "distribution") or the named
# summary function applied to it (e.g. "median", "mean").
# Bug fix: the loop iterated over the undefined variable 'precision'; it must
# run over the 'precision_sampling' posterior draws.
beta_to_rho <- function(a, b, p2r.table, precision_sampling=1000, retval="median") {
  probability_values <- rbeta(precision_sampling, a, b)
  rho_values <- numeric(precision_sampling)
  for (i in seq_len(precision_sampling)) {
    probability <- probability_values[i]
    # nearest-neighbour lookup in probability space
    rho_values[i] <- p2r.table$rho[which.min(abs(p2r.table$prob - probability))]
  }
  if (retval=="distribution") {
    return(rho_values)
  } else {
    return(do.call(retval, list(x=rho_values)))
  }
}
# # setup
# thres_cor <- 0.3
# num_mice <- 10
# precision <- 100
#
# # Precompute table for determining correlation from probability
# # Note that the precision here is manually set, because precision in this context is different from the other uses
# # Here, we'd like to be as precise as possible in mapping probability to correlations, as opposed to precision=a+b as a measure of uncertainty of the beta distribution
# p2r_table <- probability_to_rho_table(threshold=thres_cor, stepsize = 0.01, num_samples = num_mice, precision = 10000)
#
# # prior
# init_cor <- 1
# prior_params <- fit_beta(init_cor, thres_cor, num_samples=num_mice, precision=precision)
# hist(rbeta(10000, prior_params[1], prior_params[2]), 100)
# prior_cor <- beta_to_rho(a=prior_params[1], b=prior_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(prior_cor)
#
# # measured cor
# measured_cor <- 0.01
# updated_params <- update_beta_prior(a=prior_params[1], b=prior_params[2], rho = measured_cor, threshold = thres_cor, num_samples = num_mice, precision = precision)
# hist(rbeta(10000, updated_params[1], updated_params[2]), 100)
# posterior_cor <- beta_to_rho(a=updated_params[1], b=updated_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(posterior_cor)
#
# # another measured cor
# measured_cor <- 0.01
# prior_params <- updated_params
# updated_params <- update_beta_prior(a=prior_params[1], b=prior_params[2], rho = measured_cor, threshold = thres_cor, num_samples = num_mice, precision = precision)
# hist(rbeta(10000, updated_params[1], updated_params[2]), 100)
# posterior_cor <- beta_to_rho(a=updated_params[1], b=updated_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(posterior_cor)
#
# # another measured cor
# measured_cor <- 0.01
# prior_params <- updated_params
# updated_params <- update_beta_prior(a=prior_params[1], b=prior_params[2], rho = measured_cor, threshold = thres_cor, num_samples = num_mice, precision = precision)
# hist(rbeta(10000, updated_params[1], updated_params[2]), 100)
# posterior_cor <- beta_to_rho(a=updated_params[1], b=updated_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(posterior_cor)
#
# # another measured cor
# measured_cor <- 0.5
# prior_params <- updated_params
# updated_params <- update_beta_prior(a=prior_params[1], b=prior_params[2], rho = measured_cor, threshold = thres_cor, num_samples = num_mice, precision = precision)
# hist(rbeta(10000, updated_params[1], updated_params[2]), 100)
# posterior_cor <- beta_to_rho(a=updated_params[1], b=updated_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(posterior_cor)
| /RStructuralCovariance/R/beta.R | no_license | yohanyee/StructuralCovariance | R | false | false | 5,619 | r | require(MASS)
# Helper function to update beta distribution parameters
# Conjugate Beta update: add observed successes/failures to the prior counts
# (a, b); returns the posterior parameters as c(alpha, beta).
update_beta <- function(a, b, successes, failures) {
a_new <- a + successes
b_new <- b + failures
return(c(a_new, b_new))
}
# Helper function to sample correlation coefficients given a population correlation
# Draw 'samples' sample correlation coefficients, each computed from 'd'
# bivariate normal observations (MASS::mvrnorm) with population correlation 'rho'.
sample_from_cor <- function(rho, d=10, samples=100) {
return(replicate(n=samples, cor(mvrnorm(d, mu = c(0,0), Sigma = matrix(c(1,rho,rho,1), ncol = 2), empirical = FALSE))[2,1]))
}
# Modeling the probability that for a given population correlation rho (d data points), the sample correlation lies above the threshold
# This function determines the beta parameters for that model
# Estimate Beta parameters for P(sample correlation > threshold | rho):
# a = count of threshold exceedances, b = the remainder, over
# 'precision_sampling' simulated sample correlations of 'precision_cor' points.
fit_beta <- function(rho, threshold, precision_cor=10, precision_sampling=100) {
cor_dist <- sample_from_cor(rho=rho, d=precision_cor, samples=precision_sampling)
a <- length(which(cor_dist > threshold))
b <- precision_sampling - a
return(c(a, b))
}
# Update the beta parameters given a new measured correlation
# Update Beta prior counts (a, b) with fresh simulated evidence for a
# measured correlation 'rho'.
update_beta_prior <- function(a, b, rho, threshold, precision_cor=10, precision_sampling=100) {
cor_dist <- sample_from_cor(rho=rho, d=precision_cor, samples=precision_sampling)
successes <- length(which(cor_dist > threshold))
# NOTE(review): 'precision' is undefined in this scope — should be
# 'precision_sampling' (the number of simulated draws).
failures <- precision - successes
new_params <- update_beta(a=a, b=b, successes=successes, failures=failures)
return(new_params)
}
# Given threshold rho, generate a transformation table mapping probability of being above threshold to correlation coefficient
# Is this just tan(prob) / arctanh(prob), with a scaling factor to account for num_samples??
# TODO: this can be parallelized
# Build a lookup table mapping P(sample correlation > threshold) to the
# population correlation rho, over rho in [-1, 1] in steps of 'stepsize'.
probability_to_rho_table <- function(threshold, stepsize=0.01, num_samples=10, precision=10000, show_progress=TRUE) {
rhos <- seq(from=-1, to=1, by=stepsize)
num_rows <- length(rhos)
p2r_table <- data.frame(rho=numeric(num_rows), prob=numeric(num_rows))
if (show_progress) {
pb <- txtProgressBar(min=0, max=num_rows, style = 3)
}
for (i in 1:length(rhos)) {
rho <- rhos[i]
# NOTE(review): sample_from_cor() has no 'size' argument (its parameter is
# 'samples'), so both calls below error with "unused argument"; this first
# call is also discarded and appears to be an accidental duplicate.
sample_from_cor(rho, d = num_samples, size=precision)
probability <- length(which(sample_from_cor(rho, d = num_samples, size=precision) > threshold))/precision
p2r_table$rho[i] <- rho
p2r_table$prob[i] <- probability
if (show_progress) {
setTxtProgressBar(pb, i)
}
}
# NOTE(review): the progress bar is never close()d.
return(p2r_table)
}
# Transform probability of connection above threshold, to a correlation coefficient
# THIS IS WHAT SHOULD BE DONE
# Map a Beta(a, b) posterior over the exceedance probability back to
# correlation values via the precomputed probability -> rho table, returning
# either the vector of draws ("distribution") or a named summary ("median", ...).
beta_to_rho <- function(a, b, p2r.table, precision_sampling=1000, retval="median") {
probability_values <- rbeta(precision_sampling, a, b)
rho_values <- numeric(precision_sampling)
# NOTE(review): 'precision' is undefined in this scope — the loop should run
# over seq_len(precision_sampling).
for (i in 1:precision) {
probability <- probability_values[i]
# nearest-neighbour lookup in probability space
rho_values[i] <- p2r.table$rho[which.min(abs(p2r.table$prob - probability))]
}
if (retval=="distribution") {
return(rho_values)
} else {
return(do.call(retval, list(x=rho_values)))
}
}
# # setup
# thres_cor <- 0.3
# num_mice <- 10
# precision <- 100
#
# # Precompute table for determining correlation from probability
# # Note that the precision here is manually set, because precision in this context is different from the other uses
# # Here, we'd like to be as precise as possible in mapping probability to correlations, as opposed to precision=a+b as a measure of uncertainty of the beta distribution
# p2r_table <- probability_to_rho_table(threshold=thres_cor, stepsize = 0.01, num_samples = num_mice, precision = 10000)
#
# # prior
# init_cor <- 1
# prior_params <- fit_beta(init_cor, thres_cor, num_samples=num_mice, precision=precision)
# hist(rbeta(10000, prior_params[1], prior_params[2]), 100)
# prior_cor <- beta_to_rho(a=prior_params[1], b=prior_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(prior_cor)
#
# # measured cor
# measured_cor <- 0.01
# updated_params <- update_beta_prior(a=prior_params[1], b=prior_params[2], rho = measured_cor, threshold = thres_cor, num_samples = num_mice, precision = precision)
# hist(rbeta(10000, updated_params[1], updated_params[2]), 100)
# posterior_cor <- beta_to_rho(a=updated_params[1], b=updated_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(posterior_cor)
#
# # another measured cor
# measured_cor <- 0.01
# prior_params <- updated_params
# updated_params <- update_beta_prior(a=prior_params[1], b=prior_params[2], rho = measured_cor, threshold = thres_cor, num_samples = num_mice, precision = precision)
# hist(rbeta(10000, updated_params[1], updated_params[2]), 100)
# posterior_cor <- beta_to_rho(a=updated_params[1], b=updated_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(posterior_cor)
#
# # another measured cor
# measured_cor <- 0.01
# prior_params <- updated_params
# updated_params <- update_beta_prior(a=prior_params[1], b=prior_params[2], rho = measured_cor, threshold = thres_cor, num_samples = num_mice, precision = precision)
# hist(rbeta(10000, updated_params[1], updated_params[2]), 100)
# posterior_cor <- beta_to_rho(a=updated_params[1], b=updated_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(posterior_cor)
#
# # another measured cor
# measured_cor <- 0.5
# prior_params <- updated_params
# updated_params <- update_beta_prior(a=prior_params[1], b=prior_params[2], rho = measured_cor, threshold = thres_cor, num_samples = num_mice, precision = precision)
# hist(rbeta(10000, updated_params[1], updated_params[2]), 100)
# posterior_cor <- beta_to_rho(a=updated_params[1], b=updated_params[2], p2r_table=p2r_table, precision = precision, retval = "median")
# print(posterior_cor)
|
library(sqldf)
acs <- read.csv("getdata4.csv")
unique(acs$AGEP)
sqldf("select distinct AGEP from acs") | /03 - getting_and_cleaning_data/week_2/GCD_23.R | no_license | octopacks/coursera_data_science_jhu | R | false | false | 102 | r | library(sqldf)
acs <- read.csv("getdata4.csv")
unique(acs$AGEP)
sqldf("select distinct AGEP from acs") |
# Latest EWMA covariance matrix estimate for Np assets.
# pvProductDf: one column per asset (only its dimensions are used here);
# pLogReturns: matrix of log-returns whose first Np columns feed the
# recursion; plambda: EWMA decay factor (conventionally in (0, 1)).
sigma_ewma <- function(pvProductDf, pLogReturns, plambda) {
Np <- ncol(pvProductDf)
# One EWMA column per variance plus one per asset pair (Np + Np*(Np-1)/2).
EWMA_NumCol <- sum(1:ncol(pLogReturns))
EWMA_NumRow <- nrow(pvProductDf)
EWMA <- matrix(rep(0, EWMA_NumCol * EWMA_NumRow), nrow = EWMA_NumRow, ncol = EWMA_NumCol)
# Recursion: s_t = lambda * s_{t-1} + (1 - lambda) * r_{t-1}^2 (or cross term).
for (i in 2:EWMA_NumRow){
for (k in 1:Np){
EWMA[i,k] <- plambda * EWMA[i - 1,k] + (1 - plambda) * (pLogReturns[i - 1,k] ^ 2)
}
if (Np > 1) {
k <- Np + 1
# NOTE(review): '2:Np - 1' parses as '(2:Np) - 1', i.e. 1:(Np-1); together
# with the redundant 'y > x' guard this fills pairs (x, y) with x < y in
# column order Np+1, Np+2, ...
for (x in 2:Np - 1) {
for (y in (x + 1):Np) {
if (y > x) {
EWMA[i,k] <- plambda * EWMA[i - 1, k] + (1 - plambda) * pLogReturns[i - 1,x] * pLogReturns[i - 1,y]
k <- k + 1
}
}
}
}
}
# Assemble the symmetric covariance matrix from the most recent EWMA row:
# diagonal from the first Np columns, off-diagonals from the pair columns.
EWMALastRow <- EWMA[nrow(EWMA),]
Sigma <- matrix(rep(0, Np * Np), nrow = Np, ncol = Np, byrow = TRUE)
for(i in 1:Np){
for(j in 1:Np){
if(i==j){
Sigma[i,j] <- EWMALastRow[i]
}
}
}
if(Np>1){
k <- Np+1
for(i in 1:(Np-1)){
for(j in (i+1):Np){
Sigma[i,j] <- EWMALastRow[k]
Sigma[j,i] <- EWMALastRow[k]
k <- k + 1
}
}
}
return(Sigma)
} | /sigma.R | no_license | brunohp89/VaR_EWMA | R | false | false | 1,126 | r | sigma_ewma <- function(pvProductDf, pLogReturns, plambda) {
# (body of sigma_ewma: latest EWMA covariance matrix estimate for Np assets)
Np <- ncol(pvProductDf)
# One EWMA column per variance plus one per asset pair (Np + Np*(Np-1)/2).
EWMA_NumCol <- sum(1:ncol(pLogReturns))
EWMA_NumRow <- nrow(pvProductDf)
EWMA <- matrix(rep(0, EWMA_NumCol * EWMA_NumRow), nrow = EWMA_NumRow, ncol = EWMA_NumCol)
# Recursion: s_t = lambda * s_{t-1} + (1 - lambda) * r_{t-1}^2 (or cross term).
for (i in 2:EWMA_NumRow){
for (k in 1:Np){
EWMA[i,k] <- plambda * EWMA[i - 1,k] + (1 - plambda) * (pLogReturns[i - 1,k] ^ 2)
}
if (Np > 1) {
k <- Np + 1
# NOTE(review): '2:Np - 1' parses as '(2:Np) - 1', i.e. 1:(Np-1); together
# with the redundant 'y > x' guard this fills pairs (x, y) with x < y.
for (x in 2:Np - 1) {
for (y in (x + 1):Np) {
if (y > x) {
EWMA[i,k] <- plambda * EWMA[i - 1, k] + (1 - plambda) * pLogReturns[i - 1,x] * pLogReturns[i - 1,y]
k <- k + 1
}
}
}
}
}
# Assemble the symmetric covariance matrix from the most recent EWMA row.
EWMALastRow <- EWMA[nrow(EWMA),]
Sigma <- matrix(rep(0, Np * Np), nrow = Np, ncol = Np, byrow = TRUE)
for(i in 1:Np){
for(j in 1:Np){
if(i==j){
Sigma[i,j] <- EWMALastRow[i]
}
}
}
if(Np>1){
k <- Np+1
for(i in 1:(Np-1)){
for(j in (i+1):Np){
Sigma[i,j] <- EWMALastRow[k]
Sigma[j,i] <- EWMALastRow[k]
k <- k + 1
}
}
}
return(Sigma)
}
# All State Insurance Claims Severity Prediction
# Date: 27 Oct 2016
# Load Required packages
library(xgboost)
library(randomForest)
library(data.table)
library(Matrix)
library(FeatureHashing)
library(dplyr)
library(readr)
library(Metrics)
library(caret)
library(ggplot2)
# NOTE(review): duplicate load — data.table was already attached above.
library(data.table)
#Set the current working directory
# NOTE(review): machine-specific absolute path; setwd() in a shared script is
# fragile — prefer running from the project directory or using relative paths.
setwd("/Users/ashish/Documents/Kaggle/AllStateInsuranceClaimSeverity")
#Load the datasets
train_orig <- fread('train-2.csv')
test_orig <- fread('test-2.csv')
#Feature engineering
#New feature - 'losscat' feature has been added to categorize the losses based on their values
#Exploratory Data Analysis
#Check the structure of the datasets
str(train_orig)
str(test_orig)
#Check corelation between continuous variables ("cont1".."cont14")
co_var <- paste0("cont", seq(1, 14))
summary(train_orig[,co_var]) #No missing values
train_orig_df <- as.data.frame(train_orig)
corelation <- cor(train_orig_df[,co_var])
head(round(corelation, 2))
#Plot corelation graph
# correlations plot between predictors
# NOTE(review): corrplot() and brewer.pal() are used below but neither
# library(corrplot) nor library(RColorBrewer) is loaded — this line fails
# unless those packages are attached elsewhere.
corrplot(corelation, type="upper", order="hclust",col=brewer.pal(n=8, name="PuOr"),addCoef.col = "grey",diag=FALSE)
#Load the IDs from the test dataset to a variable for later use
testID <- test_orig$id
#Setting levels: give train/test factors a shared level set so model matrices align
cvar <- names(train_orig)[sapply(train_orig, is.character)]
cat(cvar)
for(var in cvar) {
foo.levels <- unique(c(train_orig[[var]], test_orig[[var]]))
set(train_orig, j = var, value = factor(train_orig[[var]], levels = foo.levels))
set(test_orig, j = var, value = factor(test_orig[[var]], levels = foo.levels))
}
#Dependent variable from train dataset is saved to a new variable for later use
response <- train_orig$loss
#Set Id & loss variables from train dataset to NULL
train_orig[, id := NULL]
train_orig[, loss := NULL]
#Set Id variable from test dataset to NULL
test_orig[, id := NULL]
#Merge the train and test datasets
# NOTE(review): the name 'merge' masks base::merge for the rest of the script.
merge <- rbind(train_orig, test_orig)
merge$i <- 1:dim(merge)[1]
#Check for factor variables and recode them to their integer codes
fac_var <- names(train_orig)[sapply(train_orig, is.factor)]
merge[, (fac_var) := lapply(.SD, as.numeric), .SDcols = fac_var]
#Creating a sparse matrix (rows = observations, one block of columns per factor)
merge.sparse <- sparseMatrix(merge$i, merge[,cvar[1], with = FALSE][[1]])
for(var in cvar[-1]){
merge.sparse <- cbind(merge.sparse, sparseMatrix(merge$i, merge[,var, with = FALSE][[1]]))
cat('Combining: ', var, '\n')
}
#Separating the test and train dataset
merge.sparse <- cbind(merge.sparse, as.matrix(merge[,-c(char.var, 'i'), with = FALSE]))
dim(merge.sparse)
train <- merge.sparse[1:(dim(train_orig)[1]),]
test <- merge.sparse[(dim(train_orig)[1] + 1):nrow(merge),]
# Function & attributes from
# https://www.kaggle.com/nigelcarpenter/allstate-claims-severity/farons-xgb-starter-ported-to-r/code
xg_eval_mae <- function (yhat, dtrain) {
y = getinfo(dtrain, "label")
err= mae(exp(y),exp(yhat) )
return (list(metric = "error", value = err))
}
xgb_params = list(
seed = 0,
colsample_bytree = 0.7,
subsample = 0.7,
eta = 0.075,
objective = 'reg:linear',
max_depth = 6,
num_parallel_tree = 1,
min_child_weight = 1,
base_score = 7
)
#Get a random sample of data
sample.index <- sample(1:nrow(train_orig), nrow(train_orig) * 0.9)
validation <- xgb.DMatrix(train[-sample.index,], label = log(response[-sample.index]))
train <- xgb.DMatrix(train[sample.index,], label = log(response[sample.index]))
test <- xgb.DMatrix(test)
xgboost_model <- xgb.train(xgb_params,
train,
nrounds=500,
print_every_n = 5,
verbose= 1,
watchlist = list(valid_score = validation),
feval=xg_eval_mae,
early_stop_rounds = 20,
maximize=FALSE)
xgboost_prediction <- predict(xgboost_model, test)
submission <- data.frame(id = testID, loss = exp(xgboost_prediction))
write.csv(submission, 'xgboost_AR4.csv', row.names = FALSE)
| /ClaimSeverity_AR.R | no_license | shermina-dsd/IWD-Hackathon | R | false | false | 3,990 | r | # All State Insurance Claims Severity Prediction
# Date: 27 Oct 2016
# Load Required packages
library(xgboost)
library(randomForest)
library(data.table)
library(Matrix)
library(FeatureHashing)
library(dplyr)
library(readr)
library(Metrics)
library(caret)
library(ggplot2)
library(data.table)
#Set the current working directory
setwd("/Users/ashish/Documents/Kaggle/AllStateInsuranceClaimSeverity")
#Load the datasets
train_orig <- fread('train-2.csv')
test_orig <- fread('test-2.csv')
#Feature engineering
#New feature - 'losscat' feature has been added to categorize the losses based on their values
#Exploratory Data Analysis
#Check the structure of the datasets
str(train_orig)
str(test_orig)
#Check corelation between continuous variables
co_var <- paste0("cont", seq(1, 14))
summary(train_orig[,co_var]) #No missing values
train_orig_df <- as.data.frame(train_orig)
corelation <- cor(train_orig_df[,co_var])
head(round(corelation, 2))
#Plot corelation graph
# correlations plot between predictors
corrplot(corelation, type="upper", order="hclust",col=brewer.pal(n=8, name="PuOr"),addCoef.col = "grey",diag=FALSE)
#Load the IDs from the test dataset to a variable for later use
testID <- test_orig$id
#Setting levels
cvar <- names(train_orig)[sapply(train_orig, is.character)]
cat(cvar)
for(var in cvar) {
foo.levels <- unique(c(train_orig[[var]], test_orig[[var]]))
set(train_orig, j = var, value = factor(train_orig[[var]], levels = foo.levels))
set(test_orig, j = var, value = factor(test_orig[[var]], levels = foo.levels))
}
#Dependent variable from train dataset is saved to a new variable for later use
response <- train_orig$loss
#Set Id & loss variables from train dataset to NULL
train_orig[, id := NULL]
train_orig[, loss := NULL]
#Set Id variable from test dataset to NULL
test_orig[, id := NULL]
#Merge the train and test datasets
merge <- rbind(train_orig, test_orig)
merge$i <- 1:dim(merge)[1]
#Check for factor variables
fac_var <- names(train_orig)[sapply(train_orig, is.factor)]
merge[, (fac_var) := lapply(.SD, as.numeric), .SDcols = fac_var]
#Creating a sparse matrix
merge.sparse <- sparseMatrix(merge$i, merge[,cvar[1], with = FALSE][[1]])
for(var in cvar[-1]){
merge.sparse <- cbind(merge.sparse, sparseMatrix(merge$i, merge[,var, with = FALSE][[1]]))
cat('Combining: ', var, '\n')
}
#Separating the test and train dataset
merge.sparse <- cbind(merge.sparse, as.matrix(merge[,-c(char.var, 'i'), with = FALSE]))
dim(merge.sparse)
train <- merge.sparse[1:(dim(train_orig)[1]),]
test <- merge.sparse[(dim(train_orig)[1] + 1):nrow(merge),]
# Function & attributes from
# https://www.kaggle.com/nigelcarpenter/allstate-claims-severity/farons-xgb-starter-ported-to-r/code
xg_eval_mae <- function (yhat, dtrain) {
y = getinfo(dtrain, "label")
err= mae(exp(y),exp(yhat) )
return (list(metric = "error", value = err))
}
xgb_params = list(
seed = 0,
colsample_bytree = 0.7,
subsample = 0.7,
eta = 0.075,
objective = 'reg:linear',
max_depth = 6,
num_parallel_tree = 1,
min_child_weight = 1,
base_score = 7
)
#Get a random sample of data
sample.index <- sample(1:nrow(train_orig), nrow(train_orig) * 0.9)
validation <- xgb.DMatrix(train[-sample.index,], label = log(response[-sample.index]))
train <- xgb.DMatrix(train[sample.index,], label = log(response[sample.index]))
test <- xgb.DMatrix(test)
xgboost_model <- xgb.train(xgb_params,
train,
nrounds=500,
print_every_n = 5,
verbose= 1,
watchlist = list(valid_score = validation),
feval=xg_eval_mae,
early_stop_rounds = 20,
maximize=FALSE)
xgboost_prediction <- predict(xgboost_model, test)
submission <- data.frame(id = testID, loss = exp(xgboost_prediction))
write.csv(submission, 'xgboost_AR4.csv', row.names = FALSE)
|
#' Add a variable of a higher interval to a data frame
#'
#' Take the datetime variable in a data frame and map this
#' to a variable of a higher interval. The mapping is added to the data frame
#' in a new variable. After applying \code{thicken} the user can aggregate the
#' other variables in the data frame to the higher interval, for instance using
#' \code{dplyr}.
#'
#' @param x A data frame containing at least one datetime variable of
#' class \code{Date}, \code{POSIXct} or \code{POSIXlt}.
#' @param interval The interval of the added datetime variable.
#' Any character string that would be accepted by \code{seq.Date} or
#' \code{seq.POSIXt}. It can only be higher than the interval and step size of
#' the input data.
#' @param colname The column name of the added variable. If \code{NULL} it will
#' be the name of the original datetime variable with the interval name added to
#' it (including the unit), separated by underscores.
#' @param rounding Should a value in the input datetime variable be mapped to
#' the closest value that is lower (\code{down}) or that is higher (\code{up})
#' than itself.
#' @param by Only needs to be specified when \code{x} contains multiple
#' variables of class \code{Date}, \code{POSIXct} or \code{POSIXlt}.
#' Indicates which to use for thickening.
#' @param start_val By default the first instance of \code{interval} that is lower
#' than the lowest value of the input datetime variable, with all time units on
#' default value. Specify \code{start_val} as an offset if you want the range
#' to be nonstandard.
#' @return The data frame \code{x} with the variable added to it.
#' @details When the datetime variable contains missing values, they are left
#' in place in the dataframe. The added column with the new datetime variable,
#' will have a missing values for these rows as well.
#'
#' See \code{vignette("padr")} for more information on \code{thicken}.
#' See \code{vignette("padr_implementation")} for detailed information on
#' daylight savings time, different timezones, and the implementation of
#' \code{thicken}.
#' @examples
#' x_hour <- seq(lubridate::ymd_hms('20160302 000000'), by = 'hour',
#' length.out = 200)
#' some_df <- data.frame(x_hour = x_hour)
#' thicken(some_df, 'week')
#' thicken(some_df, 'month')
#' thicken(some_df, 'day', start_val = lubridate::ymd_hms('20160301 120000'))
#'
#' library(dplyr)
#' x_df <- data.frame(
#' x = seq(lubridate::ymd(20130101), by = 'day', length.out = 1000) %>%
#' sample(500),
#' y = runif(500, 10, 50) %>% round) %>%
#' arrange(x)
#'
#' # get the max per month
#' x_df %>% thicken('month') %>% group_by(x_month) %>%
#' summarise(y_max = max(y))
#'
#' # get the average per week, but you want your week to start on Mondays
#' # instead of Sundays
#' x_df %>% thicken('week',
#' start_val = closest_weekday(x_df$x, 2)) %>%
#' group_by(x_week) %>% summarise(y_avg = mean(y))
#' @export
thicken <- function(x,
interval,
colname = NULL,
rounding = c('down',
'up'),
by = NULL,
start_val = NULL) {
is_df(x)
original_data_frame <- x
x <- as.data.frame(x)
dt_var_info <- get_dt_var_and_name(x, by)
dt_var <- dt_var_info$dt_var
dt_var_name <- dt_var_info$dt_var_name
check_start_and_end(start_val, NULL)
interval_converted <- convert_interval(interval)
interval_converted$interval <- uniform_interval_name(interval_converted$interval)
rounding <- match.arg(rounding)
if (check_for_sorting(dt_var)){
warning('Datetime variable was unsorted, result will be unsorted as well.',
call. = FALSE)
}
if (inherits(start_val, 'POSIXt') & inherits(dt_var, 'POSIXt')) {
start_val <- enforce_time_zone(start_val, dt_var)
}
ind_to_keep <- start_val_after_min_dt(start_val, dt_var)
x <- x[ind_to_keep, , drop = FALSE] #nolint
dt_var <- dt_var[ind_to_keep]
if (is.null(by)) {
x_name <- get_date_variables(x)
} else {
x_name <- by
}
colname <- get_colname(x, x_name, colname, interval_converted)
na_ind <- which(is.na(dt_var))
dt_var <- check_for_NA_thicken(dt_var, dt_var_name, colname)
spanned <- span(dt_var, interval_converted, start_val)
thickened <- round_thicken(dt_var, spanned, rounding)
if (all(all.equal(thickened, dt_var) == TRUE)) {
stop("The thickened result is equal to the original datetime variable,
the interval specified is too low for the interval of the datetime variable", call. = FALSE)
}
thickened_with_na <- add_na_to_thicken(thickened, na_ind)
thickened_frame <- data.frame(thickened_with_na)
return_frame <- dplyr::bind_cols(x, thickened_frame)
colnames(return_frame)[ncol(return_frame)] <- colname
set_to_original_type(return_frame, original_data_frame)
}
# restore to data_frame of data.table if the input data was of this type
set_to_original_type <- function(x,
original) {
if (inherits(original, "tbl_df")) {
x <- dplyr::as_data_frame(x)
grps <- as.character(dplyr::groups(original))
x <- dplyr::group_by_(x, .dots = grps)
} else if (inherits(original, "data.table")) {
x <- data.table::as.data.table(x)
}
return(x)
}
# take the character form of the interval and put it into list form
# using get_interval_list, check if valid right away
convert_interval <- function(interval) {
start_val <- as.POSIXct("2017-01-01 00:00:00")
x <- tryCatch(
seq(start_val, length.out = 10, by = interval),
error = function(e){
stop("interval is not valid", call. = FALSE)
})
return(make_interval_list_from_string(interval))
}
make_interval_list_from_string <- function(interval_string) {
interval_split <- strsplit(interval_string, " ")[[1]]
if (length(interval_split) == 1) {
return(list(interval = interval_split,
step = 1))
} else {
return(list(interval = interval_split[2],
step = as.numeric(interval_split[1])))
}
}
# in order to compare different intervals we need to set them to the same unit
convert_int_to_hours <- function(interval_obj) {
# we take # month = # year / 12
hours_in_unit <- c(8760, 2190, 730, 168, 24, 1, 1 / 60, 1 / 3600)
names(hours_in_unit) <- c("year", "quarter", "month", "week", "day",
"hour", "min", "sec")
hours_in_unit[interval_obj$interval] * interval_obj$step
}
get_colname <- function(x, x_name, colname, interval_converted) {
if (is.null(colname)) {
if (interval_converted$step == 1) {
colname <- paste(x_name, interval_converted$interval, sep = "_")
} else {
colname <- paste(x_name, interval_converted$step,
interval_converted$interval, sep = "_")
}
}
return(colname)
}
uniform_interval_name <- function(interval) {
if (interval %in% c("y", "ye", "yea", "years")) {
interval <- "year"
} else if (interval %in% c("q", "qu", "qua", "quar", "quart", "quarte", "quarters")){
interval <- "quarter"
} else if (interval %in% c("m", "mo", "mon", "mont", "months")) {
interval <- "month"
} else if (interval %in% c("w", "we", "wee", "weeks")){
interval <- "week"
} else if (interval %in% c("d", "da", "days")) {
interval <- "day"
} else if (interval %in% c("h", "ho", "hou", "hours")) {
interval <- "hour"
} else if (interval %in% c("mi", "mins")) {
interval <- "min"
} else if (interval %in% c("s", "se", "secs")) {
interval <- "sec"
}
return(interval)
}
start_val_after_min_dt <- function(start_val, dt_var) {
if (is.null(start_val)) {
return(1:length(dt_var))
} else {
start_val <- to_posix(start_val, dt_var)$a
dt_var <- to_posix(start_val, dt_var)$b
ind <- dt_var > start_val
return(ind)
}
}
check_for_sorting <- function(dt_var) {
# filter out missing values, there will be a warning thrown for them later
dt_var <- dt_var[!is.na(dt_var)]
!all(dt_var[1:(length(dt_var) - 1)] <= dt_var[2:length(dt_var)])
}
check_for_NA_thicken <- function(dt_var, dt_var_name, colname) {
if (sum(is.na(dt_var)) > 0) {
dt_var <- dt_var[!is.na(dt_var)]
warn_mess <- sprintf(
"There are NA values in the column %s.
Returned dataframe contains original observations, with NA values for %s and %s.",
dt_var_name, dt_var_name, colname
)
warning(warn_mess, call. = FALSE)
}
dt_var
}
add_na_to_thicken <- function(thickened, na_ind) {
return_var <- c(thickened, rep(NA, length(na_ind)))
return_ind <- c(seq_along(thickened), na_ind - .5)
return_var_ord <- return_var[order(return_ind)]
attr(return_var_ord, "tzone") <- attr(thickened, "tzone")
return(return_var_ord)
}
| /R/thicken.R | no_license | foundinblank/padr | R | false | false | 8,762 | r | #' Add a variable of a higher interval to a data frame
#'
#' Take the datetime variable in a data frame and map this
#' to a variable of a higher interval. The mapping is added to the data frame
#' in a new variable. After applying \code{thicken} the user can aggregate the
#' other variables in the data frame to the higher interval, for instance using
#' \code{dplyr}.
#'
#' @param x A data frame containing at least one datetime variable of
#' class \code{Date}, \code{POSIXct} or \code{POSIXlt}.
#' @param interval The interval of the added datetime variable.
#' Any character string that would be accepted by \code{seq.Date} or
#' \code{seq.POSIXt}. It can only be higher than the interval and step size of
#' the input data.
#' @param colname The column name of the added variable. If \code{NULL} it will
#' be the name of the original datetime variable with the interval name added to
#' it (including the unit), separated by underscores.
#' @param rounding Should a value in the input datetime variable be mapped to
#' the closest value that is lower (\code{down}) or that is higher (\code{up})
#' than itself.
#' @param by Only needs to be specified when \code{x} contains multiple
#' variables of class \code{Date}, \code{POSIXct} or \code{POSIXlt}.
#' Indicates which to use for thickening.
#' @param start_val By default the first instance of \code{interval} that is lower
#' than the lowest value of the input datetime variable, with all time units on
#' default value. Specify \code{start_val} as an offset if you want the range
#' to be nonstandard.
#' @return The data frame \code{x} with the variable added to it.
#' @details When the datetime variable contains missing values, they are left
#' in place in the dataframe. The added column with the new datetime variable,
#' will have a missing values for these rows as well.
#'
#' See \code{vignette("padr")} for more information on \code{thicken}.
#' See \code{vignette("padr_implementation")} for detailed information on
#' daylight savings time, different timezones, and the implementation of
#' \code{thicken}.
#' @examples
#' x_hour <- seq(lubridate::ymd_hms('20160302 000000'), by = 'hour',
#' length.out = 200)
#' some_df <- data.frame(x_hour = x_hour)
#' thicken(some_df, 'week')
#' thicken(some_df, 'month')
#' thicken(some_df, 'day', start_val = lubridate::ymd_hms('20160301 120000'))
#'
#' library(dplyr)
#' x_df <- data.frame(
#' x = seq(lubridate::ymd(20130101), by = 'day', length.out = 1000) %>%
#' sample(500),
#' y = runif(500, 10, 50) %>% round) %>%
#' arrange(x)
#'
#' # get the max per month
#' x_df %>% thicken('month') %>% group_by(x_month) %>%
#' summarise(y_max = max(y))
#'
#' # get the average per week, but you want your week to start on Mondays
#' # instead of Sundays
#' x_df %>% thicken('week',
#' start_val = closest_weekday(x_df$x, 2)) %>%
#' group_by(x_week) %>% summarise(y_avg = mean(y))
#' @export
thicken <- function(x,
interval,
colname = NULL,
rounding = c('down',
'up'),
by = NULL,
start_val = NULL) {
is_df(x)
original_data_frame <- x
x <- as.data.frame(x)
dt_var_info <- get_dt_var_and_name(x, by)
dt_var <- dt_var_info$dt_var
dt_var_name <- dt_var_info$dt_var_name
check_start_and_end(start_val, NULL)
interval_converted <- convert_interval(interval)
interval_converted$interval <- uniform_interval_name(interval_converted$interval)
rounding <- match.arg(rounding)
if (check_for_sorting(dt_var)){
warning('Datetime variable was unsorted, result will be unsorted as well.',
call. = FALSE)
}
if (inherits(start_val, 'POSIXt') & inherits(dt_var, 'POSIXt')) {
start_val <- enforce_time_zone(start_val, dt_var)
}
ind_to_keep <- start_val_after_min_dt(start_val, dt_var)
x <- x[ind_to_keep, , drop = FALSE] #nolint
dt_var <- dt_var[ind_to_keep]
if (is.null(by)) {
x_name <- get_date_variables(x)
} else {
x_name <- by
}
colname <- get_colname(x, x_name, colname, interval_converted)
na_ind <- which(is.na(dt_var))
dt_var <- check_for_NA_thicken(dt_var, dt_var_name, colname)
spanned <- span(dt_var, interval_converted, start_val)
thickened <- round_thicken(dt_var, spanned, rounding)
if (all(all.equal(thickened, dt_var) == TRUE)) {
stop("The thickened result is equal to the original datetime variable,
the interval specified is too low for the interval of the datetime variable", call. = FALSE)
}
thickened_with_na <- add_na_to_thicken(thickened, na_ind)
thickened_frame <- data.frame(thickened_with_na)
return_frame <- dplyr::bind_cols(x, thickened_frame)
colnames(return_frame)[ncol(return_frame)] <- colname
set_to_original_type(return_frame, original_data_frame)
}
# restore to data_frame of data.table if the input data was of this type
set_to_original_type <- function(x,
original) {
if (inherits(original, "tbl_df")) {
x <- dplyr::as_data_frame(x)
grps <- as.character(dplyr::groups(original))
x <- dplyr::group_by_(x, .dots = grps)
} else if (inherits(original, "data.table")) {
x <- data.table::as.data.table(x)
}
return(x)
}
# take the character form of the interval and put it into list form
# using get_interval_list, check if valid right away
convert_interval <- function(interval) {
start_val <- as.POSIXct("2017-01-01 00:00:00")
x <- tryCatch(
seq(start_val, length.out = 10, by = interval),
error = function(e){
stop("interval is not valid", call. = FALSE)
})
return(make_interval_list_from_string(interval))
}
make_interval_list_from_string <- function(interval_string) {
interval_split <- strsplit(interval_string, " ")[[1]]
if (length(interval_split) == 1) {
return(list(interval = interval_split,
step = 1))
} else {
return(list(interval = interval_split[2],
step = as.numeric(interval_split[1])))
}
}
# in order to compare different intervals we need to set them to the same unit
convert_int_to_hours <- function(interval_obj) {
# we take # month = # year / 12
hours_in_unit <- c(8760, 2190, 730, 168, 24, 1, 1 / 60, 1 / 3600)
names(hours_in_unit) <- c("year", "quarter", "month", "week", "day",
"hour", "min", "sec")
hours_in_unit[interval_obj$interval] * interval_obj$step
}
get_colname <- function(x, x_name, colname, interval_converted) {
if (is.null(colname)) {
if (interval_converted$step == 1) {
colname <- paste(x_name, interval_converted$interval, sep = "_")
} else {
colname <- paste(x_name, interval_converted$step,
interval_converted$interval, sep = "_")
}
}
return(colname)
}
uniform_interval_name <- function(interval) {
if (interval %in% c("y", "ye", "yea", "years")) {
interval <- "year"
} else if (interval %in% c("q", "qu", "qua", "quar", "quart", "quarte", "quarters")){
interval <- "quarter"
} else if (interval %in% c("m", "mo", "mon", "mont", "months")) {
interval <- "month"
} else if (interval %in% c("w", "we", "wee", "weeks")){
interval <- "week"
} else if (interval %in% c("d", "da", "days")) {
interval <- "day"
} else if (interval %in% c("h", "ho", "hou", "hours")) {
interval <- "hour"
} else if (interval %in% c("mi", "mins")) {
interval <- "min"
} else if (interval %in% c("s", "se", "secs")) {
interval <- "sec"
}
return(interval)
}
start_val_after_min_dt <- function(start_val, dt_var) {
if (is.null(start_val)) {
return(1:length(dt_var))
} else {
start_val <- to_posix(start_val, dt_var)$a
dt_var <- to_posix(start_val, dt_var)$b
ind <- dt_var > start_val
return(ind)
}
}
check_for_sorting <- function(dt_var) {
# filter out missing values, there will be a warning thrown for them later
dt_var <- dt_var[!is.na(dt_var)]
!all(dt_var[1:(length(dt_var) - 1)] <= dt_var[2:length(dt_var)])
}
check_for_NA_thicken <- function(dt_var, dt_var_name, colname) {
if (sum(is.na(dt_var)) > 0) {
dt_var <- dt_var[!is.na(dt_var)]
warn_mess <- sprintf(
"There are NA values in the column %s.
Returned dataframe contains original observations, with NA values for %s and %s.",
dt_var_name, dt_var_name, colname
)
warning(warn_mess, call. = FALSE)
}
dt_var
}
add_na_to_thicken <- function(thickened, na_ind) {
return_var <- c(thickened, rep(NA, length(na_ind)))
return_ind <- c(seq_along(thickened), na_ind - .5)
return_var_ord <- return_var[order(return_ind)]
attr(return_var_ord, "tzone") <- attr(thickened, "tzone")
return(return_var_ord)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-shakespeare.R
\docType{data}
\name{shakespeare_1}
\alias{shakespeare_1}
\title{A Comedy of Errors}
\format{
igraph object
}
\source{
Raw data downloaded from https://github.com/mallaham/Shakespeare-Plays
}
\usage{
shakespeare_1
}
\description{
scene co-occurences in Shakespeare's "A Comedy of Errors"
}
\keyword{datasets}
| /man/shakespeare_1.Rd | permissive | schochastics/networkdata | R | false | true | 406 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-shakespeare.R
\docType{data}
\name{shakespeare_1}
\alias{shakespeare_1}
\title{A Comedy of Errors}
\format{
igraph object
}
\source{
Raw data downloaded from https://github.com/mallaham/Shakespeare-Plays
}
\usage{
shakespeare_1
}
\description{
scene co-occurences in Shakespeare's "A Comedy of Errors"
}
\keyword{datasets}
|
# BEMTOOL - Bio-Economic Model TOOLs - version 2.5
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# BEMTOOL is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
# ------------------------------------------------------------------------------
# Definition of object types used in BEMTOOL code (S4 classes and methods)
# - Catch (for LANDINGS, DISCARDS and CATCHES)
# ------------------------------------------------------------------------------
# definition of object: bmtCatch
CATCH_TYPE <- c("LANDINGS", "DISCARDS", "CATCHES")
setClass(Class="bmtCatch",
representation=representation(
Ctype = "character",
numbers = "data.frame",
numbers.CI.perc = "data.frame",
totalweight = "numeric",
totalweight.CI.perc = "data.frame",
meanLength = "numeric",
meanLength.CI.perc = "data.frame",
meanWeight = "numeric",
meanWeight.CI.perc = "data.frame",
fishing_mortality = "numeric",
fishing_mortality.CI.perc = "data.frame") )
| /BEMTOOL-ver2.5-2018_0901/src/obj/bmtCatch.r | no_license | gustavdelius/BEMTOOL2.5 | R | false | false | 1,434 | r | # BEMTOOL - Bio-Economic Model TOOLs - version 2.5
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# BEMTOOL is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
# ------------------------------------------------------------------------------
# Definition of object types used in BEMTOOL code (S4 classes and methods)
# - Catch (for LANDINGS, DISCARDS and CATCHES)
# ------------------------------------------------------------------------------
# definition of object: bmtCatch
CATCH_TYPE <- c("LANDINGS", "DISCARDS", "CATCHES")
setClass(Class="bmtCatch",
representation=representation(
Ctype = "character",
numbers = "data.frame",
numbers.CI.perc = "data.frame",
totalweight = "numeric",
totalweight.CI.perc = "data.frame",
meanLength = "numeric",
meanLength.CI.perc = "data.frame",
meanWeight = "numeric",
meanWeight.CI.perc = "data.frame",
fishing_mortality = "numeric",
fishing_mortality.CI.perc = "data.frame") )
|
options(scipen=999)
library(ape)
library(phytools)
library(RColorBrewer)
library(stats)
# Load the translated protein sequences for all samples.
proteins <- read.FASTA("blochmania_AA_all.fasta", type = "AA")
# Load the all-vs-all BLAST results and give the columns readable names.
blast <- read.table("blochmania_AA_all.blast", sep = "\t", stringsAsFactors = FALSE)
colnames(blast) <- c("query", "subject", "identity", "length", "e-value")
# Load the table mapping protein IDs to gene product names.
name_table <- read.table("faa_gene_conversion.txt", sep = "\t",
                         stringsAsFactors = FALSE, quote = "")
# Sample IDs come from the per-sample output directories ("output_<sample>");
# drop the 7-character "output_" prefix to recover each sample name.
x_files <- list.files(pattern = "^output_*")
samples <- substring(x_files, 8)
# Reorder the samples to match the phylogeny from top to bottom.
phylo_order <- c(17, 12, 9, 5, 4, 10, 13, 15, 16, 1, 2, 3, 6, 8, 7, 11, 14)
samples <- samples[phylo_order]
# Drop weak hits: keep only alignments with at least 50% identity.
blast <- blast[blast$identity >= 50,]
# Greedy single-linkage clustering of proteins into homolog groups:
# repeatedly take the first remaining query, record which samples hit it,
# then remove the query and all of its matches from the working table.
blast2 <- blast
queries <- list()
matches <- list()
counter <- 1
while (nrow(blast2) > 0) {
	# all hits for the first remaining query
	focal <- blast2$query[1]
	hits <- blast2[blast2$query == focal, ]
	# length of the focal protein, used to screen out partial alignments
	focal_len <- length(proteins[names(proteins) == focal][[1]])
	# keep only alignments spanning at least 75% of the focal protein
	hits <- hits[hits$length >= 0.75 * focal_len,]
	# record the representative query and the sample IDs of its matches;
	# subject names look like "<sample>__<protein>", so keep the sample part
	queries[[counter]] <- focal
	matches[[counter]] <- sapply(strsplit(hits$subject, "__"), "[[", 1)
	# remove the focal protein and everything it matched from the working table
	blast2 <- blast2[blast2$query != focal & blast2$subject != focal,]
	blast2 <- blast2[!(blast2$query %in% hits$subject),]
	blast2 <- blast2[!(blast2$subject %in% hits$subject),]
	counter <- counter + 1
}
queries <- unlist(queries)
# Build an empty presence/absence matrix: one row per homolog group
# (labeled by its representative query protein), one column per sample.
# The original grew this column-by-column with cbind() in a loop;
# allocating it in a single matrix() call is the idiomatic equivalent
# and avoids repeated copying.
gene_matrix <- matrix(0, nrow = length(queries), ncol = length(samples),
                      dimnames = list(queries, samples))
head(gene_matrix)
# Fill in the matrix with presence (1) / absence (0): a gene is present in
# a sample if that sample appears in the match list of the gene's homolog
# group. as.integer() converts the logical membership vector directly to
# 0/1, replacing the original per-element replacement that relied on the
# reassignable T/F shorthands; seq_along() replaces the unsafe 1:length().
for (a in seq_along(queries)) {
	gene_matrix[a,] <- as.integer(colnames(gene_matrix) %in% matches[[a]])
}
gene_matrix <- as.data.frame(gene_matrix)
# Attach the gene product annotation for each representative protein,
# then sort the whole table alphabetically by product name.
gene_products <- name_table[[2]][match(rownames(gene_matrix), name_table[[1]])]
gene_matrix <- cbind(gene_products, gene_matrix)[order(gene_products),]
# Write the table out for manual curation: merge groups that BLAST split
# too finely, and remove hypothetical proteins, before re-reading below.
write.table(gene_matrix, file = "gene_pa_matrix.txt", sep = "\t",
            quote = FALSE, row.names = FALSE)
# Re-read the manually curated presence/absence matrix (genes x samples).
x <- read.table("gene_pa_matrix_edited.txt", sep = "\t", row.names = 1,
                quote = "", stringsAsFactors = FALSE, header = TRUE)
# Total number of genes present per sample. colSums() replaces the slower
# apply(x, 2, sum); as.vector() strips the names, as before.
gene_counts <- as.vector(colSums(x))
# Convert the matrix column names to the "C-###" format used by the
# diversity table: take the token before the first "_", drop its leading
# character, then zero-pad what remains to width 3.
renamed_samples <- sapply(strsplit(colnames(x), "_"), "[[", 1)
renamed_samples <- substr(renamed_samples, 2, nchar(renamed_samples))
# Vectorized padding (replaces the original per-element if/else loop).
# NOTE(review): names longer than 3 characters were passed through with no
# "C-" prefix by the original loop; that behavior is preserved here.
prefix <- ifelse(nchar(renamed_samples) == 1, "C-00",
                 ifelse(nchar(renamed_samples) == 2, "C-0",
                        ifelse(nchar(renamed_samples) == 3, "C-", "")))
renamed_samples <- paste0(prefix, renamed_samples)
gene_count <- data.frame(individual=as.character(renamed_samples), gene_count=as.numeric(gene_counts))
# add new names to x object and transpose
x <- t(x)
rownames(x) <- gene_count$individual
# read in diversity table
div <- read.table("01_heterozygosity_and_pop_sizes.txt", header=T, stringsAsFactors=F)
# merge the diversity and gene count tables
# reorder
gene_count <- gene_count[match(div$individual, gene_count$individual), ]
div <- cbind(div, gene_count$gene_count)
colnames(div)[8] <- "gene_count"
# write new table
write.table(div, file="01_het_popsizes_blochgenecount.txt", sep="\t", row.names=F, quote=F)
# Load the host phylogeny and prepare it for the comparative analyses.
tree <- read.nexus("camp_summed.tre")
# strip stray single quotes from the tip labels
tree$tip.label <- gsub("'", "", tree$tip.label)
# root at the midpoint of the longest tip-to-tip path
tree <- midpoint.root(tree)
# drop the two outgroup taxa
tree <- drop.tip(tree, c("SRX5650044", "SRX022802"))
# Rotate internal nodes so the heatmap tip order matches the phylogenetic
# layout used in the other figure; node numbers refer to the pruned,
# midpoint-rooted tree.
nodes_to_rotate <- c(18, 19, 20, 21, 22, 25, 28, 29, 30, 31, 32, 33)
for (node in nodes_to_rotate) {
  tree <- rotate(tree, node)
}
#PIC
pop_size <- setNames(div[,"harmonic_pop"], div$individual)
gene_counts <- setNames(div[,"gene_count"], div$individual)
pic_pop_size <- pic(pop_size, tree)
pic_gene_counts <- pic(gene_counts, tree)
summary(lm(pic_gene_counts ~ pic_pop_size))
plot(pic_pop_size, pic_gene_counts, ylab="Blochmannia Gene Counts (PIC)", xlab="Host Harmonic Mean Pop. Size (PIC)")
# test phylogenetic signal of gene count
signal_lambda <- phylosig(tree, method="lambda", gene_counts, test=T)
signal_k <- phylosig(tree, method="K", gene_counts, test=T)
# test phylogenetic signal for each gene
ps_per_gene <- c("gene_name", "lambda", "pval")
for(a in 1:ncol(x)) {
# extract single gene
a_rep <- x[,a]
# continue if more than one value
if(length(unique(a_rep)) > 1) {
# run test (just the lambda)
a_signal_lambda <- phylosig(tree, method="lambda", a_rep, test=T)
a_output <- c(colnames(x)[a], a_signal_lambda$lambda, a_signal_lambda$P)
ps_per_gene <- rbind(ps_per_gene, a_output)
}
}
# make data frame of output
ps_per_gene <- data.frame(gene_name=as.character(ps_per_gene[2:nrow(ps_per_gene),1]),
lambda=as.numeric(ps_per_gene[2:nrow(ps_per_gene),2]),
pval=as.numeric(ps_per_gene[2:nrow(ps_per_gene),3]))
# add corrected p values to data frame
pval_corrected <- p.adjust(ps_per_gene$pval, method="BH")
ps_per_gene <- cbind(ps_per_gene, pval_corrected)
# write table output
write.table(ps_per_gene, file="gene_phylogenetic signal.txt", sep="\t", row.names=F, col.names=T, quote=F)
# which are significant?
sig_ps_per_gene <- ps_per_gene[ps_per_gene$pval_corrected < 0.05, ]
sig_x <- x[,colnames(x) %in% sig_ps_per_gene$gene_name]
# plot heatmap of genes
pdf(file="phylo_heatmap.pdf", height=2.5, width=6.5)
phylo.heatmap(tree,x,standardize=F, legend=F, labels=F, colors=c(brewer.pal(11,"RdYlGn")[c(7,11)]), fsize=0.7, split=c(0.3,0.7))
dev.off()
# plot heatmap of genes with significant phylogenetic signal
pdf(file="sig_phylo_heatmap.pdf", height=2.5, width=6.5)
phylo.heatmap(tree, sig_x,standardize=F, legend=F, labels=F, colors=c(brewer.pal(11,"RdYlGn")[c(7,11)]), fsize=0.7, split=c(0.3,0.7))
dev.off()
| /02_blochmannia/13_pangenome/03_analyze_blastp_results.r | permissive | Weihankk/camponotus_genomes1 | R | false | false | 6,814 | r |
# Suppress scientific notation so gene counts etc. are written as plain numbers.
options(scipen=999)
library(ape)
library(phytools)
library(RColorBrewer)
library(stats)
# read in proteins file (amino-acid FASTA of all Blochmannia proteins)
proteins <- read.FASTA("blochmania_AA_all.fasta", type="AA")
# read in blast results (tabular all-vs-all blastp output, no header)
blast <- read.table("blochmania_AA_all.blast", sep="\t", stringsAsFactors=F)
colnames(blast) <- c("query", "subject", "identity", "length", "e-value")
# read in name conversion table (protein id -> gene product description)
name_table <- read.table("faa_gene_conversion.txt", sep="\t", stringsAsFactors=F, quote="")
# get order of individuals in phylogeny
# NOTE(review): the regex "^output_*" matches "output" followed by zero or more
# underscores, not "output_<anything>"; it works if every relevant file starts
# with "output_" -- confirm no other "output*" files can appear here.
x_files <- list.files(pattern="^output_*")
# sample name = everything after the 7-character "output_" prefix
samples <- substr(x_files, 8, nchar(x_files))
# reorder samples to phylogeny ordered top to bottom
# NOTE(review): hard-coded permutation assumes exactly 17 samples in a fixed
# list.files() order -- revisit if samples are added, removed, or renamed.
samples <- samples[c(17, 12, 9, 5, 4, 10, 13, 15, 16, 1, 2, 3, 6, 8, 7, 11, 14)]
# subset genes to >= 50% identity
blast <- blast[blast$identity >= 50,]
# duplicate blast object
blast2 <- blast
# Greedy single-linkage clustering of blast hits into putative gene families:
# repeatedly take the first remaining query, treat its sufficiently long hits
# as the same gene, record which samples carry it, and remove every involved
# row before continuing.  Order-dependent by design (first query wins).
queries <- list()
matches <- list()
counter <- 1
while(nrow(blast2) > 0) {
# subset to first query
a_rep <- blast2[blast2$query == unique(blast2$query)[1], ]
# get length of this query (look the protein up in the FASTA by name)
a_length <- length(proteins[names(proteins) == a_rep[1,1]][[1]])
# subset blast matches to those at least 75% of query length
a_rep <- a_rep[a_rep$length >= 0.75 * a_length,]
# add queries and matches to lists
queries[[counter]] <- a_rep[1,1]
# sample id is the part of the subject name before the first "__"
matches[[counter]] <- sapply(strsplit(a_rep[,2], "__"), "[[", 1)
# remove query and matches from blast2 object (both as query and as subject)
blast2 <- blast2[blast2$query != a_rep[1,1] & blast2$subject != a_rep[1,1],]
blast2 <- blast2[blast2$query %in% a_rep$subject == FALSE,]
blast2 <- blast2[blast2$subject %in% a_rep$subject == FALSE,]
# add one to counter
counter <- counter + 1
}
# flatten to a character vector: one representative protein per gene family
queries <- unlist(queries)
# Make an empty gene presence/absence matrix (genes x samples), all zeros.
# matrix() replaces the original grow-by-cbind loop: a single allocation, and
# it still yields a proper matrix when there is only one sample (cbind-growth
# left a bare vector in that case).
gene_matrix <- matrix(0, nrow = length(queries), ncol = length(samples))
rownames(gene_matrix) <- queries
colnames(gene_matrix) <- samples
head(gene_matrix)
# fill in matrix with presence / absence (1 / 0)
# For each gene family, mark the samples recorded in matches[[a]].
# seq_along() is safe when there are zero queries, and as.integer(%in%)
# produces the same 0/1 values the original T/F replacement loop did
# (stored as numeric once assigned into the matrix).
for (a in seq_along(queries)) {
gene_matrix[a, ] <- as.integer(colnames(gene_matrix) %in% matches[[a]])
}
gene_matrix <- as.data.frame(gene_matrix)
# match gene products of gene_matrix and resort alphabetically by product name
gene_products <- name_table[match(rownames(gene_matrix), name_table[,1]),2]
gene_matrix <- cbind(gene_products, gene_matrix)
gene_matrix <- gene_matrix[order(gene_products),]
# write output and manually inspect (possibly merge those that were too distantly blast related to automatically match)
# also remove hypothetical proteins
write.table(gene_matrix, file="gene_pa_matrix.txt", sep="\t", quote=F, row.names=F)
# NOTE(review): there is a manual curation step between these two lines --
# "gene_pa_matrix_edited.txt" is the hand-edited version of the table written
# above and must exist before the rest of the script is run.
x <- read.table("gene_pa_matrix_edited.txt", sep="\t", row.names=1, quote="", stringsAsFactors=F, header=T)
# get counts of total genes present (column sums = genes per sample)
gene_counts <- as.vector(apply(x,2,sum))
# get sample names of gene counts and edit to match format of diversity table:
# keep the part before the first "_", then drop the first character
renamed_samples <- sapply(strsplit(colnames(x), "_"), "[[", 1)
renamed_samples <- substr(renamed_samples, 2, nchar(renamed_samples))
# Zero-pad each numeric sample id to three digits and prepend "C-" so the ids
# match the diversity table's format (e.g. "7" -> "C-007", "42" -> "C-042").
# NOTE(review): ids longer than 3 characters are left untouched (no prefix) --
# presumably none occur; confirm against the diversity table.
# seq_along() replaces 1:length(), which would iterate c(1, 0) on an empty
# vector instead of not iterating at all.
for (a in seq_along(renamed_samples)) {
  if (nchar(renamed_samples[a]) == 1) {
    renamed_samples[a] <- paste("C-00", renamed_samples[a], sep="")
  } else if (nchar(renamed_samples[a]) == 2) {
    renamed_samples[a] <- paste("C-0", renamed_samples[a], sep="")
  } else if (nchar(renamed_samples[a]) == 3) {
    renamed_samples[a] <- paste("C-", renamed_samples[a], sep="")
  }
}
# gene counts into data frame (one row per sample)
gene_count <- data.frame(individual=as.character(renamed_samples), gene_count=as.numeric(gene_counts))
# add new names to x object and transpose (x becomes samples x genes)
x <- t(x)
rownames(x) <- gene_count$individual
# read in diversity table
div <- read.table("01_heterozygosity_and_pop_sizes.txt", header=T, stringsAsFactors=F)
# merge the diversity and gene count tables
# reorder gene_count rows to match the diversity table's individual order
gene_count <- gene_count[match(div$individual, gene_count$individual), ]
div <- cbind(div, gene_count$gene_count)
# NOTE(review): hard-coded column index 8 assumes the diversity table has
# exactly 7 columns before the cbind above -- confirm if its format changes.
colnames(div)[8] <- "gene_count"
# write new table
write.table(div, file="01_het_popsizes_blochgenecount.txt", sep="\t", row.names=F, quote=F)
# read in phylogeny (host Camponotus summary tree, NEXUS format)
tree <- read.nexus("camp_summed.tre")
# remove single quotes from tip labels
tree$tip.label <- gsub("'", "", tree$tip.label)
# midpoint root (phytools)
tree <- midpoint.root(tree)
# remove outgroups (SRA accessions, not part of the ingroup sample set)
tree <- drop.tip(tree, c("SRX5650044", "SRX022802"))
# rotate tree for plotting heatmap in same phylogenetic conformation as other figure
# NOTE(review): node numbers are specific to this tree's topology after the
# drops above -- re-derive them if the input tree changes.
nodes_to_rotate <- c(18,19,20,21,22,25,28,29,30,31,32,33)
for(a in nodes_to_rotate) {
tree <- rotate(tree, a)
}
# Phylogenetically independent contrasts (PIC): regress Blochmannia gene
# counts on host harmonic-mean population size while controlling for phylogeny.
pop_size <- setNames(div[,"harmonic_pop"], div$individual)
gene_counts <- setNames(div[,"gene_count"], div$individual)
pic_pop_size <- pic(pop_size, tree)
pic_gene_counts <- pic(gene_counts, tree)
# Regression of contrasts (printed, not saved); see plot below.
summary(lm(pic_gene_counts ~ pic_pop_size))
plot(pic_pop_size, pic_gene_counts, ylab="Blochmannia Gene Counts (PIC)", xlab="Host Harmonic Mean Pop. Size (PIC)")
# test phylogenetic signal of gene count (Pagel's lambda and Blomberg's K)
signal_lambda <- phylosig(tree, method="lambda", gene_counts, test=T)
signal_k <- phylosig(tree, method="K", gene_counts, test=T)
# test phylogenetic signal for each gene (Pagel's lambda per presence/absence
# column).  Results accumulate as a character matrix whose first row is the
# header; the data.frame construction just below strips that row, so the
# layout is deliberately kept as-is.
ps_per_gene <- c("gene_name", "lambda", "pval")
# seq_len() replaces 1:ncol(x), which would misbehave if x had zero columns.
for(a in seq_len(ncol(x))) {
# extract single gene (presence/absence across samples)
a_rep <- x[,a]
# phylogenetic signal is only defined when the gene varies among samples
if(length(unique(a_rep)) > 1) {
# run test (just the lambda)
a_signal_lambda <- phylosig(tree, method="lambda", a_rep, test=T)
a_output <- c(colnames(x)[a], a_signal_lambda$lambda, a_signal_lambda$P)
ps_per_gene <- rbind(ps_per_gene, a_output)
}
}
# make data frame of output (drop the header row stored at position 1 and
# convert the character columns back to their proper types)
ps_per_gene <- data.frame(gene_name=as.character(ps_per_gene[2:nrow(ps_per_gene),1]),
          lambda=as.numeric(ps_per_gene[2:nrow(ps_per_gene),2]),
          pval=as.numeric(ps_per_gene[2:nrow(ps_per_gene),3]))
# add corrected p values to data frame (Benjamini-Hochberg FDR)
pval_corrected <- p.adjust(ps_per_gene$pval, method="BH")
ps_per_gene <- cbind(ps_per_gene, pval_corrected)
# write table output
# NOTE(review): the output filename contains a space ("gene_phylogenetic
# signal.txt") -- probably an unintended typo for an underscore, but left
# untouched because downstream steps may read this exact name.
write.table(ps_per_gene, file="gene_phylogenetic signal.txt", sep="\t", row.names=F, col.names=T, quote=F)
# which are significant? (FDR < 0.05)
sig_ps_per_gene <- ps_per_gene[ps_per_gene$pval_corrected < 0.05, ]
sig_x <- x[,colnames(x) %in% sig_ps_per_gene$gene_name]
# plot heatmap of genes (all genes, two-color presence/absence)
pdf(file="phylo_heatmap.pdf", height=2.5, width=6.5)
phylo.heatmap(tree,x,standardize=F, legend=F, labels=F, colors=c(brewer.pal(11,"RdYlGn")[c(7,11)]), fsize=0.7, split=c(0.3,0.7))
dev.off()
# plot heatmap of genes with significant phylogenetic signal
pdf(file="sig_phylo_heatmap.pdf", height=2.5, width=6.5)
phylo.heatmap(tree, sig_x,standardize=F, legend=F, labels=F, colors=c(brewer.pal(11,"RdYlGn")[c(7,11)]), fsize=0.7, split=c(0.3,0.7))
dev.off()
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -1.31769571426391e-96, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615834301-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,047 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -1.31769571426391e-96, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# Plot per-class rate distributions onto the current graphics device.
#
# aggr        : data frame (or list) with numeric element `x` and a `Class`
#               column -- assumed structure; confirm against callers.
# classes     : vector of class labels to draw, one series per class.
# classcolors : colors, parallel to `classes`.
# ...         : graphical parameters forwarded to plot.xy() (e.g. type, pch).
#               A plotting region must already be open (plot.xy adds points).
`plot_aggregate_data` <-
function(aggr, classes, classcolors, ...) {
  # seq_along() fixes the 1:length() pattern, which iterated c(1, 0) when
  # `classes` was empty.
  for (i in seq_along(classes)) {
    rates <- aggr$x[aggr$Class %in% classes[i]]
    # Skip classes with no data: the original computed (1:0)/0 x-coordinates
    # (Inf, NaN) and handed them to plot.xy.
    if (length(rates) == 0) next
    # x positions are the evenly spaced fractions 1/n, 2/n, ..., 1
    plot.xy(xy.coords(x = seq_along(rates) / length(rates), y = rates),
            col = classcolors[i], ...)
  }
}
| /nmica-r-package/R/plot_aggregate_data.R | no_license | mz2/r-utilities | R | false | false | 253 | r | `plot_aggregate_data` <-
function(aggr,classes,classcolors,...) {
# Draw one point series per class: subset aggr$x to rows whose Class matches,
# then plot each class's rates against the evenly spaced fractions 1/n .. 1.
# Assumes a plotting region is already open; `...` forwards graphical
# parameters to plot.xy().  NOTE(review): 1:length(classes) iterates c(1, 0)
# when `classes` is empty -- confirm callers never pass an empty vector.
for (i in 1:length(classes)) {
rates <- aggr$x[aggr$Class %in% classes[i]]
plot.xy(xy.coords(x=(1:length(rates))/length(rates),y=rates),
col=classcolors[i],...)
}
}
|
#-------------------------------------------------------------------------------
# Build tests
# Shared client fixture: a dummy endpoint plus the restxml build handler, so
# each test below only has to define an Operation and (optionally) an input
# shape before calling new_request()/build().
svc <- Client(
  client_info = ClientInfo(
    endpoint = "https://test"
  )
)
svc$handlers$build <- HandlerList(restxml_build)
#-------------------------------------------------------------------------------
# REST tests
# These pin how URI path parameters are serialized into the request URL.
# `.tags = list(location = "uri")` marks a field as a path placeholder; an
# optional locationName maps a differently named field onto the placeholder.
test_that("no parameters", {
  op1 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobs"
  )
  req <- new_request(svc, op1, NULL, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobs")
})
test_that("URI parameter with no location name", {
  op2 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input2 <- function(PipelineId) {
    args <- list(PipelineId = PipelineId)
    interface <- Structure(
      PipelineId = Scalar(type = "string", .tags = list(location = "uri"))
    )
    return(populate(args, interface))
  }
  input <- op_input2(
    PipelineId = "foo"
  )
  req <- new_request(svc, op2, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/foo")
})
test_that("URI parameter with location name", {
  # Field is named Foo but fills the {PipelineId} placeholder via locationName.
  op3 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input3 <- function(Foo) {
    args <- list(Foo = Foo)
    interface <- Structure(
      Foo = Scalar(type = "string", .tags = list(location = "uri", locationName = "PipelineId"))
    )
    return(populate(args, interface))
  }
  input <- op_input3(
    Foo = "bar"
  )
  req <- new_request(svc, op3, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/bar")
})
# Query-string serialization of collections: a list becomes repeated keys, a
# map becomes one key per entry, and a map of lists combines both.  The
# expected URL strings below are the contract -- do not reformat them.
test_that("query string list of strings", {
  op4 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/path"
  )
  op_input4 <- function(Items) {
    args <- list(Items = Items)
    interface <- Structure(
      Items = List(Scalar(type = "string"), .tags = list(location = "querystring", locationName = "item"))
    )
    return(populate(args, interface))
  }
  input <- op_input4(
    Items = list("value1", "value2")
  )
  req <- new_request(svc, op4, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/path?item=value1&item=value2")
})
test_that("query string map of strings", {
  op5 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input5 <- function(PipelineId, QueryDoc) {
    args <- list(PipelineId = PipelineId, QueryDoc = QueryDoc)
    interface <- Structure(
      PipelineId = Scalar(type = "string", .tags = list(location = "uri")),
      QueryDoc = Map(Scalar(type = "string"), .tags = list(location = "querystring"))
    )
    return(populate(args, interface))
  }
  input <- op_input5(
    PipelineId = "foo",
    QueryDoc = list(
      bar = "baz",
      fizz = "buzz"
    )
  )
  req <- new_request(svc, op5, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz")
})
test_that("query string map of lists of strings", {
  # Each map key is repeated once per element of its list value.
  op6 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input6 <- function(PipelineId, QueryDoc) {
    args <- list(PipelineId = PipelineId, QueryDoc = QueryDoc)
    interface <- Structure(
      PipelineId = Scalar(type = "string", .tags = list(location = "uri")),
      QueryDoc = Map(List(Scalar(type = "string")), .tags = list(location = "querystring"))
    )
    return(populate(args, interface))
  }
  input <- op_input6(
    PipelineId = "id",
    QueryDoc = list(
      fizz = c("buzz", "pop"),
      foo = c("bar", "baz")
    )
  )
  req <- new_request(svc, op6, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/id?fizz=buzz&fizz=pop&foo=bar&foo=baz")
})
# Scalar query-string serialization: booleans render as lowercase "true"/
# "false", and URI + query-string parameters combine into one URL.
test_that("query string with bool (true)", {
  op7 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/path"
  )
  op_input7 <- function(BoolQuery) {
    args <- list(BoolQuery = BoolQuery)
    interface <- Structure(
      BoolQuery = Scalar(type = "boolean", .tags = list(location = "querystring", locationName = "bool-query"))
    )
    return(populate(args, interface))
  }
  input <- op_input7(
    BoolQuery = TRUE
  )
  req <- new_request(svc, op7, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/path?bool-query=true")
})
test_that("query string with bool (false)", {
  # FALSE must still be serialized (not dropped as an "empty" value).
  op8 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/path"
  )
  op_input8 <- function(BoolQuery) {
    args <- list(BoolQuery = BoolQuery)
    interface <- Structure(
      BoolQuery = Scalar(type = "boolean", .tags = list(location = "querystring", locationName = "bool-query"))
    )
    return(populate(args, interface))
  }
  input <- op_input8(
    BoolQuery = FALSE
  )
  req <- new_request(svc, op8, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/path?bool-query=false")
})
test_that("URI and query string parameters", {
  op9 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input9 <- function(Ascending, PageToken, PipelineId) {
    args <- list(Ascending = Ascending, PageToken = PageToken, PipelineId = PipelineId)
    interface <- Structure(
      Ascending = Scalar(type = "string", .tags = list(location = "querystring", locationName = "Ascending")),
      PageToken = Scalar(type = "string", .tags = list(location = "querystring", locationName = "PageToken")),
      PipelineId = Scalar(type = "string", .tags = list(location = "uri", locationName = "PipelineId"))
    )
    return(populate(args, interface))
  }
  input <- op_input9(
    Ascending = "true",
    PageToken = "bar",
    PipelineId = "foo"
  )
  req <- new_request(svc, op9, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar")
})
#-------------------------------------------------------------------------------
# XML tests
# These pin the XML request-body serialization: element names come from
# locationName, the xmlURI tag sets the xmlns attribute (repeated on every
# element by this serializer), and scalar types render as shown below.
test_that("Basic XML Case1", {
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(Description, Name) {
    args <- list(Description = Description, Name = Name)
    interface <- Structure(
      Description = Scalar(type = "string"),
      Name = Scalar(type = "string"),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    Description = "bar",
    Name = "foo"
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">bar</Description><Name xmlns="https://foo/">foo</Name></OperationRequest>')
})
test_that("Other Scalar Case1", {
  # Booleans serialize lowercase; integers and floats use plain notation.
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(First, Second, Third, Fourth) {
    args <- list(First = First, Second = Second, Third = Third, Fourth = Fourth)
    interface <- Structure(
      First = Scalar(type = "boolean"),
      Fourth = Scalar(type = "integer"),
      Second = Scalar(type = "boolean"),
      Third = Scalar(type = "float"),
      .tags = list(
        locationName = "OperationRequest",
        xmlURI = "https://foo/"
      )
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    First = TRUE,
    Fourth = 3,
    Second = FALSE,
    Third = 1.2
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><First xmlns="https://foo/">true</First><Fourth xmlns="https://foo/">3</Fourth><Second xmlns="https://foo/">false</Second><Third xmlns="https://foo/">1.2</Third></OperationRequest>')
})
test_that("Nested Structure Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(Description, SubStructure) {
    args <- list(Description = Description, SubStructure = SubStructure)
    interface <- Structure(
      Description = Scalar(type = "string"),
      SubStructure = Structure(
        Bar = Scalar(type = "string"),
        Foo = Scalar(type = "string")
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    Description = "baz",
    SubStructure = list(
      Bar = "b",
      Foo = "a"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">baz</Description><SubStructure xmlns="https://foo/"><Bar xmlns="https://foo/">b</Bar><Foo xmlns="https://foo/">a</Foo></SubStructure></OperationRequest>')
})
# XML list serialization: non-flattened lists wrap elements in a container
# (default member name "member", overridable via locationNameList); flattened
# lists repeat the element name directly, optionally renamed via locationName.
test_that("NonFlattened List Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Scalar(type = "string")
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      "one",
      "two",
      "three"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><ListParam xmlns="https://foo/"><member xmlns="https://foo/">one</member><member xmlns="https://foo/">two</member><member xmlns="https://foo/">three</member></ListParam></OperationRequest>')
})
test_that("NonFlattened List With LocationName Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Scalar(type = "string"),
        .tags = list(
          locationName = "AlternateName",
          locationNameList = "NotMember"
        )
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      "one",
      "two",
      "three"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><AlternateName xmlns="https://foo/"><NotMember xmlns="https://foo/">one</NotMember><NotMember xmlns="https://foo/">two</NotMember><NotMember xmlns="https://foo/">three</NotMember></AlternateName></OperationRequest>')
})
test_that("Flattened List Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Scalar(type = "string"),
        .tags = list(flattened = "true")
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      "one",
      "two",
      "three"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><ListParam xmlns="https://foo/">one</ListParam><ListParam xmlns="https://foo/">two</ListParam><ListParam xmlns="https://foo/">three</ListParam></OperationRequest>')
})
test_that("Flattened List with LocationName Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Scalar(type = "string"),
        .tags = list(flattened = "true", locationName = "item")
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      "one",
      "two",
      "three"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><item xmlns="https://foo/">one</item><item xmlns="https://foo/">two</item><item xmlns="https://foo/">three</item></OperationRequest>')
})
# Flattened lists of structures nest the member fields inside each repeated
# element; blob fields are base64-encoded ("foo" -> "Zm9v").
test_that("List of Structures Case1", {
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Structure(
          Element = Scalar(
            type = "string",
            .tags = list(locationName = "value")
          )
        ),
        .tags = list(flattened = "true", locationName = "item")
      ),
      .tags = list(
        locationName = "OperationRequest",
        xmlURI = "https://foo/"
      )
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      list(Element = "one"),
      list(Element = "two"),
      list(Element = "three")
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><item xmlns="https://foo/"><value xmlns="https://foo/">one</value></item><item xmlns="https://foo/"><value xmlns="https://foo/">two</value></item><item xmlns="https://foo/"><value xmlns="https://foo/">three</value></item></OperationRequest>')
})
test_that("Blob Case1", {
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(StructureParam) {
    args <- list(StructureParam = StructureParam)
    interface <- Structure(
      StructureParam = Structure(
        B = Scalar(type = "blob", .tags = list(locationName = "b"))
      ),
      .tags = list(
        locationName = "OperationRequest",
        xmlURI = "https://foo/"
      )
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    StructureParam = list(B = list(charToRaw("foo")))
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><StructureParam xmlns="https://foo/"><b xmlns="https://foo/">Zm9v</b></StructureParam></OperationRequest>')
})
# Edge cases: unset parameters are omitted (not serialized as empty tags),
# newlines in values survive as literal newlines, and absent nested branches
# are dropped from the output entirely.
test_that("skip empty argument", {
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(Description, Name = NULL) {
    args <- list(Description = Description, Name = Name)
    interface <- Structure(
      Description = Scalar(type = "string"),
      Name = Scalar(type = "string"),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    Description = "bar"
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">bar</Description></OperationRequest>')
})
test_that("newline in XML", {
  # The expected string below intentionally contains a literal newline.
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(Description) {
    args <- list(Description = Description)
    interface <- Structure(
      Description = Scalar(type = "string"),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    Description = "foo\nbar"
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">foo
bar</Description></OperationRequest>')
})
test_that("parameters with no provided arguments are dropped", {
  # Only the Nested$Foo branch is supplied; Nested$Baz must not appear.
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(Nested) {
    args <- list(Nested = Nested)
    interface <- Structure(
      Nested = Structure(
        Foo = Structure(
          Bar = Scalar(type = "string")
        ),
        Baz = List(
          Structure(Qux = Scalar(type = "string"))
        )
      ),
      .tags = list(locationName = "OperationRequest")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    Nested = list(
      Foo = list(
        Bar = "abc123"
      )
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, "<OperationRequest><Nested><Foo><Bar>abc123</Bar></Foo></Nested></OperationRequest>")
})
#-------------------------------------------------------------------------------
# Unmarshal tests
# Shared fixtures: a bare operation and a client whose handler chains use the
# REST-XML protocol unmarshallers under test.
op <- Operation(name = "OperationName")
svc <- Client()
svc$handlers$unmarshal_meta <- HandlerList(restxml_unmarshal_meta)
svc$handlers$unmarshal <- HandlerList(restxml_unmarshal)
svc$handlers$unmarshal_error <- HandlerList(restxml_unmarshal_error)
# Output shape covering each scalar type; Num demonstrates a locationName
# override (serialized element <FooNum> decodes into field Num).
op_output1 <- Structure(
  Char = Scalar(type = "character"),
  Double = Scalar(type = "double"),
  FalseBool = Scalar(type = "boolean"),
  Float = Scalar(type = "float"),
  Long = Scalar(type = "long"),
  Num = Scalar(type = "integer", .tags = list(locationName = "FooNum")),
  Str = Scalar(type = "string"),
  TrueBool = Scalar(type = "boolean")
)
test_that("unmarshal scalar members", {
  req <- new_request(svc, op, NULL, op_output1)
  # Fake a successful HTTP response in place; no network involved.
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Str>myname</Str><FooNum>123</FooNum><FalseBool>false</FalseBool><TrueBool>true</TrueBool><Float>1.2</Float><Double>1.3</Double><Long>200</Long><Char>a</Char><RequestId>request-id</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  # Each XML text node should decode to the R type declared in op_output1.
  expect_equal(out$Char, "a")
  expect_equal(out$Double, 1.3)
  expect_equal(out$FalseBool, FALSE)
  expect_equal(out$Float, 1.2)
  expect_equal(out$Long, 200L)
  expect_equal(out$Num, 123L)
  expect_equal(out$Str, "myname")
  expect_equal(out$TrueBool, TRUE)
})
# A blob member arrives base64-encoded and must be decoded to raw bytes.
op_output2 <- Structure(
  Blob = Scalar(type = "blob")
)
test_that("unmarshal blob", {
  req <- new_request(svc, op, NULL, op_output2)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Blob>dmFsdWU=</Blob><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  # "dmFsdWU=" is base64 for "value".
  expect_equal(rawToChar(out$Blob), "value")
})
# Non-flattened list whose entries use the default <member> wrapper tag.
op_output3 <- Structure(
  ListMember = List(Scalar(type = "string"))
)
test_that("unmarshal list", {
  req <- new_request(svc, op, NULL, op_output3)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><ListMember><member>abc</member><member>123</member></ListMember><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  # Entries decode to a character vector in document order.
  expect_equal(out$ListMember[1], "abc")
  expect_equal(out$ListMember[2], "123")
})
# Non-flattened list whose entries use <item> wrapper tags instead of the
# default <member>.
op_output4 <- Structure(
  ListMember = List(Scalar(type = "string"))
)
# Renamed from the duplicate description "unmarshal list" (already used by the
# <member>-tag test above) so that failure reports identify the right test.
test_that("unmarshal list with item tags", {
  req <- new_request(svc, op, NULL, op_output4)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><ListMember><item>abc</item><item>123</item></ListMember><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  # Either wrapper tag spelling should decode to the same character vector.
  expect_equal(out$ListMember[1], "abc")
  expect_equal(out$ListMember[2], "123")
})
# Flattened list: entries repeat the <ListMember> tag directly, with no
# wrapper element around the collection.
op_output5 <- Structure(
  ListMember = List(Scalar(type = "string"), .tags = list(flattened = TRUE))
)
test_that("unmarshal flattened list", {
  req <- new_request(svc, op, NULL, op_output5)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><ListMember>abc</ListMember><ListMember>123</ListMember><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  expect_equal(out$ListMember[1], "abc")
  expect_equal(out$ListMember[2], "123")
})
# Map of structures: each <entry> holds a <key>/<value> pair, and the value
# structure's Foo field is serialized under the lower-case tag <foo>.
op_output6 <- Structure(
  Map = Map(Structure(Foo = Scalar(.tags = list(locationName = "foo"))))
)
test_that("unmarshal map", {
  req <- new_request(svc, op, NULL, op_output6)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Map><entry><key>qux</key><value><foo>bar</foo></value></entry><entry><key>baz</key><value><foo>bam</foo></value></entry></Map><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  # Keys become names of the decoded list, independent of document order.
  expect_equal(out$Map$baz$Foo, "bam")
  expect_equal(out$Map$qux$Foo, "bar")
})
# Flattened map: each <Map> element carries one key/value pair directly,
# with no <entry> wrapper.
op_output7 <- Structure(
  Map = Map(Scalar(), .tags = list(flattened = TRUE))
)
test_that("unmarshal flattened map", {
  req <- new_request(svc, op, NULL, op_output7)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Map><key>qux</key><value>bar</value></Map><Map><key>baz</key><value>bam</value></Map><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  expect_equal(out$Map$baz, "bam")
  expect_equal(out$Map$qux, "bar")
})
# Flattened map with renamed key/value tags (<foo>/<bar> replace the default
# <key>/<value>).
op_output8 <- Structure(
  Map = Map(Scalar(), .tags = list(locationNameKey = "foo", locationNameValue = "bar", flattened = TRUE))
)
test_that("unmarshal flattened named map", {
  req <- new_request(svc, op, NULL, op_output8)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Map><foo>qux</foo><bar>bar</bar></Map><Map><foo>baz</foo><bar>bam</bar></Map><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  expect_equal(out$Map$baz, "bam")
  expect_equal(out$Map$qux, "bar")
})
op_output9 <- Structure(
  Foo = Scalar(type = "string")
)
# A self-closing <Foo/> should decode to "" rather than NULL/NA.
# NOTE(review): unconditionally skipped — presumably a known unimplemented
# behavior; confirm against the unmarshaller before enabling.
test_that("unmarshal empty string", {
  skip("skip")
  req <- new_request(svc, op, NULL, op_output9)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Foo/><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  expect_equal(out$Foo, "")
})
# Enum-tagged members decode as plain strings; the enum tag does not restrict
# the decoded value here.
op_output10 <- Structure(
  FooEnum = Scalar(type = "string", .tags = list(enum = "OutputService10TestShapeEC2EnumType")),
  ListEnums = List(Scalar(type = "string"))
)
test_that("unmarshal enum", {
  req <- new_request(svc, op, NULL, op_output10)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><FooEnum>foo</FooEnum><ListEnums><member>foo</member><member>bar</member></ListEnums></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  expect_equal(out$FooEnum, "foo")
  expect_equal(out$ListEnums[1], "foo")
  expect_equal(out$ListEnums[2], "bar")
})
test_that("unmarshal timestamp", {
  op_output11 <- Structure(
    Timestamp = Scalar(type = "timestamp")
  )
  req <- new_request(svc, op, NULL, op_output11)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Timestamp>1970-01-01T00:00:00.000Z</Timestamp></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  # The ISO-8601 epoch string decodes to the Unix epoch.
  expect_equal(out$Timestamp, unix_time(0))
})
test_that("unmarshal timestamp in header", {
  # location = "header" pulls the value from HTTP headers, not the XML body.
  op_output <- Structure(
    Timestamp = Scalar(type = "timestamp", .tags = list(location = "header"))
  )
  req <- new_request(svc, op, NULL, op_output)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse></OperationNameResponse>")
  )
  # HTTP-date (RFC 1123) format, as used in response headers.
  req$http_response$header[["Timestamp"]] <- "Wed, 02 Oct 2002 13:00:00 GMT"
  req <- unmarshal_meta(req)
  req <- unmarshal(req)
  out <- req$data
  expected <- as.POSIXct("2002-10-02 13:00:00 GMT", tz = "GMT")
  # Compare at whole-second precision to sidestep attribute/precision noise.
  expect_equal(as.integer(out$Timestamp), as.integer(expected))
})
# Output shape mixing one body member and one header member.
op_output11 <- Structure(
  Body = Scalar(type = "string"),
  Header = Scalar(type = "string", .tags = list(location = "header"))
)
test_that("unmarshal elements in header and body", {
  req <- new_request(svc, op, NULL, op_output11)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Body>foo</Body><RequestId>request-id</RequestId></OperationNameResponse>")
  )
  req$http_response$header[["Header"]] <- "bar"
  req <- unmarshal_meta(req)
  req <- unmarshal(req)
  out <- req$data
  # expect_equivalent ignores attributes. NOTE(review): deprecated in
  # testthat 3e in favour of expect_equal(..., ignore_attr = TRUE).
  expect_equivalent(out$Body, "foo")
  expect_equivalent(out$Header, "bar")
})
# Result members nested under an OperationNameResult wrapper at the XML root.
op_output12 <- Structure(
  OperationNameResult = list(
    Body = Scalar(type = "string"),
    Header = Scalar(type = "string")
  )
)
test_that("unmarshal result elements at root of xml", {
  req <- new_request(svc, op, NULL, op_output12)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResult><Body>foo</Body><Header>bar</Header></OperationNameResult>")
  )
  req <- unmarshal_meta(req)
  req <- unmarshal(req)
  out <- req$data
  expect_equivalent(out$OperationNameResult$Body, "foo")
  expect_equivalent(out$OperationNameResult$Header, "bar")
})
op_output13 <- Structure(
  Timestamp = Scalar(type = "timestamp")
)
test_that("unmarshal error", {
  req <- new_request(svc, op, NULL, op_output13)
  # A 4xx status routes the response through the error unmarshaller.
  req$http_response <- HttpResponse(
    status_code = 400,
    body = charToRaw("<Response><Error><Code>Foo</Code><Message>Bar</Message><RequestID>Baz</RequestID></Error></Response>")
  )
  req <- unmarshal_error(req)
  err <- req$error
  # Code/Message/status are lifted onto the error object; the parsed <Error>
  # payload is kept under error_response.
  expect_equal(err$message, "Bar")
  expect_equal(err$code, "Foo")
  expect_equal(err$status_code, 400)
  expect_equal(err$error_response$RequestID, "Baz")
})
| /paws.common/tests/testthat/test_handlers_restxml.R | permissive | paws-r/paws | R | false | false | 25,897 | r | #-------------------------------------------------------------------------------
# Build tests
# Client fixture with a fixed endpoint; the build handler chain uses the
# REST-XML marshaller under test.
svc <- Client(
  client_info = ClientInfo(
    endpoint = "https://test"
  )
)
svc$handlers$build <- HandlerList(restxml_build)
# An operation with no input should build a URL of just endpoint + path.
test_that("no parameters", {
  op1 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobs"
  )
  built <- build(new_request(svc, op1, NULL, NULL))
  expect_equal(
    build_url(built$http_request$url),
    "https://test/2014-01-01/jobs"
  )
})
# A uri-located member with no locationName binds to the {PipelineId}
# placeholder through its own member name.
test_that("URI parameter with no location name", {
  op2 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input2 <- function(PipelineId) {
    args <- list(PipelineId = PipelineId)
    interface <- Structure(
      PipelineId = Scalar(type = "string", .tags = list(location = "uri"))
    )
    return(populate(args, interface))
  }
  input <- op_input2(
    PipelineId = "foo"
  )
  req <- new_request(svc, op2, input, NULL)
  req <- build(req)
  r <- req$http_request
  # The path placeholder is substituted with the argument value.
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/foo")
})
# With a locationName, member Foo binds to the {PipelineId} placeholder even
# though the member name differs.
test_that("URI parameter with location name", {
  op3 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input3 <- function(Foo) {
    args <- list(Foo = Foo)
    interface <- Structure(
      Foo = Scalar(type = "string", .tags = list(location = "uri", locationName = "PipelineId"))
    )
    return(populate(args, interface))
  }
  input <- op_input3(
    Foo = "bar"
  )
  req <- new_request(svc, op3, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/bar")
})
# A querystring-located list serializes as one repeated query parameter per
# element.
test_that("query string list of strings", {
  op4 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/path"
  )
  op_input4 <- function(Items) {
    args <- list(Items = Items)
    interface <- Structure(
      Items = List(Scalar(type = "string"), .tags = list(location = "querystring", locationName = "item"))
    )
    return(populate(args, interface))
  }
  input <- op_input4(
    Items = list("value1", "value2")
  )
  req <- new_request(svc, op4, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/path?item=value1&item=value2")
})
# A querystring-located map contributes one query parameter per key.
test_that("query string map of strings", {
  op5 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input5 <- function(PipelineId, QueryDoc) {
    args <- list(PipelineId = PipelineId, QueryDoc = QueryDoc)
    interface <- Structure(
      PipelineId = Scalar(type = "string", .tags = list(location = "uri")),
      QueryDoc = Map(Scalar(type = "string"), .tags = list(location = "querystring"))
    )
    return(populate(args, interface))
  }
  input <- op_input5(
    PipelineId = "foo",
    QueryDoc = list(
      bar = "baz",
      fizz = "buzz"
    )
  )
  req <- new_request(svc, op5, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz")
})
# A map of lists expands each key once per list element.
test_that("query string map of lists of strings", {
  op6 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input6 <- function(PipelineId, QueryDoc) {
    args <- list(PipelineId = PipelineId, QueryDoc = QueryDoc)
    interface <- Structure(
      PipelineId = Scalar(type = "string", .tags = list(location = "uri")),
      QueryDoc = Map(List(Scalar(type = "string")), .tags = list(location = "querystring"))
    )
    return(populate(args, interface))
  }
  input <- op_input6(
    PipelineId = "id",
    QueryDoc = list(
      fizz = c("buzz", "pop"),
      foo = c("bar", "baz")
    )
  )
  req <- new_request(svc, op6, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/id?fizz=buzz&fizz=pop&foo=bar&foo=baz")
})
# Logical query parameters serialize as lower-case "true"/"false".
test_that("query string with bool (true)", {
  op7 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/path"
  )
  op_input7 <- function(BoolQuery) {
    args <- list(BoolQuery = BoolQuery)
    interface <- Structure(
      BoolQuery = Scalar(type = "boolean", .tags = list(location = "querystring", locationName = "bool-query"))
    )
    return(populate(args, interface))
  }
  input <- op_input7(
    BoolQuery = TRUE
  )
  req <- new_request(svc, op7, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/path?bool-query=true")
})
# FALSE is a provided value, not an absent one, so it must still be emitted.
test_that("query string with bool (false)", {
  op8 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/path"
  )
  op_input8 <- function(BoolQuery) {
    args <- list(BoolQuery = BoolQuery)
    interface <- Structure(
      BoolQuery = Scalar(type = "boolean", .tags = list(location = "querystring", locationName = "bool-query"))
    )
    return(populate(args, interface))
  }
  input <- op_input8(
    BoolQuery = FALSE
  )
  req <- new_request(svc, op8, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/path?bool-query=false")
})
# URI placeholder substitution and querystring members combine in one URL.
test_that("URI and query string parameters", {
  op9 <- Operation(
    name = "OperationName",
    http_method = "GET",
    http_path = "/2014-01-01/jobsByPipeline/{PipelineId}"
  )
  op_input9 <- function(Ascending, PageToken, PipelineId) {
    args <- list(Ascending = Ascending, PageToken = PageToken, PipelineId = PipelineId)
    interface <- Structure(
      Ascending = Scalar(type = "string", .tags = list(location = "querystring", locationName = "Ascending")),
      PageToken = Scalar(type = "string", .tags = list(location = "querystring", locationName = "PageToken")),
      PipelineId = Scalar(type = "string", .tags = list(location = "uri", locationName = "PipelineId"))
    )
    return(populate(args, interface))
  }
  input <- op_input9(
    Ascending = "true",
    PageToken = "bar",
    PipelineId = "foo"
  )
  req <- new_request(svc, op9, input, NULL)
  req <- build(req)
  r <- req$http_request
  expect_equal(build_url(r$url), "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar")
})
#-------------------------------------------------------------------------------
# XML tests
# Two string members marshal as namespaced child elements of the request root.
test_that("Basic XML Case1", {
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(Description, Name) {
    args <- list(Description = Description, Name = Name)
    interface <- Structure(
      Description = Scalar(type = "string"),
      Name = Scalar(type = "string"),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    Description = "bar",
    Name = "foo"
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">bar</Description><Name xmlns="https://foo/">foo</Name></OperationRequest>')
})
# Booleans serialize as "true"/"false"; numbers as plain text nodes.
test_that("Other Scalar Case1", {
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(First, Second, Third, Fourth) {
    args <- list(First = First, Second = Second, Third = Third, Fourth = Fourth)
    interface <- Structure(
      First = Scalar(type = "boolean"),
      Fourth = Scalar(type = "integer"),
      Second = Scalar(type = "boolean"),
      Third = Scalar(type = "float"),
      .tags = list(
        locationName = "OperationRequest",
        xmlURI = "https://foo/"
      )
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    First = TRUE,
    Fourth = 3,
    Second = FALSE,
    Third = 1.2
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><First xmlns="https://foo/">true</First><Fourth xmlns="https://foo/">3</Fourth><Second xmlns="https://foo/">false</Second><Third xmlns="https://foo/">1.2</Third></OperationRequest>')
})
# A nested structure marshals as nested elements, siblings in declared order.
test_that("Nested Structure Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(Description, SubStructure) {
    args <- list(Description = Description, SubStructure = SubStructure)
    interface <- Structure(
      Description = Scalar(type = "string"),
      SubStructure = Structure(
        Bar = Scalar(type = "string"),
        Foo = Scalar(type = "string")
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    Description = "baz",
    SubStructure = list(
      Bar = "b",
      Foo = "a"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">baz</Description><SubStructure xmlns="https://foo/"><Bar xmlns="https://foo/">b</Bar><Foo xmlns="https://foo/">a</Foo></SubStructure></OperationRequest>')
})
# Non-flattened lists wrap each element in a default <member> tag.
test_that("NonFlattened List Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Scalar(type = "string")
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      "one",
      "two",
      "three"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><ListParam xmlns="https://foo/"><member xmlns="https://foo/">one</member><member xmlns="https://foo/">two</member><member xmlns="https://foo/">three</member></ListParam></OperationRequest>')
})
# locationName renames the list wrapper; locationNameList renames each entry.
test_that("NonFlattened List With LocationName Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Scalar(type = "string"),
        .tags = list(
          locationName = "AlternateName",
          locationNameList = "NotMember"
        )
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      "one",
      "two",
      "three"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><AlternateName xmlns="https://foo/"><NotMember xmlns="https://foo/">one</NotMember><NotMember xmlns="https://foo/">two</NotMember><NotMember xmlns="https://foo/">three</NotMember></AlternateName></OperationRequest>')
})
# Flattened lists repeat the member tag with no collection wrapper.
# NOTE(review): flattened is given as the string "true" here but logical TRUE
# in the unmarshal shapes below — both forms appear to be accepted.
test_that("Flattened List Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Scalar(type = "string"),
        .tags = list(flattened = "true")
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      "one",
      "two",
      "three"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><ListParam xmlns="https://foo/">one</ListParam><ListParam xmlns="https://foo/">two</ListParam><ListParam xmlns="https://foo/">three</ListParam></OperationRequest>')
})
# A flattened list with locationName repeats that name for every element.
test_that("Flattened List with LocationName Case1", {
  op_test <- Operation(name = "OperationRequest")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Scalar(type = "string"),
        .tags = list(flattened = "true", locationName = "item")
      ),
      .tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      "one",
      "two",
      "three"
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><item xmlns="https://foo/">one</item><item xmlns="https://foo/">two</item><item xmlns="https://foo/">three</item></OperationRequest>')
})
# Flattened list of structures: each <item> holds the structure's members,
# with Element renamed to <value> by its locationName.
test_that("List of Structures Case1", {
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(ListParam) {
    args <- list(ListParam = ListParam)
    interface <- Structure(
      ListParam = List(
        Structure(
          Element = Scalar(
            type = "string",
            .tags = list(locationName = "value")
          )
        ),
        .tags = list(flattened = "true", locationName = "item")
      ),
      .tags = list(
        locationName = "OperationRequest",
        xmlURI = "https://foo/"
      )
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    ListParam = list(
      list(Element = "one"),
      list(Element = "two"),
      list(Element = "three")
    )
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><item xmlns="https://foo/"><value xmlns="https://foo/">one</value></item><item xmlns="https://foo/"><value xmlns="https://foo/">two</value></item><item xmlns="https://foo/"><value xmlns="https://foo/">three</value></item></OperationRequest>')
})
# Blob members are base64-encoded on marshal ("foo" -> "Zm9v").
test_that("Blob Case1", {
  op_test <- Operation(name = "OperationName")
  op_input_test <- function(StructureParam) {
    args <- list(StructureParam = StructureParam)
    interface <- Structure(
      StructureParam = Structure(
        B = Scalar(type = "blob", .tags = list(locationName = "b"))
      ),
      .tags = list(
        locationName = "OperationRequest",
        xmlURI = "https://foo/"
      )
    )
    return(populate(args, interface))
  }
  input <- op_input_test(
    StructureParam = list(B = list(charToRaw("foo")))
  )
  req <- new_request(svc, op_test, input, NULL)
  req <- build(req)
  r <- req$body
  expect_equal(r, '<OperationRequest xmlns="https://foo/"><StructureParam xmlns="https://foo/"><b xmlns="https://foo/">Zm9v</b></StructureParam></OperationRequest>')
})
test_that("skip empty argument", {
op_test <- Operation(name = "OperationName")
op_input_test <- function(Description, Name = NULL) {
args <- list(Description = Description, Name = Name)
interface <- Structure(
Description = Scalar(type = "string"),
Name = Scalar(type = "string"),
.tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
)
return(populate(args, interface))
}
input <- op_input_test(
Description = "bar"
)
req <- new_request(svc, op_test, input, NULL)
req <- build(req)
r <- req$body
expect_equal(r, '<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">bar</Description></OperationRequest>')
})
test_that("newline in XML", {
op_test <- Operation(name = "OperationName")
op_input_test <- function(Description) {
args <- list(Description = Description)
interface <- Structure(
Description = Scalar(type = "string"),
.tags = list(locationName = "OperationRequest", xmlURI = "https://foo/")
)
return(populate(args, interface))
}
input <- op_input_test(
Description = "foo\nbar"
)
req <- new_request(svc, op_test, input, NULL)
req <- build(req)
r <- req$body
expect_equal(r, '<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">foo
bar</Description></OperationRequest>')
})
test_that("parameters with no provided arguments are dropped", {
op_test <- Operation(name = "OperationRequest")
op_input_test <- function(Nested) {
args <- list(Nested = Nested)
interface <- Structure(
Nested = Structure(
Foo = Structure(
Bar = Scalar(type = "string")
),
Baz = List(
Structure(Qux = Scalar(type = "string"))
)
),
.tags = list(locationName = "OperationRequest")
)
return(populate(args, interface))
}
input <- op_input_test(
Nested = list(
Foo = list(
Bar = "abc123"
)
)
)
req <- new_request(svc, op_test, input, NULL)
req <- build(req)
r <- req$body
expect_equal(r, "<OperationRequest><Nested><Foo><Bar>abc123</Bar></Foo></Nested></OperationRequest>")
})
#-------------------------------------------------------------------------------
# Unmarshal tests
op <- Operation(name = "OperationName")
svc <- Client()
svc$handlers$unmarshal_meta <- HandlerList(restxml_unmarshal_meta)
svc$handlers$unmarshal <- HandlerList(restxml_unmarshal)
svc$handlers$unmarshal_error <- HandlerList(restxml_unmarshal_error)
op_output1 <- Structure(
Char = Scalar(type = "character"),
Double = Scalar(type = "double"),
FalseBool = Scalar(type = "boolean"),
Float = Scalar(type = "float"),
Long = Scalar(type = "long"),
Num = Scalar(type = "integer", .tags = list(locationName = "FooNum")),
Str = Scalar(type = "string"),
TrueBool = Scalar(type = "boolean")
)
test_that("unmarshal scalar members", {
req <- new_request(svc, op, NULL, op_output1)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><Str>myname</Str><FooNum>123</FooNum><FalseBool>false</FalseBool><TrueBool>true</TrueBool><Float>1.2</Float><Double>1.3</Double><Long>200</Long><Char>a</Char><RequestId>request-id</RequestId></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(out$Char, "a")
expect_equal(out$Double, 1.3)
expect_equal(out$FalseBool, FALSE)
expect_equal(out$Float, 1.2)
expect_equal(out$Long, 200L)
expect_equal(out$Num, 123L)
expect_equal(out$Str, "myname")
expect_equal(out$TrueBool, TRUE)
})
op_output2 <- Structure(
Blob = Scalar(type = "blob")
)
test_that("unmarshal blob", {
req <- new_request(svc, op, NULL, op_output2)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><Blob>dmFsdWU=</Blob><RequestId>requestid</RequestId></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(rawToChar(out$Blob), "value")
})
op_output3 <- Structure(
ListMember = List(Scalar(type = "string"))
)
test_that("unmarshal list", {
req <- new_request(svc, op, NULL, op_output3)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><ListMember><member>abc</member><member>123</member></ListMember><RequestId>requestid</RequestId></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(out$ListMember[1], "abc")
expect_equal(out$ListMember[2], "123")
})
# Non-flattened list whose entries use <item> wrapper tags instead of the
# default <member>.
op_output4 <- Structure(
  ListMember = List(Scalar(type = "string"))
)
# Renamed from the duplicate description "unmarshal list" (already used by the
# <member>-tag test above) so that failure reports identify the right test.
test_that("unmarshal list with item tags", {
  req <- new_request(svc, op, NULL, op_output4)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><ListMember><item>abc</item><item>123</item></ListMember><RequestId>requestid</RequestId></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  # Either wrapper tag spelling should decode to the same character vector.
  expect_equal(out$ListMember[1], "abc")
  expect_equal(out$ListMember[2], "123")
})
op_output5 <- Structure(
ListMember = List(Scalar(type = "string"), .tags = list(flattened = TRUE))
)
test_that("unmarshal flattened list", {
req <- new_request(svc, op, NULL, op_output5)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><ListMember>abc</ListMember><ListMember>123</ListMember><RequestId>requestid</RequestId></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(out$ListMember[1], "abc")
expect_equal(out$ListMember[2], "123")
})
op_output6 <- Structure(
Map = Map(Structure(Foo = Scalar(.tags = list(locationName = "foo"))))
)
test_that("unmarshal map", {
req <- new_request(svc, op, NULL, op_output6)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><Map><entry><key>qux</key><value><foo>bar</foo></value></entry><entry><key>baz</key><value><foo>bam</foo></value></entry></Map><RequestId>requestid</RequestId></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(out$Map$baz$Foo, "bam")
expect_equal(out$Map$qux$Foo, "bar")
})
op_output7 <- Structure(
Map = Map(Scalar(), .tags = list(flattened = TRUE))
)
test_that("unmarshal flattened map", {
req <- new_request(svc, op, NULL, op_output7)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><Map><key>qux</key><value>bar</value></Map><Map><key>baz</key><value>bam</value></Map><RequestId>requestid</RequestId></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(out$Map$baz, "bam")
expect_equal(out$Map$qux, "bar")
})
op_output8 <- Structure(
Map = Map(Scalar(), .tags = list(locationNameKey = "foo", locationNameValue = "bar", flattened = TRUE))
)
test_that("unmarshal flattened named map", {
req <- new_request(svc, op, NULL, op_output8)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><Map><foo>qux</foo><bar>bar</bar></Map><Map><foo>baz</foo><bar>bam</bar></Map><RequestId>requestid</RequestId></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(out$Map$baz, "bam")
expect_equal(out$Map$qux, "bar")
})
op_output9 <- Structure(
Foo = Scalar(type = "string")
)
test_that("unmarshal empty string", {
skip("skip")
req <- new_request(svc, op, NULL, op_output9)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><Foo/><RequestId>requestid</RequestId></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(out$Foo, "")
})
op_output10 <- Structure(
FooEnum = Scalar(type = "string", .tags = list(enum = "OutputService10TestShapeEC2EnumType")),
ListEnums = List(Scalar(type = "string"))
)
test_that("unmarshal enum", {
req <- new_request(svc, op, NULL, op_output10)
req$http_response <- HttpResponse(
status_code = 200,
body = charToRaw("<OperationNameResponse><FooEnum>foo</FooEnum><ListEnums><member>foo</member><member>bar</member></ListEnums></OperationNameResponse>")
)
req <- unmarshal(req)
out <- req$data
expect_equal(out$FooEnum, "foo")
expect_equal(out$ListEnums[1], "foo")
expect_equal(out$ListEnums[2], "bar")
})
# Round-trip tests for XML unmarshalling of operation responses.
# `svc`, `op`, `Structure`, `Scalar`, `new_request`, `HttpResponse`,
# `unmarshal`, `unmarshal_meta`, `unmarshal_error` and `unix_time` are defined
# elsewhere in this test file / the package under test (not visible here).
test_that("unmarshal timestamp", {
  # Output shape: a single top-level timestamp field in the XML body.
  op_output11 <- Structure(
    Timestamp = Scalar(type = "timestamp")
  )
  req <- new_request(svc, op, NULL, op_output11)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Timestamp>1970-01-01T00:00:00.000Z</Timestamp></OperationNameResponse>")
  )
  req <- unmarshal(req)
  out <- req$data
  # The ISO-8601 epoch string should decode to POSIX time 0.
  expect_equal(out$Timestamp, unix_time(0))
})
test_that("unmarshal timestamp in header", {
  # Timestamp located in an HTTP header rather than the XML body.
  op_output <- Structure(
    Timestamp = Scalar(type = "timestamp", .tags = list(location = "header"))
  )
  req <- new_request(svc, op, NULL, op_output)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse></OperationNameResponse>")
  )
  # Header timestamps arrive in RFC 1123 date format.
  req$http_response$header[["Timestamp"]] <- "Wed, 02 Oct 2002 13:00:00 GMT"
  req <- unmarshal_meta(req)
  req <- unmarshal(req)
  out <- req$data
  expected <- as.POSIXct("2002-10-02 13:00:00 GMT", tz = "GMT")
  # Compare as integers to sidestep sub-second/timezone representation noise.
  expect_equal(as.integer(out$Timestamp), as.integer(expected))
})
# Shared output shape: one string field in the body, one in a header.
op_output11 <- Structure(
  Body = Scalar(type = "string"),
  Header = Scalar(type = "string", .tags = list(location = "header"))
)
test_that("unmarshal elements in header and body", {
  req <- new_request(svc, op, NULL, op_output11)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResponse><Body>foo</Body><RequestId>request-id</RequestId></OperationNameResponse>")
  )
  req$http_response$header[["Header"]] <- "bar"
  req <- unmarshal_meta(req)
  req <- unmarshal(req)
  out <- req$data
  expect_equivalent(out$Body, "foo")
  expect_equivalent(out$Header, "bar")
})
# Output shape where the payload is nested under OperationNameResult.
op_output12 <- Structure(
  OperationNameResult = list(
    Body = Scalar(type = "string"),
    Header = Scalar(type = "string")
  )
)
test_that("unmarshal result elements at root of xml", {
  req <- new_request(svc, op, NULL, op_output12)
  req$http_response <- HttpResponse(
    status_code = 200,
    body = charToRaw("<OperationNameResult><Body>foo</Body><Header>bar</Header></OperationNameResult>")
  )
  req <- unmarshal_meta(req)
  req <- unmarshal(req)
  out <- req$data
  expect_equivalent(out$OperationNameResult$Body, "foo")
  expect_equivalent(out$OperationNameResult$Header, "bar")
})
op_output13 <- Structure(
  Timestamp = Scalar(type = "timestamp")
)
test_that("unmarshal error", {
  # A 4xx response should populate req$error (code/message/status/raw response)
  # instead of req$data.
  req <- new_request(svc, op, NULL, op_output13)
  req$http_response <- HttpResponse(
    status_code = 400,
    body = charToRaw("<Response><Error><Code>Foo</Code><Message>Bar</Message><RequestID>Baz</RequestID></Error></Response>")
  )
  req <- unmarshal_error(req)
  err <- req$error
  expect_equal(err$message, "Bar")
  expect_equal(err$code, "Foo")
  expect_equal(err$status_code, 400)
  expect_equal(err$error_response$RequestID, "Baz")
})
|
# Standard testthat entry point: runs every test under tests/testthat/ for
# the helloRpkg package (invoked by R CMD check).
library(testthat)
library(helloRpkg)
test_check("helloRpkg")
| /tests/testthat.R | no_license | maximilianmordig/helloRpkg | R | false | false | 62 | r | library(testthat)
library(helloRpkg)
test_check("helloRpkg")
|
# One-off data-pull script: downloads phenology (gcc) targets and a NOAA GEFS
# weather forecast, converts units, aggregates hourly values to daily, and
# writes dated CSVs under inputs_gcc/ and inputs_weather/.
# NOTE(review): paths are hard-coded to one user's home directory, so the
# script is not portable as written.
.libPaths(c("/home/kristinariemer/r_libs/", .libPaths()))
###########Pull input data###########
# gcc from ecoforecast.org
targets_gcc <- readr::read_csv("https://data.ecoforecast.org/targets/phenology/phenology-targets.csv.gz")
# Stamp the output file with the latest observation date (MM-DD-YY).
date <- format(max(targets_gcc$time), format = "%m-%d-%y")
write.csv(targets_gcc,
          paste0("inputs_gcc/targets_gcc_", date, ".csv"), row.names = FALSE)
# weather forecast from NOAA
# If the version of noaa.R has changed and breaks the script,
# Run git reset --hard 223672814042e652384a89b82d87b040c4763d78 to access the correct version
library(readr)
library(dplyr)
library(udunits2)
library(plantecophys)
source("/home/kristinariemer/neon4cast/R/noaa.R")
###########Download weather data###########
# NEON site codes to pull forecasts for.
pheno_sites <- c("BART", "CLBJ", "DELA", "GRSM",
                 "HARV", "MLBS", "SCBI", "SERC",
                 "STEI", "UKFS", "CPER", "DSNY",
                 "JORN", "KONZ", "OAES", "WOOD",
                 "ONAQ", "SRER")
# Pull yesterday's 1-hourly GEFS forecast for all sites and stack into one
# data frame (download_noaa/stack_noaa come from the sourced noaa.R).
download_noaa(siteID = pheno_sites, interval = "1hr", date = Sys.Date()-1)
noaa_fc <- stack_noaa()
###########Clean up weather data###########
# First, convert units
hourly <- noaa_fc %>%
  tidyr::drop_na() %>% # the 36th day has NAs, exclude
  rename(radiation = surface_downwelling_shortwave_flux_in_air) %>%
  mutate(airtemp_C = ud.convert(air_temperature, "kelvin", "celsius"),
         precip = ud.convert(precipitation_flux, "s^-1", "d^-1"), #kg per m2 is equivalent to mm
         vpd = RHtoVPD(RH = relative_humidity, TdegC = airtemp_C),
         rad_Mj_hr = ud.convert(radiation*60*60, "joule", "megajoule"),
         date = as.Date(time))
# Next, summarize to daily by site and ensemble and drop ens00
# (ens00 is presumably the control run -- TODO confirm against noaa.R).
daily_ens <- hourly %>%
  group_by(siteID, ensemble, date) %>%
  summarize(radiation = sum(rad_Mj_hr),
            max_temp = max(airtemp_C),
            min_temp = min(airtemp_C),
            precip = max(precip),
            vpd = mean(vpd)) %>%
  ungroup() %>%
  filter(ensemble != "ens00")
# Summarize to median hourly values across ensembles, then summarize to daily
hourly2 <- noaa_fc %>%
  tidyr::drop_na() %>% # the 36th day has NAs, exclude
  mutate(airtemp_C = ud.convert(air_temperature, "kelvin", "celsius"),
         precip = ud.convert(precipitation_flux, "s^-1", "d^-1"), #kg per m2 is equivalent to mm
         vpd = RHtoVPD(RH = relative_humidity, TdegC = airtemp_C)) %>%
  group_by(siteID, time) %>%
  summarize(radiation = median(surface_downwelling_shortwave_flux_in_air),
            airtemp_C = median(airtemp_C),
            precip = median(precip),
            vpd = median(vpd),
            rad_Mj_hr = ud.convert(radiation*60*60, "joule", "megajoule")) %>%
  ungroup() %>%
  mutate(date = as.Date(time))
daily <- hourly2 %>%
  group_by(siteID, date) %>%
  summarize(radiation = sum(rad_Mj_hr),
            max_temp = max(airtemp_C),
            min_temp = min(airtemp_C),
            precip = max(precip),
            vpd = mean(vpd)) %>%
  ungroup()
###########Save weather csv###########
if(!dir.exists('inputs_weather/median')) {
  dir.create('inputs_weather/median')
}
if(!dir.exists('inputs_weather/ensemble')) {
  dir.create('inputs_weather/ensemble')
}
# NOTE(review): `date` is reused here, overwriting the gcc date stamp above.
date <- min(daily$date)
ens <- unique(daily_ens$ensemble)
write_csv(daily, file = paste0('inputs_weather/median/NOAA_GEFS_35d_', date, '.csv'))
write_csv(daily_ens, file = paste0('inputs_weather/ensemble/NOAA_GEFS_35d_', date, '.csv'))
# for(e in ens){
#   sub <- filter(daily_ens, ensemble == e)
#   write_csv(sub, file = paste0('inputs_weather/ensemble/NOAA_GEFS_35d_',
#                                date, '_', e, '.csv'))
# }
| /ML/01_pull_gcc_weather.R | permissive | genophenoenvo/neon-efi-challenge | R | false | false | 3,624 | r | .libPaths(c("/home/kristinariemer/r_libs/", .libPaths()))
###########Pull input data###########
# gcc from ecoforecast.org
targets_gcc <- readr::read_csv("https://data.ecoforecast.org/targets/phenology/phenology-targets.csv.gz")
date <- format(max(targets_gcc$time), format = "%m-%d-%y")
write.csv(targets_gcc,
paste0("inputs_gcc/targets_gcc_", date, ".csv"), row.names = FALSE)
# weather forecast from NOAA
# If the version of noaa.R has changed and breaks the script,
# Run git reset --hard 223672814042e652384a89b82d87b040c4763d78 to access the correct version
library(readr)
library(dplyr)
library(udunits2)
library(plantecophys)
source("/home/kristinariemer/neon4cast/R/noaa.R")
###########Download weather data###########
pheno_sites <- c("BART", "CLBJ", "DELA", "GRSM",
"HARV", "MLBS", "SCBI", "SERC",
"STEI", "UKFS", "CPER", "DSNY",
"JORN", "KONZ", "OAES", "WOOD",
"ONAQ", "SRER")
download_noaa(siteID = pheno_sites, interval = "1hr", date = Sys.Date()-1)
noaa_fc <- stack_noaa()
###########Clean up weather data###########
# First, convert units
hourly <- noaa_fc %>%
tidyr::drop_na() %>% # the 36th day has NAs, exclude
rename(radiation = surface_downwelling_shortwave_flux_in_air) %>%
mutate(airtemp_C = ud.convert(air_temperature, "kelvin", "celsius"),
precip = ud.convert(precipitation_flux, "s^-1", "d^-1"), #kg per m2 is equivalent to mm
vpd = RHtoVPD(RH = relative_humidity, TdegC = airtemp_C),
rad_Mj_hr = ud.convert(radiation*60*60, "joule", "megajoule"),
date = as.Date(time))
# Next, summarize to daily by site and ensemble and drop ens00
daily_ens <- hourly %>%
group_by(siteID, ensemble, date) %>%
summarize(radiation = sum(rad_Mj_hr),
max_temp = max(airtemp_C),
min_temp = min(airtemp_C),
precip = max(precip),
vpd = mean(vpd)) %>%
ungroup() %>%
filter(ensemble != "ens00")
# Summarize to median hourly values across ensembles, then summarize to daily
hourly2 <- noaa_fc %>%
tidyr::drop_na() %>% # the 36th day has NAs, exclude
mutate(airtemp_C = ud.convert(air_temperature, "kelvin", "celsius"),
precip = ud.convert(precipitation_flux, "s^-1", "d^-1"), #kg per m2 is equivalent to mm
vpd = RHtoVPD(RH = relative_humidity, TdegC = airtemp_C)) %>%
group_by(siteID, time) %>%
summarize(radiation = median(surface_downwelling_shortwave_flux_in_air),
airtemp_C = median(airtemp_C),
precip = median(precip),
vpd = median(vpd),
rad_Mj_hr = ud.convert(radiation*60*60, "joule", "megajoule")) %>%
ungroup() %>%
mutate(date = as.Date(time))
daily <- hourly2 %>%
group_by(siteID, date) %>%
summarize(radiation = sum(rad_Mj_hr),
max_temp = max(airtemp_C),
min_temp = min(airtemp_C),
precip = max(precip),
vpd = mean(vpd)) %>%
ungroup()
###########Save weather csv###########
if(!dir.exists('inputs_weather/median')) {
dir.create('inputs_weather/median')
}
if(!dir.exists('inputs_weather/ensemble')) {
dir.create('inputs_weather/ensemble')
}
date <- min(daily$date)
ens <- unique(daily_ens$ensemble)
write_csv(daily, file = paste0('inputs_weather/median/NOAA_GEFS_35d_', date, '.csv'))
write_csv(daily_ens, file = paste0('inputs_weather/ensemble/NOAA_GEFS_35d_', date, '.csv'))
# for(e in ens){
# sub <- filter(daily_ens, ensemble == e)
# write_csv(sub, file = paste0('inputs_weather/ensemble/NOAA_GEFS_35d_',
# date, '_', e, '.csv'))
# }
|
##..............................................................................
## Project: Fisher-Boschloo test
## Purpose: Provide functions around Fisher-Boschloo test for the
## comparison of two groups with binary endpoint.
## Input: none
## Output: Functions:
## teststat_boschloo: Compute test statistic
## critval_boschloo: Find critical value
## power_boschloo: Compute exact probability
## of rejecting H_0 for an arbitrary rejection region
## samplesize_normal_appr: Compute sample size for
## normal approximation test
##                       samplesize_exact_boschloo: Compute exact sample size
## samplesize_exact_Fisher: Compute sample size for
## Fisher's exact test
## Same functions for non-inferiority:
## teststat_boschloo_NI: Compute test statistic
## critval_boschloo_NI: Find critical value
## samplesize_Wang: Compute sample size for a
## normal approximation test by Wang
##                       samplesize_exact_boschloo_NI: Compute exact sample size
## samplesize_exact_Fisher_NI: Compute sample size for
## Fisher's exact test
## p_value: Calculate p-value for NI (superiority is
## special case with gamma = 1)
## Date of creation: 2019-04-04
## Date of last update: 2020-02-18
## Author: Samuel Kilian
##..............................................................................
## Superiority #################################################################
# Default test problem (alternative = "greater"):
# H_0: p_E <= p_C
# H_1: p_E > p_C
teststat_boschloo <- function(df, n_C, n_E){
  # Take data frame df with variable x_C and x_E representing all possible
  # response pairs for group sizes n_C and n_E and add test statistic (conditional
  # fisher p-values for H_0: p_E <= p_C).
  #
  # Args:
  #   df:       data frame with columns x_C (values 0..n_C) and x_E
  #             (values 0..n_E) covering every possible pair of responder
  #             counts, e.g. built with expand.grid().
  #   n_C, n_E: group sizes of control and experimental group.
  # Returns: df grouped by s = x_C + x_E with an added column cond_p holding
  #   the conditional Fisher p-value P(X_C <= x_C | S = s) from the
  #   hypergeometric distribution. Small cond_p favours H_1 (few responders
  #   in the control group given the total s).
  if (
    n_C+1 != df %>% dplyr::pull(x_C) %>% unique() %>% length() |
    n_E+1 != df %>% dplyr::pull(x_E) %>% unique() %>% length()
  ) {
    stop("Values of x_C and x_E have to fit n_C and n_E.")
  }
  # Compute p-values of Fisher's exact test from hypergeometric distribution
  # for every s
  df %>%
    dplyr::mutate(s = x_C+x_E) %>%
    dplyr::group_by(s) %>%
    dplyr::do(
      .,
      dplyr::mutate(
        .,
        # s is constant within each group, so s[1] is the group's total.
        cond_p = stats::phyper(x_C, n_C, n_E, s[1])
      )
    ) %>%
    return()
}
critval_boschloo <- function(alpha, n_C, n_E, size_acc = 4){
  # Compute raised nominal level for Fisher-Boschloo test for true level alpha
  # and sample sizes n_C and n_E.
  # Accuracy of obtaining maximum size (dependent on p) can be defined by size_acc.
  # Output: Nominal level (critical value) and exact size.
  #
  # Strategy: order all conditional Fisher p-values, grow the rejection region
  # one result at a time until its exact size (maximized over the common
  # nuisance rate p) exceeds alpha, then shrink it back on grids of
  # increasing resolution (up to step 10^-size_acc) until the size constraint
  # holds. Throughout, bounds[s+1] is the rejection boundary c(s) for total
  # responder count s; -Inf marks "reject nothing for this s".
  # Total sample size
  n <- n_C+n_E
  # Possible values for the total number of responders s
  s.area <- 0:n
  # Initiate elements for loop
  # Create list of p.values (test statistic) for every s
  p.value.list <- list()
  for (s in s.area) {
    p.value.list[[s+1]] <- stats::phyper(max(s-n_E, 0):min(s, n_C), n_C, n_E, s)
  }
  # Ordered data frame of p-values mapped to every s
  data.frame(
    p.value = unlist(p.value.list),
    s = rep(s.area, c(1:min(n_C, n_E), rep(min(n_C, n_E)+1, max(n_C, n_E)-min(n_C, n_E)+1), n+1-(max(n_C, n_E)+1):n))
  ) %>%
    dplyr::arrange(p.value, s) ->
    ordered.p.values
  # Vector of p-values and vector of s
  p.values <- ordered.p.values$p.value
  s.vec <- ordered.p.values$s
  # Start with critical value = alpha and define the corresponding index
  start.index <- sum(p.values <= alpha)
  # Calculate boundaries c(s) of rejection region for every s for first critical
  # value = alpha
  # (max() over an empty vector warns and returns -Inf; that warning is
  # deliberately suppressed and -Inf used as the "empty region" sentinel.)
  ordered.p.values %>%
    dplyr::group_by(s) %>%
    dplyr::summarise(c = suppressWarnings(max(p.value[p.value <= alpha]))) %>%
    dplyr::arrange(s) %>%
    dplyr::pull(c) ->
    start.bounds
  # Determine rough approximation of critical value iteratively
  size <- 0
  i <- start.index
  bounds <- start.bounds
  while (size <= alpha) {
    # Iterate critical value to next step
    i <- i+1
    # Determine s where boundary changes in this step
    new.s <- s.vec[i]
    # Determine the new boundary for specific s
    new.c <- p.values[i]
    # Update boundaries of rejection region
    bounds[new.s+1] <- new.c
    # Calculate values to find approximate maximum of size
    order.help <- choose(n, s.area)*bounds
    # Determine p-values where size is approximately maximal
    max.ps <- s.area[order.help >= 0.9*max(order.help)]/n
    # Determine maximal size for the specific p-values
    size <- 0
    for (p in max.ps) {
      sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, p)) %>%
        max(c(size, .)) ->
        size
    }
  }
  # Exit function if size of smallest possible rejection region is too high
  if (i <= 1) {
    warning("The rejection region of the test is empty.")
    return(c(nom_alpha_mid = 0, size = 0))
  }
  # If two or more possible results have the same p-values, they have to fall
  # in the same region. The rejection region is shrinked until this condition
  # is fulfilled.
  while(p.values[i-1] == p.values[i] & i > 1){
    bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
    i <- i-1
  }
  # Exit function if size of smallest possible rejection region is too high
  if (i <= 1) {
    warning("The rejection region of the test is empty.")
    return(c(nom_alpha_mid = 0, size = 0))
  }
  # Step one result back out of the region (the loop above overshot alpha).
  bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
  i <- i-1
  # Create grid for p with 51 points to compute more accurate maximum of size.
  p <- seq(0, 1, by = 0.02)
  # Compute size for every p in grid and take maximum
  sapply(
    p,
    function(x) sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, x))
  ) %>%
    max() ->
    max.size
  # If maximum size is too high, shrink rejection region and compute new maximum
  # size
  while (max.size > alpha) {
    # Exit function if size of smallest possible rejection region is too high
    if (i <= 1) {
      warning("The rejection region of the test is empty.")
      return(c(nom_alpha_mid = 0, size = 0))
    }
    bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
    i <- i-1
    while(p.values[i-1] == p.values[i] & i > 1){
      bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
      i <- i-1
    }
    sapply(
      p,
      function(x) sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, x))
    ) %>%
      max() ->
      max.size
  }
  # Create grid for p with specified accuracy to compute maximum size with
  # desired accuracy
  p <- seq(0, 1, by = 10^-size_acc)
  # Compute maximum size
  sapply(
    p,
    function(x) sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, x))
  ) %>%
    max() ->
    max.size
  # If maximum size is too high, shrink rejection region
  while (max.size > alpha) {
    # Exit function if size of smallest possible rejection region is too high
    if (i <= 1) {
      warning("The rejection region of the test is empty.")
      return(c(nom_alpha_mid = 0, size = 0))
    }
    bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
    i <- i-1
    while(p.values[i-1] == p.values[i] & i > 1){
      bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
      i <- i-1
    }
    sapply(
      p,
      function(x) sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, x))
    ) %>%
      max() ->
      max.size
  }
  # Define nominal alpha as mean of highest p-value in rejection region and
  # lowest p-value in acceptance region
  nom_alpha_mid <- (p.values[i] + p.values[i+1])/2
  return(c(nom_alpha_mid = nom_alpha_mid, size = max.size))
}
power_boschloo <- function(df, n_C, n_E, p_CA, p_EA){
  # Exact probability of landing in the rejection region flagged by the
  # logical column `reject` of df, evaluated at each pair of true rates
  # (p_CA[i], p_EA[i]). Under H_0 rates this is the size, under alternative
  # rates the power.
  #
  # Args:
  #   df:         data frame with columns x_C (0..n_C), x_E (0..n_E) covering
  #               all response pairs, plus a logical column reject.
  #   n_C, n_E:   group sizes.
  #   p_CA, p_EA: equal-length vectors of true rates in [0, 1].
  # Returns: named numeric vector of rejection probabilities ("p_CA, p_EA").
  if (
    length(unique(df$x_C)) != n_C + 1 ||
    length(unique(df$x_E)) != n_E + 1
  ) {
    stop("Values of x_C and x_E have to fit n_C and n_E.")
  }
  if (
    length(p_CA) != length(p_EA) ||
    !all(p_CA >= 0 & p_CA <= 1 & p_EA >= 0 & p_EA <= 1)
  ) {
    stop("p_CA and p_EA must have same length and values in [0, 1].")
  }
  # Restrict to the rejection region once; the rejection probability is the
  # sum of joint binomial probabilities over that region.
  region <- df[df$reject, , drop = FALSE]
  result <- vapply(
    seq_along(p_CA),
    function(idx) {
      sum(
        stats::dbinom(region$x_C, n_C, p_CA[idx]) *
          stats::dbinom(region$x_E, n_E, p_EA[idx])
      )
    },
    numeric(1)
  )
  names(result) <- paste(p_CA, p_EA, sep = ", ")
  result
}
samplesize_normal_appr <- function(p_EA, p_CA, alpha, beta, r){
  # Calculate approximate sample size for normal approximation test for specified
  # level alpha, power, allocation ratio r = n_E/n_C and true rates p_CA, p_EA.
  #
  # Args:
  #   p_EA, p_CA: true response rates in (0, 1), p_EA > p_CA under H_1.
  #   alpha:      one-sided significance level.
  #   beta:       type II error rate (power = 1 - beta).
  #   r:          allocation ratio n_E/n_C (> 0).
  # Output: Sample sizes per group (n_C, n_E), both integers.
  # Pooled rate under H_0 and the alternative's rate difference.
  p_0 <- (p_CA + r*p_EA)/(1+r)
  Delta_A <- p_EA - p_CA
  n_C <- ceiling(1/r*(stats::qnorm(1-alpha)*sqrt((1+r)*p_0*(1-p_0)) + stats::qnorm(1-beta)*sqrt(r*p_CA*(1-p_CA) + p_EA*(1-p_EA)))^2 / Delta_A^2)
  # BUG FIX: the original `r*n_C %>% ceiling()` parsed as r * ceiling(n_C)
  # because %>% binds tighter than *, yielding a non-integer n_E for
  # fractional r (e.g. r = 1.5 with odd n_C). Round up the product instead.
  n_E <- ceiling(r*n_C)
  return(
    list(n_C = n_C, n_E = n_E)
  )
}
samplesize_exact_boschloo <- function(p_EA, p_CA, alpha, beta, r, size_acc = 4, alternative = "greater"){
  # Calculate exact sample size for Fisher-Boschloo test and specified
  # level alpha, power, allocation ratio r = n_E/n_C and true rates p_CA, p_EA.
  # Accuracy of calculating the critical value can be specified by size_acc.
  # Output: Sample sizes per group (n_C, n_E), nominal alpha and exact power.
  #
  # Search strategy: start from the normal-approximation sizes, then walk the
  # sample size down/up in the smallest steps compatible with the allocation
  # ratio r until the exact power just reaches 1 - beta.
  # (check.0.1 and check.pos.int are package-internal validators.)
  # Check if input is correctly specified
  check.0.1(
    c(p_EA, p_CA, alpha, beta),
    "p_EA, p_CA, alpha, beta have to lie in interval (0,1)."
  )
  check.pos.int(
    size_acc,
    "size_acc has to be positive integer."
  )
  if (
    any(
      sapply(
        list(p_EA, p_CA, alpha, beta, r, size_acc, alternative),
        length
      ) != 1
    )
  ) {
    stop("Input values have to be single values.")
  }
  if (r <= 0) {
    stop("r has to be greater than 0.")
  }
  if (!(alternative %in% c("less", "greater"))) {
    warning("alternative has to be \"less\" or \"greater\". Will be treated as \"greater\".")
    alternative <- "greater"
  }
  if (p_CA >= p_EA & alternative == "greater") {
    stop("p_EA has to be greater than p_CA.")
  }
  if (alternative == "less") {
    if (p_EA >= p_CA) {
      stop("p_CA has to be greater than p_EA.")
    }
    # Inverse p_EA and p_CA to test the switched hypothesis
    p_EA <- 1-p_EA
    p_CA <- 1-p_CA
  }
  # Estimate sample size with approximate formula
  n_appr <- samplesize_normal_appr(
    p_EA = p_EA,
    p_CA = p_CA,
    alpha = alpha,
    beta = beta,
    r = r
  )
  # Use estimates as starting values
  n_C <- n_appr[["n_C"]]
  n_E <- n_appr[["n_E"]]
  # Initiate data frame for starting sample size
  expand.grid(
    x_C = 0:n_C,
    x_E = 0:n_E
  ) %>%
    teststat_boschloo(n_C = n_C, n_E = n_E) ->
    df
  # Calculate raised nominal level for starting values
  nom_alpha <- critval_boschloo(alpha = alpha, n_C = n_C, n_E = n_E, size_acc = size_acc)["nom_alpha_mid"]
  # Calculate exact power for starting values
  df %>%
    dplyr::mutate(reject = cond_p <= nom_alpha) %>%
    power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
    exact_power
  # Decrease sample size if power is too high
  if(exact_power > 1-beta){
    while(exact_power > 1-beta){
      # Store power and nominal level of last iteration
      last_power <- exact_power
      last_alpha <- nom_alpha
      # Decrease sample size by minimal amount possible with allocation ratio r
      # (step the smaller group by 1 and derive the other via r).
      if (r >= 1) {
        n_C <- n_C - 1
        n_E <- ceiling(r*n_C)
      } else {
        n_E <- n_E - 1
        n_C <- ceiling(1/r*n_E)
      }
      # Initiate data frame
      expand.grid(
        x_C = 0:n_C,
        x_E = 0:n_E
      ) %>%
        teststat_boschloo(n_C = n_C, n_E = n_E) ->
        df
      # Calculate raised nominal level
      nom_alpha <- critval_boschloo(alpha = alpha, n_C = n_C, n_E = n_E, size_acc = size_acc)["nom_alpha_mid"]
      # Calculate exact power
      df %>%
        dplyr::mutate(reject = cond_p <= nom_alpha) %>%
        power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
        exact_power
    }
    # Go one step back (the loop exits at the first underpowered size, so the
    # previous iteration holds the smallest size that still reaches 1 - beta).
    if (r >= 1) {
      n_C <- n_C + 1
      n_E <- ceiling(r*n_C)
    } else {
      n_E <- n_E + 1
      n_C <- ceiling(1/r*n_E)
    }
    exact_power <- last_power
    nom_alpha <- last_alpha
  }
  # If power is too low: increase sample size until power is achieved
  while (exact_power < 1-beta) {
    if (r >= 1) {
      n_C <- n_C + 1
      n_E <- ceiling(r*n_C)
    } else {
      n_E <- n_E + 1
      n_C <- ceiling(1/r*n_E)
    }
    # Initiate data frame
    expand.grid(
      x_C = 0:n_C,
      x_E = 0:n_E
    ) %>%
      teststat_boschloo(n_C = n_C, n_E = n_E) ->
      df
    # Calculate raised nominal level
    nom_alpha <- critval_boschloo(alpha = alpha, n_C = n_C, n_E = n_E, size_acc = size_acc)["nom_alpha_mid"]
    # Calculate exact power
    df %>%
      dplyr::mutate(reject = cond_p <= nom_alpha) %>%
      power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
      exact_power
  }
  return(
    list(
      n_C = n_C,
      n_E = n_E,
      nom_alpha = nom_alpha,
      exact_power = exact_power
    )
  )
}
# Non-Inferiority ##############################################################
# Default test problem (alternative = "greater"):
# H_0: OR(p_E, p_A) <= gamma
# H_1: OR(p_E, p_A) > gamma
# with 0 < gamma < 1
teststat_boschloo_NI <- function(df, n_C, n_E, gamma){
  # Take data frame df with variable x_C and x_E representing all possible
  # response pairs for group sizes n_C and n_E and add conditional fisher p-values
  # for H_0: OR(p_E, p_A) <= gamma.
  #
  # Args:
  #   df:       data frame with columns x_C (0..n_C) and x_E (0..n_E)
  #             covering every possible pair of responder counts.
  #   n_C, n_E: group sizes.
  #   gamma:    odds-ratio non-inferiority margin (> 0).
  # Returns: df grouped by s = x_C + x_E with an added column cond_p, the
  #   conditional p-value from Fisher's noncentral hypergeometric
  #   distribution with odds 1/gamma.
  if (
    n_C+1 != df %>% dplyr::pull(x_C) %>% unique() %>% length() |
    n_E+1 != df %>% dplyr::pull(x_E) %>% unique() %>% length()
  ) {
    stop("Values of x_C and x_E have to fit n_C and n_E.")
  }
  # Compute p-values of Fisher's exact test from Fisher's noncentral
  # hypergeometric distribution for every s
  df %>%
    dplyr::mutate(s = x_C+x_E) %>%
    dplyr::group_by(s) %>%
    dplyr::do(
      .,
      dplyr::mutate(
        .,
        # s is constant within each group; odds 1/gamma shifts the null to
        # the boundary OR(p_E, p_C) = gamma.
        cond_p = BiasedUrn::pFNCHypergeo(x_C, n_C, n_E, s[1], 1/gamma)
      )
    ) %>%
    return()
}
critval_boschloo_NI <- function(alpha, n_C, n_E, gamma, size_acc = 3){
  # Compute raised nominal level for Fisher-Boschloo test for true level alpha
  # and sample sizes n_C and n_E.
  # Accuracy of obtaining maximum size (dependent on p) can be defined by size_acc.
  # Output: Nominal level (critical value) and exact size.
  #
  # Non-inferiority analogue of critval_boschloo: the test statistic comes
  # from Fisher's noncentral hypergeometric distribution (odds 1/gamma) and
  # the size is maximized over the nuisance rate p_CA along the null
  # boundary OR(p_EA, p_CA) = gamma, on grids of step 10^-1 .. 10^-size_acc.
  # NOTE(review): unlike critval_boschloo, this version has no `i <= 1`
  # guards against an empty rejection region -- confirm inputs cannot
  # trigger that here.
  # Total sample size
  n <- n_C+n_E
  # Possible values for the total number of responders s
  s.area <- 0:n
  # Initiate elements for loop
  # Create list of p.values (test statistic) for every s
  p.value.list <- list()
  for (s in s.area) {
    p.value.list[[s+1]] <- BiasedUrn::pFNCHypergeo(max(s-n_E, 0):min(s, n_C), n_C, n_E, s, 1/gamma)
  }
  # Ordered data frame of p-values mapped to every s
  data.frame(
    p.value = unlist(p.value.list),
    s = rep(s.area, c(1:min(n_C, n_E), rep(min(n_C, n_E)+1, max(n_C, n_E)-min(n_C, n_E)+1), n+1-(max(n_C, n_E)+1):n))
  ) %>%
    dplyr::arrange(p.value, s) ->
    ordered.p.values
  # Vector of p-value and vector of s
  p.values <- ordered.p.values$p.value
  s.vec <- ordered.p.values$s
  # Start with critical value = alpha and define the corresponding index
  start.index <- sum(p.values <= alpha)
  # Calculate boundaries c(s) of rejection region for every s for first critical
  # value = alpha
  # (-Inf from max() over an empty vector marks "reject nothing for this s".)
  ordered.p.values %>%
    dplyr::group_by(s) %>%
    dplyr::summarise(c = suppressWarnings(max(p.value[p.value <= alpha]))) %>%
    dplyr::arrange(s) %>%
    dplyr::pull(c) ->
    start.bounds
  # Determine maximal nominal alpha iteratively
  max.size <- 0
  i <- start.index
  bounds <- start.bounds
  # Help function to efficiently compute logarithmic binomial coefficient
  logchoose <- function(o, u){
    if(u > o){stop("u cannot be greater than o!")}
    sum(log(seq_len(o-max(o-u, u))+max(o-u, u))) - sum(log(seq_len(min(o-u, u))))
  }
  # Help function to compute P(S=s) under constant odds ratio gamma
  # (p_EA is derived from p_CA so that OR(p_EA, p_CA) = gamma; all work is
  # done on the log scale to avoid overflow of binomial coefficients).
  Compute.s.prob.vec <- function(p_CA){
    p_EA <- 1/(1+(1-p_CA)/(gamma*p_CA))
    k.range <- 0:n_C
    sapply(
      k.range,
      function(y) logchoose(n_C, y) - y*log(gamma)
    ) ->
      add.1
    s.minus.k.range <- 0:n_E
    sapply(
      s.minus.k.range,
      function(y) logchoose(n_E, y)
    ) ->
      add.2
    sapply(
      s.area,
      function(x){
        k <- max(x-n_E, 0):min(x, n_C)
        help.val <- add.1[k+1] + add.2[x-k+1]
        help.val <- help.val + n_C*log(1-p_CA) + (n_E-x)*log(1-p_EA) + x*log(p_EA)
        sum(exp(help.val))
      }
    )
  }
  # Create grid with 9 points fo p_CA (must not contain 0 or 1)
  p_CA <- seq(0.1, 0.9, by = 10^-1)
  # Create list of probabilites P(S=s) for every p_CA in grid
  lapply(
    p_CA,
    Compute.s.prob.vec
  ) ->
    s.prob.vec.list
  # Increase nominal alpha
  while (max.size <= alpha) {
    # Iterate critical value to next step
    i <- i+1
    # Determine s where boundary changes in this step
    new.s <- s.vec[i]
    # Determine the new boundary for specific s
    new.c <- p.values[i]
    # Update boundaries of rejection region
    bounds[new.s+1] <- new.c
    # Compute size for every p in grid and take maximum
    sapply(
      1:length(p_CA),
      function(x) sum(bounds[bounds != -Inf]*s.prob.vec.list[[x]][bounds != -Inf])
    ) %>%
      max() ->
      max.size
  }
  # Go one step back
  bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
  i <- i-1
  # If two or more possible results have the same p-values, they have to fall
  # in the same region. The rejection region is shrinked until this condition
  # is fulfilled.
  while(p.values[i+1] == p.values[i]){
    bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
    i <- i-1
  }
  # Compute maximal size with increasing accuracy
  for (grid.acc in 2:size_acc) {
    # Define grid
    p_CA <- seq(10^-grid.acc, 1-10^-grid.acc, by = 10^-grid.acc)
    # Compute probabilities P(S=s)
    lapply(
      p_CA,
      Compute.s.prob.vec
    ) ->
      s.prob.vec.list
    # Compute maximum size
    sapply(
      1:length(p_CA),
      function(x) sum(bounds[bounds != -Inf]*s.prob.vec.list[[x]][bounds != -Inf])
    ) %>%
      max() ->
      max.size
    # Shrink rejection region if size is too high
    while (max.size > alpha) {
      # Reduce rejection region
      bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
      i <- i-1
      while(p.values[i+1] == p.values[i]){
        bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
        i <- i-1
      }
      # Compute maximum size
      sapply(
        1:length(p_CA),
        function(x) sum(bounds[bounds != -Inf]*s.prob.vec.list[[x]][bounds != -Inf])
      ) %>%
        max() ->
        max.size
    }
  }
  # # If two or more possible results have the same p-values, they have to fall
  # # in the same region. The rejection region is shrinked until this condition
  # # is fulfilled.
  # while(p.values[i-1] == p.values[i]){
  #   bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
  #   i <- i-1
  # }
  # # Recalculate maximal size
  # sapply(
  #   1:length(p_CA),
  #   function(x) sum(bounds[bounds != -Inf]*s.prob.vec.list[[x]][bounds != -Inf])
  # ) %>%
  #   max() ->
  #   max.size
  # Define nominal alpha as mean of highest p-value in rejection region and
  # lowest p-value in acceptance region
  nom_alpha_mid <- (p.values[i] + p.values[i+1])/2
  return(c(nom_alpha_mid = nom_alpha_mid, size = max.size))
}
samplesize_Wang <- function(p_EA, p_CA, gamma, alpha, beta, r){
  # Calculate approximate sample size for approximate test for specified
  # level alpha, power, allocation ratio r = n_E/n_C, true rates p_CA, p_EA and
  # OR-NI.margin gamma.
  #
  # Args:
  #   p_EA, p_CA: true response rates in (0, 1).
  #   gamma:      odds-ratio non-inferiority margin (> 0), with
  #               OR(p_EA, p_CA) != gamma.
  #   alpha:      one-sided significance level.
  #   beta:       type II error rate (power = 1 - beta).
  #   r:          allocation ratio n_E/n_C (> 0).
  # Output: Sample sizes per group (n_C, n_E), both integers.
  # True odds ratio under the alternative.
  theta_A <- p_EA*(1-p_CA)/(p_CA*(1-p_EA))
  n_C <- ceiling(1/r*(stats::qnorm(1-alpha) + stats::qnorm(1-beta))^2 * (1/(p_EA*(1-p_EA)) + r/(p_CA*(1-p_CA))) / (log(theta_A) - log(gamma))^2)
  # BUG FIX: the original `r*n_C %>% ceiling()` parsed as r * ceiling(n_C)
  # because %>% binds tighter than *, yielding a non-integer n_E for
  # fractional r. Round up the product instead.
  n_E <- ceiling(r*n_C)
  return(
    list(n_C = n_C, n_E = n_E)
  )
}
samplesize_exact_boschloo_NI <- function(p_EA, p_CA, gamma, alpha, beta, r, size_acc = 3, alternative = "greater"){
  # Calculate exact sample size for Fisher-Boschloo test and specified
  # level alpha, power, allocation ratio r = n_E/n_C, true rates p_CA, p_EA and
  # OR-NI-margin gamma.
  # Accuracy of calculating the critical value can be specified by size_acc.
  # Output: Sample sizes per group (n_C, n_E), nominal alpha and exact power.
  #
  # Non-inferiority analogue of samplesize_exact_boschloo: start from Wang's
  # approximation, then walk the size down/up in the smallest steps
  # compatible with r until the exact power just reaches 1 - beta.
  # (check.0.1 and check.pos.int are package-internal validators.)
  # Check if input is correctly specified
  check.0.1(
    c(p_EA, p_CA, alpha, beta),
    "p_EA, p_CA, alpha, beta have to lie in interval (0,1)."
  )
  check.pos.int(
    size_acc,
    "size_acc has to be positive integer."
  )
  if (
    any(
      sapply(
        list(p_EA, p_CA, gamma, alpha, beta, r, size_acc, alternative),
        length
      ) != 1
    )
  ) {
    stop("Input values have to be single values.")
  }
  if (any(c(r, gamma) <= 0)) {
    stop("r and gamma have to be positive.")
  }
  if (!(alternative %in% c("less", "greater"))) {
    warning("alternative has to be \"less\" or \"greater\". Will be treated as \"greater\".")
    alternative <- "greater"
  }
  if (p_EA*(1-p_CA)/(p_CA*(1-p_EA)) <= gamma & alternative == "greater") {
    stop("OR(p_EA, p_CA) has to be greater than gamma.")
  }
  if (alternative == "less") {
    if (p_EA*(1-p_CA)/(p_CA*(1-p_EA)) >= gamma) {
      stop("OR(p_EA, p_CA) has to be smaller than gamma.")
    }
    # Inverse p_EA, p_CA and gamma to test the switched hypothesis
    p_EA <- 1-p_EA
    p_CA <- 1-p_CA
    gamma <- 1/gamma
  }
  # Estimate sample size with approximate formula
  n_appr <- samplesize_Wang(
    p_EA = p_EA,
    p_CA = p_CA,
    gamma = gamma,
    alpha = alpha,
    beta = beta,
    r = r
  )
  # Use estimates as starting values
  n_C <- n_appr[["n_C"]]
  n_E <- n_appr[["n_E"]]
  # Initiate data frame
  expand.grid(
    x_C = 0:n_C,
    x_E = 0:n_E
  ) %>%
    teststat_boschloo_NI(n_C = n_C, n_E = n_E, gamma = gamma) ->
    df
  # Calculate raised nominal level
  nom_alpha <- critval_boschloo_NI(alpha = alpha, n_C = n_C, n_E = n_E, gamma = gamma, size_acc = size_acc)["nom_alpha_mid"]
  # Calculate exact power
  df %>%
    dplyr::mutate(reject = cond_p <= nom_alpha) %>%
    power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
    exact_power
  # Decrease sample size if power is too high
  if(exact_power > 1-beta){
    while(exact_power > 1-beta){
      # Store power and nominal level of last iteration
      last_power <- exact_power
      last_alpha <- nom_alpha
      # Decrease sample size by minimal amount possible with allocation ratio r
      # (step the smaller group by 1 and derive the other via r).
      if (r >= 1) {
        n_C <- n_C - 1
        n_E <- ceiling(r*n_C)
      } else {
        n_E <- n_E - 1
        n_C <- ceiling(1/r*n_E)
      }
      # Initiate data frame
      expand.grid(
        x_C = 0:n_C,
        x_E = 0:n_E
      ) %>%
        teststat_boschloo_NI(n_C = n_C, n_E = n_E, gamma = gamma) ->
        df
      # Calculate raised nominal level
      nom_alpha <- critval_boschloo_NI(alpha = alpha, n_C = n_C, n_E = n_E, gamma = gamma, size_acc = size_acc)["nom_alpha_mid"]
      # Calculate exact power
      df %>%
        dplyr::mutate(reject = cond_p <= nom_alpha) %>%
        power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
        exact_power
    }
    # Go one step back (previous iteration holds the smallest adequate size).
    if (r >= 1) {
      n_C <- n_C + 1
      n_E <- ceiling(r*n_C)
    } else {
      n_E <- n_E + 1
      n_C <- ceiling(1/r*n_E)
    }
    exact_power <- last_power
    nom_alpha <- last_alpha
  }
  # If power is too low: increase sample size until power is achieved
  while (exact_power < 1-beta) {
    if (r >= 1) {
      n_C <- n_C + 1
      n_E <- ceiling(r*n_C)
    } else {
      n_E <- n_E + 1
      n_C <- ceiling(1/r*n_E)
    }
    # Initiate data frame
    expand.grid(
      x_C = 0:n_C,
      x_E = 0:n_E
    ) %>%
      teststat_boschloo_NI(n_C = n_C, n_E = n_E, gamma = gamma) ->
      df
    # Calculate raised nominal level
    nom_alpha <- critval_boschloo_NI(alpha = alpha, n_C = n_C, n_E = n_E, gamma = gamma, size_acc = size_acc)["nom_alpha_mid"]
    # Calculate exact power
    df %>%
      dplyr::mutate(reject = cond_p <= nom_alpha) %>%
      power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
      exact_power
  }
  return(
    list(
      n_C = n_C,
      n_E = n_E,
      nom_alpha = nom_alpha,
      exact_power = exact_power
    )
  )
}
p_value_boschloo <- function(x_E., x_C., n_E, n_C, gamma, size_acc = 3, alternative = "greater"){
  # Calculate p-values for a specific result (x_E., x_C.)
  #
  # The Fisher-Boschloo p-value is the maximal rejection probability of the
  # region {cond_p <= cond_p(x_E., x_C.)} over the null boundary
  # OR(p_E, p_C) = gamma, evaluated on a p_C grid of step 10^-size_acc.
  # Superiority is the special case gamma = 1.
  # Returns: named numeric vector of rejection probabilities, one per grid
  #   point (the p-value is their maximum).
  # (check.pos.int is a package-internal validator.)
  # Check if input is correctly specified
  check.pos.int(
    size_acc,
    "size_acc has to be positive integer."
  )
  check.pos.int(
    c(x_E.+1, x_C.+1, n_E, n_C, n_E-x_E.+1, n_C-x_C.+1),
    "n_E, n_C have to be positive integers and x_E. in {0, ..., n_E}, x_C. in {0, ..., n_C}."
  )
  if (
    any(
      sapply(
        list(x_E., x_C., n_E, n_C, gamma, size_acc, alternative),
        length
      ) != 1
    )
  ) {
    stop("Input values have to be single values.")
  }
  if (gamma <= 0) {
    stop("gamma has to be positive.")
  }
  if (!(alternative %in% c("less", "greater"))) {
    warning("alternative has to be \"less\" or \"greater\". Will be treated as \"greater\".")
    alternative <- "greater"
  }
  if (alternative == "less") {
    # Inverse values for other-sided hypothesis
    x_E. <- n_E - x_E.
    x_C. <- n_C - x_C.
    gamma <- 1/gamma
  }
  # Define grid for p_C
  p_C <- seq(10^-size_acc, 1-10^-size_acc, by = 10^-size_acc)
  # Find corresponding values of p_E such that (p_C, p_E) lie on the border of
  # the null hypothesis
  p_E <- 1/(1+(1-p_C)/(gamma*p_C))
  expand.grid(
    x_C = 0:n_C,
    x_E = 0:n_E
  ) %>%
    teststat_boschloo_NI(n_E = n_E, n_C = n_C, gamma = gamma) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(
      reject = cond_p <= cond_p[x_E == x_E. & x_C == x_C.]
    ) %>%
    power_boschloo(n_C, n_E, p_C, p_E) -> # function power actually computes the rejection probability which in this case is the p-value
    p.values
  return(p.values)
}
| /R/boschloo-functions.R | permissive | s-kilian/binary | R | false | false | 27,743 | r | ##..............................................................................
## Project: Fisher-Boschloo test
## Purpose: Provide functions around Fisher-Boschloo test for the
## comparison of two groups with binary endpoint.
## Input: none
## Output: Functions:
## teststat_boschloo: Compute test statistic
## critval_boschloo: Find critical value
## power_boschloo: Compute exact probability
## of rejecting H_0 for an arbitrary rejection region
## samplesize_normal_appr: Compute sample size for
## normal approximation test
##                          samplesize_exact_boschloo: Compute exact sample size
## samplesize_exact_Fisher: Compute sample size for
## Fisher's exact test
## Same functions for non-inferiority:
## teststat_boschloo_NI: Compute test statistic
## critval_boschloo_NI: Find critical value
## samplesize_Wang: Compute sample size for a
## normal approximation test by Wang
##                          samplesize_exact_boschloo_NI: Compute exact sample size
## samplesize_exact_Fisher_NI: Compute sample size for
## Fisher's exact test
## p_value: Calculate p-value for NI (superiority is
## special case with gamma = 1)
## Date of creation: 2019-04-04
## Date of last update: 2020-02-18
## Author: Samuel Kilian
##..............................................................................
## Superiority #################################################################
# Default test problem (alternative = "greater"):
# H_0: p_E <= p_C
# H_1: p_E > p_C
# Add the Fisher-Boschloo test statistic (conditional Fisher p-values for
# H_0: p_E <= p_C) to a data frame of all possible response pairs.
#
# Arguments:
#   df        data frame with columns x_C and x_E covering all pairs
#             {0, ..., n_C} x {0, ..., n_E}
#   n_C, n_E  group sizes of control and experimental group
#
# Value: df with columns s = x_C + x_E and cond_p, grouped by s. The grouped
#        result is part of the interface; callers ungroup it themselves.
teststat_boschloo <- function(df, n_C, n_E){
  # || instead of | : both operands are scalars, so short-circuiting scalar
  # logic is the correct operator here.
  if (
    n_C+1 != df %>% dplyr::pull(x_C) %>% unique() %>% length() ||
    n_E+1 != df %>% dplyr::pull(x_E) %>% unique() %>% length()
  ) {
    stop("Values of x_C and x_E have to fit n_C and n_E.")
  }
  # Conditional p-value of Fisher's exact test: upper-tail hypergeometric
  # probability of x_C given the total number of responders s. A grouped
  # mutate replaces the superseded dplyr::do() construction; s[1] is the
  # (constant) value of s within each group.
  df %>%
    dplyr::mutate(s = x_C + x_E) %>%
    dplyr::group_by(s) %>%
    dplyr::mutate(cond_p = stats::phyper(x_C, n_C, n_E, s[1]))
}
# Critical value ("raised nominal level") of the exact Fisher-Boschloo test
# for superiority.
#
# Starting from nominal level alpha, the rejection region is enlarged along
# the sorted conditional p-values until the exact size (maximised over the
# nuisance parameter p, first on coarse grids and finally with step
# 10^-size_acc) would exceed alpha, then shrunk back so that tied p-values
# never straddle the region boundary.
#
# Arguments:
#   alpha     target one-sided significance level
#   n_C, n_E  group sizes of control and experimental group
#   size_acc  number of decimals of the final nuisance-parameter grid
#
# Value: c(nom_alpha_mid = <critical value>, size = <exact size>), where
#        nom_alpha_mid is the midpoint between the largest conditional
#        p-value inside and the smallest outside the rejection region.
critval_boschloo <- function(alpha, n_C, n_E, size_acc = 4){
  # Compute raised nominal level for Fisher-Boschloo test for true level alpha
  # and sample sizes n_C and n_E.
  # Accuracy of obtaining maximum size (dependent on p) can be defined by size_acc.
  # Output: Nominal level (critical value) and exact size.
  # Total sample size
  n <- n_C+n_E
  # Possible values for the total number of responders s
  s.area <- 0:n
  # Initiate elements for loop
  # Create list of p.values (test statistic) for every s
  p.value.list <- list()
  for (s in s.area) {
    p.value.list[[s+1]] <- stats::phyper(max(s-n_E, 0):min(s, n_C), n_C, n_E, s)
  }
  # Ordered data frame of p-values mapped to every s. The rep() counts encode
  # how many admissible x_C values exist for each s.
  data.frame(
    p.value = unlist(p.value.list),
    s = rep(s.area, c(1:min(n_C, n_E), rep(min(n_C, n_E)+1, max(n_C, n_E)-min(n_C, n_E)+1), n+1-(max(n_C, n_E)+1):n))
  ) %>%
    dplyr::arrange(p.value, s) ->
    ordered.p.values
  # Vector of p-values and vector of s
  p.values <- ordered.p.values$p.value
  s.vec <- ordered.p.values$s
  # Start with critical value = alpha and define the corresponding index
  start.index <- sum(p.values <= alpha)
  # Calculate boundaries c(s) of rejection region for every s for first critical
  # value = alpha
  # (max() over an empty set yields -Inf with a warning, hence the suppress;
  # -Inf marks "no result rejected for this s".)
  ordered.p.values %>%
    dplyr::group_by(s) %>%
    dplyr::summarise(c = suppressWarnings(max(p.value[p.value <= alpha]))) %>%
    dplyr::arrange(s) %>%
    dplyr::pull(c) ->
    start.bounds
  # Determine rough approximation of critical value iteratively
  size <- 0
  i <- start.index
  bounds <- start.bounds
  while (size <= alpha) {
    # Iterate critical value to next step
    i <- i+1
    # Determine s where boundary changes in this step
    new.s <- s.vec[i]
    # Determine the new boundary for specific s
    new.c <- p.values[i]
    # Update boundaries of rejection region
    bounds[new.s+1] <- new.c
    # Calculate values to find approximate maximum of size
    order.help <- choose(n, s.area)*bounds
    # Determine p-values where size is approximately maximal
    max.ps <- s.area[order.help >= 0.9*max(order.help)]/n
    # Determine maximal size for the specific p-values
    size <- 0
    for (p in max.ps) {
      sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, p)) %>%
        max(c(size, .)) ->
        size
    }
  }
  # Exit function if size of smallest possible rejection region is too high
  if (i <= 1) {
    warning("The rejection region of the test is empty.")
    return(c(nom_alpha_mid = 0, size = 0))
  }
  # If two or more possible results have the same p-values, they have to fall
  # in the same region. The rejection region is shrinked until this condition
  # is fulfilled.
  # NOTE(review): "&" does not short-circuit, so p.values[i-1] is evaluated
  # even when i == 1, giving a zero-length while-condition (an error).
  # TODO confirm that i >= 2 is guaranteed whenever this loop is re-entered.
  while(p.values[i-1] == p.values[i] & i > 1){
    bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
    i <- i-1
  }
  # Exit function if size of smallest possible rejection region is too high
  if (i <= 1) {
    warning("The rejection region of the test is empty.")
    return(c(nom_alpha_mid = 0, size = 0))
  }
  # Go one step back: undo the last enlargement that made the size exceed alpha.
  bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
  i <- i-1
  # Create grid for p with 51 points to compute more accurate maximum of size.
  p <- seq(0, 1, by = 0.02)
  # Compute size for every p in grid and take maximum
  sapply(
    p,
    function(x) sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, x))
  ) %>%
    max() ->
    max.size
  # If maximum size is too high, shrink rejection region and compute new maximum
  # size
  while (max.size > alpha) {
    # Exit function if size of smallest possible rejection region is too high
    if (i <= 1) {
      warning("The rejection region of the test is empty.")
      return(c(nom_alpha_mid = 0, size = 0))
    }
    bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
    i <- i-1
    while(p.values[i-1] == p.values[i] & i > 1){
      bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
      i <- i-1
    }
    sapply(
      p,
      function(x) sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, x))
    ) %>%
      max() ->
      max.size
  }
  # Create grid for p with specified accuracy to compute maximum size with
  # desired accuracy
  p <- seq(0, 1, by = 10^-size_acc)
  # Compute maximum size
  sapply(
    p,
    function(x) sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, x))
  ) %>%
    max() ->
    max.size
  # If maximum size is too high, shrink rejection region
  while (max.size > alpha) {
    # Exit function if size of smallest possible rejection region is too high
    if (i <= 1) {
      warning("The rejection region of the test is empty.")
      return(c(nom_alpha_mid = 0, size = 0))
    }
    bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
    i <- i-1
    while(p.values[i-1] == p.values[i] & i > 1){
      bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
      i <- i-1
    }
    sapply(
      p,
      function(x) sum(bounds[bounds != -Inf]*stats::dbinom(s.area[bounds != -Inf], n, x))
    ) %>%
      max() ->
      max.size
  }
  # Define nominal alpha as mean of highest p-value in rejection region and
  # lowest p-value in acceptance region
  nom_alpha_mid <- (p.values[i] + p.values[i+1])/2
  return(c(nom_alpha_mid = nom_alpha_mid, size = max.size))
}
# Exact rejection probability of a given rejection region.
#
# Arguments:
#   df        data frame with columns x_C and x_E covering all pairs
#             {0, ..., n_C} x {0, ..., n_E} and a logical column `reject`
#             flagging the rejection region
#   n_C, n_E  group sizes of control and experimental group
#   p_CA, p_EA  vectors (same length) of true response rates
#
# Value: named numeric vector, one entry per pair (p_CA[k], p_EA[k]); the
#        value is the size under H_0 resp. the power under H_1.
power_boschloo <- function(df, n_C, n_E, p_CA, p_EA){
  if (
    n_C+1 != df %>% dplyr::pull(x_C) %>% unique() %>% length() |
    n_E+1 != df %>% dplyr::pull(x_E) %>% unique() %>% length()
  ) {
    stop("Values of x_C and x_E have to fit n_C and n_E.")
  }
  if (
    length(p_CA) != length(p_EA) |
    !all(p_CA >= 0 & p_CA <= 1 & p_EA >= 0 & p_EA <= 1)
  ) {
    stop("p_CA and p_EA must have same length and values in [0, 1].")
  }
  # The rejection region does not depend on (p_CA, p_EA), so filter it once
  # instead of inside the loop (pure hoisting, identical result).
  rejection <- dplyr::filter(df, reject)
  # vapply instead of sapply for a type-stable numeric result, and
  # seq_along() instead of 1:length() so an empty rate vector yields an
  # empty result rather than iterating over c(1, 0).
  result <- vapply(
    seq_along(p_CA),
    function(k) {
      sum(
        stats::dbinom(rejection$x_C, n_C, p_CA[k]) *
          stats::dbinom(rejection$x_E, n_E, p_EA[k])
      )
    },
    numeric(1)
  )
  names(result) <- paste(p_CA, p_EA, sep = ", ")
  return(result)
}
# Approximate sample size for the normal-approximation test of
# H_0: p_E <= p_C at one-sided level alpha with power 1 - beta.
#
# Arguments:
#   p_EA, p_CA  assumed true response rates (experimental / control)
#   alpha       one-sided significance level
#   beta        type II error rate (power = 1 - beta)
#   r           allocation ratio n_E / n_C
#
# Value: list(n_C, n_E) of integer per-group sample sizes.
samplesize_normal_appr <- function(p_EA, p_CA, alpha, beta, r){
  # Pooled rate under H_0 and effect size under the alternative
  p_0 <- (p_CA + r*p_EA)/(1+r)
  Delta_A <- p_EA - p_CA
  n_C <- ceiling(1/r*(stats::qnorm(1-alpha)*sqrt((1+r)*p_0*(1-p_0)) + stats::qnorm(1-beta)*sqrt(r*p_CA*(1-p_CA) + p_EA*(1-p_EA)))^2 / Delta_A^2)
  # Bug fix: the original "r*n_C %>% ceiling()" parses as r * ceiling(n_C)
  # because %>% binds tighter than *, leaving a fractional n_E for
  # non-integer r. Round the product up, as done elsewhere in this file.
  n_E <- ceiling(r * n_C)
  return(
    list(n_C = n_C, n_E = n_E)
  )
}
# Exact sample size for the Fisher-Boschloo superiority test.
#
# Starts from the normal-approximation sample size and then walks the exact
# power (recomputed via critval_boschloo()/power_boschloo() for every
# candidate size) down or up in the smallest steps compatible with the
# allocation ratio r until the target power 1 - beta is exactly met.
#
# Arguments:
#   p_EA, p_CA   assumed true response rates (experimental / control)
#   alpha, beta  one-sided level and type II error rate
#   r            allocation ratio n_E / n_C (> 0)
#   size_acc     grid accuracy passed to critval_boschloo()
#   alternative  "greater" (default) or "less"; "less" is handled by
#                mirroring the rates
#
# Value: list(n_C, n_E, nom_alpha, exact_power).
samplesize_exact_boschloo <- function(p_EA, p_CA, alpha, beta, r, size_acc = 4, alternative = "greater"){
  # Calculate exact sample size for Fisher-Boschloo test and specified
  # level alpha, power, allocation ratio r = n_E/n_C and true rates p_CA, p_EA.
  # Accuracy of calculating the critical value can be specified by size_acc.
  # Output: Sample sizes per group (n_C, n_E), nominal alpha and exact power.
  # Check if input is correctly specified
  check.0.1(
    c(p_EA, p_CA, alpha, beta),
    "p_EA, p_CA, alpha, beta have to lie in interval (0,1)."
  )
  check.pos.int(
    size_acc,
    "size_acc has to be positive integer."
  )
  if (
    any(
      sapply(
        list(p_EA, p_CA, alpha, beta, r, size_acc, alternative),
        length
      ) != 1
    )
  ) {
    stop("Input values have to be single values.")
  }
  if (r <= 0) {
    stop("r has to be greater than 0.")
  }
  if (!(alternative %in% c("less", "greater"))) {
    warning("alternative has to be \"less\" or \"greater\". Will be treated as \"greater\".")
    alternative <- "greater"
  }
  if (p_CA >= p_EA & alternative == "greater") {
    stop("p_EA has to be greater than p_CA.")
  }
  if (alternative == "less") {
    if (p_EA >= p_CA) {
      stop("p_CA has to be greater than p_EA.")
    }
    # Inverse p_EA and p_CA to test the switched hypothesis
    p_EA <- 1-p_EA
    p_CA <- 1-p_CA
  }
  # Estimate sample size with approximate formula
  n_appr <- samplesize_normal_appr(
    p_EA = p_EA,
    p_CA = p_CA,
    alpha = alpha,
    beta = beta,
    r = r
  )
  # Use estimates as starting values
  n_C <- n_appr[["n_C"]]
  n_E <- n_appr[["n_E"]]
  # Initiate data frame for starting sample size
  expand.grid(
    x_C = 0:n_C,
    x_E = 0:n_E
  ) %>%
    teststat_boschloo(n_C = n_C, n_E = n_E) ->
    df
  # Calculate raised nominal level for starting values
  nom_alpha <- critval_boschloo(alpha = alpha, n_C = n_C, n_E = n_E, size_acc = size_acc)["nom_alpha_mid"]
  # Calculate exact power for starting values
  df %>%
    dplyr::mutate(reject = cond_p <= nom_alpha) %>%
    power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
    exact_power
  # Decrease sample size if power is too high
  if(exact_power > 1-beta){
    while(exact_power > 1-beta){
      # Store power and nominal level of last iteration
      last_power <- exact_power
      last_alpha <- nom_alpha
      # Decrease sample size by minimal amount possible with allocation ratio r
      if (r >= 1) {
        n_C <- n_C - 1
        n_E <- ceiling(r*n_C)
      } else {
        n_E <- n_E - 1
        n_C <- ceiling(1/r*n_E)
      }
      # Initiate data frame
      expand.grid(
        x_C = 0:n_C,
        x_E = 0:n_E
      ) %>%
        teststat_boschloo(n_C = n_C, n_E = n_E) ->
        df
      # Calculate raised nominal level
      nom_alpha <- critval_boschloo(alpha = alpha, n_C = n_C, n_E = n_E, size_acc = size_acc)["nom_alpha_mid"]
      # Calculate exact power
      df %>%
        dplyr::mutate(reject = cond_p <= nom_alpha) %>%
        power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
        exact_power
    }
    # Go one step back: the loop stopped at the first size with too little
    # power, so the previous size is the smallest one meeting the target.
    if (r >= 1) {
      n_C <- n_C + 1
      n_E <- ceiling(r*n_C)
    } else {
      n_E <- n_E + 1
      n_C <- ceiling(1/r*n_E)
    }
    exact_power <- last_power
    nom_alpha <- last_alpha
  }
  # If power is too low: increase sample size until power is achieved
  while (exact_power < 1-beta) {
    if (r >= 1) {
      n_C <- n_C + 1
      n_E <- ceiling(r*n_C)
    } else {
      n_E <- n_E + 1
      n_C <- ceiling(1/r*n_E)
    }
    # Initiate data frame
    expand.grid(
      x_C = 0:n_C,
      x_E = 0:n_E
    ) %>%
      teststat_boschloo(n_C = n_C, n_E = n_E) ->
      df
    # Calculate raised nominal level
    nom_alpha <- critval_boschloo(alpha = alpha, n_C = n_C, n_E = n_E, size_acc = size_acc)["nom_alpha_mid"]
    # Calculate exact power
    df %>%
      dplyr::mutate(reject = cond_p <= nom_alpha) %>%
      power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
      exact_power
  }
  return(
    list(
      n_C = n_C,
      n_E = n_E,
      nom_alpha = nom_alpha,
      exact_power = exact_power
    )
  )
}
# Non-Inferiority ##############################################################
# Default test problem (alternative = "greater"):
# H_0: OR(p_E, p_A) <= gamma
# H_1: OR(p_E, p_A) > gamma
# with 0 < gamma < 1
# Add the Fisher-Boschloo non-inferiority test statistic to a data frame of
# all possible response pairs: conditional p-values of Fisher's exact test
# for H_0: OR(p_E, p_C) <= gamma, computed from Fisher's noncentral
# hypergeometric distribution.
#
# Arguments:
#   df        data frame with columns x_C and x_E covering all pairs
#             {0, ..., n_C} x {0, ..., n_E}
#   n_C, n_E  group sizes of control and experimental group
#   gamma     odds-ratio non-inferiority margin (> 0)
#
# Value: df with columns s = x_C + x_E and cond_p, grouped by s. The grouped
#        result is part of the interface; callers ungroup it themselves.
teststat_boschloo_NI <- function(df, n_C, n_E, gamma){
  # || instead of | : both operands are scalars, so short-circuiting scalar
  # logic is the correct operator here.
  if (
    n_C+1 != df %>% dplyr::pull(x_C) %>% unique() %>% length() ||
    n_E+1 != df %>% dplyr::pull(x_E) %>% unique() %>% length()
  ) {
    stop("Values of x_C and x_E have to fit n_C and n_E.")
  }
  # Grouped mutate replaces the superseded dplyr::do() construction.
  # BiasedUrn::pFNCHypergeo() is vectorized in its first argument; s[1] is
  # the (constant) total number of responders within each group.
  df %>%
    dplyr::mutate(s = x_C + x_E) %>%
    dplyr::group_by(s) %>%
    dplyr::mutate(cond_p = BiasedUrn::pFNCHypergeo(x_C, n_C, n_E, s[1], 1/gamma))
}
# Critical value ("raised nominal level") of the exact Fisher-Boschloo test
# for non-inferiority with odds-ratio margin gamma.
#
# Analogous to critval_boschloo(), but the size has to be maximised over the
# nuisance parameter p_C along the boundary OR(p_E, p_C) = gamma of H_0;
# P(S = s) on that boundary is evaluated in log space via the two helper
# functions below. The grid for p_C is refined stepwise up to 10^-size_acc.
#
# Arguments:
#   alpha     target one-sided significance level
#   n_C, n_E  group sizes of control and experimental group
#   gamma     odds-ratio non-inferiority margin (> 0)
#   size_acc  number of decimals of the finest nuisance-parameter grid
#
# Value: c(nom_alpha_mid = <critical value>, size = <exact size>).
critval_boschloo_NI <- function(alpha, n_C, n_E, gamma, size_acc = 3){
  # Compute raised nominal level for Fisher-Boschloo test for true level alpha
  # and sample sizes n_C and n_E.
  # Accuracy of obtaining maximum size (dependent on p) can be defined by size_acc.
  # Output: Nominal level (critical value) and exact size.
  # Total sample size
  n <- n_C+n_E
  # Possible values for the total number of responders s
  s.area <- 0:n
  # Initiate elements for loop
  # Create list of p.values (test statistic) for every s
  p.value.list <- list()
  for (s in s.area) {
    p.value.list[[s+1]] <- BiasedUrn::pFNCHypergeo(max(s-n_E, 0):min(s, n_C), n_C, n_E, s, 1/gamma)
  }
  # Ordered data frame of p-values mapped to every s
  data.frame(
    p.value = unlist(p.value.list),
    s = rep(s.area, c(1:min(n_C, n_E), rep(min(n_C, n_E)+1, max(n_C, n_E)-min(n_C, n_E)+1), n+1-(max(n_C, n_E)+1):n))
  ) %>%
    dplyr::arrange(p.value, s) ->
    ordered.p.values
  # Vector of p-value and vector of s
  p.values <- ordered.p.values$p.value
  s.vec <- ordered.p.values$s
  # Start with critical value = alpha and define the corresponding index
  start.index <- sum(p.values <= alpha)
  # Calculate boundaries c(s) of rejection region for every s for first critical
  # value = alpha
  # (max() over an empty set yields -Inf with a warning, hence the suppress;
  # -Inf marks "no result rejected for this s".)
  ordered.p.values %>%
    dplyr::group_by(s) %>%
    dplyr::summarise(c = suppressWarnings(max(p.value[p.value <= alpha]))) %>%
    dplyr::arrange(s) %>%
    dplyr::pull(c) ->
    start.bounds
  # Determine maximal nominal alpha iteratively
  max.size <- 0
  i <- start.index
  bounds <- start.bounds
  # Help function to efficiently compute logarithmic binomial coefficient
  logchoose <- function(o, u){
    if(u > o){stop("u cannot be greater than o!")}
    sum(log(seq_len(o-max(o-u, u))+max(o-u, u))) - sum(log(seq_len(min(o-u, u))))
  }
  # Help function to compute P(S=s) under constant odds ratio gamma
  Compute.s.prob.vec <- function(p_CA){
    # p_EA on the H_0 boundary corresponding to p_CA
    p_EA <- 1/(1+(1-p_CA)/(gamma*p_CA))
    k.range <- 0:n_C
    sapply(
      k.range,
      function(y) logchoose(n_C, y) - y*log(gamma)
    ) ->
      add.1
    s.minus.k.range <- 0:n_E
    sapply(
      s.minus.k.range,
      function(y) logchoose(n_E, y)
    ) ->
      add.2
    sapply(
      s.area,
      function(x){
        k <- max(x-n_E, 0):min(x, n_C)
        help.val <- add.1[k+1] + add.2[x-k+1]
        help.val <- help.val + n_C*log(1-p_CA) + (n_E-x)*log(1-p_EA) + x*log(p_EA)
        sum(exp(help.val))
      }
    )
  }
  # Create grid with 9 points fo p_CA (must not contain 0 or 1)
  p_CA <- seq(0.1, 0.9, by = 10^-1)
  # Create list of probabilites P(S=s) for every p_CA in grid
  lapply(
    p_CA,
    Compute.s.prob.vec
  ) ->
    s.prob.vec.list
  # Increase nominal alpha
  while (max.size <= alpha) {
    # Iterate critical value to next step
    i <- i+1
    # Determine s where boundary changes in this step
    new.s <- s.vec[i]
    # Determine the new boundary for specific s
    new.c <- p.values[i]
    # Update boundaries of rejection region
    bounds[new.s+1] <- new.c
    # Compute size for every p in grid and take maximum
    sapply(
      1:length(p_CA),
      function(x) sum(bounds[bounds != -Inf]*s.prob.vec.list[[x]][bounds != -Inf])
    ) %>%
      max() ->
      max.size
  }
  # Go one step back
  bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
  i <- i-1
  # If two or more possible results have the same p-values, they have to fall
  # in the same region. The rejection region is shrinked until this condition
  # is fulfilled.
  # NOTE(review): unlike critval_boschloo(), this loop has no i > 1 guard;
  # TODO confirm i cannot reach 1 here (p.values[0] would give a zero-length
  # while-condition, i.e. an error).
  while(p.values[i+1] == p.values[i]){
    bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
    i <- i-1
  }
  # Compute maximal size with increasing accuracy
  for (grid.acc in 2:size_acc) {
    # Define grid
    p_CA <- seq(10^-grid.acc, 1-10^-grid.acc, by = 10^-grid.acc)
    # Compute probabilities P(S=s)
    lapply(
      p_CA,
      Compute.s.prob.vec
    ) ->
      s.prob.vec.list
    # Compute maximum size
    sapply(
      1:length(p_CA),
      function(x) sum(bounds[bounds != -Inf]*s.prob.vec.list[[x]][bounds != -Inf])
    ) %>%
      max() ->
      max.size
    # Shrink rejection region if size is too high
    while (max.size > alpha) {
      # Reduce rejection region
      bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
      i <- i-1
      while(p.values[i+1] == p.values[i]){
        bounds[s.vec[i]+1] <- suppressWarnings(p.values[1:(i-1)][s.vec[1:(i-1)] == s.vec[i]] %>% utils::tail(1) %>% max())
        i <- i-1
      }
      # Compute maximum size
      sapply(
        1:length(p_CA),
        function(x) sum(bounds[bounds != -Inf]*s.prob.vec.list[[x]][bounds != -Inf])
      ) %>%
        max() ->
        max.size
    }
  }
  # Define nominal alpha as mean of highest p-value in rejection region and
  # lowest p-value in acceptance region
  nom_alpha_mid <- (p.values[i] + p.values[i+1])/2
  return(c(nom_alpha_mid = nom_alpha_mid, size = max.size))
}
# Approximate sample size (Wang's formula) for the non-inferiority test of
# H_0: OR(p_E, p_C) <= gamma at one-sided level alpha with power 1 - beta.
#
# Arguments:
#   p_EA, p_CA  assumed true response rates (experimental / control)
#   gamma       odds-ratio non-inferiority margin
#   alpha       one-sided significance level
#   beta        type II error rate (power = 1 - beta)
#   r           allocation ratio n_E / n_C
#
# Value: list(n_C, n_E) of integer per-group sample sizes.
samplesize_Wang <- function(p_EA, p_CA, gamma, alpha, beta, r){
  # Anticipated odds ratio under the alternative
  theta_A <- p_EA*(1-p_CA)/(p_CA*(1-p_EA))
  n_C <- ceiling(1/r*(stats::qnorm(1-alpha) + stats::qnorm(1-beta))^2 * (1/(p_EA*(1-p_EA)) + r/(p_CA*(1-p_CA))) / (log(theta_A) - log(gamma))^2)
  # Bug fix: the original "r*n_C %>% ceiling()" parses as r * ceiling(n_C)
  # because %>% binds tighter than *, leaving a fractional n_E for
  # non-integer r. Round the product up, as done elsewhere in this file.
  n_E <- ceiling(r * n_C)
  return(
    list(n_C = n_C, n_E = n_E)
  )
}
# Exact sample size for the Fisher-Boschloo non-inferiority test with
# odds-ratio margin gamma.
#
# Starts from Wang's approximate sample size and then walks the exact power
# (recomputed via critval_boschloo_NI()/power_boschloo() for every candidate
# size) down or up in the smallest steps compatible with the allocation
# ratio r until the target power 1 - beta is exactly met.
#
# Arguments:
#   p_EA, p_CA   assumed true response rates (experimental / control)
#   gamma        odds-ratio non-inferiority margin (> 0)
#   alpha, beta  one-sided level and type II error rate
#   r            allocation ratio n_E / n_C (> 0)
#   size_acc     grid accuracy passed to critval_boschloo_NI()
#   alternative  "greater" (default) or "less"; "less" is handled by
#                mirroring the rates and inverting gamma
#
# Value: list(n_C, n_E, nom_alpha, exact_power).
samplesize_exact_boschloo_NI <- function(p_EA, p_CA, gamma, alpha, beta, r, size_acc = 3, alternative = "greater"){
  # Calculate exact sample size for Fisher-Boschloo test and specified
  # level alpha, power, allocation ratio r = n_E/n_C, true rates p_CA, p_EA and
  # OR-NI-margin gamma.
  # Accuracy of calculating the critical value can be specified by size_acc.
  # Output: Sample sizes per group (n_C, n_E), nominal alpha and exact power.
  # Check if input is correctly specified
  check.0.1(
    c(p_EA, p_CA, alpha, beta),
    "p_EA, p_CA, alpha, beta have to lie in interval (0,1)."
  )
  check.pos.int(
    size_acc,
    "size_acc has to be positive integer."
  )
  if (
    any(
      sapply(
        list(p_EA, p_CA, gamma, alpha, beta, r, size_acc, alternative),
        length
      ) != 1
    )
  ) {
    stop("Input values have to be single values.")
  }
  if (any(c(r, gamma) <= 0)) {
    stop("r and gamma have to be positive.")
  }
  if (!(alternative %in% c("less", "greater"))) {
    warning("alternative has to be \"less\" or \"greater\". Will be treated as \"greater\".")
    alternative <- "greater"
  }
  if (p_EA*(1-p_CA)/(p_CA*(1-p_EA)) <= gamma & alternative == "greater") {
    stop("OR(p_EA, p_CA) has to be greater than gamma.")
  }
  if (alternative == "less") {
    if (p_EA*(1-p_CA)/(p_CA*(1-p_EA)) >= gamma) {
      stop("OR(p_EA, p_CA) has to be smaller than gamma.")
    }
    # Inverse p_EA, p_CA and gamma to test the switched hypothesis
    p_EA <- 1-p_EA
    p_CA <- 1-p_CA
    gamma <- 1/gamma
  }
  # Estimate sample size with approximate formula
  n_appr <- samplesize_Wang(
    p_EA = p_EA,
    p_CA = p_CA,
    gamma = gamma,
    alpha = alpha,
    beta = beta,
    r = r
  )
  # Use estimates as starting values
  n_C <- n_appr[["n_C"]]
  n_E <- n_appr[["n_E"]]
  # Initiate data frame
  expand.grid(
    x_C = 0:n_C,
    x_E = 0:n_E
  ) %>%
    teststat_boschloo_NI(n_C = n_C, n_E = n_E, gamma = gamma) ->
    df
  # Calculate raised nominal level
  nom_alpha <- critval_boschloo_NI(alpha = alpha, n_C = n_C, n_E = n_E, gamma = gamma, size_acc = size_acc)["nom_alpha_mid"]
  # Calculate exact power
  df %>%
    dplyr::mutate(reject = cond_p <= nom_alpha) %>%
    power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
    exact_power
  # Decrease sample size if power is too high
  if(exact_power > 1-beta){
    while(exact_power > 1-beta){
      # Store power and nominal level of last iteration
      last_power <- exact_power
      last_alpha <- nom_alpha
      # Decrease sample size by minimal amount possible with allocation ratio r
      if (r >= 1) {
        n_C <- n_C - 1
        n_E <- ceiling(r*n_C)
      } else {
        n_E <- n_E - 1
        n_C <- ceiling(1/r*n_E)
      }
      # Initiate data frame
      expand.grid(
        x_C = 0:n_C,
        x_E = 0:n_E
      ) %>%
        teststat_boschloo_NI(n_C = n_C, n_E = n_E, gamma = gamma) ->
        df
      # Calculate raised nominal level
      nom_alpha <- critval_boschloo_NI(alpha = alpha, n_C = n_C, n_E = n_E, gamma = gamma, size_acc = size_acc)["nom_alpha_mid"]
      # Calculate exact power
      df %>%
        dplyr::mutate(reject = cond_p <= nom_alpha) %>%
        power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
        exact_power
    }
    # Go one step back: the loop stopped at the first size with too little
    # power, so the previous size is the smallest one meeting the target.
    if (r >= 1) {
      n_C <- n_C + 1
      n_E <- ceiling(r*n_C)
    } else {
      n_E <- n_E + 1
      n_C <- ceiling(1/r*n_E)
    }
    exact_power <- last_power
    nom_alpha <- last_alpha
  }
  # If power is too low: increase sample size until power is achieved
  while (exact_power < 1-beta) {
    if (r >= 1) {
      n_C <- n_C + 1
      n_E <- ceiling(r*n_C)
    } else {
      n_E <- n_E + 1
      n_C <- ceiling(1/r*n_E)
    }
    # Initiate data frame
    expand.grid(
      x_C = 0:n_C,
      x_E = 0:n_E
    ) %>%
      teststat_boschloo_NI(n_C = n_C, n_E = n_E, gamma = gamma) ->
      df
    # Calculate raised nominal level
    nom_alpha <- critval_boschloo_NI(alpha = alpha, n_C = n_C, n_E = n_E, gamma = gamma, size_acc = size_acc)["nom_alpha_mid"]
    # Calculate exact power
    df %>%
      dplyr::mutate(reject = cond_p <= nom_alpha) %>%
      power_boschloo(n_C = n_C, n_E = n_E, p_CA = p_CA, p_EA = p_EA) ->
      exact_power
  }
  return(
    list(
      n_C = n_C,
      n_E = n_E,
      nom_alpha = nom_alpha,
      exact_power = exact_power
    )
  )
}
# Exact unconditional (Boschloo-type) p-value for the non-inferiority test
# H_0: OR(p_E, p_C) <= gamma vs. H_1: OR(p_E, p_C) > gamma, evaluated at the
# observed result (x_E., x_C.). Superiority is the special case gamma = 1.
#
# Arguments:
#   x_E., x_C.   observed numbers of responders (experimental / control)
#   n_E, n_C     group sizes (positive integers)
#   gamma        odds-ratio non-inferiority margin (> 0)
#   size_acc     grid accuracy for the nuisance parameter: step 10^-size_acc
#   alternative  "greater" (default) or "less"; "less" is handled by
#                mirroring the counts and inverting gamma
#
# Value: named numeric vector with one rejection probability per grid point
#        (p_C, p_E) on the boundary of H_0; the p-value of the test is the
#        maximum of this vector.
p_value_boschloo <- function(x_E., x_C., n_E, n_C, gamma, size_acc = 3, alternative = "greater"){
  # Calculate p-values for a specific result (x_E., x_C.)
  # Check if input is correctly specified
  check.pos.int(
    size_acc,
    "size_acc has to be positive integer."
  )
  # The +1 shifts let the positive-integer check also accept counts of 0.
  check.pos.int(
    c(x_E.+1, x_C.+1, n_E, n_C, n_E-x_E.+1, n_C-x_C.+1),
    "n_E, n_C have to be positive integers and x_E. in {0, ..., n_E}, x_C. in {0, ..., n_C}."
  )
  if (
    any(
      sapply(
        list(x_E., x_C., n_E, n_C, gamma, size_acc, alternative),
        length
      ) != 1
    )
  ) {
    stop("Input values have to be single values.")
  }
  if (gamma <= 0) {
    stop("gamma has to be positive.")
  }
  if (!(alternative %in% c("less", "greater"))) {
    warning("alternative has to be \"less\" or \"greater\". Will be treated as \"greater\".")
    alternative <- "greater"
  }
  if (alternative == "less") {
    # Inverse values for other-sided hypothesis
    x_E. <- n_E - x_E.
    x_C. <- n_C - x_C.
    gamma <- 1/gamma
  }
  # Define grid for p_C
  p_C <- seq(10^-size_acc, 1-10^-size_acc, by = 10^-size_acc)
  # Find corresponding values of p_E such that (p_C, p_E) lie on the border of
  # the null hypothesis
  p_E <- 1/(1+(1-p_C)/(gamma*p_C))
  # Rejection region: all results whose conditional p-value is at most the
  # conditional p-value of the observed result.
  expand.grid(
    x_C = 0:n_C,
    x_E = 0:n_E
  ) %>%
    teststat_boschloo_NI(n_E = n_E, n_C = n_C, gamma = gamma) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(
      reject = cond_p <= cond_p[x_E == x_E. & x_C == x_C.]
    ) %>%
    power_boschloo(n_C, n_E, p_C, p_E) -> # function power actually computes the rejection probability which in this case is the p-value
    p.values
  return(p.values)
}
|
# Summarise FIMO motif hits for the peaks ranked 501-1000 of one ChIP-seq
# experiment and compare them against 100 GC-matched random backgrounds.
#
# Usage:  Rscript <script> <experiment-name>
# Reads:  top500-1000.meme_fimo_out/fimo.txt
#         top500-1000.sameGC.random.meme_fimo_out/fimo.txt
# Appends one row per motif (1-5) to
#         fimo.top500-1000.pct.summary.100samples.xTFBS.clean

args <- commandArgs(TRUE)
i <- args[1]
spp <- i  # experiment label written to the output table
print(i)

# Number of lines in the FIMO output; <= 1 means header only / no hits.
fl <- system("wc -l top500-1000.meme_fimo_out/fimo.txt | cut -f1 -d \" \"", intern = TRUE)
print(fl)
# Bug fix: this script was converted from a loop body; a bare "next" at top
# level is an error in R ("no loop for break/next"), so exit instead.
if (as.numeric(fl) <= 1) {
  quit(save = "no")
}
fimo <- read.table("top500-1000.meme_fimo_out/fimo.txt", sep = "\t")
if (nrow(fimo) > 0) {
  # Distance of each match (columns 3/4 = start/end) to position 150 (the
  # peak summit); 0 if the match overlaps the summit.
  fimo <- data.frame(
    fimo[, 1:2],
    apply(fimo, 1, function(x) {
      if (150 %in% seq(as.numeric(x[3]), as.numeric(x[4]))) {
        0
      } else {
        min(abs(as.numeric(x[4]) - 150), abs(as.numeric(x[3]) - 150))
      }
    })
  )
}

fl <- system("wc -l top500-1000.sameGC.random.meme_fimo_out/fimo.txt | cut -f1 -d \" \"", intern = TRUE)
if (as.numeric(fl) <= 1) {
  quit(save = "no")
}
fimo.random <- read.table("top500-1000.sameGC.random.meme_fimo_out/fimo.txt", sep = "\t")
if (nrow(fimo.random) > 0) {
  fimo.random <- data.frame(
    fimo.random[, 1:2],
    apply(fimo.random, 1, function(x) {
      if (150 %in% seq(as.numeric(x[3]), as.numeric(x[4]))) {
        0
      } else {
        min(abs(as.numeric(x[4]) - 150), abs(as.numeric(x[3]) - 150))
      }
    })
  )
  # Column 4: background-sample index, parsed from the sequence name
  # ("<name>.<sample>").
  fimo.random <- data.frame(
    fimo.random,
    t(apply(fimo.random, 1, function(x) unlist(strsplit(as.character(x[2]), "\\."))))[, 2]
  )
}

for (j in 1:5) {
  # Closest hit of motif j per real peak sequence ...
  motif <- as.data.frame(tapply(fimo[fimo[, 1] == j, 3], fimo[fimo[, 1] == j, 2], min))
  sample.table <- matrix(0, 1, 100)
  for (k in 1:100) {
    # ... and per sequence in random background sample k.
    motif.random <- as.data.frame(tapply(
      fimo.random[fimo.random[, 1] == j & fimo.random[, 4] == k, 3],
      fimo.random[fimo.random[, 1] == j & fimo.random[, 4] == k, 2],
      min
    ))
    sample.table[1, k] <- sum(!is.na(motif.random))
  }
  # Append: experiment, motif index, #peaks with a hit, 100 background counts.
  # (col.names spelled out; the original relied on partial matching of
  # "col.name", and on T/F instead of TRUE/FALSE.)
  write.table(
    data.frame(spp, j, sum(!is.na(motif)), sample.table),
    "fimo.top500-1000.pct.summary.100samples.xTFBS.clean",
    col.names = FALSE, row.names = FALSE, sep = "\t", quote = FALSE, append = TRUE
  )
}
}
| /backend/bin/fimo.top500-1000.random.summary.sge.R | no_license | joshuabhk/factorbook-motif-pipeline | R | false | false | 1,906 | r | args<-commandArgs(TRUE)
i<-args[1]
spp<-i
# print(i)
# if(i=="wgEncodeHaibTfbsGm12891Pu1Pcr1xAlnRep0"){ next }
# spp<-gsub(".bam.*","",i)
# info<-master[master$spp_filename==i,]
print(i)
# fl<-system(paste("wc -l /home/wangj2/anearline/encode_Jan2011/meme-chip.signalvalue/spp.optimal.",spp,"/spp.optimal.",spp,".top500-1000.fimo.txt | cut -f1 -d \" \"",sep=""),intern=T)
fl<-system( "wc -l top500-1000.meme_fimo_out/fimo.txt | cut -f1 -d \" \"",intern=T)
print(fl)
if(as.numeric(fl)<=1) { next }
fimo<-read.table("top500-1000.meme_fimo_out/fimo.txt", sep="\t")
if(nrow(fimo)>0)
{
fimo<-data.frame(fimo[,1:2],apply(fimo,1,function(x) if(150 %in% seq(as.numeric(x[3]),as.numeric(x[4]))) { 0 } else { min(abs(as.numeric(x[4])-150),abs(as.numeric(x[3])-150)) }))
}
fl<-system( "wc -l top500-1000.sameGC.random.meme_fimo_out/fimo.txt | cut -f1 -d \" \"",intern=T)
if(as.numeric(fl)<=1) { next }
fimo.random<-read.table("top500-1000.sameGC.random.meme_fimo_out/fimo.txt",sep="\t")
if(nrow(fimo.random)>0)
{
fimo.random<-data.frame(fimo.random[,1:2],apply(fimo.random,1,function(x) if(150 %in% seq(as.numeric(x[3]),as.numeric(x[4]))) { 0 } else { min(abs(as.numeric(x[4])-150),abs(as.numeric(x[3])-150)) }))
fimo.random<-data.frame(fimo.random,t(apply(fimo.random,1,function(x) unlist(strsplit(as.character(x[2]),"\\."))))[,2])
}
for(j in seq(1,5,1))
{
motif<-as.data.frame(tapply(fimo[fimo[,1]==j,3],fimo[fimo[,1]==j,2],min))
sample.table<-matrix(0,1,100)
for(k in seq(1,100,1))
{
motif.random<-as.data.frame(tapply(fimo.random[fimo.random[,1]==j & fimo.random[,4]==k,3],fimo.random[fimo.random[,1]==j & fimo.random[,4]==k,2],min))
sample.table[1,k]<-sum(!is.na(motif.random))
}
write.table(data.frame(spp,j,sum(!is.na(motif)),sample.table),paste("fimo.top500-1000.pct.summary.100samples.xTFBS.clean",sep=""),col.name=F,row.names=F,sep="\t",quote=F,append=T)
}
|
library(plotly)
data <- read.csv(results)
df <- data.frame(x = 1:3,
y = 1:3,
ymin = (1:3) - runif(3),
ymax = (1:3) + runif(3),
xmin = (1:3) - runif(3),
xmax = (1:3) + runif(3))
p <- ggplot(results, data = df,aes(x = x, y = y)) +
geom_point() +
geom_errorbarh(aes(xmin = xmin,xmax = xmax))
p <- ggplotly(p)
p
library(ggplot2)
library(tidyverse)
install.packages(Hmisc)
library(Hmisc)
results %>% ggplot(aes(y=Log.Error, x=Visualization)) +
stat_summary(fun.data = "mean_cl_boot", colour = "red", size = 0.7) +
coord_flip()
| /confidence-intervals2.R | no_license | moneill0/DataVisExperiment | R | false | false | 626 | r | library(plotly)
data <- read.csv(results)
df <- data.frame(x = 1:3,
y = 1:3,
ymin = (1:3) - runif(3),
ymax = (1:3) + runif(3),
xmin = (1:3) - runif(3),
xmax = (1:3) + runif(3))
p <- ggplot(results, data = df,aes(x = x, y = y)) +
geom_point() +
geom_errorbarh(aes(xmin = xmin,xmax = xmax))
p <- ggplotly(p)
p
library(ggplot2)
library(tidyverse)
install.packages(Hmisc)
library(Hmisc)
results %>% ggplot(aes(y=Log.Error, x=Visualization)) +
stat_summary(fun.data = "mean_cl_boot", colour = "red", size = 0.7) +
coord_flip()
|
#
# Server logic of the CONA Shiny dashboard: pulls the latest particulate
# matter / temperature / RH readings for every tagged Hologram device and
# renders a bar chart of PM1 per device.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(readr)
library(shiny)
library(RJSONIO)
library(curl)
library(base64enc)
library(ggplot2)  # bug fix: ggplot()/geom_bar() are used below but ggplot2 was never attached

# API credentials (orgid, apikey) kept out of version control.
secret_hologram <- read_delim("./secret_hologram.txt",
                              "\t", escape_double = FALSE, trim_ws = TRUE)

# Fetch the IDs of all devices carrying the project tag.
base_url <- "https://dashboard.hologram.io/api/1/devices?"
tag <- "cona2018"
built_url <- paste0(base_url,
                    "orgid=", secret_hologram$orgid, "&",
                    "tagname=", tag, "&",
                    "apikey=", secret_hologram$apikey)
req1 <- curl_fetch_memory(built_url)
jreq1 <- fromJSON(rawToChar(req1$content))$data
nsites <- length(jreq1)
curr_data <- data.frame(deviceid = seq_len(nsites))
for (i in seq_len(nsites)) {
  curr_data$deviceid[i] <- jreq1[[i]]$id
}

# Fetch the most recent measurement of every device. The payload is a
# base64-encoded JSON array; positions used: [1] PM1, [2] PM2.5, [3] PM10,
# [7] Temperature, [8] RH.
base_url <- "https://dashboard.hologram.io/api/1/csr/rdm?"
curr_data$PM1 <- -1
curr_data$PM2.5 <- -1
curr_data$PM10 <- -1
curr_data$Temperature <- -99
curr_data$RH <- -1
curr_data$Timestamp <- as.POSIXct("2018-05-01 00:00:00", tz = 'UTC')
for (i in seq_len(nsites)) {
  built_url <- paste0(base_url,
                      "deviceid=", curr_data$deviceid[i], "&",
                      "limit=1&",
                      "orgid=", secret_hologram$orgid, "&",
                      "apikey=", secret_hologram$apikey)
  req2 <- curl_fetch_memory(built_url)
  jreq2 <- fromJSON(rawToChar(req2$content))$data
  payload <- fromJSON(rawToChar(base64decode(fromJSON(jreq2[[1]]$data)$data)))
  curr_data$Timestamp[i] <- as.POSIXct(jreq2[[1]]$logged, tz = 'UTC')
  curr_data$PM1[i] <- payload[1]
  curr_data$PM2.5[i] <- payload[2]
  curr_data$PM10[i] <- payload[3]
  curr_data$Temperature[i] <- payload[7]
  curr_data$RH[i] <- payload[8]
}

# (The original also built an identical, unused plot object `c_plot` at load
# time; that dead code has been removed.)
shinyServer(function(input, output) {
  output$distPlot <- renderPlot({
    # Latest PM1 per device, labelled with the measurement timestamp.
    ggplot(data = curr_data, aes(x = deviceid)) +
      geom_bar(aes(y = PM1), stat = "identity") +
      geom_text(aes(y = PM1, label = Timestamp), hjust = 0, vjust = 0) +
      ylim(0, max(curr_data$PM1))
  })
})
| /server.R | permissive | guolivar/cona-arrowtown2019 | R | false | false | 2,424 | r | #
# This is the server logic of a Shiny web application.
#
# Server logic of the CONA Shiny dashboard: pulls the latest particulate
# matter / temperature / RH readings for every tagged Hologram device and
# renders a bar chart of PM1 per device.
#
#    http://shiny.rstudio.com/
#

library(readr)
library(shiny)
library(RJSONIO)
library(curl)
library(base64enc)
library(ggplot2)  # bug fix: ggplot()/geom_bar() are used below but ggplot2 was never attached

# API credentials (orgid, apikey) kept out of version control.
secret_hologram <- read_delim("./secret_hologram.txt",
                              "\t", escape_double = FALSE, trim_ws = TRUE)

# Fetch the IDs of all devices carrying the project tag.
base_url <- "https://dashboard.hologram.io/api/1/devices?"
tag <- "cona2018"
built_url <- paste0(base_url,
                    "orgid=", secret_hologram$orgid, "&",
                    "tagname=", tag, "&",
                    "apikey=", secret_hologram$apikey)
req1 <- curl_fetch_memory(built_url)
jreq1 <- fromJSON(rawToChar(req1$content))$data
nsites <- length(jreq1)
curr_data <- data.frame(deviceid = seq_len(nsites))
for (i in seq_len(nsites)) {
  curr_data$deviceid[i] <- jreq1[[i]]$id
}

# Fetch the most recent measurement of every device. The payload is a
# base64-encoded JSON array; positions used: [1] PM1, [2] PM2.5, [3] PM10,
# [7] Temperature, [8] RH.
base_url <- "https://dashboard.hologram.io/api/1/csr/rdm?"
curr_data$PM1 <- -1
curr_data$PM2.5 <- -1
curr_data$PM10 <- -1
curr_data$Temperature <- -99
curr_data$RH <- -1
curr_data$Timestamp <- as.POSIXct("2018-05-01 00:00:00", tz = 'UTC')
for (i in seq_len(nsites)) {
  built_url <- paste0(base_url,
                      "deviceid=", curr_data$deviceid[i], "&",
                      "limit=1&",
                      "orgid=", secret_hologram$orgid, "&",
                      "apikey=", secret_hologram$apikey)
  req2 <- curl_fetch_memory(built_url)
  jreq2 <- fromJSON(rawToChar(req2$content))$data
  payload <- fromJSON(rawToChar(base64decode(fromJSON(jreq2[[1]]$data)$data)))
  curr_data$Timestamp[i] <- as.POSIXct(jreq2[[1]]$logged, tz = 'UTC')
  curr_data$PM1[i] <- payload[1]
  curr_data$PM2.5[i] <- payload[2]
  curr_data$PM10[i] <- payload[3]
  curr_data$Temperature[i] <- payload[7]
  curr_data$RH[i] <- payload[8]
}

# (The original also built an identical, unused plot object `c_plot` at load
# time; that dead code has been removed.)
shinyServer(function(input, output) {
  output$distPlot <- renderPlot({
    # Latest PM1 per device, labelled with the measurement timestamp.
    ggplot(data = curr_data, aes(x = deviceid)) +
      geom_bar(aes(y = PM1), stat = "identity") +
      geom_text(aes(y = PM1, label = Timestamp), hjust = 0, vjust = 0) +
      ylim(0, max(curr_data$PM1))
  })
})
|
# Fuzz-harness replay: feed a single recorded input (an integer byte count)
# to the package-internal decoder and print the decoded structure.
# mcga is a project package, so this snippet is not runnable in isolation.
testlist <- list(b = 19L)
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result)
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz.R
\name{cluster_heatmap}
\alias{cluster_heatmap}
\title{Create clustered Heatmap}
\usage{
cluster_heatmap(d, n, colorscheme = c(rev(google_colors$Blue$accent[4]),
"black", google_colors$Red$accent[4]), square = F, show = T)
}
\arguments{
\item{d}{Tibble with an ID column, a key column and a value column}
\item{n}{number of clusters}
\item{colorscheme}{The Colorscheme for the heatmap}
\item{square}{Make the tiles square (TRUE or FALSE)}
\item{show}{Print plot}
}
\description{
Create clustered Heatmap
}
\examples{
}
| /man/cluster_heatmap.Rd | no_license | joelgsponer/waRRior2 | R | false | true | 609 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz.R
\name{cluster_heatmap}
\alias{cluster_heatmap}
\title{Create clustered Heatmap}
\usage{
cluster_heatmap(d, n, colorscheme = c(rev(google_colors$Blue$accent[4]),
"black", google_colors$Red$accent[4]), square = F, show = T)
}
\arguments{
\item{d}{Tibble with an ID column, a key column and a value column}
\item{n}{number of clusters}
\item{colorscheme}{The Colorscheme for the heatmap}
\item{square}{Make the tiles square (TRUE or FALSE)}
\item{show}{Print plot}
}
\description{
Create clustered Heatmap
}
\examples{
}
|
### CLUSTERS ###
# Note: this code works exclusively for 3 different clusters!
# Import ggplot2 package
library(ggplot2)
# Import ggalt package
library(ggalt)
# Import ggfortify package
library(ggfortify)
# Pre-set the classic theme
theme_set(theme_classic())
# Load and name data set
df <- read.csv()
# Apply the prcomp function
pca_mod <- prcomp(df)
# Name the categoric column and select it from original data set
df_pc <- data.frame(pca_mod$x, = df$)
# Type in the first out of the three categories (classes)
df_pc_1 <- df_pc[df_pc$ == "", ]
# Type in the second out of the three categories (classes)
df_pc_2 <- df_pc[df_pc$ == "", ]
# Type in the third out of the three categories (classes)
df_pc_3 <- df_pc[df_pc$ == "", ]
ggplot(data = df_pc, # do not modify this line of code
aes(x = PC1, # do not modify this line of code
y = PC2, # do not modify this line of code
col = )) + # set the color variable
geom_point(aes(shape = ), # set the shape variable (same as above)
size = ) + # set the shapes' sizes
labs(title = "", # type in the plot title
subtitle = "", # type in the plot subtitle
caption = "") + # type in additional captions
coord_cartesian(xlim = 1.2 * c(min(df_pc$PC1), max(df_pc$PC1)), # do not modify this line of code
ylim = 1.2 * c(min(df_pc$PC2), max(df_pc$PC2))) + # do not modify this line of code
geom_encircle(data = df_pc_1, # do not modify this line of code
aes(x = PC1, # do not modify this line of code
y = PC2)) + # do not modify this line of code
geom_encircle(data = df_pc_2, # do not modify this line of code
aes(x = PC1, # do not modify this line of code
y = PC2)) + # do not modify this line of code
geom_encircle(data = df_pc_3, # do not modify this line of code
aes(x = PC1, # do not modify this line of code
y = PC2)) # do not modify this line of code
# EXAMPLE
library(ggplot2)
library(ggalt)
library(ggfortify)
theme_set(theme_classic())
df <- iris[c(1, 2, 3, 4)]
pca_mod <- prcomp(df)
df_pc <- data.frame(pca_mod$x, Species = iris$Species)
df_pc_vir <- df_pc[df_pc$Species == "virginica", ]
df_pc_set <- df_pc[df_pc$Species == "setosa", ]
df_pc_ver <- df_pc[df_pc$Species == "versicolor", ]
# Worked example of the template above using the iris data: scatter the
# first two principal components coloured by species, and encircle each
# species cluster.  Fixes the subtitle typo "componentes" -> "components".
ggplot(data = df_pc,
       aes(x = PC1,
           y = PC2,
           col = Species)) +
  geom_point(aes(shape = Species),
             size = 2) +
  labs(title = "Clusters",
       subtitle = "With the principal components PC1 and PC2 as X and Y axis",
       caption = "Source: Iris dataset") +
  coord_cartesian(xlim = 1.2 * c(min(df_pc$PC1), max(df_pc$PC1)),
                  ylim = 1.2 * c(min(df_pc$PC2), max(df_pc$PC2))) +
  geom_encircle(data = df_pc_vir,
                aes(x = PC1,
                    y = PC2)) +
  geom_encircle(data = df_pc_set,
                aes(x = PC1,
                    y = PC2)) +
  geom_encircle(data = df_pc_ver,
                aes(x = PC1,
                    y = PC2))
| /7. Groups/Clusters.R | no_license | rsalaza4/Data-Visualization-with-ggplot2 | R | false | false | 4,354 | r | ### CLUSTERS ###
# Note: this code works exclusively for 3 different clusters!
# Import ggplot2 package
library(ggplot2)
# Import ggalt package
library(ggalt)
# Import ggfortify package
library(ggfortify)
# Pre-set the classic theme
theme_set(theme_classic())
# Load and name data set
df <- read.csv()
# Apply the prcomp function
pca_mod <- prcomp(df)
# Name the categoric column and select it from original data set
df_pc <- data.frame(pca_mod$x, = df$)
# Type in the first out of the three categories (classes)
df_pc_1 <- df_pc[df_pc$ == "", ]
# Type in the second out of the three categories (classes)
df_pc_2 <- df_pc[df_pc$ == "", ]
# Type in the third out of the three categories (classes)
df_pc_3 <- df_pc[df_pc$ == "", ]
ggplot(data = df_pc, # do not modify this line of code
aes(x = PC1, # do not modify this line of code
y = PC2, # do not modify this line of code
col = )) + # set the color variable
geom_point(aes(shape = ), # set the shape variable (same as above)
size = ) + # set the sahpes sizes
labs(title = "", # type in the plot title
subtitle = "", # type in the plot subtitle
caption = "") + # type in additional captions
coord_cartesian(xlim = 1.2 * c(min(df_pc$PC1), max(df_pc$PC1)), # do not modify this line of code
ylim = 1.2 * c(min(df_pc$PC2), max(df_pc$PC2))) + # do not modify this line of code
geom_encircle(data = df_pc_1, # do not modify this line of code
aes(x = PC1, # do not modify this line of code
y = PC2)) + # do not modify this line of code
geom_encircle(data = df_pc_2, # do not modify this line of code
aes(x = PC1, # do not modify this line of code
y = PC2)) + # do not modify this line of code
geom_encircle(data = df_pc_3, # do not modify this line of code
aes(x = PC1, # do not modify this line of code
y = PC2)) # do not modify this line of code
# EXAMPLE
library(ggplot2)
library(ggalt)
library(ggfortify)
theme_set(theme_classic())
df <- iris[c(1, 2, 3, 4)]
pca_mod <- prcomp(df)
df_pc <- data.frame(pca_mod$x, Species = iris$Species)
df_pc_vir <- df_pc[df_pc$Species == "virginica", ]
df_pc_set <- df_pc[df_pc$Species == "setosa", ]
df_pc_ver <- df_pc[df_pc$Species == "versicolor", ]
ggplot(data = df_pc,
aes(x = PC1,
y = PC2,
col = Species)) +
geom_point(aes(shape = Species),
size = 2) +
labs(title = "Clusters",
subtitle = "With the principal componentes PC1 and PC2 as X and Y axis",
caption = "Source: Iris dataset") +
coord_cartesian(xlim = 1.2 * c(min(df_pc$PC1), max(df_pc$PC1)),
ylim = 1.2 * c(min(df_pc$PC2), max(df_pc$PC2))) +
geom_encircle(data = df_pc_vir,
aes(x = PC1,
y = PC2)) +
geom_encircle(data = df_pc_set,
aes(x = PC1,
y = PC2)) +
geom_encircle(data = df_pc_ver,
aes(x = PC1,
y = PC2))
|
## This file defines common functions used for data processing.
## Read a tab-separated benchmark file with no header row and label its
## columns.  `row_names` supplies the column names (despite the argument's
## name); a running row identifier is appended as column `rid`.
load_data_file <- function (file, row_names) {
  tbl <- read.table(file,
                    sep = "\t",
                    header = FALSE,
                    col.names = row_names,
                    fill = TRUE)
  tbl$rid <- seq_len(nrow(tbl))
  tbl
}
## Translate raw VM identifiers in the `VM` factor into the short display
## names used in the plots; levels without a mapping are left untouched.
prepare_vm_names <- function(data) {
  vm_map <- list(
    "Java"                      = "Java",
    "PyPy"                      = "PyPy",
    "RPySOM-recursive-jit"      = "RPySOM",
    "RPySOM-jit"                = "RPySOM",
    "RTruffleSOM-jit"           = "RTruffleSOM",
    "TruffleSOM-graal"          = "TruffleSOM",
    "TruffleSOM-graal-no-split" = "TruffleSOM.ns",
    "SOMpp"                     = "SOM++")
  levels(data$VM) <- map_names(levels(data$VM), vm_map)
  data
}
## Translate experiment identifiers in the `Var` factor into the short
## display names used in the plots; levels without a mapping stay as-is.
prepare_exp_names <- function(data) {
  exp_map <- list(
    "baseline"                               = "baseline",
    "without-args-in-frame"                  = "typed args",
    "without-array-strategies"               = "array strategies",
    "without-blocks-without-context"         = "min. escaping closures",
    "without-catch-nonlocal-return-node"     = "catch-return nodes",
    "without-control-specialization"         = "lower control structures",
    "without-custom-PICs"                    = "inline caching",
    "without-eager-primitives"               = "inline basic ops.",
    "without-execute-void"                   = "opt. side-effect-free",
    "without-global-caching-and-opt"         = "cache globals",
    "without-local-nonlocal-var-distinction" = "opt. local vars",
    "without-object-layout"                  = "typed fields",
    "without-splitting-frame-variables"      = "min. escaping vars",
    "without-unessential-lowering-prims"     = "lower common ops",
    "without-var-access-specialization"      = "typed vars")
  levels(data$Var) <- map_names(levels(data$Var), exp_map)
  data
}
## Replace each element of `old_names` that has an entry in the named list
## `name_map`; elements without an entry are returned unchanged.
## BUGFIX: seq_along() instead of 1:length() — the latter iterates c(1, 0)
## for empty input and raised a subscript-out-of-bounds error.
map_names <- function(old_names, name_map) {
  for (i in seq_along(old_names)) {
    old_name <- old_names[[i]]
    if (!is.null(name_map[[old_name]])) {
      old_names[i] <- name_map[[old_name]]
    }
  }
  old_names
}
| /papers/oopsla-2015/scripts/data-processing.R | no_license | smarr/BenchR | R | false | false | 2,467 | r | ## This file defines common functions used for data processing.
load_data_file <- function (file, row_names) {
bench <- read.table(file, sep="\t", header=FALSE, col.names=row_names, fill=TRUE)
bench$rid = seq_len(nrow(bench))
bench
}
prepare_vm_names <- function(data) {
name_map <- list("Java" = "Java",
"PyPy" = "PyPy",
"RPySOM-recursive-jit" = "RPySOM",
"RPySOM-jit" = "RPySOM",
"RTruffleSOM-jit" = "RTruffleSOM",
"TruffleSOM-graal" = "TruffleSOM",
"TruffleSOM-graal-no-split" = "TruffleSOM.ns",
"SOMpp" = "SOM++")
# Rename
levels(data$VM) <- map_names(
levels(data$VM),
name_map)
data
}
prepare_exp_names <- function(data) {
name_map <- list("baseline" = "baseline",
"without-args-in-frame" = "typed args",
"without-array-strategies" = "array strategies",
"without-blocks-without-context" = "min. escaping closures",
"without-catch-nonlocal-return-node" = "catch-return nodes",
"without-control-specialization" = "lower control structures",
"without-custom-PICs" = "inline caching",
"without-eager-primitives" = "inline basic ops.",
"without-execute-void" = "opt. side-effect-free",
"without-global-caching-and-opt" = "cache globals",
"without-local-nonlocal-var-distinction" = "opt. local vars",
"without-object-layout" = "typed fields",
"without-splitting-frame-variables" = "min. escaping vars",
"without-unessential-lowering-prims" = "lower common ops",
"without-var-access-specialization" = "typed vars")
levels(data$Var) <- map_names(levels(data$Var), name_map)
data
}
## Replace each element of `old_names` that has an entry in the named list
## `name_map`; elements without an entry are returned unchanged.
## BUGFIX: seq_along() instead of 1:length() — the latter iterates c(1, 0)
## for empty input and raised a subscript-out-of-bounds error.
map_names <- function(old_names, name_map) {
  for (i in seq_along(old_names)) {
    old_name <- old_names[[i]]
    if (!is.null(name_map[[old_name]])) {
      old_names[i] <- name_map[[old_name]]
    }
  }
  old_names
}
|
## Load the household power-consumption data once per session: skip the
## expensive read if the cached objects already exist in the workspace.
## Use `||` (scalar, short-circuiting) rather than elementwise `|` in an
## if() condition.
if (!exists("limited") || !exists("times")) {
  data <- read.delim("household_power_consumption.txt", sep = ";",
                     na.strings = "?",
                     colClasses = c("character",
                                    "character",
                                    rep("numeric", 7)))
  # Keep only the two target days, 1-2 February 2007 (dates are d/m/Y).
  # `|` here is correct: it is an elementwise mask over rows.
  limited <- data[data["Date"] == "1/2/2007" | data["Date"] == "2/2/2007", ]
  # Combine Date + Time into POSIXlt timestamps for the time-series plots.
  times <- strptime(paste(limited[, "Date"], limited[, "Time"]), "%d/%m/%Y %H:%M:%S")
}
data <- read.delim("household_power_consumption.txt", sep=";",
na.strings="?",
colClasses=c("character",
"character",
"numeric",
"numeric",
"numeric",
"numeric",
"numeric",
"numeric",
"numeric"))
limited <- data[data["Date"]=="1/2/2007" | data["Date"]=="2/2/2007",]
times <- strptime(paste(limited[,"Date"], limited[,"Time"]),"%d/%m/%Y %H:%M:%S")
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tz.R
\name{dtt_set_tz}
\alias{dtt_set_tz}
\alias{dtt_set_tz.POSIXct}
\title{Set Time Zone}
\usage{
dtt_set_tz(x, tz = dtt_default_tz(), ...)
\method{dtt_set_tz}{POSIXct}(x, tz = dtt_default_tz(), ...)
}
\arguments{
\item{x}{A date/time vector.}
\item{tz}{A string of the new time zone.}
\item{...}{Unused.}
}
\value{
The date time vector with the new time zone.
}
\description{
Sets the time zone for a date time vector without adjusting the clock time.
Equivalent to \code{lubridate::force_tz()}.
}
\section{Methods (by class)}{
\itemize{
\item \code{POSIXct}: Set the time zone for a POSIXct vector
}}
\examples{
dtt_set_tz(as.POSIXct("1970-01-01", tz = "Etc/GMT+8"), tz = "UTC")
}
\seealso{
\code{\link[=dtt_adjust_tz]{dtt_adjust_tz()}}
Other tz:
\code{\link{dtt_adjust_tz}()},
\code{\link{dtt_sys_tz}()},
\code{\link{dtt_tz}()}
}
\concept{tz}
| /man/dtt_set_tz.Rd | permissive | minghao2016/dttr2 | R | false | true | 931 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tz.R
\name{dtt_set_tz}
\alias{dtt_set_tz}
\alias{dtt_set_tz.POSIXct}
\title{Set Time Zone}
\usage{
dtt_set_tz(x, tz = dtt_default_tz(), ...)
\method{dtt_set_tz}{POSIXct}(x, tz = dtt_default_tz(), ...)
}
\arguments{
\item{x}{A date/time vector.}
\item{tz}{A string of the new time zone.}
\item{...}{Unused.}
}
\value{
The date time vector with the new time zone.
}
\description{
Sets the time zone for a date time vector without adjusting the clock time.
Equivalent to \code{lubridate::force_tz()}.
}
\section{Methods (by class)}{
\itemize{
\item \code{POSIXct}: Set the time zone for a POSIXct vector
}}
\examples{
dtt_set_tz(as.POSIXct("1970-01-01", tz = "Etc/GMT+8"), tz = "UTC")
}
\seealso{
\code{\link[=dtt_adjust_tz]{dtt_adjust_tz()}}
Other tz:
\code{\link{dtt_adjust_tz}()},
\code{\link{dtt_sys_tz}()},
\code{\link{dtt_tz}()}
}
\concept{tz}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biomart.R
\name{get_genes_of_goterm_helper}
\alias{get_genes_of_goterm_helper}
\title{Get genes associated with GO-term based on BiomaRt
This will get the genes of the selected GO-term and
\emph{all} child-terms.}
\usage{
get_genes_of_goterm_helper(go_accession, ensembl)
}
\arguments{
\item{go_accession}{ID of GO term}
\item{ensembl}{Biomart connection}
}
\description{
Get genes associated with GO-term based on BiomaRt
This will get the genes of the selected GO-term and
\emph{all} child-terms.
}
| /man/get_genes_of_goterm_helper.Rd | permissive | paulklemm/rmyknife | R | false | true | 580 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biomart.R
\name{get_genes_of_goterm_helper}
\alias{get_genes_of_goterm_helper}
\title{Get genes associated with GO-term based on BiomaRt
This will get the genes of the selected GO-term and
\emph{all} child-terms.}
\usage{
get_genes_of_goterm_helper(go_accession, ensembl)
}
\arguments{
\item{go_accession}{ID of GO term}
\item{ensembl}{Biomart connection}
}
\description{
Get genes associated with GO-term based on BiomaRt
This will get the genes of the selected GO-term and
\emph{all} child-terms.
}
|
setwd("C:/Users/nade/Desktop/Data Analysis R")
# 1. Series: Is a given species more or less likely to
# become extinct in "Star Trek: The Original Series"
# or "Star Trek: The Next Generation?
# 2. Alignment: Is a given species more or less likely to become
# extinct if it is a friend or foe of the Enterprise (the main
# starship on "Star Trek")?
# 3. Series x Alignment: Is there an interaction between these variables?
# install.packages("purrr")
### LOAD PACKAGES ###
library(dplyr)
library(purrr)
# We have three data files, and we want to read them at the same time
# and then combine them into one data file
# As a result, we need to use the purrr library
# purrr allows to read multiple files with list.files() method
# then perform the same action on each file with map() method
# then use reduce() method to combine all of them into one data frame
# read.table() assumes all files have the same number of columns and same name of columns
# "na.strings = c("", NA)" empty cells are named NA
### READ IN DATA ###
data = list.files(path = "data/rcourse4", full.names = T) %>%
map(read.table, header = T, sep = "\t", na.strings = c("", NA)) %>%
reduce(rbind)
# note I built a new folder called rcourse4 in the path, because all the data
# from lesson1 to lesson4 are mixed together, the original Page's code won't work
# because lesson1,2,3's data frames will be read as well, thus column numbers won't
# match
### 1. First, we only want to look at data from "The Original Series" and
# "The Next Generation", so we're going to drop any data from "The Animated Series"
# The function factor is used to encode a vector as a factor (the terms 'category'
# and 'enumerated type' are also used for factors).
### CLEAN DATA ###
# Currently there is a column called "conservation", which is coded for the
# likelihood of a species becoming extinct. The codings are: 1) LC - least concern,
# 2) NT - near threatened, 3) VU - vulnerable, 4) EN - endangered, 5) CR - critically
# endangered, 6) EW - extinct in the wild, and 7) EX - extinct. If you look at the data
# you'll see that most species have the classification of "LC", so for our analysis
# we're going to look at "LC" species versus all other species as our dependent variable.
# First we're going to filter out any data where "conservation" is an "NA"
# We can do this with the handy "!is.na()" call. Recall that an "!" means "is not"
# so what we're saying is "if it's not an "NA" keep it", this was why we wanted
# to make sure empty cells were read in as "NA"s earlier
# Next we'll make a new column called "extinct" for our logistic regression
# using the "mutate()" call, where an "LC" species gets a "0", not likely to
# become extinct, and all other species a "1", for possible to become extinct
# There's still one more thing we need to do in our cleaning script.
# The data reports all species that appear or are discussed in a given episode.
# As a result, some species occur more than others if they are in several episodes.
# We don't want to bias our data towards species that appear on the show a lot,
# so we're only going to include each species once per series. To do this we'll
# do a "group_by()" call including "series", "alignment", and "alien", we then
# do an "arrange()" call to order the data by episode number, and finally
# we use a "filter()" call with "row_number()" to pull out only the first row,
# or the first occurrence of a given species within our other variables.
# For a more detailed explanation of the code watch the video above.
# The last line ungroups our data.
# Build the analysis data set: keep only TOS/TNG episodes, clear-cut
# friend/foe species with a known conservation status, binarise extinction
# risk, and count each species at most once per series x alignment cell.
data_clean = data %>%
filter(series != "tas") %>%                                # drop "The Animated Series"
mutate(series = factor(series)) %>%                        # re-factor to drop the unused level
filter(alignment == "foe" | alignment == "friend") %>%     # keep clear-cut alignments only
mutate(alignment = factor(alignment)) %>%                  # re-factor likewise
filter(!is.na(conservation)) %>%                           # conservation status must be known
mutate(extinct = ifelse(conservation == "LC", 0, 1)) %>%   # DV: 0 = least concern, 1 = at risk
group_by(series, alignment, alien) %>%
arrange(episode) %>%                                       # order episodes within each group
filter(row_number() == 1) %>%                              # first appearance only (deduplicate)
ungroup()                                                  # release grouping for later analyses
### DONE ###
| /scripts/rcourse_lesson4_cleaning.R | no_license | kangnade/Data-Analysis-R | R | false | false | 4,019 | r | setwd("C:/Users/nade/Desktop/Data Analysis R")
# 1. Series: Is a given species more or less likely to
# become extinct in "Star Trek: The Original Series"
# or "Star Trek: The Next Generation?
# 2. Alignment: Is a given species more or less likely to become
# extinct if it is a friend or foe of the Enterprise (the main
# starship on "Star Trek")?
# 3. Series x Alignment: Is there an interaction between these variables?
# install.packages("purrr")
### LOAD PACKAGES ###
library(dplyr)
library(purrr)
# We have three data files, and we want to read them at the same time
# and then combine them into one data file
# As a result, we need to use the purrr library
# purrr allows to read multiple files with list.files() method
# then perform the same action on each file with map() method
# then use reduce() method to combine all of them into one data frame
# read.table() assumes all files have the same number of columns and same name of columns
# "na.strings = c("", NA)" empty cells are named NA
### READ IN DATA ###
data = list.files(path = "data/rcourse4", full.names = T) %>%
map(read.table, header = T, sep = "\t", na.strings = c("", NA)) %>%
reduce(rbind)
# note I built a new folder called rcourse4 in the path, because all the data
# from lesson1 to lesson4 are mixed together, the original Page's code won't work
# because lesson1,2,3's data frames will be read as well, thus column numbers won't
# match
### 1. First, we only want to look at data from "The Original Series" and
# "The Next Generation", so we're going to drop any data from "The Animated Series"
# The function factor is used to encode a vector as a factor (the terms 'category'
# and 'enumerated type' are also used for factors).
### CLEAN DATA ###
# Currently there is a column called "conservation", which is coded for the
# likelihood of a species becoming extinct. The codings are: 1) LC - least concern,
# 2) NT - near threatened, 3) VU - vulnerable, 4) EN - endangered, 5) CR - critically
# endangered, 6) EW - extinct in the wild, and 7) EX - extinct. If you look at the data
# you'll see that most species have the classification of "LC", so for our analysis
# we're going to look at "LC" species versus all other species as our dependent variable.
# First we're going to filter out any data where "conservation" is an "NA"
# We can do this with the handy "!is.na()" call. Recall that an "!" means "is not"
# so what we're saying is "if it's not an "NA" keep it", this was why we wanted
# to make sure empty cells were read in as "NA"s earlier
# Next we'll make a new column called "extinct" for our logistic regression
# using the "mutate()" call, where an "LC" species gets a "0", not likely to
# become extinct, and all other species a "1", for possible to become extinct
# There's still one more thing we need to do in our cleaning script.
# The data reports all species that appear or are discussed in a given episode.
# As a result, some species occur more than others if they are in several episodes.
# We don't want to bias our data towards species that appear on the show a lot,
# so we're only going to include each species once per series. To do this we'll
# do a "group_by()" call including "series", "alignment", and "alien", we then
# do an "arrange()" call to order the data by episode number, and finally
# we use a "filter()" call with "row_number()" to pull out only the first row,
# or the first occurrence of a given species within our other variables.
# For a more detailed explanation of the code watch the video above.
# The last line ungroups our data.
data_clean = data %>%
filter(series != "tas") %>%
mutate(series = factor(series)) %>%
filter(alignment == "foe" | alignment == "friend") %>%
mutate(alignment = factor(alignment)) %>%
filter(!is.na(conservation)) %>%
mutate(extinct = ifelse(conservation == "LC", 0, 1)) %>%
group_by(series, alignment, alien) %>%
arrange(episode) %>%
filter(row_number() == 1) %>%
ungroup()
### DONE ###
|
#' @title List available metagenomes on NCBI Genbank
#' @description NCBI Genbank hosts entire metagenomes for several
#' metagenome projects. This function lists the metagenomes that can
#' subsequently be retrieved with \code{\link{getMetaGenomes}}.
#' @param details a boolean value specifying whether only the scientific names
#' of stored metagenomes shall be returned (\code{details = FALSE}) or all
#' information such as "organism_name","bioproject",
#' etc (\code{details = TRUE}).
#' @author Hajk-Georg Drost
#' @examples
#' # retrieve available metagenome projects at NCBI Genbank
#' listMetaGenomes()
#' # retrieve detailed information on available metagenome projects
#' # at NCBI Genbank
#' listMetaGenomes(details = TRUE)
#' @seealso \code{\link{getMetaGenomes}}, \code{\link{getMetaGenomeSummary}}
#' @export
listMetaGenomes <- function(details = FALSE) {
    # The summary table is the single source of truth; derive both views from it.
    summary_tbl <- getMetaGenomeSummary()
    if (details) {
        summary_tbl
    } else {
        unique(summary_tbl$organism_name)
    }
}
| /R/listMetaGenomes.R | no_license | arpankbasak/biomartr | R | false | false | 1,140 | r | #' @title List available metagenomes on NCBI Genbank
#' @description List available metagenomes on NCBI genbank. NCBI genbank
#' allows users to download entire metagenomes of several metagenome projects.
#' This function lists all available metagenomes that can then be downloaded via
#' \code{\link{getMetaGenomes}}.
#' @param details a boolean value specifying whether only the scientific names
#' of stored metagenomes shall be returned (\code{details = FALSE}) or all
#' information such as "organism_name","bioproject",
#' etc (\code{details = TRUE}).
#' @author Hajk-Georg Drost
#' @examples
#' # retrieve available metagenome projects at NCBI Genbank
#' listMetaGenomes()
#' # retrieve detailed information on available metagenome projects
#' # at NCBI Genbank
#' listMetaGenomes(details = TRUE)
#' @seealso \code{\link{getMetaGenomes}}, \code{\link{getMetaGenomeSummary}}
#' @export
# List metagenome projects available on NCBI Genbank.  With `details = TRUE`
# the full summary table is returned; otherwise only the unique organism names.
listMetaGenomes <- function(details = FALSE) {
    summary_tbl <- getMetaGenomeSummary()
    if (details) {
        summary_tbl
    } else {
        unique(summary_tbl$organism_name)
    }
}
|
### Copyright Okiriza Wibisono & Ali Akbar S.
### July 2015
# Shiny UI for the linear-regression simulation: a sidebar of data-generation
# and display controls, and a main panel showing coefficient/metric tables
# next to the interactive plot.
library(shiny)
shinyUI(fluidPage(
titlePanel('Linear Regression Simulation'),
sidebarLayout(
sidebarPanel(
# Data-generation parameters for the simulated points.
tags$div(
numericInput('num_mean', 'Mean', value = 0, step = 0.01),
numericInput('num_sd', 'Standard deviation', value = 1.0, step = 0.01),
style = 'width: 200px'
),
sliderInput('num_randomness', 'Randomness', value = 20, min = 0, max = 100),
sliderInput('sld_num_data', 'Number of data points', min = 2, max = 100, value = 50),
actionButton('btn_generate', 'Generate new data', style='background-color: steelblue; color: white'),
shiny::hr(),
# Display options: which fitted line(s) to draw and whether to show residuals.
radioButtons('rad_show_line', 'Line to show', selected = 'draw', choices = list(
'None' = 'none',
'Draw (try clicking on the plot)' = 'draw',
'Least squares fit' = 'lse',
'Both' = 'both'
)),
checkboxInput('chk_show_errors', 'Show deviations?', value=FALSE)
),
mainPanel(
h3("Results"),
# Six narrow columns forming a side-by-side comparison table:
# coefficients and evaluation metrics for the user-drawn vs. LSE line.
fluidRow(
column(2,
h4("Coefficients"),
h5(em(a("Intercept", href = "https://en.wikipedia.org/wiki/Y-intercept"))),
h5(em(a("Slope", href = "https://en.wikipedia.org/wiki/Slope")))
),
column(2,
h4("Your line", style = "color: blue; text-decoration: underline"),
h5(strong(textOutput("user_intercept"))),
h5(strong(textOutput("user_slope")))
),
column(2,
h4("LSE line", style = "color: green; text-decoration: underline"),
h5(textOutput("lse_intercept")),
h5(textOutput("lse_slope"))
),
column(2,
h4("Eval Metrics"),
h5(em(a("RMSE", href = "https://en.wikipedia.org/wiki/Root-mean-square_deviation"))),
h5(em("RSE")),
h5(em(a("MAE", href = "https://en.wikipedia.org/wiki/Mean_absolute_error"))),
h5(em(a("R-squared", href = "https://en.wikipedia.org/wiki/Coefficient_of_determination")))
),
column(2,
h4("Your line", style = "color: blue; text-decoration: underline"),
h5(strong(textOutput("user_RMSE"))),
h5(strong(textOutput("user_RSE"))),
h5(strong(textOutput("user_MAE"))),
h5(strong(textOutput("user_R2")))
),
column(2,
h4("LSE line", style = "color: green; text-decoration: underline"),
h5(textOutput("lse_RMSE")),
h5(textOutput("lse_RSE")),
h5(textOutput("lse_MAE")),
h5(textOutput("lse_R2"))
)
),
# Clicks on the plot are sent to the server as 'plot_click' (line drawing).
plotOutput('plot', click = 'plot_click')
)
)
))
| /skrip/shiny/linreg/ui.R | no_license | GANGGAANURAGA/tentangdata | R | false | false | 3,176 | r | ### Copyright Okiriza Wibisono & Ali Akbar S.
### July 2015
library(shiny)
shinyUI(fluidPage(
titlePanel('Linear Regression Simulation'),
sidebarLayout(
sidebarPanel(
tags$div(
numericInput('num_mean', 'Mean', value = 0, step = 0.01),
numericInput('num_sd', 'Standard deviation', value = 1.0, step = 0.01),
style = 'width: 200px'
),
sliderInput('num_randomness', 'Randomness', value = 20, min = 0, max = 100),
sliderInput('sld_num_data', 'Number of data points', min = 2, max = 100, value = 50),
actionButton('btn_generate', 'Generate new data', style='background-color: steelblue; color: white'),
shiny::hr(),
radioButtons('rad_show_line', 'Line to show', selected = 'draw', choices = list(
'None' = 'none',
'Draw (try clicking on the plot)' = 'draw',
'Least squares fit' = 'lse',
'Both' = 'both'
)),
checkboxInput('chk_show_errors', 'Show deviations?', value=FALSE)
),
mainPanel(
h3("Results"),
fluidRow(
column(2,
h4("Coefficients"),
h5(em(a("Intercept", href = "https://en.wikipedia.org/wiki/Y-intercept"))),
h5(em(a("Slope", href = "https://en.wikipedia.org/wiki/Slope")))
),
column(2,
h4("Your line", style = "color: blue; text-decoration: underline"),
h5(strong(textOutput("user_intercept"))),
h5(strong(textOutput("user_slope")))
),
column(2,
h4("LSE line", style = "color: green; text-decoration: underline"),
h5(textOutput("lse_intercept")),
h5(textOutput("lse_slope"))
),
column(2,
h4("Eval Metrics"),
h5(em(a("RMSE", href = "https://en.wikipedia.org/wiki/Root-mean-square_deviation"))),
h5(em("RSE")),
h5(em(a("MAE", href = "https://en.wikipedia.org/wiki/Mean_absolute_error"))),
h5(em(a("R-squared", href = "https://en.wikipedia.org/wiki/Coefficient_of_determination")))
),
column(2,
h4("Your line", style = "color: blue; text-decoration: underline"),
h5(strong(textOutput("user_RMSE"))),
h5(strong(textOutput("user_RSE"))),
h5(strong(textOutput("user_MAE"))),
h5(strong(textOutput("user_R2")))
),
column(2,
h4("LSE line", style = "color: green; text-decoration: underline"),
h5(textOutput("lse_RMSE")),
h5(textOutput("lse_RSE")),
h5(textOutput("lse_MAE")),
h5(textOutput("lse_R2"))
)
),
plotOutput('plot', click = 'plot_click')
)
)
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.R
\name{eq_clean_wrong_parsed_numeric_vectors}
\alias{eq_clean_wrong_parsed_numeric_vectors}
\title{Clean magnitude variables}
\usage{
eq_clean_wrong_parsed_numeric_vectors(dataframe)
}
\arguments{
\item{dataframe}{A tibble. The raw NOAA dataset}
}
\value{
tibble. A data frame with magnitudes cleaned
}
\description{
Magnitude variables are parsed as character vectors. This functions mutate
them into numeric.
}
\examples{
\dontrun{
eq_get_data() \%>\% eq_clean_wrong_parsed_numeric_vectors
}
}
| /man/eq_clean_wrong_parsed_numeric_vectors.Rd | no_license | rhkaz/noaamsdr | R | false | true | 584 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.R
\name{eq_clean_wrong_parsed_numeric_vectors}
\alias{eq_clean_wrong_parsed_numeric_vectors}
\title{Clean magnitude variables}
\usage{
eq_clean_wrong_parsed_numeric_vectors(dataframe)
}
\arguments{
\item{dataframe}{A tibble. The raw NOAA dataset}
}
\value{
tibble. A data frame with magnitudes cleaned
}
\description{
Magnitude variables are parsed as character vectors. This functions mutate
them into numeric.
}
\examples{
\dontrun{
eq_get_data() \%>\% eq_clean_wrong_parsed_numeric_vectors
}
}
|
# Exercise-2
# What are informatics courses about?
# Set up
library(tidytext)
library(dplyr)
library(stringr)
library(ggplot2)
library(rvest)
# Read in web page
# Extract descriptions of each course into a dataframe (may take multiple steps)
# How many courses are in the catalogue?
# Create a tidytext structure of all words
# Which words do we use to describe our courses?
# Create a set of stop words by adding (more) irrelevant words to the stop_words dataframe
# Remove stop words by performing an anti_join with the stop_words dataframe
# Which non stop-words are most common?
# Use ggplot to make a horizontal bar chart of the word frequencies of non-stop words
| /exercise-2/exercise.R | permissive | baochau1212/m20-text | R | false | false | 688 | r | # Exercise-2
# What are informatics courses about?
# Set up
library(tidytext)
library(dplyr)
library(stringr)
library(ggplot2)
library(rvest)
# Read in web page
# Extract descriptions of each course into a dataframe (may take multiple steps)
# How many courses are in the catalogue?
# Create a tidytext sturcture of all words
# Which words do we use to describe our courses?
# Create a set of stop words by adding (more) irrelevant words to the stop_words dataframe
# Remove stop words by performing an anti_join with the stop_words dataframe
# Which non stop-words are most common?
# Use ggplot to make a horizontal bar chart of the word frequencies of non-stop words
|
# Fuzz-test fixture: a named list holding one integer vector whose raw bytes
# are fed to the byte-to-double conversion routine under test.
testlist <- list(b = c(50462592L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# ByteVectorToDoubles is an unexported (:::) internal of the mcga package;
# do.call spreads the list elements as its arguments.
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) | /mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613102438-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 182 | r | testlist <- list(b = c(50462592L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) |
\name{pcAlgo}
\title{PC-Algorithm [OLD]: Estimate Skeleton or Equivalence Class of a DAG}
\alias{pcAlgo}
\alias{pcAlgo.Perfect}
\description{
This function is DEPRECATED! Use \code{\link{skeleton}}, \code{\link{pc}} or
\code{\link{fci}} instead.
Use the PC-algorithm to estimate the underlying graph
(\dQuote{skeleton}) or the equivalence class (CPDAG) of a DAG.
}
\usage{
pcAlgo(dm = NA, C = NA, n=NA, alpha, corMethod = "standard",
verbose=FALSE, directed=FALSE, G=NULL, datatype = "continuous",
NAdelete=TRUE, m.max=Inf, u2pd = "rand", psepset=FALSE)
%% pcAlgo.Perfect() is also deprecated and was never documented
}
\arguments{
\item{dm}{Data matrix; rows correspond to samples, cols correspond to
nodes.}
\item{C}{Correlation matrix; this is an alternative for specifying the
data matrix.}
\item{n}{Sample size; this is only needed if the data matrix is not
provided.}
\item{alpha}{Significance level for the individual partial correlation tests.}
  \item{corMethod}{A character string specifying the method for
(partial) correlation estimation.
"standard", "QnStable", "Qn" or "ogkQn" for standard and robust (based on
the Qn scale estimator without and with OGK) correlation
estimation. For robust estimation, we recommend \code{"QnStable"}.}
  \item{verbose}{0-no output, 1-small output, 2-details; using 1 and 2
makes the function very much slower}
\item{directed}{If \code{FALSE}, the underlying skeleton is computed;
if \code{TRUE}, the underlying CPDAG is computed}
\item{G}{The adjacency matrix of the graph from which the algorithm
should start (logical)}
\item{datatype}{Distinguish between discrete and continuous data}
\item{NAdelete}{Delete edge if pval=NA (for discrete data)}
\item{m.max}{Maximal size of conditioning set}
\item{u2pd}{Function used for converting skeleton to cpdag. "rand"
(use udag2pdag); "relaxed" (use udag2pdagRelaxed); "retry" (use
udag2pdagSpecial)}
\item{psepset}{If true, also possible separation sets are tested.}
}
\value{
An object of \code{\link{class}} \code{"pcAlgo"} (see
\code{\linkS4class{pcAlgo}}) containing an undirected graph
(object of \code{\link{class}} \code{"graph"}, see
\code{\link[graph]{graph-class}} from the package \pkg{graph})
  (without weights) as estimate of the skeleton or the CPDAG of the
underlying DAG.
}
\references{
P. Spirtes, C. Glymour and R. Scheines (2000)
\emph{Causation, Prediction, and Search}, 2nd edition, The MIT Press.
Kalisch M. and P. B\"uhlmann (2007)
\emph{Estimating high-dimensional
directed acyclic graphs with the PC-algorithm};
JMLR, Vol. 8, 613-636, 2007.
}
\author{
Markus Kalisch (\email{kalisch@stat.math.ethz.ch}) and Martin Maechler.
}
\keyword{multivariate}
\keyword{models}
\keyword{graphs}
| /man/pcAlgo.Rd | no_license | cran/pcalg | R | false | false | 2,824 | rd | \name{pcAlgo}
\title{PC-Algorithm [OLD]: Estimate Skeleton or Equivalence Class of a DAG}
\alias{pcAlgo}
\alias{pcAlgo.Perfect}
\description{
This function is DEPRECATED! Use \code{\link{skeleton}}, \code{\link{pc}} or
\code{\link{fci}} instead.
Use the PC-algorithm to estimate the underlying graph
(\dQuote{skeleton}) or the equivalence class (CPDAG) of a DAG.
}
\usage{
pcAlgo(dm = NA, C = NA, n=NA, alpha, corMethod = "standard",
verbose=FALSE, directed=FALSE, G=NULL, datatype = "continuous",
NAdelete=TRUE, m.max=Inf, u2pd = "rand", psepset=FALSE)
%% pcAlgo.Perfect() is also deprecated and was never documented
}
\arguments{
\item{dm}{Data matrix; rows correspond to samples, cols correspond to
nodes.}
\item{C}{Correlation matrix; this is an alternative for specifying the
data matrix.}
\item{n}{Sample size; this is only needed if the data matrix is not
provided.}
\item{alpha}{Significance level for the individual partial correlation tests.}
  \item{corMethod}{A character string specifying the method for
(partial) correlation estimation.
"standard", "QnStable", "Qn" or "ogkQn" for standard and robust (based on
the Qn scale estimator without and with OGK) correlation
estimation. For robust estimation, we recommend \code{"QnStable"}.}
\item{verbose}{0-no output, 1-small output, 2-details;using 1 and 2
makes the function very much slower}
\item{directed}{If \code{FALSE}, the underlying skeleton is computed;
if \code{TRUE}, the underlying CPDAG is computed}
\item{G}{The adjacency matrix of the graph from which the algorithm
should start (logical)}
\item{datatype}{Distinguish between discrete and continuous data}
\item{NAdelete}{Delete edge if pval=NA (for discrete data)}
\item{m.max}{Maximal size of conditioning set}
\item{u2pd}{Function used for converting skeleton to cpdag. "rand"
(use udag2pdag); "relaxed" (use udag2pdagRelaxed); "retry" (use
udag2pdagSpecial)}
\item{psepset}{If true, also possible separation sets are tested.}
}
\value{
An object of \code{\link{class}} \code{"pcAlgo"} (see
\code{\linkS4class{pcAlgo}}) containing an undirected graph
(object of \code{\link{class}} \code{"graph"}, see
\code{\link[graph]{graph-class}} from the package \pkg{graph})
  (without weights) as estimate of the skeleton or the CPDAG of the
underlying DAG.
}
\references{
P. Spirtes, C. Glymour and R. Scheines (2000)
\emph{Causation, Prediction, and Search}, 2nd edition, The MIT Press.
Kalisch M. and P. B\"uhlmann (2007)
\emph{Estimating high-dimensional
directed acyclic graphs with the PC-algorithm};
JMLR, Vol. 8, 613-636, 2007.
}
\author{
Markus Kalisch (\email{kalisch@stat.math.ethz.ch}) and Martin Maechler.
}
\keyword{multivariate}
\keyword{models}
\keyword{graphs}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 22677
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 22677
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/genbuf/genbuf11b4y.sat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2726
c no.of clauses 22677
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 22677
c
c QBFLIB/Tentrup/genbuf/genbuf11b4y.sat.qdimacs 2726 22677 E1 [] 0 72 2654 22677 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/genbuf/genbuf11b4y.sat/genbuf11b4y.sat.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 627 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 22677
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 22677
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/genbuf/genbuf11b4y.sat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2726
c no.of clauses 22677
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 22677
c
c QBFLIB/Tentrup/genbuf/genbuf11b4y.sat.qdimacs 2726 22677 E1 [] 0 72 2654 22677 NONE
|
library(broman)
# Read the P-value table; column 1 holds row labels, columns 2-4 hold P-values.
tab <- read.table("table.txt", header=TRUE)
# Near-black background used for the dark-themed slide figure.
bgcolor <- rgb(0.1, 0.1, 0.1, maxColorValue=1)
# --- Figure 1: rows in the original table order ---------------------------
pdf("../Figs/table_fig.pdf", height=4, width=4, pointsize=12)
# White foreground/axis/label text on the dark background.
par(bg=bgcolor, fg="white", col.axis="white", col.main="white", col.lab="white")
# Three side-by-side panels; the first is wider to leave room for row labels.
layout(cbind(1,2,3), width=c(1.65,1,1))
xlab <- expression(paste(-log[10], " P"))
# Factor with levels in table order so the dotplot keeps the row ordering.
lev <- factor(tab[,1], levels=tab[,1])
# Wide left margin on the first panel for the row labels.
par(mar=c(4.1,5.6,3.1,0.6))
dotplot(lev, -log10(tab[,2]), rotate=TRUE, main=colnames(tab)[2], ylab="", xlim=c(0, 31),
        xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
# Narrow left margin for the remaining two (label-free) panels.
par(mar=c(4.1,0.6, 3.1,0.6))
# Build ten distinct labels made only of spaces (1..10 spaces) so the
# second and third panels have factor levels but show no visible row names.
nam <- " "
for(i in 2:10) nam <- c(" ", paste0(" ", nam))
nam <- factor(nam, levels=nam)
dotplot(nam, -log10(tab[,3]), rotate=TRUE,yat=NA, main=colnames(tab)[3], xlim=c(0, 31),
        xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
dotplot(nam, -log10(tab[,4]), rotate=TRUE,yat=NA, main=colnames(tab)[4], xlim=c(0, 31),
        xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
dev.off()
# --- Figure 2: same panels, but rows sorted by the column-2 P-values ------
tab <- tab[order(tab[,2]),]
lev <- factor(tab[,1], levels=tab[,1])
pdf("../Figs/table_fig_B.pdf", height=4, width=4, pointsize=12)
par(bg=bgcolor, fg="white", col.axis="white", col.main="white", col.lab="white")
layout(cbind(1,2,3), width=c(1.65,1,1))
xlab <- expression(paste(-log[10], " P"))
par(mar=c(4.1,5.6,3.1,0.6))
dotplot(lev, -log10(tab[,2]), rotate=TRUE, main=colnames(tab)[2], ylab="", xlim=c(0, 31),
        xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
par(mar=c(4.1,0.6, 3.1,0.6))
# `nam` (blank labels) is reused from the first figure above.
dotplot(nam, -log10(tab[,3]), rotate=TRUE,yat=NA, main=colnames(tab)[3], xlim=c(0, 31),
        xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
dotplot(nam, -log10(tab[,4]), rotate=TRUE,yat=NA, main=colnames(tab)[4], xlim=c(0, 31),
        xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
dev.off()
| /R/table_fig.R | permissive | kbroman/Talk_ICQG2016 | R | false | false | 1,858 | r | library(broman)
tab <- read.table("table.txt", header=TRUE)
bgcolor <- rgb(0.1, 0.1, 0.1, maxColorValue=1)
pdf("../Figs/table_fig.pdf", height=4, width=4, pointsize=12)
par(bg=bgcolor, fg="white", col.axis="white", col.main="white", col.lab="white")
layout(cbind(1,2,3), width=c(1.65,1,1))
xlab <- expression(paste(-log[10], " P"))
lev <- factor(tab[,1], levels=tab[,1])
par(mar=c(4.1,5.6,3.1,0.6))
dotplot(lev, -log10(tab[,2]), rotate=TRUE, main=colnames(tab)[2], ylab="", xlim=c(0, 31),
xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
par(mar=c(4.1,0.6, 3.1,0.6))
nam <- " "
for(i in 2:10) nam <- c(" ", paste0(" ", nam))
nam <- factor(nam, levels=nam)
dotplot(nam, -log10(tab[,3]), rotate=TRUE,yat=NA, main=colnames(tab)[3], xlim=c(0, 31),
xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
dotplot(nam, -log10(tab[,4]), rotate=TRUE,yat=NA, main=colnames(tab)[4], xlim=c(0, 31),
xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
dev.off()
tab <- tab[order(tab[,2]),]
lev <- factor(tab[,1], levels=tab[,1])
pdf("../Figs/table_fig_B.pdf", height=4, width=4, pointsize=12)
par(bg=bgcolor, fg="white", col.axis="white", col.main="white", col.lab="white")
layout(cbind(1,2,3), width=c(1.65,1,1))
xlab <- expression(paste(-log[10], " P"))
par(mar=c(4.1,5.6,3.1,0.6))
dotplot(lev, -log10(tab[,2]), rotate=TRUE, main=colnames(tab)[2], ylab="", xlim=c(0, 31),
xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
par(mar=c(4.1,0.6, 3.1,0.6))
dotplot(nam, -log10(tab[,3]), rotate=TRUE,yat=NA, main=colnames(tab)[3], xlim=c(0, 31),
xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
dotplot(nam, -log10(tab[,4]), rotate=TRUE,yat=NA, main=colnames(tab)[4], xlim=c(0, 31),
xaxs="i", xlab=xlab, bg="violetred", cex=1.5, bgcolor="gray88")
dev.off()
|
getData <- function() {
  # Register a one-off "myDate" pseudo-class so that read.table() can coerce
  # the dd/mm/YYYY date strings straight into Date objects via colClasses.
  setClass('myDate')
  setAs("character", "myDate", function(from) as.Date(from, format="%d/%m/%Y") )

  # Explicit column types: date, time string, then seven numeric measurements.
  col_types <- c('myDate', 'character', 'numeric', 'numeric', 'numeric',
                 'numeric', 'numeric', 'numeric', 'numeric')
  power <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
                      colClasses=col_types, na.strings = "?")

  # Keep only the two days of interest; comparing a Date column against a
  # character literal coerces the literal to Date.
  two_days <- power[power$Date >= "2007-02-01" & power$Date <= "2007-02-02", ]

  # Make sure the rows come back in chronological order.
  two_days[order(two_days$Date, two_days$Time), ]
}
plot3 <- function() {
  # Draw the three energy sub-metering series over the two-day window and
  # write the result to plot3.png. No return value; called for its side effect.
  x <- getData()
  # Build the datetime axis once (with an explicit format) instead of
  # re-parsing paste(Date, Time) for every plot/lines call below.
  datetime <- as.POSIXct(paste(x$Date, x$Time), format="%Y-%m-%d %H:%M:%S")
  # use png device, a dev.copy makes legend labels to be truncated
  png(file = "plot3.png", width = 480, height = 480)
  # reset mfrow so that plots have the default single-panel layout
  par(mfrow=c(1,1))
  # Set up an empty frame sized to the first series, then overlay all three.
  plot(x$Sub_metering_1 ~ datetime, ann=FALSE, type="n")
  lines(x$Sub_metering_1 ~ datetime, col="black")
  lines(x$Sub_metering_2 ~ datetime, col="red")
  lines(x$Sub_metering_3 ~ datetime, col="blue")
  legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty=c(1, 1, 1), col=c("black", "red", "blue"))
  title(ylab='Energy sub metering')
  dev.off()
}
plot3() | /plot3.R | no_license | sbalki2000/ExData_Plotting1 | R | false | false | 1,294 | r | getData <- function() {
setClass('myDate')
setAs("character", "myDate", function(from) as.Date(from, format="%d/%m/%Y") )
allDat <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
colClasses=c('myDate', 'character', 'numeric', 'numeric', 'numeric',
'numeric', 'numeric', 'numeric', 'numeric'), na.strings = "?")
myDat <- allDat[(allDat$Date >= "2007-02-01") & (allDat$Date <= "2007-02-02"),]
# we may want to order them by date if we are not already
myDat <- myDat[with(myDat, order(myDat$Date, myDat$Time)), ]
return(myDat);
}
plot3 <- function() {
  # Draw the three energy sub-metering series over the two-day window and
  # write the result to plot3.png. No return value; called for its side effect.
  x <- getData()
  # Build the datetime axis once (with an explicit format) instead of
  # re-parsing paste(Date, Time) for every plot/lines call below.
  datetime <- as.POSIXct(paste(x$Date, x$Time), format="%Y-%m-%d %H:%M:%S")
  # use png device, a dev.copy makes legend labels to be truncated
  png(file = "plot3.png", width = 480, height = 480)
  # reset mfrow so that plots have the default single-panel layout
  par(mfrow=c(1,1))
  # Set up an empty frame sized to the first series, then overlay all three.
  plot(x$Sub_metering_1 ~ datetime, ann=FALSE, type="n")
  lines(x$Sub_metering_1 ~ datetime, col="black")
  lines(x$Sub_metering_2 ~ datetime, col="red")
  lines(x$Sub_metering_3 ~ datetime, col="blue")
  legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty=c(1, 1, 1), col=c("black", "red", "blue"))
  title(ylab='Energy sub metering')
  dev.off()
}
plot3() |
\name{carpet_data}
\alias{carpet_data}
\docType{data}
\title{
Carpet Data
}
\description{
Carpet research design (orthogonal design)
}
\usage{data(carpet_data)}
\format{
A data frame with 16 observations on the following 7 variables.
\describe{
\item{\code{Profile}}{Profile no}
\item{\code{Package}}{package design has three levels; A, B, C}
\item{\code{Brand}}{brand name has three levels; K2R, Glory, Bissell}
\item{\code{Price}}{has three levels; $1.19, $1.39, $1.59}
\item{\code{Seal}}{Good Housekeeping seal has two levels, No, Yes}
    \item{\code{Money}}{money-back guarantee has two levels; No, Yes}
\item{\code{Ranks}}{Rank by respondents, Greatest to Least}
}
}
\details{
http://www.unileon.es/ficheros/servicios/informatica/spss/english/IBM-SPSS_conjoint.pdf
}
\source{
SPSS 19
}
\references{
Green, P. E. and Y. Wind (1973), Multi-Attribute Decisions in Marketing. New York: Holt, Rinehart & Winston
}
\examples{
library(faisalconjoint)
data(carpet_data)
carpet_data
}
| /man/carpet_data.Rd | no_license | mbailo/faisalconjoint | R | false | false | 1,048 | rd | \name{carpet_data}
\alias{carpet_data}
\docType{data}
\title{
Carpet Data
}
\description{
Carpet research design (orthogonal design)
}
\usage{data(carpet_data)}
\format{
A data frame with 16 observations on the following 7 variables.
\describe{
\item{\code{Profile}}{Profile no}
\item{\code{Package}}{package design has three levels; A, B, C}
\item{\code{Brand}}{brand name has three levels; K2R, Glory, Bissell}
\item{\code{Price}}{has three levels; $1.19, $1.39, $1.59}
\item{\code{Seal}}{Good Housekeeping seal has two levels, No, Yes}
    \item{\code{Money}}{money-back guarantee has two levels; No, Yes}
\item{\code{Ranks}}{Rank by respondents, Greatest to Least}
}
}
\details{
http://www.unileon.es/ficheros/servicios/informatica/spss/english/IBM-SPSS_conjoint.pdf
}
\source{
SPSS 19
}
\references{
Green, P. E. and Y. Wind (1973), Multi-Attribute Decisions in Marketing. New York: Holt, Rinehart & Winston
}
\examples{
library(faisalconjoint)
data(carpet_data)
carpet_data
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.