content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
---
title: "Fit Logistic Model to Previous Dataset - Week 8.2 Assignment"
author: "Andrea Fox"
date: "October 20th 2019"
---
# Fit Logistic Model to Previous Dataset - Week 8.2 Assignment
#
# Fits a logistic regression and a kNN classifier to binary-classifier-data.csv
# and reports classification accuracy on a held-out 20% test split.
library(caTools)
library(caret)
# NOTE(review): setwd() in a script is fragile; consider relative paths or
# here::here() so the script runs on other machines.
setwd("C:/Users/Andrea Fox/OneDrive/Documents/R/DSC520 Statistics Using R")
binaryxy <- read.csv("binary-classifier-data.csv", header = TRUE)
binaryxy$label <- as.factor(binaryxy$label)
# BUG FIX: sample.split() expects the outcome vector, not the whole data
# frame; passing the data frame produced a length-ncol split that was
# silently recycled across rows.
split <- sample.split(binaryxy$label, SplitRatio = 0.8)
split
# `split` is logical, so compare against TRUE/FALSE rather than strings.
train <- subset(binaryxy, split == TRUE)
test <- subset(binaryxy, split == FALSE)
# Logistic regression on the two features.
myModel <- glm(label ~ x + y, data = train, family = "binomial")
summary(myModel)
# Predicted probabilities for the held-out test set.
res <- predict(myModel, test, type = "response")
res
# BUG FIX: evaluate on the *test* split; the original overwrote `res` with
# training-set predictions and scored the model on data it was fit to,
# which overstates accuracy.
confmatrix <- table(Actual_Value = test$label, Predicted_Value = res > 0.5)
confmatrix
# Overall accuracy = (true negatives + true positives) / total.
(confmatrix[[1, 1]] + confmatrix[[2, 2]]) / sum(confmatrix)
# Install only if missing: unconditional install.packages() in a script
# re-downloads on every run.
if (!requireNamespace("class", quietly = TRUE)) install.packages("class")
library(class)
NROW(train)
# Rule of thumb: choose k near sqrt(n) of the training set.
sqrt(NROW(train))
# BUG FIX: pass only the feature columns to knn(); including `label` in the
# feature matrix leaks the answer into the distance computation.
features <- c("x", "y")
knn.31 <- knn(train = train[, features], test = test[, features], cl = train$label, k = 31)
knn.32 <- knn(train = train[, features], test = test[, features], cl = train$label, k = 32)
ACC.31 <- 100 * sum(test$label == knn.31) / NROW(test$label)
ACC.32 <- 100 * sum(test$label == knn.32) / NROW(test$label)
ACC.31
ACC.32 | /DSC520 - Statistics for Data Science/Assignment 8/assignment_8.2_FoxAndrea.R | no_license | anfox86/Masters-courses | R | false | false | 1,186 | r | ---
title: "Fit Logistic Model to Previous Dataset - Week 8.2 Assignment"
author: "Andrea Fox"
date: "October 20th 2019"
---
library(caTools)
library(caret)
setwd("C:/Users/Andrea Fox/OneDrive/Documents/R/DSC520 Statistics Using R")
binaryxy <- read.csv("binary-classifier-data.csv", header = TRUE)
binaryxy$label <- as.factor(binaryxy$label)
split <- sample.split(binaryxy, SplitRatio = 0.8)
split
train <- subset(binaryxy, split == "TRUE")
test <- subset(binaryxy, split == "FALSE")
myModel <- glm(label ~ x + y, data = train, family = 'binomial')
summary(myModel)
res <- predict(myModel, test, type = "response")
res
res <- predict(myModel, train, type = "response")
res
confmatrix <- table(Actual_Value = train$label, Predicted_Value = res > 0.5)
confmatrix
(confmatrix[[1,1]] + confmatrix[[2,2]]) / sum(confmatrix)
install.packages("class")
library(class)
NROW(train)
sqrt(999)
knn.31 <- knn(train = train, test = test, cl = train$label, k = 31)
knn.32 <- knn(train = train, test = test, cl = train$label, k = 32)
ACC.31 <- 100 * sum(test$label == knn.31)/NROW(test$label)
ACC.32 <- 100 * sum(test$label == knn.32)/NROW(test$label)
ACC.31
ACC.32 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intestinalData.R
\docType{data}
\name{intestinalData}
\alias{intestinalData}
\title{Single-cell transcriptome data of intestinal epithelial cells}
\format{A sparse matrix (from the \pkg{Matrix} package) with cells as columns and genes as rows. Entries are raw transcript counts.}
\usage{
intestinalData
}
\value{
None
}
\description{
This dataset contains gene expression values, i.e., transcript counts, of 278 intestinal epithelial cells.
}
\references{
Grün et al. (2016) Cell Stem Cell 19(2): 266-77 <DOI:10.1016/j.stem.2016.05.010>
(\href{https://www.ncbi.nlm.nih.gov/pubmed/27345837}{PubMed})
}
\keyword{datasets}
| /methods/RaceID3_StemID2_package-master/man/intestinalData.Rd | no_license | waynewu6250/Clustering-for-scRNAseq | R | false | true | 692 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intestinalData.R
\docType{data}
\name{intestinalData}
\alias{intestinalData}
\title{Single-cell transcriptome data of intestinal epithelial cells}
\format{A sparse matrix (using the \pkg{Matrix}) with cells as columns and genes as rows. Entries are raw transcript counts.}
\usage{
intestinalData
}
\value{
None
}
\description{
This dataset contains gene expression values, i. e. transcript counts, of 278 intestinal epithelial cells.
}
\references{
Grün et al. (2016) Cell Stem Cell 19(2): 266-77 <DOI:10.1016/j.stem.2016.05.010>
(\href{https://www.ncbi.nlm.nih.gov/pubmed/27345837}{PubMed})
}
\keyword{datasets}
|
library(cricketr)
### Name: bowlerPerfHomeAway
### Title: This function analyses the performance of the bowler at home and
### overseas
### Aliases: bowlerPerfHomeAway
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumbleSp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
# Retrieve the file path of a data file installed with cricketr
path <- system.file("data", "kumblesp.csv", package = "cricketr")
bowlerPerfHomeAway(path,"Anil Kumble")
# Note: This example uses the file kumblesp.csv from the /data directory. However
# you can use any directory as long as the data file exists in that directory.
| /data/genthat_extracted_code/cricketr/examples/bowlerPerfHomeAway.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 676 | r | library(cricketr)
### Name: bowlerPerfHomeAway
### Title: This function analyses the performance of the bowler at home and
### overseas
### Aliases: bowlerPerfHomeAway
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumbleSp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
# Retrieve the file path of a data file installed with cricketr
path <- system.file("data", "kumblesp.csv", package = "cricketr")
bowlerPerfHomeAway(path,"Anil Kumble")
# Note: This example uses the file kumble.csv from the /data directory. However
# you can use any directory as long as the data file exists in that directory.
|
context("variance types")
library(flashr)
# Fixed seed so the randomly generated test matrix is reproducible.
set.seed(666)
n <- 40
p <- 60
# Rank-one signal (all ones) plus small Gaussian noise; shared by all tests.
LF <- outer(rep(1, n), rep(1, p))
M <- LF + 0.1 * rnorm(n * p)
flash.M <- flash_set_data(M)
test_that("estimating variance by row produces identical estimates to flashr", {
f <- flashier(M, greedy.Kmax = 1, var.type = 1)
flashr.res <- flashr:::flash_update_precision(flash.M, to.flashr(f$fit), "by_row")
expect_equal(f$fit$tau, flashr.res$tau[, 1])
})
test_that("estimating variance by column produces identical estimates to flashr", {
f <- flashier(M, greedy.Kmax = 1, var.type = 2)
flashr.res <- flashr:::flash_update_precision(flash.M, to.flashr(f$fit), "by_column")
expect_equal(f$fit$tau, flashr.res$tau[1, ])
})
test_that("zero variance type (with S constant) produces same fit as flashr", {
f <- flashier(M, S = 0.1, var.type = NULL)
expect_equal(f$fit$tau, f$fit$given.tau)
flashr.res <- flashr::flash(flashr::flash_set_data(M, S = 0.1), Kmax = 1,
var_type = "zero", nullcheck = FALSE)
expect_equal(f$obj, flashr.res$objective)
expect_true(max(abs(flashr.res$fitted_values - lowrank.expand(f$fit$EF))) < 1e-6)
})
test_that("zero variance type (with S low-rank) produces same fit as flashr", {
S <- 0.1 + 0.01 * rnorm(n)
data <- set.flash.data(M, S, S.dim = 1)
f <- flashier(data, greedy.Kmax = 1, var.type = NULL)
expect_equal(f$fit$tau, f$fit$given.tau)
flash.S <- matrix(S, nrow = n, ncol = p)
flashr.res <- flashr::flash(flashr::flash_set_data(M, S = flash.S), Kmax = 1,
var_type = "zero", nullcheck = FALSE)
expect_equal(f$obj, flashr.res$objective)
expect_true(max(abs(flashr.res$fitted_values - lowrank.expand(f$fit$EF))) < 1e-6)
})
test_that("zero variance type (with S a matrix) produces same fit as flashr", {
S <- matrix(0.1 + 0.01 * rnorm(n * p), nrow = n, ncol = p)
f <- flashier(M, S = S, var.type = NULL)
expect_equal(f$fit$tau, f$fit$given.tau)
flashr.res <- flashr::flash(flashr::flash_set_data(M, S = S), Kmax = 1,
var_type = "zero", nullcheck = FALSE)
expect_equal(f$obj, flashr.res$objective)
expect_true(max(abs(flashr.res$fitted_values - lowrank.expand(f$fit$EF))) < 1e-6)
})
test_that("constant S + constant estimation works", {
f <- flashier(M, S = 0.2, var.type = 0, greedy.Kmax = 1, output.lvl = 3)
expect_equal(f$fit$tau, f$fit$given.tau)
f <- flashier(M, S = 0.05, var.type = 0, greedy.Kmax = 1, output.lvl = 3)
expect_equal(f$fit$tau, f$fit$est.tau)
})
test_that("by column S + by column estimation works", {
tau = c(rep(50, 10), rep(250, p - 10))
data <- set.flash.data(M, S = 1 / sqrt(tau), S.dim = 2)
f <- flashier(data, var.type = 2, greedy.Kmax = 1, output.lvl = 3)
expect_equal(f$fit$tau[1:10], rep(50, 10))
expect_equal(f$fit$tau[-(1:10)], f$fit$est.tau[-(1:10)])
})
test_that("kroncker variance estimation works", {
Y <- matrix(10, nrow = 100, ncol = 100) + 0.1 * rnorm(100 * 100)
f <- flashier(Y, var.type = c(1, 2), greedy.Kmax = 1)
tau.mat <- r1.expand(f$fit$tau)
expect_equal(mean(tau.mat), 100, tol = 0.1)
R2 <- (Y - lowrank.expand(f$fit$EF))^2
R2 <- R2 + lowrank.expand(f$fit$EF2) - lowrank.expand(lowrank.square(f$fit$EF))
neg.llik <- function(x) {
tau <- outer(x[1:100], x[101:200])
return(-sum(log(tau)) + sum(R2 * tau))
}
optim.soln <- optim(rep(1, 200), neg.llik, method = "L-BFGS-B", lower = 0)
optim.tau <- outer(optim.soln$par[1:100], optim.soln$par[101:200])
expect_equal(tau.mat, optim.tau, tol = 0.1, scale = 1)
})
test_that("basic noisy variance estimation works", {
f.const <- flashier(M, var.type = 0, greedy.Kmax = 1)
f.noisy <- flashier(M, S = matrix(0.01, nrow = nrow(M), ncol = ncol(M)),
var.type = 0, greedy.Kmax = 1)
expect_equal(f.const$fit$tau, f.noisy$fit$tau[1, 1], tol = 0.5, scale = 1)
expect_equal(f.const$objective, f.noisy$objective, tol = 0.01, scale = 1)
})
test_that("fixed + by_column estimation works", {
f.bycol <- flashier(M, var.type = 2, greedy.Kmax = 1)
f.noisy <- flashier(M,
S = (matrix(0.01, nrow = nrow(M), ncol = ncol(M))
+ 0.001 * rnorm(length(M))),
var.type = 2, greedy.Kmax = 1)
expect_equal(f.bycol$fit$tau, f.noisy$fit$tau[1, ], tol = 0.5, scale = 1)
expect_equal(f.bycol$objective, f.noisy$objective, tol = 0.1, scale = 1)
})
test_that("fixed + kronecker estimation works", {
f.kron <- flashier(M, var.type = c(1, 2), greedy.Kmax = 0)
f.noisy <- flashier(M, S = matrix(0.01, nrow = nrow(M), ncol = ncol(M)),
var.type = c(1, 2), greedy.Kmax = 0)
expect_equal(r1.expand(f.kron$fit$tau), f.noisy$fit$tau, tol = 0.01, scale = 1)
expect_equal(f.kron$objective, f.noisy$objective, tol = 0.01, scale = 1)
f.kron <- flashier(M, var.type = c(1, 2), greedy.Kmax = 1)
f.noisy <- flashier(M, S = matrix(0.01, nrow = nrow(M), ncol = ncol(M)),
var.type = c(1, 2), greedy.Kmax = 1)
expect_equal(r1.expand(f.kron$fit$tau), f.noisy$fit$tau, tol = 1, scale = 1)
expect_equal(f.kron$objective, f.noisy$objective, tol = 0.05, scale = 1)
})
| /tests/testthat/test_var_types.R | no_license | jhmarcus/flashier | R | false | false | 5,171 | r | context("variance types")
library(flashr)
set.seed(666)
n <- 40
p <- 60
LF <- outer(rep(1, n), rep(1, p))
M <- LF + 0.1 * rnorm(n * p)
flash.M <- flash_set_data(M)
test_that("estimating variance by row produces identical estimates to flashr", {
f <- flashier(M, greedy.Kmax = 1, var.type = 1)
flashr.res <- flashr:::flash_update_precision(flash.M, to.flashr(f$fit), "by_row")
expect_equal(f$fit$tau, flashr.res$tau[, 1])
})
test_that("estimating variance by column produces identical estimates to flashr", {
f <- flashier(M, greedy.Kmax = 1, var.type = 2)
flashr.res <- flashr:::flash_update_precision(flash.M, to.flashr(f$fit), "by_column")
expect_equal(f$fit$tau, flashr.res$tau[1, ])
})
test_that("zero variance type (with S constant) produces same fit as flashr", {
f <- flashier(M, S = 0.1, var.type = NULL)
expect_equal(f$fit$tau, f$fit$given.tau)
flashr.res <- flashr::flash(flashr::flash_set_data(M, S = 0.1), Kmax = 1,
var_type = "zero", nullcheck = FALSE)
expect_equal(f$obj, flashr.res$objective)
expect_true(max(abs(flashr.res$fitted_values - lowrank.expand(f$fit$EF))) < 1e-6)
})
test_that("zero variance type (with S low-rank) produces same fit as flashr", {
S <- 0.1 + 0.01 * rnorm(n)
data <- set.flash.data(M, S, S.dim = 1)
f <- flashier(data, greedy.Kmax = 1, var.type = NULL)
expect_equal(f$fit$tau, f$fit$given.tau)
flash.S <- matrix(S, nrow = n, ncol = p)
flashr.res <- flashr::flash(flashr::flash_set_data(M, S = flash.S), Kmax = 1,
var_type = "zero", nullcheck = FALSE)
expect_equal(f$obj, flashr.res$objective)
expect_true(max(abs(flashr.res$fitted_values - lowrank.expand(f$fit$EF))) < 1e-6)
})
test_that("zero variance type (with S a matrix) produces same fit as flashr", {
S <- matrix(0.1 + 0.01 * rnorm(n * p), nrow = n, ncol = p)
f <- flashier(M, S = S, var.type = NULL)
expect_equal(f$fit$tau, f$fit$given.tau)
flashr.res <- flashr::flash(flashr::flash_set_data(M, S = S), Kmax = 1,
var_type = "zero", nullcheck = FALSE)
expect_equal(f$obj, flashr.res$objective)
expect_true(max(abs(flashr.res$fitted_values - lowrank.expand(f$fit$EF))) < 1e-6)
})
test_that("constant S + constant estimation works", {
f <- flashier(M, S = 0.2, var.type = 0, greedy.Kmax = 1, output.lvl = 3)
expect_equal(f$fit$tau, f$fit$given.tau)
f <- flashier(M, S = 0.05, var.type = 0, greedy.Kmax = 1, output.lvl = 3)
expect_equal(f$fit$tau, f$fit$est.tau)
})
test_that("by column S + by column estimation works", {
tau = c(rep(50, 10), rep(250, p - 10))
data <- set.flash.data(M, S = 1 / sqrt(tau), S.dim = 2)
f <- flashier(data, var.type = 2, greedy.Kmax = 1, output.lvl = 3)
expect_equal(f$fit$tau[1:10], rep(50, 10))
expect_equal(f$fit$tau[-(1:10)], f$fit$est.tau[-(1:10)])
})
test_that("kroncker variance estimation works", {
Y <- matrix(10, nrow = 100, ncol = 100) + 0.1 * rnorm(100 * 100)
f <- flashier(Y, var.type = c(1, 2), greedy.Kmax = 1)
tau.mat <- r1.expand(f$fit$tau)
expect_equal(mean(tau.mat), 100, tol = 0.1)
R2 <- (Y - lowrank.expand(f$fit$EF))^2
R2 <- R2 + lowrank.expand(f$fit$EF2) - lowrank.expand(lowrank.square(f$fit$EF))
neg.llik <- function(x) {
tau <- outer(x[1:100], x[101:200])
return(-sum(log(tau)) + sum(R2 * tau))
}
optim.soln <- optim(rep(1, 200), neg.llik, method = "L-BFGS-B", lower = 0)
optim.tau <- outer(optim.soln$par[1:100], optim.soln$par[101:200])
expect_equal(tau.mat, optim.tau, tol = 0.1, scale = 1)
})
test_that("basic noisy variance estimation works", {
f.const <- flashier(M, var.type = 0, greedy.Kmax = 1)
f.noisy <- flashier(M, S = matrix(0.01, nrow = nrow(M), ncol = ncol(M)),
var.type = 0, greedy.Kmax = 1)
expect_equal(f.const$fit$tau, f.noisy$fit$tau[1, 1], tol = 0.5, scale = 1)
expect_equal(f.const$objective, f.noisy$objective, tol = 0.01, scale = 1)
})
test_that("fixed + by_column estimation works", {
f.bycol <- flashier(M, var.type = 2, greedy.Kmax = 1)
f.noisy <- flashier(M,
S = (matrix(0.01, nrow = nrow(M), ncol = ncol(M))
+ 0.001 * rnorm(length(M))),
var.type = 2, greedy.Kmax = 1)
expect_equal(f.bycol$fit$tau, f.noisy$fit$tau[1, ], tol = 0.5, scale = 1)
expect_equal(f.bycol$objective, f.noisy$objective, tol = 0.1, scale = 1)
})
test_that("fixed + kronecker estimation works", {
f.kron <- flashier(M, var.type = c(1, 2), greedy.Kmax = 0)
f.noisy <- flashier(M, S = matrix(0.01, nrow = nrow(M), ncol = ncol(M)),
var.type = c(1, 2), greedy.Kmax = 0)
expect_equal(r1.expand(f.kron$fit$tau), f.noisy$fit$tau, tol = 0.01, scale = 1)
expect_equal(f.kron$objective, f.noisy$objective, tol = 0.01, scale = 1)
f.kron <- flashier(M, var.type = c(1, 2), greedy.Kmax = 1)
f.noisy <- flashier(M, S = matrix(0.01, nrow = nrow(M), ncol = ncol(M)),
var.type = c(1, 2), greedy.Kmax = 1)
expect_equal(r1.expand(f.kron$fit$tau), f.noisy$fit$tau, tol = 1, scale = 1)
expect_equal(f.kron$objective, f.noisy$objective, tol = 0.05, scale = 1)
})
|
#### Model Parameters ####
# Model run directory
runDir <- "/glade/scratch/jamesmcc/tuolumne.calibration/"
# Route link file
rtlinkFile <- paste0(runDir, "/RUN.TEMPLATE/DOMAIN/RouteLink.nc")
#### DDS Parameters ####
# Perturbation parameter (default=0.2)
r <- 0.2
# Number of iterations (default=1000)
m <- 500
# Parameter bounds
# Must create a data table called paramBnds with one row per parameter and columns labeled:
# "param" for parameter name, "ini" for initial value, "min" for minimum value, "max" for maximum value
paramBnds <- read.table(paste0(runDir, "/param_bnds.txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE)
#### Model Evaluation Parameters ####
# Gage ID to extract from the model output and compare against the obs
#siteId <- "02245500"
# R dataset containing observations
# Must contain an object called obsDf containing columns:
# "POSIXct" for POSIXct data, "obs" for streamflow data
#obsFile <- paste0(runDir, "/OBS/obsDaily.Rdata")
# Objective function
# Must contain a function to be minimized, with two arguments (in order): model, obs
# Objective function minimized by the calibration loop: the negative
# weighted power mean of NSE and log-NSE, so a better fit yields a smaller
# (more negative) value.
#
# Args:
#   m: numeric vector of modelled values
#   o: numeric vector of observed values
#   w: weight on the NSE term; the log-NSE term receives (1 - w)
#   p: exponent of the power mean (p = 1 gives the arithmetic weighted mean)
objFn <- function (m, o, w=0.5, p=1) { # Negative weighted mean NSE and log NSE
  # Nash-Sutcliffe efficiency on raw flows (emphasizes high flows).
  err1 <- sum((m - o)^2, na.rm=TRUE)
  err2 <- sum((o - mean(o, na.rm=TRUE))^2, na.rm=TRUE)
  nse <- 1 - (err1/err2)
  # NSE on log flows (emphasizes low flows); the small offset avoids log(0).
  lnm <- log(m + 1e-04)
  lno <- log(o + 1e-04)
  err1 <- sum((lnm - lno)^2, na.rm=TRUE)
  err2 <- sum((lno - mean(lno, na.rm=TRUE))^2, na.rm=TRUE)
  lnnse <- 1 - (err1/err2)
  # Weighted power mean. BUG FIX: the second term previously reused `w`, so
  # the weights did not sum to 1 for any w != 0.5; the log-NSE term now gets
  # (1 - w). Behavior is unchanged at the default w = 0.5.
  res <- ((w^p) * (nse^p) + ((1 - w)^p) * (lnnse^p))^(1/p)
  0 - res
}
#objFn <- function (m, o) { # Negative NSE
# err1 <- sum((m - o)^2, na.rm=T)
# err2 <- sum((o - mean(o, na.rm=T))^2, na.rm=T)
# ns <- 1 - (err1/err2)
# 0-ns
#}
# Spatial RMSE objective: root-mean-square of the element-wise residuals
# between modelled and observed values across all time steps.
#
# Args:
#   m: list (one element per time step) of lists holding modelled values
#   o: matching list holding observed values
#   mvar: name of the modelled variable within each element of m
#   ovar: name of the observed variable within each element of o
# Returns a single numeric RMSE; NAs in the residuals are ignored.
ObjFunSpaceRmse <- function(m, o, mvar, ovar) {
  # Collect residuals for every time step. seq_along() is safe for empty
  # lists (the original 1:length(m) would iterate c(1, 0)); the unused
  # theNames/nn locals have been removed.
  resid <- unlist(lapply(seq_along(m), function(tt) {
    as.vector(m[[tt]][[mvar]] - o[[tt]][[ovar]])
  }))
  sqrt(mean(resid^2, na.rm = TRUE))
}
# Start date for evaluation period (e.g., after spinup period)
startDate <- as.POSIXct("2008-10-01", format="%Y-%m-%d", tz="UTC")
# Archive model run output files?
archiveOutput <- FALSE
# Archive model run files?
archiveRun <- FALSE
| /CalibDemo/namelist.calib.2d.R | no_license | bsu-wrudisill/wrfhydro_calib | R | false | false | 2,288 | r | #### Model Parameters ####
# Model run directory
runDir <- "/glade/scratch/jamesmcc/tuolumne.calibration/"
# Route link file
rtlinkFile <- paste0(runDir, "/RUN.TEMPLATE/DOMAIN/RouteLink.nc")
#### DDS Parameters ####
# Perturbation parameter (default=0.2)
r <- 0.2
# Number of iterations (default=1000)
m <- 500
# Parameter bounds
# Must create a data table called paramBnds with one row per parameter and columns labeled:
# "param" for parameter name, "ini" for initial value, "min" for minimum value, "max" for maximum value
paramBnds <- read.table(paste0(runDir, "/param_bnds.txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE)
#### Model Evaluation Parameters ####
# Gage ID to extract from the model output and compare against the obs
#siteId <- "02245500"
# R dataset containing observations
# Must contain an object called obsDf containing columns:
# "POSIXct" for POSIXct data, "obs" for streamflow data
#obsFile <- paste0(runDir, "/OBS/obsDaily.Rdata")
# Objective function
# Must contain a function to be minimized, with two arguments (in order): model, obs
objFn <- function (m, o, w=0.5, p=1) { # Negative weighted mean NSE and log NSE
# NSE
err1 <- sum((m - o)^2, na.rm=T)
err2 <- sum((o - mean(o, na.rm=T))^2, na.rm=T)
nse <- 1 - (err1/err2)
# Ln NSE
lnm <- log(m + 1e-04)
lno <- log(o + 1e-04)
err1 <- sum((lnm - lno)^2, na.rm=T)
err2 <- sum((lno - mean(lno, na.rm=T))^2, na.rm=T)
lnnse <- 1 - (err1/err2)
# Weighted mean
res <- ((w^p) * (nse^p) + (w^p) * (lnnse^p))^(1/p)
0-res
}
#objFn <- function (m, o) { # Negative NSE
# err1 <- sum((m - o)^2, na.rm=T)
# err2 <- sum((o - mean(o, na.rm=T))^2, na.rm=T)
# ns <- 1 - (err1/err2)
# 0-ns
#}
ObjFunSpaceRmse <- function(m, o, mvar,ovar) {
## treat NA?
rmse <- array(NA, dim=c(length(m),length(m[[1]][[mvar]])))
theNames <- names(m)
for(tt in 1:length(m)) {
nn <- theNames[tt]
rmse[tt,] <- as.vector( m[[tt]][[mvar]] - o[[tt]][[ovar]])
}
sqrt(mean(rmse^2, na.rm=TRUE ) )
}
# Start date for evaluation period (e.g., after spinup period)
startDate <- as.POSIXct("2008-10-01", format="%Y-%m-%d", tz="UTC")
# Archive model run output files?
archiveOutput <- FALSE
# Archive model run files?
archiveRun <- FALSE
|
# Build the standard HTTP headers for calls to the Azure Resource Manager API.
azureApiHeaders <- function(token) {
  httr::add_headers(
    .headers = c(
      Host = "management.azure.com",
      Authorization = token,
      `Content-type` = "application/json"
    )
  )
}
# convert verbose=TRUE to httr verbose
# Map a logical flag to the httr verbosity config (NULL means quiet calls).
set_verbosity <- function(verbose = FALSE) {
  if (!verbose) {
    return(NULL)
  }
  httr::verbose(TRUE)
}
# Extract the query-string portion of a URL and canonicalize it for the
# Azure shared-key signature: one "name:value" pair per line, sorted by
# name, prefixed with a newline. Returns "" when the URL has no query.
extractUrlArguments <- function(x) {
  if (!grepl("\\?", x)) {
    return("")
  }
  query <- gsub(".*\\?(.*?)", "\\1", x)
  if (query == "") {
    return("")
  }
  pairs <- sort(strsplit(query, "&")[[1]])
  paste0("\n", gsub("=", ":", paste(pairs, collapse = "\n")))
}
# Perform a signed REST call (GET or PUT) against the Azure Storage service.
#
# Args:
#   url: full request URL, including any query string.
#   verb: HTTP verb; only "GET" and "PUT" are handled (other verbs fall
#         through switch() and return NULL).
#   storageKey, storageAccount: shared-key credentials for the account.
#   headers: extra canonicalized headers folded into the signature.
#   container: blob container name used in the canonicalized resource.
#   CMD: canonicalized query arguments; derived from `url` when missing.
#   size: Content-Length used when signing. NOTE(review): the default
#         nchar(content) is evaluated lazily against `content`, which
#         defaults to NULL (giving integer(0)) for bodyless GETs - confirm
#         the string-to-sign is still canonicalized as intended then.
#   contenttype: MIME type used when signing.
#   content: request body for PUT.
#   verbose: emit httr trace output when TRUE.
callAzureStorageApi <- function(url, verb = "GET", storageKey, storageAccount,
    headers = NULL, container = NULL, CMD, size = nchar(content), contenttype = NULL,
    content = NULL,
    verbose = FALSE) {
  # The same timestamp must go into both the signature and the x-ms-date
  # header or the service rejects the request.
  dateStamp <- httr::http_date(Sys.time())
  verbosity <- set_verbosity(verbose)
  if (missing(CMD) || is.null(CMD)) CMD <- extractUrlArguments(url)
  sig <- createAzureStorageSignature(url = url, verb = verb,
    key = storageKey, storageAccount = storageAccount, container = container,
    headers = headers, CMD = CMD, size = size,
    contenttype = contenttype, dateStamp = dateStamp, verbose = verbose)
  # Shared-key authorization header: "SharedKey <account>:<signature>".
  azToken <- paste0("SharedKey ", storageAccount, ":", sig)
  switch(verb,
    "GET" = GET(url, add_headers(.headers = c(Authorization = azToken,
                                              `Content-Length` = "0",
                                              `x-ms-version` = "2015-04-05",
                                              `x-ms-date` = dateStamp)),
                verbosity),
    # NOTE(review): PUT sends nchar(content) directly rather than the `size`
    # argument, so a caller-supplied `size` affects only the signature -
    # confirm the two cannot disagree.
    "PUT" = PUT(url, add_headers(.headers = c(Authorization = azToken,
                                              `Content-Length` = nchar(content),
                                              `x-ms-version` = "2015-04-05",
                                              `x-ms-date` = dateStamp,
                                              `x-ms-blob-type` = "Blockblob",
                                              `Content-type` = "text/plain; charset=UTF-8")),
                body = content,
                verbosity)
  )
}
# Compute the SharedKey signature for an Azure Storage request.
#
# Args:
#   url: request URL. NOTE(review): accepted but unused in the string-to-sign.
#   verb: HTTP verb, first line of the string-to-sign.
#   key: base64-encoded storage account key.
#   storageAccount, container: build the canonicalized resource path.
#   headers: pre-canonicalized x-ms-* headers, or NULL.
#   CMD: canonicalized query arguments (newline-prefixed "name:value" lines).
#   size: Content-Length field of the string-to-sign.
#   contenttype: Content-Type field of the string-to-sign.
#   dateStamp: RFC 7231 date; defaults to "now" when missing.
#   verbose: print the string-to-sign for debugging.
# Returns the base64-encoded HMAC-SHA256 signature.
createAzureStorageSignature <- function(url, verb,
    key, storageAccount, container = NULL,
    headers = NULL, CMD = NULL, size = NULL, contenttype = NULL, dateStamp, verbose = FALSE) {
  if (missing(dateStamp)) {
    dateStamp <- httr::http_date(Sys.time())
  }
  # Canonicalized headers: caller-supplied headers first, then the required
  # x-ms-date and x-ms-version lines.
  arg1 <- if (length(headers)) {
    paste0(headers, "\nx-ms-date:", dateStamp, "\nx-ms-version:2015-04-05")
  } else {
    paste0("x-ms-date:", dateStamp, "\nx-ms-version:2015-04-05")
  }
  # Canonicalized resource: /account/container plus query arguments.
  arg2 <- paste0("/", storageAccount, "/", container, CMD)
  # The runs of "\n" stand in for the standard headers this client never
  # sets (Content-MD5, Content-Encoding, etc.).
  SIG <- paste0(verb, "\n\n\n", size, "\n\n", contenttype, "\n\n\n\n\n\n\n",
    arg1, "\n", arg2)
  if (verbose) message(paste0("TRACE: STRINGTOSIGN: ", SIG))
  # HMAC-SHA256 with the decoded account key, then base64-encode the digest.
  base64encode(hmac(key = base64decode(key),
    object = iconv(SIG, "ASCII", to = "UTF-8"),
    algo = "sha256",
    raw = TRUE)
  )
}
# Current time formatted per RFC 7231, for the x-ms-date request header.
x_ms_date <- function() {
  httr::http_date(Sys.time())
}
# Assemble the common Azure Storage request headers around a shared-key token.
azure_storage_header <- function(shared_key, date = x_ms_date(), content_length = 0) {
  if (!is.character(shared_key)) {
    stop("Expecting a character for `shared_key`")
  }
  add_headers(.headers = c(
    Authorization = shared_key,
    `Content-Length` = as.character(content_length),
    `x-ms-version` = "2015-04-05",
    `x-ms-date` = date
  ))
}
# Legacy variant of createAzureStorageSignature(): builds the same SharedKey
# string-to-sign and returns the base64-encoded HMAC-SHA256 signature.
# NOTE(review): `azureActiveContext` and `url` are accepted but never used -
# candidates for deprecation; consider delegating to
# createAzureStorageSignature() to remove the duplication.
getSig <- function(azureActiveContext, url, verb, key, storageAccount,
    headers = NULL, container = NULL, CMD = NULL, size = NULL, contenttype = NULL,
    date = x_ms_date(), verbose = FALSE) {
  # Canonicalized headers block (caller headers first, then date/version).
  arg1 <- if (length(headers)) {
    paste0(headers, "\nx-ms-date:", date, "\nx-ms-version:2015-04-05")
  } else {
    paste0("x-ms-date:", date, "\nx-ms-version:2015-04-05")
  }
  # Canonicalized resource path: /account/container plus query arguments.
  arg2 <- paste0("/", storageAccount, "/", container, CMD)
  SIG <- paste0(verb, "\n\n\n", size, "\n\n", contenttype, "\n\n\n\n\n\n\n",
    arg1, "\n", arg2)
  if (verbose) message(paste0("TRACE: STRINGTOSIGN: ", SIG))
  base64encode(hmac(key = base64decode(key),
    object = iconv(SIG, "ASCII", to = "UTF-8"),
    algo = "sha256",
    raw = TRUE)
  )
}
# Inspect an httr response and raise a consolidated R error for any
# non-2xx status, pulling detail from either an XML or a JSON error body.
stopWithAzureError <- function(r) {
  # Success (status < 300): nothing to do.
  if (status_code(r) < 300) return()
  msg <- paste0(as.character(sys.call(1))[1], "()") # Name of calling function
  # Append a (line-wrapped) detail to the accumulated message, skipping NULLs.
  addToMsg <- function(x) {
    if (!is.null(x)) x <- strwrap(x)
    if(is.null(x)) msg else c(msg, x)
  }
  # Azure storage endpoints return XML error bodies; management endpoints
  # return JSON - handle both shapes.
  if(inherits(content(r), "xml_document")){
    rr <- XML::xmlToList(XML::xmlParse(content(r)))
    msg <- addToMsg(rr$Code)
    msg <- addToMsg(rr$Message)
    msg <- addToMsg(rr$AuthenticationErrorDetail)
  } else {
    rr <- content(r)
    msg <- addToMsg(rr$code)
    msg <- addToMsg(rr$message)
    msg <- addToMsg(rr$error$message)
  }
  msg <- addToMsg(paste0("Return code: ", status_code(r)))
  msg <- paste(msg, collapse = "\n")
  stop(msg, call. = FALSE)
}
# Pull individual components out of an Azure resource ID path such as
# "/subscriptions/<id>/resourceGroups/<name>/.../storageAccounts/<account>".
extractResourceGroupname <- function(x) {
  gsub(".*?/resourceGroups/(.*?)(/.*)*$", "\\1", x)
}
extractSubscriptionID <- function(x) {
  gsub(".*?/subscriptions/(.*?)(/.*)*$", "\\1", x)
}
extractStorageAccount <- function(x) {
  gsub(".*?/storageAccounts/(.*?)(/.*)*$", "\\1", x)
}
# Return a valid storage key for `storageAccount`: fetch a fresh one via
# azureSAGetKey() when the active context refers to a different account or
# holds no key yet; otherwise reuse the cached key from the context.
refreshStorageKey <- function(azureActiveContext, storageAccount, resourceGroup){
  if (storageAccount != azureActiveContext$storageAccount ||
      length(azureActiveContext$storageKey) == 0
  ) {
    message("Fetching Storage Key..")
    azureSAGetKey(azureActiveContext, resourceGroup = resourceGroup, storageAccount = storageAccount)
  } else {
    azureActiveContext$storageKey
  }
}
# Update fields of the active Azure context. Only arguments the caller
# actually supplied are written; omitted arguments leave the corresponding
# field untouched. Per the original note the update is "in place", so x is
# presumably an environment or reference object - confirm. Returns TRUE.
updateAzureActiveContext <- function(x, storageAccount, storageKey, resourceGroup, container, blob, directory) {
  # updates the active azure context in place
  assert_that(is.azureActiveContext(x))
  if (!missing(storageAccount)) x$storageAccount <- storageAccount
  if (!missing(resourceGroup)) x$resourceGroup <- resourceGroup
  if (!missing(storageKey)) x$storageKey <- storageKey
  if (!missing(container)) x$container <- container
  if (!missing(blob)) x$blob <- blob
  if (!missing(directory)) x$directory <- directory
  TRUE
}
| /R/internal.R | no_license | amarabou/AzureSMR | R | false | false | 6,316 | r | azureApiHeaders <- function(token) {
headers <- c(Host = "management.azure.com",
Authorization = token,
`Content-type` = "application/json")
httr::add_headers(.headers = headers)
}
# convert verbose=TRUE to httr verbose
set_verbosity <- function(verbose = FALSE) {
if (verbose) httr::verbose(TRUE) else NULL
}
extractUrlArguments <- function(x) {
ptn <- ".*\\?(.*?)"
args <- grepl("\\?", x)
z <- if (args) gsub(ptn, "\\1", x) else ""
if (z == "") {
""
} else {
z <- strsplit(z, "&")[[1]]
z <- sort(z)
z <- paste(z, collapse = "\n")
z <- gsub("=", ":", z)
paste0("\n", z)
}
}
callAzureStorageApi <- function(url, verb = "GET", storageKey, storageAccount,
headers = NULL, container = NULL, CMD, size = nchar(content), contenttype = NULL,
content = NULL,
verbose = FALSE) {
dateStamp <- httr::http_date(Sys.time())
verbosity <- set_verbosity(verbose)
if (missing(CMD) || is.null(CMD)) CMD <- extractUrlArguments(url)
sig <- createAzureStorageSignature(url = url, verb = verb,
key = storageKey, storageAccount = storageAccount, container = container,
headers = headers, CMD = CMD, size = size,
contenttype = contenttype, dateStamp = dateStamp, verbose = verbose)
azToken <- paste0("SharedKey ", storageAccount, ":", sig)
switch(verb,
"GET" = GET(url, add_headers(.headers = c(Authorization = azToken,
`Content-Length` = "0",
`x-ms-version` = "2015-04-05",
`x-ms-date` = dateStamp)
),
verbosity),
"PUT" = PUT(url, add_headers(.headers = c(Authorization = azToken,
`Content-Length` = nchar(content),
`x-ms-version` = "2015-04-05",
`x-ms-date` = dateStamp,
`x-ms-blob-type` = "Blockblob",
`Content-type` = "text/plain; charset=UTF-8")),
body = content,
verbosity)
)
}
createAzureStorageSignature <- function(url, verb,
key, storageAccount, container = NULL,
headers = NULL, CMD = NULL, size = NULL, contenttype = NULL, dateStamp, verbose = FALSE) {
if (missing(dateStamp)) {
dateStamp <- httr::http_date(Sys.time())
}
arg1 <- if (length(headers)) {
paste0(headers, "\nx-ms-date:", dateStamp, "\nx-ms-version:2015-04-05")
} else {
paste0("x-ms-date:", dateStamp, "\nx-ms-version:2015-04-05")
}
arg2 <- paste0("/", storageAccount, "/", container, CMD)
SIG <- paste0(verb, "\n\n\n", size, "\n\n", contenttype, "\n\n\n\n\n\n\n",
arg1, "\n", arg2)
if (verbose) message(paste0("TRACE: STRINGTOSIGN: ", SIG))
base64encode(hmac(key = base64decode(key),
object = iconv(SIG, "ASCII", to = "UTF-8"),
algo = "sha256",
raw = TRUE)
)
}
x_ms_date <- function() httr::http_date(Sys.time())
azure_storage_header <- function(shared_key, date = x_ms_date(), content_length = 0) {
if(!is.character(shared_key)) stop("Expecting a character for `shared_key`")
headers <- c(
Authorization = shared_key,
`Content-Length` = as.character(content_length),
`x-ms-version` = "2015-04-05",
`x-ms-date` = date
)
add_headers(.headers = headers)
}
getSig <- function(azureActiveContext, url, verb, key, storageAccount,
headers = NULL, container = NULL, CMD = NULL, size = NULL, contenttype = NULL,
date = x_ms_date(), verbose = FALSE) {
arg1 <- if (length(headers)) {
paste0(headers, "\nx-ms-date:", date, "\nx-ms-version:2015-04-05")
} else {
paste0("x-ms-date:", date, "\nx-ms-version:2015-04-05")
}
arg2 <- paste0("/", storageAccount, "/", container, CMD)
SIG <- paste0(verb, "\n\n\n", size, "\n\n", contenttype, "\n\n\n\n\n\n\n",
arg1, "\n", arg2)
if (verbose) message(paste0("TRACE: STRINGTOSIGN: ", SIG))
base64encode(hmac(key = base64decode(key),
object = iconv(SIG, "ASCII", to = "UTF-8"),
algo = "sha256",
raw = TRUE)
)
}
stopWithAzureError <- function(r) {
if (status_code(r) < 300) return()
msg <- paste0(as.character(sys.call(1))[1], "()") # Name of calling fucntion
addToMsg <- function(x) {
if (!is.null(x)) x <- strwrap(x)
if(is.null(x)) msg else c(msg, x)
}
if(inherits(content(r), "xml_document")){
rr <- XML::xmlToList(XML::xmlParse(content(r)))
msg <- addToMsg(rr$Code)
msg <- addToMsg(rr$Message)
msg <- addToMsg(rr$AuthenticationErrorDetail)
} else {
rr <- content(r)
msg <- addToMsg(rr$code)
msg <- addToMsg(rr$message)
msg <- addToMsg(rr$error$message)
}
msg <- addToMsg(paste0("Return code: ", status_code(r)))
msg <- paste(msg, collapse = "\n")
stop(msg, call. = FALSE)
}
extractResourceGroupname <- function(x) gsub(".*?/resourceGroups/(.*?)(/.*)*$", "\\1", x)
extractSubscriptionID <- function(x) gsub(".*?/subscriptions/(.*?)(/.*)*$", "\\1", x)
extractStorageAccount <- function(x) gsub(".*?/storageAccounts/(.*?)(/.*)*$", "\\1", x)
refreshStorageKey <- function(azureActiveContext, storageAccount, resourceGroup){
if (storageAccount != azureActiveContext$storageAccount ||
length(azureActiveContext$storageKey) == 0
) {
message("Fetching Storage Key..")
azureSAGetKey(azureActiveContext, resourceGroup = resourceGroup, storageAccount = storageAccount)
} else {
azureActiveContext$storageKey
}
}
updateAzureActiveContext <- function(x, storageAccount, storageKey, resourceGroup, container, blob, directory) {
# updates the active azure context in place
assert_that(is.azureActiveContext(x))
if (!missing(storageAccount)) x$storageAccount <- storageAccount
if (!missing(resourceGroup)) x$resourceGroup <- resourceGroup
if (!missing(storageKey)) x$storageKey <- storageKey
if (!missing(container)) x$container <- container
if (!missing(blob)) x$blob <- blob
if (!missing(directory)) x$directory <- directory
TRUE
}
|
# Auto-generated fuzz/valgrind test case for the internal C++ routine
# multivariance:::match_rows(): a 1x2 matrix A of extreme doubles and a
# 5x7 matrix B that is zero except for one very large entry.
testlist <- list(A = structure(c(2.15638315824787e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613122189-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 323 | r | testlist <- list(A = structure(c(2.15638315824787e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
context("OriginPeriod")
# Shared fixtures: ten consecutive annual (accident-year) periods covering
# 2001-2010, each spanning one full calendar year, labelled "AY <year>".
startDates = seq(as.Date("2001/01/01"), as.Date("2010/12/31"), by="1 year")
endDates = startDates + as.period(1, "year") - days(1)
period = as.period(1, "year")
moniker = paste0("AY ", as.character(year(startDates)))
type = "Accident"
# Dummy data
# Build the standard ten-year annual-period fixture used by the tests below.
GenericTestOP = function() {
  OriginPeriod(startDates, endDates, period, moniker, type)
}
# Construction: OriginPeriod() accepts date vectors, integer year vectors, or
# a start date plus a period/count, with the optional arguments named in any
# combination.
test_that("Construction", {
  x = OriginPeriod(startDates)
  expect_true(is.OriginPeriod(x))
  x = OriginPeriod(startDates, endDates)
  expect_true(is.OriginPeriod(x))
  x = OriginPeriod(startDates, Period=period)
  expect_true(is.OriginPeriod(x))
  # x = OriginPeriod(startDates, period)
  # expect_true(is.OriginPeriod(x))
  x = OriginPeriod(startDates, endDates, Moniker=moniker)
  expect_true(is.OriginPeriod(x))
  x = OriginPeriod(startDates, endDates, Type=type)
  expect_true(is.OriginPeriod(x))
  x = OriginPeriod(startDates, endDates, Moniker=moniker, Type=type)
  expect_true(is.OriginPeriod(x))
  x = OriginPeriod(startDates, Period=period, Moniker=moniker, Type=type)
  expect_true(is.OriginPeriod(x))
  x = OriginPeriod(startDates, Type="Accident")
  expect_true(is.OriginPeriod(x))
  # This won't work! R assumes that the unnamed argument is meant to be Period, so
  # it will dispatch the default method
  expect_error(OriginPeriod(startDates, endDates, moniker, type))
  # integers
  # NOTE(review): seq(2001:2010) is seq_along(2001:2010), i.e. 1:10 -- not the
  # years themselves. seq(2001, 2010) was probably intended.
  accidentYears = seq(2001:2010)
  x = OriginPeriod(accidentYears, StartMonth = 7, StartDay = 1, Moniker=moniker)
  expect_true(is.OriginPeriod(x))
  expect_true(month(x$StartDate[1]) == 7)
  x = OriginPeriod(accidentYears, Type=type, Moniker=moniker)
  expect_true(is.OriginPeriod(x))
  expect_true(month(x$StartDate[1]) == 1)
  x = OriginPeriod(accidentYears)
  expect_true(is.OriginPeriod(x))
  # semi-annual: ten 6-month periods spanning 2001-2005
  startDates = seq(as.Date("2001/01/01"), as.Date("2005/12/31"), by="6 months")
  endDates = startDates + as.period(6, "months") - days(1)
  x = OriginPeriod(startDates, endDates)
  expect_true(is.OriginPeriod(x))
  expect_true(length(x) == 10)
  # construction from a start date plus either an end date or a period count
  op = OriginPeriod(StartDate = as.Date("2001-01-01"), EndDate = as.Date("2010-07-01")
                    , Period=as.period(6, "months"))
  expect_true(is.OriginPeriod(op))
  op = OriginPeriod(StartDate = as.Date("2001-01-01"), Period=as.period(6, "months")
                    , NumPeriods=20)
  expect_true(is.OriginPeriod(op))
})
# Accessors: numeric, ranged, and character (date-string) subscripts all
# return OriginPeriod objects of the expected length; $ extracts fields.
test_that("Accessors", {
  x = OriginPeriod(2001:2010)
  # single numeric subscript
  y = x[1]
  expect_true(is.OriginPeriod(y))
  expect_true(length(y) == 1)
  # contiguous numeric range
  y = x[2:3]
  expect_true(is.OriginPeriod(y))
  expect_true(length(y) == 2)
  # character subscripts select by date string
  y = x["2004-01-01"]
  expect_true(is.OriginPeriod(y))
  expect_true(length(y) == 1)
  y = x[c("2004-01-01", "2005-01-01")]
  expect_true(is.OriginPeriod(y))
  expect_true(length(y) == 2)
  # non-contiguous numeric vector
  y = x[c(1, 8)]
  expect_true(is.OriginPeriod(y))
  expect_true(length(y) == 2)
  # $ pulls out individual fields with their natural types
  y = x$StartDate
  expect_true(is.Date(y))
  y = x$Type
  expect_true(is.character(y))
  y = x$Moniker[3]
  expect_true(is.character(y))
})
# Assignment: field replacement is validated -- partial Moniker updates must
# match the length of the replaced slice, and full replacement must be
# character, otherwise an error is raised.
test_that("Assignment", {
  # NOTE(review): seq(2001:2010) evaluates to 1:10 (seq_along), not the years.
  x = OriginPeriod(seq(2001:2010))
  x$Type = "Report"
  expect_true(x$Type == "Report")
  x$Moniker[3] ="blah"
  expect_true(x$Moniker[3] == "blah")
  # replacing two elements with a single value must fail
  expect_error(x$Moniker[5:6] <- "blah")
  x$Moniker[5:6] = c("AY 2005", "AY 2006")
  expect_error(x$Moniker[] <- "blah")
  # non-character replacement must fail
  expect_error(x$Moniker <- seq(2001:2010))
  x$Moniker[] = as.character(seq(1, length(x)))
  expect_true(x$Moniker[1] == 1)
})
# Comparison: == / != compare whole OriginPeriod objects.
test_that("Comparison", {
  x = OriginPeriod(seq(2001, 2005))
  y = OriginPeriod(seq(2002, 2006))
  expect_true(x != y)
  expect_true(x == x)
})
# Conversion: an OriginPeriod coerces to a data.frame.
test_that("Conversion", {
  x = OriginPeriod(seq(2001, 2005))
  z = as.data.frame(x)
  expect_true(is.data.frame(z))
})
# Concatenation: rbind()/c() append periods; duplicated periods are rejected;
# Grow() extends the object, filling placeholder monikers.
test_that("Concatenate", {
  x = OriginPeriod(startDates)
  y = OriginPeriod(max(startDates) + as.period(1, "year"))
  z = rbind(x, y)
  expect_true(length(z) == length(x) + length(y))
  z = c(x, y)
  expect_true(length(z) == length(x) + length(y))
  # binding an object to itself would create duplicate periods
  expect_error(z <- rbind(x, x))
  x = Grow(x, Length=2)
  # NOTE(review): these expectations use S4 slot access (x@Moniker) while the
  # other tests use x$Moniker -- confirm which accessor is canonical.
  expect_true(length(x@Moniker) == 12)
  expect_true(x@Moniker[12] == "New moniker 2")
})
# Persistence: rebuilds the fixtures locally (shadowing the file-level ones)
# and writes the object out; creates "OriginPeriod.xlsx" in the working
# directory as a side effect.
test_that("Persistence", {
  startDates = seq(as.Date("2001/01/01"), as.Date("2010/12/31"), by="1 year")
  endDates = startDates + as.period(1, "year") - days(1)
  period = as.period(1, "year")
  moniker = paste0("AY ", as.character(year(startDates)))
  type = "Accident"
  op = OriginPeriod(startDates, Period=as.period(1, "years"), Moniker=moniker, Type=type)
  write.excel(op, "OriginPeriod.xlsx", overwrite=TRUE)
})
#=============================================
# rep, subset, arithmetic ?
| /inst/tests/test-OriginPeriod.R | no_license | PirateGrunt/MRMR | R | false | false | 4,697 | r | context("OriginPeriod")
startDates = seq(as.Date("2001/01/01"), as.Date("2010/12/31"), by="1 year")
endDates = startDates + as.period(1, "year") - days(1)
period = as.period(1, "year")
moniker = paste0("AY ", as.character(year(startDates)))
type = "Accident"
# Dummy data
GenericTestOP = function(){
op = OriginPeriod(startDates, endDates, period, moniker, type)
op
}
test_that("Construction", {
x = OriginPeriod(startDates)
expect_true(is.OriginPeriod(x))
x = OriginPeriod(startDates, endDates)
expect_true(is.OriginPeriod(x))
x = OriginPeriod(startDates, Period=period)
expect_true(is.OriginPeriod(x))
# x = OriginPeriod(startDates, period)
# expect_true(is.OriginPeriod(x))
x = OriginPeriod(startDates, endDates, Moniker=moniker)
expect_true(is.OriginPeriod(x))
x = OriginPeriod(startDates, endDates, Type=type)
expect_true(is.OriginPeriod(x))
x = OriginPeriod(startDates, endDates, Moniker=moniker, Type=type)
expect_true(is.OriginPeriod(x))
x = OriginPeriod(startDates, Period=period, Moniker=moniker, Type=type)
expect_true(is.OriginPeriod(x))
x = OriginPeriod(startDates, Type="Accident")
expect_true(is.OriginPeriod(x))
# This won't work! R assumes that the unnamed argument is meant to be Period, so
# it will dispatch the default method
expect_error(OriginPeriod(startDates, endDates, moniker, type))
# integers
accidentYears = seq(2001:2010)
x = OriginPeriod(accidentYears, StartMonth = 7, StartDay = 1, Moniker=moniker)
expect_true(is.OriginPeriod(x))
expect_true(month(x$StartDate[1]) == 7)
x = OriginPeriod(accidentYears, Type=type, Moniker=moniker)
expect_true(is.OriginPeriod(x))
expect_true(month(x$StartDate[1]) == 1)
x = OriginPeriod(accidentYears)
expect_true(is.OriginPeriod(x))
# semi-annual
startDates = seq(as.Date("2001/01/01"), as.Date("2005/12/31"), by="6 months")
endDates = startDates + as.period(6, "months") - days(1)
x = OriginPeriod(startDates, endDates)
expect_true(is.OriginPeriod(x))
expect_true(length(x) == 10)
op = OriginPeriod(StartDate = as.Date("2001-01-01"), EndDate = as.Date("2010-07-01")
, Period=as.period(6, "months"))
expect_true(is.OriginPeriod(op))
op = OriginPeriod(StartDate = as.Date("2001-01-01"), Period=as.period(6, "months")
, NumPeriods=20)
expect_true(is.OriginPeriod(op))
})
test_that("Accessors", {
x = OriginPeriod(2001:2010)
y = x[1]
expect_true(is.OriginPeriod(y))
expect_true(length(y) == 1)
y = x[2:3]
expect_true(is.OriginPeriod(y))
expect_true(length(y) == 2)
y = x["2004-01-01"]
expect_true(is.OriginPeriod(y))
expect_true(length(y) == 1)
y = x[c("2004-01-01", "2005-01-01")]
expect_true(is.OriginPeriod(y))
expect_true(length(y) == 2)
y = x[c(1, 8)]
expect_true(is.OriginPeriod(y))
expect_true(length(y) == 2)
y = x$StartDate
expect_true(is.Date(y))
y = x$Type
expect_true(is.character(y))
y = x$Moniker[3]
expect_true(is.character(y))
})
test_that("Assignment", {
x = OriginPeriod(seq(2001:2010))
x$Type = "Report"
expect_true(x$Type == "Report")
x$Moniker[3] ="blah"
expect_true(x$Moniker[3] == "blah")
expect_error(x$Moniker[5:6] <- "blah")
x$Moniker[5:6] = c("AY 2005", "AY 2006")
expect_error(x$Moniker[] <- "blah")
expect_error(x$Moniker <- seq(2001:2010))
x$Moniker[] = as.character(seq(1, length(x)))
expect_true(x$Moniker[1] == 1)
})
test_that("Comparison", {
x = OriginPeriod(seq(2001, 2005))
y = OriginPeriod(seq(2002, 2006))
expect_true(x != y)
expect_true(x == x)
})
test_that("Conversion", {
x = OriginPeriod(seq(2001, 2005))
z = as.data.frame(x)
expect_true(is.data.frame(z))
})
test_that("Concatenate", {
x = OriginPeriod(startDates)
y = OriginPeriod(max(startDates) + as.period(1, "year"))
z = rbind(x, y)
expect_true(length(z) == length(x) + length(y))
z = c(x, y)
expect_true(length(z) == length(x) + length(y))
expect_error(z <- rbind(x, x))
x = Grow(x, Length=2)
expect_true(length(x@Moniker) == 12)
expect_true(x@Moniker[12] == "New moniker 2")
})
test_that("Persistence", {
startDates = seq(as.Date("2001/01/01"), as.Date("2010/12/31"), by="1 year")
endDates = startDates + as.period(1, "year") - days(1)
period = as.period(1, "year")
moniker = paste0("AY ", as.character(year(startDates)))
type = "Accident"
op = OriginPeriod(startDates, Period=as.period(1, "years"), Moniker=moniker, Type=type)
write.excel(op, "OriginPeriod.xlsx", overwrite=TRUE)
})
#=============================================
# rep, subset, arithmetic ?
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imagebuilder_operations.R
\name{imagebuilder_list_image_build_versions}
\alias{imagebuilder_list_image_build_versions}
\title{Returns a list of image build versions}
\usage{
imagebuilder_list_image_build_versions(
imageVersionArn,
filters = NULL,
maxResults = NULL,
nextToken = NULL
)
}
\arguments{
\item{imageVersionArn}{[required] The Amazon Resource Name (ARN) of the image whose build versions you
want to retrieve.}
\item{filters}{Use the following filters to streamline results:
\itemize{
\item \code{name}
\item \code{osVersion}
\item \code{platform}
\item \code{type}
\item \code{version}
}}
\item{maxResults}{The maximum items to return in a request.}
\item{nextToken}{A token to specify where to start paginating. This is the NextToken from
a previously truncated response.}
}
\description{
Returns a list of image build versions.
See \url{https://www.paws-r-sdk.com/docs/imagebuilder_list_image_build_versions/} for full documentation.
}
\keyword{internal}
| /cran/paws.compute/man/imagebuilder_list_image_build_versions.Rd | permissive | paws-r/paws | R | false | true | 1,058 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imagebuilder_operations.R
\name{imagebuilder_list_image_build_versions}
\alias{imagebuilder_list_image_build_versions}
\title{Returns a list of image build versions}
\usage{
imagebuilder_list_image_build_versions(
imageVersionArn,
filters = NULL,
maxResults = NULL,
nextToken = NULL
)
}
\arguments{
\item{imageVersionArn}{[required] The Amazon Resource Name (ARN) of the image whose build versions you
want to retrieve.}
\item{filters}{Use the following filters to streamline results:
\itemize{
\item \code{name}
\item \code{osVersion}
\item \code{platform}
\item \code{type}
\item \code{version}
}}
\item{maxResults}{The maximum items to return in a request.}
\item{nextToken}{A token to specify where to start paginating. This is the NextToken from
a previously truncated response.}
}
\description{
Returns a list of image build versions.
See \url{https://www.paws-r-sdk.com/docs/imagebuilder_list_image_build_versions/} for full documentation.
}
\keyword{internal}
|
\name{Lambda4}
\alias{Lambda4}
\title{Collection of Internal Consistency Reliability Coefficients.}
\description{
Currently the package includes 14 methods for calculating internal
consistency reliability but is still growing. The package allows users
access to whichever reliability estimator is deemed most appropriate for
their situation.
}
\section{Functions}{
\itemize{
\item \code{angoff}: Compute Angoff Coefficient
\item \code{bin.combs}: Generate Unique Binary Combinations
\item \code{cov.lambda4}: Compute Covariance Maximized Lambda4
\item \code{impute.cov}: Compute Covariance Matrix
\item \code{kristof}: Compute Kristof Coefficient
\item \code{lambda1}: Compute Guttman's Lambda 1 Coefficient
\item \code{lambda2}: Compute Guttman's Lambda 2 Coefficient
  \item \code{lambda3}: Compute Guttman's Lambda 3 Coefficient (Coefficient Alpha)
\item \code{lambda5}: Compute Guttman's Lambda 5 Coefficient
\item \code{lambda6}: Compute Guttman's Lambda 6 Coefficient
\item \code{lambdas}: Compute Guttman's Lambda Coefficients
\item \code{omega.tot}: Compute McDonald's Omega Total
\item \code{quant.lambda4}: Compute Quantile Lambda 4
\item \code{raju}: Compute Raju's Coefficient
\item \code{user.lambda4}: Compute User Specified Lambda 4 (Split-Half)
}
}
\author{
Tyler Hunt \email{tyler@psychoanalytix.com}
}
\references{
Cronbach L (1951). "Coefficient Alpha and the Internal Structure of Tests." Psychometrika, 16, 297-334.
Guttman L (1945). "A Basis for Analyzing Test-Retest Reliability." Psychometrika, 10, 255-282.
Callender J, Osburn H (1977). "A Method for Maximizing and Cross-Validating Split-Half Reliability Coefficients." Educational and Psychological Measurement, 37, 819-826.
Callender J, Osburn H (1979). "An Empirical Comparison of Coefficient Alpha, Guttman's Lambda2 and Msplit Maximized Split-Half Reliability Estimates." Journal of Educational Measurement, 16, 89-99.
Sijtsma K (2009). "On the Use, Misuse, and Very Limited Usefulness of Cronbach's Alpha." Psychometrika, 74(1), 107-120.
}
| /man/Lambda4.Rd | no_license | JackStat/Lambda4 | R | false | false | 2,076 | rd | \name{Lambda4}
\alias{Lambda4}
\title{Collection of Internal Consistency Reliability Coefficients.}
\description{
Currently the package includes 14 methods for calculating internal
consistency reliability but is still growing. The package allows users
access to whichever reliability estimator is deemed most appropriate for
their situation.
}
\section{Functions}{
\itemize{
\item \code{angoff}: Compute Angoff Coefficient
\item \code{bin.combs}: Generate Unique Binary Combinations
\item \code{cov.lambda4}: Compute Covariance Maximized Lambda4
\item \code{impute.cov}: Compute Covariance Matrix
\item \code{kristof}: Compute Kristof Coefficient
\item \code{lambda1}: Compute Guttman's Lambda 1 Coefficient
\item \code{lambda2}: Compute Guttman's Lambda 2 Coefficient
  \item \code{lambda3}: Compute Guttman's Lambda 3 Coefficient (Coefficient Alpha)
\item \code{lambda5}: Compute Guttman's Lambda 5 Coefficient
\item \code{lambda6}: Compute Guttman's Lambda 6 Coefficient
\item \code{lambdas}: Compute Guttman's Lambda Coefficients
\item \code{omega.tot}: Compute McDonald's Omega Total
\item \code{quant.lambda4}: Compute Quantile Lambda 4
\item \code{raju}: Compute Raju's Coefficient
\item \code{user.lambda4}: Compute User Specified Lambda 4 (Split-Half)
}
}
\author{
Tyler Hunt \email{tyler@psychoanalytix.com}
}
\references{
Cronbach L (1951). "Coefficient Alpha and the Internal Structure of Tests." Psychometrika, 16, 297-334.
Guttman L (1945). "A Basis for Analyzing Test-Retest Reliability." Psychometrika, 10, 255-282.
Callender J, Osburn H (1977). "A Method for Maximizing and Cross-Validating Split-Half Reliability Coefficients." Educational and Psychological Measurement, 37, 819-826.
Callender J, Osburn H (1979). "An Empirical Comparison of Coefficient Alpha, Guttman's Lambda2 and Msplit Maximized Split-Half Reliability Estimates." Journal of Educational Measurement, 16, 89-99.
Sijtsma K (2009). "On the Use, Misuse, and Very Limited Usefulness of Cronbach's Alpha." Psychometrika, 74(1), 107-120.
}
|
### Load the required packages
library(tidyverse)
library(lubridate)
### Load the data into the workspace
municipios.cdmx <- read.csv("cdmx_rutas_municipios.csv", stringsAsFactors=T)
### Origin municipality: frequency table sorted by trip count
### (resulting columns: municipios_origen, n)
(col.mun.origen<-select(municipios.cdmx, municipios_origen) %>%
  group_by(municipios_origen) %>%
  count(municipios_origen, sort = TRUE))
# NOTE(review): col.mun.origen now holds only municipios_origen and n, so the
# pickup_date conversion/arrange on the following lines references a column
# that does not exist here -- it probably belongs on municipios.cdmx instead.
### Convertimos a date la columna pickup_date
(col.mun.origen$pickup_date<-as.Date(col.mun.origen$pickup_date,"%d/%m/%Y"))
### Ordenamos las fechas en orden ascendete de año
col.mun.origen<-arrange(col.mun.origen, pickup_date)
### Municipio de destino
(col.mun.destino<-select(municipios.cdmx, municipios_destino) %>%
group_by(municipios_destino) %>%
count(municipios_destino, sort = TRUE))
### Overview chart: destination municipalities faceted by transport type
ggplot(municipio.transporte.d) +
  geom_bar(aes(x = reorder(Transporte, n), y=n, fill = Transporte), stat = "identity", show.legend = FALSE) +
  coord_flip(clip = 'off') + ggtitle("municipios de destino") +
  facet_wrap(municipios_destino~., scales = "free_x", strip.position = "top")
### One horizontal bar chart of destination municipalities per transport type.
### Replaces seven copy-pasted ggplot blocks with a single helper; the titles
### are identical to the originals ("Municipios de destino <type>").
### NOTE(review): municipio.transporte.d is not created in this script -- it
### is assumed to exist in the surrounding session/workspace.
plot.destino.transporte <- function(datos, transporte) {
  ggplot(filter(datos, Transporte == transporte)) +
    geom_bar(aes(x = reorder(municipios_destino, n), y = n, fill = municipios_destino),
             stat = "identity", show.legend = FALSE) +
    coord_flip(clip = 'off') +
    ggtitle(paste("Municipios de destino", transporte))
}
### One plot per transport type (same titles as before)
plot.destino.transporte(municipio.transporte.d, "Taxi Libre")
plot.destino.transporte(municipio.transporte.d, "Taxi de Sitio")
plot.destino.transporte(municipio.transporte.d, "Radio Taxi")
plot.destino.transporte(municipio.transporte.d, "UberX")
plot.destino.transporte(municipio.transporte.d, "UberXL")
plot.destino.transporte(municipio.transporte.d, "UberSUV")
plot.destino.transporte(municipio.transporte.d, "UberBlack")
| /4. Exploración de datos/Heatmaps/CDMX Top 10 municipios Destino.R | no_license | DavidGilP/Proyecto_R_Transporte_CDMX | R | false | false | 3,305 | r | ### Cargamos las paqueterias
library(tidyverse)
library(lubridate)
### Cargamos los datos a la workspace
municipios.cdmx <- read.csv("cdmx_rutas_municipios.csv", stringsAsFactors=T)
### Municipio de origen
(col.mun.origen<-select(municipios.cdmx, municipios_origen) %>%
group_by(municipios_origen) %>%
count(municipios_origen, sort = TRUE))
### Convertimos a date la columna pickup_date
(col.mun.origen$pickup_date<-as.Date(col.mun.origen$pickup_date,"%d/%m/%Y"))
### Ordenamos las fechas en orden ascendete de año
col.mun.origen<-arrange(col.mun.origen, pickup_date)
### Municipio de destino
(col.mun.destino<-select(municipios.cdmx, municipios_destino) %>%
group_by(municipios_destino) %>%
count(municipios_destino, sort = TRUE))
### Gráfico municipios de destino por tipo de transporte
ggplot(municipio.transporte.d) +
geom_bar(aes(x = reorder(Transporte, n), y=n, fill = Transporte), stat = "identity", show.legend = FALSE) +
coord_flip(clip = 'off') + ggtitle("municipios de destino") +
facet_wrap(municipios_destino~., scales = "free_x", strip.position = "top")
### Gráfico municipios de destino por taxi libre
ggplot(filter(municipio.transporte.d, Transporte == "Taxi Libre")) +
geom_bar(aes(x = reorder(municipios_destino, n), y=n, fill = municipios_destino), stat = "identity", show.legend = FALSE) +
coord_flip(clip = 'off') + ggtitle("Municipios de destino Taxi Libre")
### Gráfico municipios de destino por Taxi de Sitio
ggplot(filter(municipio.transporte.d, Transporte == "Taxi de Sitio")) +
geom_bar(aes(x = reorder(municipios_destino, n), y=n, fill = municipios_destino), stat = "identity", show.legend = FALSE) +
coord_flip(clip = 'off') + ggtitle("Municipios de destino Taxi de Sitio")
### Gráfico municipios de destino por Radio Taxi
ggplot(filter(municipio.transporte.d, Transporte == "Radio Taxi")) +
geom_bar(aes(x = reorder(municipios_destino, n), y=n, fill = municipios_destino), stat = "identity", show.legend = FALSE) +
coord_flip(clip = 'off') + ggtitle("Municipios de destino Radio Taxi")
### Gráfico municipios de destino por UberX
ggplot(filter(municipio.transporte.d, Transporte == "UberX")) +
geom_bar(aes(x = reorder(municipios_destino, n), y=n, fill = municipios_destino), stat = "identity", show.legend = FALSE) +
coord_flip(clip = 'off') + ggtitle("Municipios de destino UberX")
### Gráfico municipios de destino por UberXL
ggplot(filter(municipio.transporte.d, Transporte == "UberXL")) +
geom_bar(aes(x = reorder(municipios_destino, n), y=n, fill = municipios_destino), stat = "identity", show.legend = FALSE) +
coord_flip(clip = 'off') + ggtitle("Municipios de destino UberXL")
### Gráfico municipios de destino por UberSUV
ggplot(filter(municipio.transporte.d, Transporte == "UberSUV")) +
geom_bar(aes(x = reorder(municipios_destino, n), y=n, fill = municipios_destino), stat = "identity", show.legend = FALSE) +
coord_flip(clip = 'off') + ggtitle("Municipios de destino UberSUV")
### Gráfico municipios de destino por UberBlack
ggplot(filter(municipio.transporte.d, Transporte == "UberBlack")) +
geom_bar(aes(x = reorder(municipios_destino, n), y=n, fill = municipios_destino), stat = "identity", show.legend = FALSE) +
coord_flip(clip = 'off') + ggtitle("Municipios de destino UberBlack")
|
# Draw a heatmap of the most variable genes, measured by row variance of the
# normalized counts-per-million, and save it as a PNG.
#
# Args:
#   output.file: path of the PNG file to create.
#   top.genes:   number of highest-variance genes to display (default 20,
#                matching the previously hard-coded behaviour).
#
# NOTE(review): reads the globals `out.edgeR`, `normalizationMethod` and
# `counts` from the surrounding pipeline script -- consider passing them in.
Heatmap <- function(output.file="heatmap.png", top.genes=20)
{
  object <- out.edgeR$dge
  # normalization factors, then CPM on the normalized library sizes
  dge <- calcNormFactors(object, method=normalizationMethod)
  countspermi <- cpm(dge, normalized.lib.sizes=TRUE)
  # Pick the genes with the top variance over all samples; the min() guard
  # avoids NA indices when there are fewer genes than requested.
  rv <- rowVars(countspermi)
  idx <- order(-rv)[seq_len(min(top.genes, length(rv)))]
  # Plotting: width scales with the number of samples. on.exit() ensures the
  # PNG device is closed even if pheatmap() errors (the original leaked it).
  png(filename=output.file, width=min(3600, 1800+800*ncol(counts)/10), height=1500, res=300)
  on.exit(dev.off(), add = TRUE)
  pheatmap(countspermi[idx,], main="Heatmap")
}
| /edgeR_multi/3.0/heatmap.R | no_license | cyverse/docker-builds | R | false | false | 529 | r | Heatmap <- function(output.file="heatmap.png")
{
object=out.edgeR$dge
# normalization
dge <- calcNormFactors(object, method=normalizationMethod)
# countspermillion
countspermi <- cpm(dge, normalized.lib.sizes=TRUE)
# Now pick the genes with the top variance over all samples:
rv <- rowVars(countspermi)
idx <- order(-rv)[1:20]
# Plotting
png(filename=output.file,width=min(3600,1800+800*ncol(counts)/10),height=1500,res=300)
pheatmap(countspermi[idx,], main="Heatmap")
dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consumersurveys_objects.R
\name{MobileAppPanel}
\alias{MobileAppPanel}
\title{MobileAppPanel Object}
\usage{
MobileAppPanel(country = NULL, isPublicPanel = NULL, language = NULL,
mobileAppPanelId = NULL, name = NULL, owners = NULL)
}
\arguments{
\item{country}{Country code for the country of the users that the panel contains}
\item{isPublicPanel}{Whether or not the panel is accessible to all API users}
\item{language}{Language code that the panel can target}
\item{mobileAppPanelId}{Unique panel ID string}
\item{name}{Human readable name of the audience panel}
\item{owners}{List of email addresses for users who can target members of this panel}
}
\value{
MobileAppPanel object
}
\description{
MobileAppPanel Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Representation of an individual pre-defined panel object defining a targeted audience of opinion rewards mobile app users.
}
\seealso{
Other MobileAppPanel functions: \code{\link{mobileapppanels.update}}
}
| /googleconsumersurveysv2.auto/man/MobileAppPanel.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,094 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consumersurveys_objects.R
\name{MobileAppPanel}
\alias{MobileAppPanel}
\title{MobileAppPanel Object}
\usage{
MobileAppPanel(country = NULL, isPublicPanel = NULL, language = NULL,
mobileAppPanelId = NULL, name = NULL, owners = NULL)
}
\arguments{
\item{country}{Country code for the country of the users that the panel contains}
\item{isPublicPanel}{Whether or not the panel is accessible to all API users}
\item{language}{Language code that the panel can target}
\item{mobileAppPanelId}{Unique panel ID string}
\item{name}{Human readable name of the audience panel}
\item{owners}{List of email addresses for users who can target members of this panel}
}
\value{
MobileAppPanel object
}
\description{
MobileAppPanel Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Representation of an individual pre-defined panel object defining a targeted audience of opinion rewards mobile app users.
}
\seealso{
Other MobileAppPanel functions: \code{\link{mobileapppanels.update}}
}
|
#' @include aaa.R
#'
NULL
#' @export
tween_elements <- function(data, time, group, ease, timerange, nframes) {
  # Tween each element (row group) of `data` onto a common frame grid.
  #
  # Args:
  #   data:      data.frame of element states, one row per element per time.
  #   time:      name of the column holding the time of each state.
  #   group:     name of the column identifying which element a row belongs to.
  #   ease:      name of the column naming the easing function per state.
  #   timerange: optional time range to tween over (defaults to range of data).
  #   nframes:   optional number of frames (defaults to ~one per time unit).
  #
  # Returns a data.frame of interpolated columns with .frame/.group columns
  # and a "framelength" attribute.
  if (!all(data[[ease]] %in% validEase)) {
    stop("All names given in the easing column must be valid easers")
  }
  if (missing(timerange) || is.null(timerange)) {
    timerange <- range(data[[time]])
  }
  if (missing(nframes) || is.null(nframes)) {
    nframes <- ceiling(diff(timerange) + 1)
  }
  framelength <- diff(timerange) / nframes
  specialCols <- c(group, ease)
  group <- as.character(data[[group]])
  data <- data[order(group, data[[time]]), ]
  # BUG FIX: `time` holds a column *name*; the original used data$time, which
  # only works when the column is literally called "time".
  frame <- round((data[[time]] - timerange[1]) / framelength)
  ease <- as.character(data[[ease]])
  data <- data[, !names(data) %in% specialCols, drop = FALSE]
  colClasses <- col_classes(data)
  tweendata <- lapply(seq_along(data), function(i) {
    # BUG FIX: the original read `d <- d[[i]]`, indexing an undefined object;
    # each *column* of `data` is what gets interpolated.
    d <- data[[i]]
    switch(
      colClasses[i],
      numeric = interpolate_numeric_element(d, group, frame, ease),
      factor = interpolate_factor_element(d, group, frame, ease),
      character = interpolate_character_element(d, group, frame, ease),
      colour = interpolate_colour_element(d, group, frame, ease),
      date = interpolate_date_element(d, group, frame, ease),
      datetime = interpolate_datetime_element(d, group, frame, ease),
      constant = interpolate_constant_element(d, group, frame, ease)
    )
  })
  tweenInfo <- tweendata[[1]][, c('group', 'frame')]
  tweendata <- as.data.frame(lapply(tweendata, `[[`, i = 'data'))
  names(tweendata) <- names(data)
  tweendata$.frame <- tweenInfo$frame
  tweendata$.group <- tweenInfo$group
  attr(tweendata, 'framelength') <- framelength
  tweendata
}
| /R/tween_elements.R | no_license | arturocm/tweenr | R | false | false | 1,763 | r | #' @include aaa.R
#'
NULL
#' @export
tween_elements <- function(data, time, group, ease, timerange, nframes) {
if (!all(data[[ease]] %in% validEase)) {
stop("All names given in the easing column must be valid easers")
}
if (missing(timerange) || is.null(timerange)) {
timerange <- range(data[[time]])
}
if (missing(nframes) || is.null(nframes)) {
nframes <- ceiling(diff(timerange) + 1)
}
framelength <- diff(timerange) / nframes
specialCols <- c(group, ease)
group <- as.character(data[[group]])
data <- data[order(group, data[[time]]), ]
frame <- round((data$time - timerange[1]) / framelength)
ease <- as.character(data[[ease]])
data <- data[, !names(data) %in% specialCols, drop = FALSE]
colClasses <- col_classes(data)
tweendata <- lapply(seq_along(data), function(i) {
d <- d[[i]]
switch(
colClasses[i],
numeric = interpolate_numeric_element(d, group, frame, ease),
factor = interpolate_factor_element(d, group, frame, ease),
character = interpolate_character_element(d, group, frame, ease),
colour = interpolate_colour_element(d, group, frame, ease),
date = interpolate_date_element(d, group, frame, ease),
datetime = interpolate_datetime_element(d, group, frame, ease),
constant = interpolate_constant_element(d, group, frame, ease)
)
})
tweenInfo <- tweendata[[1]][, c('group', 'frame')]
tweendata <- as.data.frame(lapply(tweendata, `[[`, i = 'data'))
names(tweendata) <- names(data)
tweendata$.frame <- tweenInfo$frame
tweendata$.group <- tweenInfo$group
attr(tweendata, 'framelength') <- framelength
tweendata
}
|
# Course project plot 1: histogram of Global Active Power for 2007-02-01/02.
library(dplyr)
library(lubridate)
# '?' marks missing values in the raw file; nrows caps the read since the two
# target days fall well within the first 550k rows.
data <- read.csv("household_power_consumption.txt", sep = ";", nrows = 550000, na.strings = "?")
data <- mutate(data, Date = as.Date(dmy(Date)))
# Keep only the two days of interest (replaces filter + filter + rbind).
newdata <- filter(data, Date %in% as.Date(c("2007-02-01", "2007-02-02")))
png('plot1.png', 480, 480)
par(mfrow = c(1, 1), mar = c(4.5, 4, 3.5, 1.5), oma = c(0, 0, 0, 0), cex.main = 1, cex.lab = 0.9, cex.axis = 0.9)
# Fixed y-axis with non-scientific tick labels.
yrange = c(0, 1200)
ticks <- pretty(yrange)
labels <- format(ticks, scientific = FALSE)
hist(as.numeric(newdata$Global_active_power),
     col = "red", main = "Global Active Power",
     xlab = "Global Active Power (Kilowatts)", yaxt = "n")
axis(2, at = ticks, labels = labels, las = 0)
dev.off() | /plot1.r | no_license | dhaead/ExData_Plotting1 | R | false | false | 742 | r |
library(dplyr)
library(lubridate)
data<-read.csv("household_power_consumption.txt",sep = ";",nrows = 550000,na.strings = "?")
da<-as.Date(dmy(data$Date))
data<-mutate(data,Date=da)
newdata1<-filter(data,Date == "2007-02-01")
newdata2<-filter(data,Date == "2007-02-02")
newdata<-rbind(newdata1,newdata2)
png('plot1.png',480,480)
par(mfrow=c(1,1), mar=c(4.5,4,3.5,1.5), oma=c(0,0,0,0),cex.main=1,cex.lab=0.9,cex.axis=0.9)
yrange=c(0,1200)
ticks <- pretty(yrange)
labels <- format(ticks, scientific=FALSE)
hist(as.numeric(newdata$Global_active_power),#*2.11/1000,
col = "red",main = "Global Active Power"
,xlab = "Global Active Power (Kilowatts)",yaxt="n")
axis(2, at = ticks, labels = labels, las = 0)#, cex.axis=0.8)
dev.off() |
# Read a Newick tree and remove its root; the unrooted copy is written back
# out on the following line (presumably for codeml, per the file's path).
library(ape)
testtree <- read.tree("7318_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7318_1_unrooted.txt") | /codeml_files/newick_trees_processed/7318_1/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("7318_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7318_1_unrooted.txt") |
#' Topological Data Analysis of graphs
#'
#' @import igraph
#' @import spam
#' @import FNN
#' @import pdist
#' @import dplyr
#' @import TDA
#' @import randomcoloR
#' @import linkprediction
#'
#' @docType package
#' @name graphTDA
NULL
| /R/package.R | no_license | aida-ugent/graphTDA | R | false | false | 235 | r | #' Topological Data Analysis of graphs
#'
#' @import igraph
#' @import spam
#' @import FNN
#' @import pdist
#' @import dplyr
#' @import TDA
#' @import randomcoloR
#' @import linkprediction
#'
#' @docType package
#' @name graphTDA
NULL
|
# quick quasi-Newton for Latent Space Ranking Model
# no longer using this algo for estimation (b/c worse than optim's)
# but still using the predict.lsqn function at the bottom for prediction based on params
#
# make sure diag of Y is 0 if not counting self-edges
#
# Implementation notes/questions:
# better to update after each z or all z's at once? (seems better all z's at once, i.e. simultaneous updates, they all move from old z location together)
# stata j sometimes sneaking away. probably because it has far lowest citation count
# likelihood levels off after certain #runs, why? yet in terms of correlation to mcmc value fit may be getting better
# improve ad-hoc tuning/Z search to be more dynamic for any input data set
# Bayesian priors mean we shouldn't have to center paramters, but could it speed fitting?
# without bayesian, non-identifiability between sender, receiver, beta (+- constant)
# how sensitivite to hyperpriors?
# note behavior of low connectivity nodes, e.g. if something only sends to 1 other thing in the network,
# positions aren't reliable, can tail away from rest of nodes and make visualization worse
# its sender and/or receiver coef is conflated with position
# To DO:
# Allow for different variances for z's in different dimensions? Seems realistic.
# Long term: write the *binomial* quasi-Newton algo
# library ####
library(ergm)
library(latentnet)
library(geoR) # for divnchisq
library(gtools) #for logit function
#Source the new llik function(s)
source("~/Documents/citation/latent_ranking/latent_ranking_repo/llik.R")
# This is the OLD log-likelihood function: ####
# if est is "MAP" return l(Y|theta) + l(theta) ; if "Y" return l(Y|theta) ; if "theta" return l(theta)
# if object has parameter values they will be used (not including hyperparameters)
# family = poisson fits the normal poisson latent space model
# family = binomial fits a quasi-Stiglery (quasi-symmetric) type model with positions -- off by a constant
# [removed] family = poisson.c fits a constricted poisson position model (T_ij = ~Y_ij + ~Y_ji) by fitting upper tri and letting lower tri be the remainder. Note this is odd because it doesn't demand that E(Cij) = T - E(Cji) given the parameters (positions + random effects + intercept), just that E(C_ji) is a reminder not a glm prediction. This also means the last sender param has no info.
# better way to do this with penalty for missing the total instread of sharp constraint (which probably can't be realized )]
# Log-likelihood / log-prior / unnormalized log-posterior for the latent space
# ranking model (kept for reference; superseded by llik() sourced from llik.R).
#
#   est = "Y"    : log p(Y | theta)             (data log-likelihood)
#   est = "theta": log p(theta)                 (log prior)
#   est = "MAP"  : log p(Y | theta) + log p(theta)
#
# Parameter values found in `object` (an ergmm-style fit) override the explicit
# arguments (variance components always defer to the object when present).
#
#   family = "poisson"  : Poisson model, log E[Y_ij] = beta + s_i + r_j - ||z_i - z_j||
#   family = "binomial" : quasi-symmetric (Stigler-type) model with Y_ij
#                         successes out of Y_ij + Y_ji trials
#
# NOTE(review): est = "MAP"/"theta" require geoR::dinvchisq and family =
# "binomial" requires gtools::inv.logit (both attached at the top of this file).
llik_old <- function(object=NULL, Y=NULL, sender=NULL, receiver=NULL, beta=NULL,
                 Z=NULL, sender.var = 10, receiver.var = 10, Z.var = 10,
                 beta.var = 9, sender.var.df = 3, receiver.var.df = 3, Z.var.df = NULL, #N = number of nodes
                 prior.sender.var = 1, prior.receiver.var = 1, prior.Z.var = NULL,
                 est = "MAP", family = "poisson") {
  # Fail fast with a clear message (previously an unknown family surfaced as
  # a cryptic "object 'pY' not found" error further down)
  if (!family %in% c("poisson", "binomial")) {
    stop("family must be 'poisson' or 'binomial'")
  }
  # Fill unsupplied parameters from the fitted object
  if(is.null(Y)) {Y = object$model$Ym}
  if(is.null(Z)) {Z = object$Z}
  if(is.null(sender)) {sender = object$sender}
  if(is.null(receiver)) {receiver = object$receiver}
  if(is.null(beta)) {beta = object$beta}
  N = nrow(Y)
  # Data-dependent defaults for the position-variance hyperprior (ergmm conventions)
  if(is.null(Z.var.df)) {Z.var.df = sqrt(N)}
  if(is.null(prior.Z.var)) { prior.Z.var = N/8}
  # Variance components stored in the object take precedence over the arguments
  if(!is.null(object$sender.var)) sender.var = object$sender.var
  if(!is.null(object$receiver.var)) receiver.var = object$receiver.var
  if(!is.null(object$Z.var)) Z.var = object$Z.var
  if(!is.null(object$beta.var)) beta.var = object$beta.var
  Z_dist = as.matrix(dist(Z, upper = TRUE))
  # log E[Y_ij] = beta + sender_i + receiver_j - distance(z_i, z_j)
  l_lambda = t(receiver + t(sender - Z_dist)) + beta
  if (family == "poisson") {
    lgamma.constant = sum(lgamma(as.vector(Y+1)), na.rm = TRUE)
    lambda = exp(l_lambda); diag(lambda) = 0  # no self-edge intensity
    pY = sum( Y * l_lambda - lambda, na.rm = TRUE) - lgamma.constant
  }
  if (family == "binomial") {
    lambda = inv.logit(l_lambda)
    Yt = Y + t(Y); diag(Yt) = 0  # trials per dyad
    pY = sum( Y * log(lambda), na.rm = TRUE) + sum((Yt - Y)*log(1-lambda), na.rm = TRUE)
  }
  if (est == "Y") {return(pY)}
  # Gaussian log-priors on beta, sender, receiver, Z; scaled-inverse-chi^2
  # log-priors on the three variance components
  ptheta = log(exp(-beta^2/(2*beta.var)) / sqrt(2*pi*beta.var)) +
    sum(log(exp(-sender^2/(2*sender.var)) / sqrt(2*pi*sender.var))) +
    sum(log(exp(-receiver^2/(2*receiver.var)) / sqrt(2*pi*receiver.var))) +
    sum(log(exp(-Z^2/(2*Z.var)) / sqrt(2*pi*Z.var))) +
    log(dinvchisq(sender.var, sender.var.df, prior.sender.var)) +
    log(dinvchisq(receiver.var, receiver.var.df, prior.receiver.var)) +
    log(dinvchisq(Z.var, Z.var.df, prior.Z.var))
  if (est == "theta") {return(ptheta)}
  map = pY + ptheta # = log p(Y|theta) + log p(theta)
  if (est == "MAP") {return(map)}
}
# quasi-Newton ####
# Quasi-Newton / coordinate-ascent MAP estimation for the latent space ranking model
#   log E[Y_ij] = beta + a_i + b_j - ||z_i - z_j||
# with Gaussian priors on beta, a (sender), b (receiver), positions Z, and
# scaled-inverse-chi^2 priors on the variance components.
#
# Y: N x N count matrix (diagonal zeroed when noSelfEdges = 1); D: latent
# dimension; runs: number of full update sweeps; tol: per-coordinate gradient
# tolerance; v_*/s2_*/sigma2_B: prior hyperparameters; Z.init ("MDS"/"rnorm"/
# "user"), RE.init and user.start control initialization; jmax/epsilon bound
# the inner line searches.
#
# Returns list(last = final iterate, map = best iterate by log-posterior,
# prior = hyperparameter settings), or NULL if the objective ever falls below
# its starting value. NOTE(review): depends on llik() from the sourced llik.R.
lsqn <- function(Y, N=nrow(Y), D = 2, runs = 10, tol = .01, #Y is graph, N = number of nodes
                 # hyperparameters - using defaults from ergmm
                 v_a = 3, v_b = 3, v_z = sqrt(N),
                 s2_a = 1, s2_b = 1, s2_z = N/8,
                 sigma2_B = 9,
                 # prior initial values (will be updated)
                 Z.init = "MDS",
                 RE.init = "rnorm",
                 user.start = list(), #If user supplies values they will be used, override [].init
                 sigma2_a = 10, sigma2_b = 10, sigma2_z = 10,
                 stepsize.init.a = 1, stepsize.init.b = 1,
                 stepsize.init.B = 1, stepsize.init.z = 1,
                 noSelfEdges = 1,
                 jmax = 50, epsilon = 1e-10) {
  r = 0
  if (noSelfEdges) {diag(Y) = 0}
  while (r <= runs) {
    while (r == 0) {
      # initialize positions (Z) ####
      # random normal, multidimensional scaling (MDS, default), or user-specified
      if (Z.init == "user" | !is.null(user.start$Z)) {
        Z = user.start$Z
      } else {
        if (Z.init == "rnorm") {
          Z = matrix(rnorm(D*N), ncol = D)
        } else {
          if (Z.init == "MDS") {
            Z_dist = as.matrix(dist(Y))
            Z = cmdscale(Z_dist, k = D)
          }
        }
        # Standardize Z to center at origin, [0,1] range
        Z = scale(Z, scale = FALSE) #translate to origin
        Z = Z/max(abs(Z)) # shrink - improve this?
      }
      z = t(Z)  # working copy: D x N, one column per node
      Z_dist = as.matrix(dist(Z, upper = TRUE))
      # initialize a, b, B ####
      # latentnet type initialization
      if (RE.init == "latentnet") {
        a = logit( (rowSums(Y!=0) + 1)/(N-1+2) ) - (1/N) * sum(logit( (rowSums(Y!=0) + 1) / (N - 1 + 2)) )
        b = logit( (colSums(Y!=0) + 1)/(N-1+2) ) - (1/N) * sum(logit( (colSums(Y!=0) + 1) / (N - 1 + 2)) )
        sigma2_a = var(a)
        sigma2_b = var(b)
        B = ( 1/(N*(N-1)) * sum(Y>mean(Y)) + mean(Z_dist))
        sigma2_z = var(as.vector(z))
      } else {
        if (!is.null(user.start$sender)) {a = user.start$sender} else {a = rnorm(N)}
        if (!is.null(user.start$receiver)) {b = user.start$receiver} else {b = rnorm(N)}
        B = 0 #intercept
        if (!is.null(user.start$sender.var)) {sigma2_a = user.start$sender.var} #else default 10
        if (!is.null(user.start$receiver.var)) {sigma2_b = user.start$receiver.var} #else default 10
        if (!is.null(user.start$Z.var)) {sigma2_z = user.start$Z.var} #else default 10
      }
      r = r+1
    }
    # Update Z, sigma2_z, B, a, sigma2_a, b, sigma2_b ####
    # sigma updates are closed form. Others by coordinate ascent
    # Z ####
    # - init ####
    Z_dist = as.matrix(dist(Z, upper = TRUE)) #just in case
    stepsize.z = matrix(stepsize.init.z, D, N)
    zid_zjd = lapply(1:N, function(x) {t(Z[x,] - z)}) # N, N x d matrices
    dist_inv = 1/Z_dist; diag(dist_inv) = rep(0, N)
    yij_yji = Y + t(Y) # each entry is y_ij + y_ji
    tmp1 = yij_yji * dist_inv
    exp_term = exp(sweep(t(b - Z_dist + B), 1, a, "+")); # = t(b + t(a-Z_dist) +B)
    exp_term = exp_term + t(exp_term) # each entry is ij + ji
    diag(exp_term) = 0
    tmp2 = dist_inv * exp_term
    #first deriv wrt z_id:
    diff_z = sapply(1:N, function(i) {colSums(tmp2[i,]*zid_zjd[[i]])}) - #d x N
      sapply(1:N, function(i) {colSums(tmp1[i,]*zid_zjd[[i]])}) - #d x N
      z/sigma2_z # d x N
    zsign = sign(diff_z)
    #second deriv wrt z_id:
    tmp3 = dist_inv^(3) * (yij_yji - exp_term*(1 + Z_dist))
    diff_z2 = t(t(sapply(1:N, function(i) {colSums(tmp3[i,]*(zid_zjd[[i]])^2)})) + #N x d
                colSums(dist_inv * (-yij_yji + exp_term)) #N, added to above by column
              - 1/sigma2_z)
    zsign2 = sign(diff_z2)
    # - update ####
    for (i in sample(N)) { #update one dimension at a time? randomize order? fix first point to prevent translations?
      for (d in sample(D)) {
        j = 0
        while(abs(diff_z[d,i]) > tol & j <= jmax) {
          br = FALSE
          #consider a new zid:
          znew_i = z[,i]; znew_i[d]= znew_i[d] + stepsize.z[d,i]*zsign[d,i]*-zsign2[d,i]
          tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
          #recalculate zdiff for that zid
          diff_z[d,i] = - sum(yij_yji[i,] * 1/tmpdist * (znew_i[d] - Z[,d])) +
            sum(1/tmpdist * (znew_i[d] - Z[,d]) *
                  (exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist))) -
            znew_i[d]/sigma2_z #yij_yji is zero if i = j so don't worry about Z not updated to znew on that line
          #compare to z orig, did we cross a 0?
          if (!is.na(diff_z[d,i]) && sign(diff_z[d,i]) != zsign[d,i]) {
            s = z[d,i] + seq(0, 1, length.out = 11) * stepsize.z[d,i]*zsign[d,i]*-zsign2[d,i]
            tmpdiff = sapply(s, function(x) {
              znew_i[d]= x
              tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
              return(- sum(yij_yji[i,] * 1/tmpdist * (znew_i[d] - Z[,d])) +
                       sum(1/tmpdist * (znew_i[d] - Z[,d]) *
                             (exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist))) -
                       znew_i[d]/sigma2_z)})
            #update lower case z (not upper case Z yet)
            z[d,i] = s[which.max(sign(tmpdiff)!=zsign[d,i])]
            stepsize.z[d,i] = stepsize.z[d,i]/10
            if (stepsize.z[d,i] < epsilon) {br = TRUE}
            diff_z[d,i] = tmpdiff[which.max(sign(tmpdiff)!=zsign[d,i])]
            zsign[d,i] = sign(diff_z[d,i])
            znew_i = z[,i]
            tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
            tmpexp = exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist)
            tmpexp[i] = 0
            tmp4 = 1/tmpdist^(3) * (yij_yji[i,] - tmpexp*(1 + tmpdist))
            diff_z2[d,i] = sum(tmp4*(znew_i[d] - t(Z)[d,])^2) +
              sum(1/tmpdist * (-yij_yji[i,] + tmpexp)) - 1/sigma2_z
            zsign2[d,i] = sign(diff_z2[d,i])
          } else {
            if (stepsize.z[d,i] <= max(abs(z))) { stepsize.z[d,i] = stepsize.z[d,i] * 2
            } else {
              #look in both directions
              s = z[d,i] + seq(-1, 1, length.out = 101) * stepsize.z[d,i]
              s = s[-51]  # drop the zero step (the current value)
              tmpdiff = sapply(s, function(x) {
                znew_i[d]= x
                tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
                return(- sum(yij_yji[i,] * 1/tmpdist * (znew_i[d] - Z[,d])) +
                         sum(1/tmpdist * (znew_i[d] - Z[,d]) *
                               (exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist))) -
                         znew_i[d]/sigma2_z)})
              stepsize.z[d,i] = abs(s[which.min(abs(tmpdiff))] - z[d,i]) #don't let it pick itself
              z[d,i] = s[which.min(abs(tmpdiff))] #update lower case z (not upper case Z yet)
              diff_z[d,i] = tmpdiff[which.min(abs(tmpdiff))]
              zsign[d,i] = sign(diff_z[d,i])
              znew_i = z[,i]
              tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
              tmpexp = exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist)
              tmpexp[i] = 0
              tmp4 = 1/tmpdist^(3) * (yij_yji[i,] - tmpexp*(1 + tmpdist))
              diff_z2[d,i] = sum(tmp4*(znew_i[d] - t(Z)[d,])^2) +
                sum(1/tmpdist * (-yij_yji[i,] + tmpexp)) - 1/sigma2_z
              zsign2[d,i] = sign(diff_z2[d,i])
            }
          }
          if(br) {break}
          j = j+1
        }
      }
    }
    z = z - rowMeans(z)  # re-center positions at the origin
    Z = t(z)
    Z_dist = as.matrix(dist(Z, upper = TRUE))
    # sigma2_z ####
    # closed-form conditional mode of the scaled-inv-chi^2 posterior.
    # BUG FIX: denominator used N*d where d was the leftover inner loop index
    # (a random value in 1..D); z has N*D entries, so it must be N*D.
    sigma2_z = (sum(z^2) + v_z*s2_z) / (N*D + 2 + v_z)
    #B: ####
    stepsize.B = stepsize.init.B
    lambdamat = exp(sweep(t(b - Z_dist + B), 1, a, "+"))
    diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
    lambdamat[is.na(Y)] = NA
    diff_B = sum( Y - lambdamat) - B/sigma2_B
    #second deriv always negative -> concave
    Bsign = sign(diff_B)
    while (abs(diff_B) > tol) {
      Bnew = B + stepsize.B*Bsign
      lambdamat = exp(sweep(t(b - Z_dist + Bnew), 1, a, "+"))
      diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
      lambdamat[is.na(Y)] = NA
      diff_B = sum( Y - lambdamat) - Bnew/sigma2_B
      if (sign(diff_B) != Bsign) { #look in this range
        s = B + seq(0, 1, length.out = 11) * stepsize.B*Bsign
        tmp = sapply(s, function(B) {
          lambdamat = exp(sweep(t(b - Z_dist + B), 1, a, "+"))
          diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
          sum( Y - lambdamat) - B/sigma2_B
        })
        B = s[which.max(sign(tmp)!=Bsign)]
        stepsize.B = stepsize.B/10
        diff_B = tmp[which.max(sign(tmp)!=Bsign)]
        Bsign = sign(diff_B)
      } else {stepsize.B = stepsize.B * 2}
    }
    # a ####
    #init
    stepsize.a = rep(stepsize.init.a, N)
    lambdamat = exp(sweep(t(b - Z_dist + B), 1, a, "+"))
    diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
    diff_a = rowSums(Y) - rowSums(lambdamat) - a/sigma2_a #i,j entry is i to j
    asign = sign(diff_a)
    #go
    while (max(abs(diff_a)) > tol) {
      for (i in 1:N) {
        while (abs(diff_a[i]) > tol) {
          anew = a[i] + stepsize.a[i]*asign[i]
          lambdavec = exp(B + anew + b - Z_dist[i,])
          lambdavec[i] = lambdavec[i]*(1-noSelfEdges) #if necessary, remove self edge
          lambdavec[is.na(Y[i,])] = NA #remove missing edges
          diff_a[i] = sum( Y[i,]) - sum(lambdavec) - anew/sigma2_a
          if (sign(diff_a[i]) != asign[i]) {
            s = a[i] + seq(0, 1, length.out = 11) * stepsize.a[i]*asign[i]
            tmp = sum( Y[i,]) - sapply(s, function(a) {
              lambda = exp(B + a + b - Z_dist[i,])
              if (noSelfEdges) {lambda[i] = 0}
              return( sum(lambda) + a/sigma2_a)
            })
            a[i] = s[which.max(sign(tmp)!=asign[i])]
            stepsize.a[i] = stepsize.a[i]/10
            diff_a[i] = tmp[which.max(sign(tmp)!=asign[i])]
            asign[i] = sign(diff_a[i])
          } else {stepsize.a[i] = stepsize.a[i] * 2}
        }
      }
    }
    #sigma2_a ####
    sigma2_a = (sum(a^2) + v_a*s2_a) / (N + 2 + v_a)
    # b ####
    #init
    stepsize.b = rep(stepsize.init.b, N)
    lambdamat = exp(sweep(t(b - Z_dist + B), 1, a, "+"))
    diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
    lambdamat[is.na(Y)] = NA
    diff_b = colSums(Y) - colSums(lambdamat) - b/sigma2_b #i,j entry is i to j
    bsign = sign(diff_b)
    #go
    while (max(abs(diff_b)) > tol) {
      for (i in 1:N) {
        while (abs(diff_b[i]) > tol) {
          bnew = b[i] + stepsize.b[i]*bsign[i]
          lambdavec = exp(B + bnew + a - Z_dist[,i])
          lambdavec[i] = lambdavec[i]*(1-noSelfEdges) #if necessary, remove self edge
          diff_b[i] = sum(Y[,i]) - sum(lambdavec) - bnew/sigma2_b
          if (sign(diff_b[i]) != bsign[i]) {
            s = b[i] + seq(0, 1, length.out = 11) * stepsize.b[i]*bsign[i]
            tmp = sum( Y[,i] ) - sapply(s, function(b) {
              lambda = exp(B + a + b - Z_dist[i,]) # Z_dist is symmetric, so row i == column i
              if (noSelfEdges) {lambda[i] = 0}
              return( sum(lambda) + b/sigma2_b)
            })
            b[i] = s[which.max(sign(tmp)!=bsign[i])]
            stepsize.b[i] = stepsize.b[i]/10
            diff_b[i] = tmp[which.max(sign(tmp)!=bsign[i])]
            bsign[i] = sign(diff_b[i])
          } else {stepsize.b[i] = stepsize.b[i] * 2}
        }
      }
    }
    # sigma2_b ####
    sigma2_b = (sum(b^2) + v_b*s2_b) / (N + 2 + v_b)
    # likelihood ####
    currentllik = llik(Y=Y, sender = a, receiver = b, beta = B, Z = t(z), sender.var = sigma2_a,
                       receiver.var = sigma2_b, Z.var = sigma2_z, beta.var = sigma2_B,
                       prior.sender.var = s2_a, prior.receiver.var = s2_b, prior.Z.var = s2_z,
                       sender.var.df = v_a, receiver.var.df = v_b, Z.var.df = v_z)
    if (r == 1) {threshold = currentllik; maxllik = currentllik}
    print(currentllik)
    # BUG FIX: >= (was >) so the first run always initializes `map`; otherwise
    # `map` could be undefined at return when no later run improves on run 1
    if (currentllik >= maxllik) {
      maxllik = currentllik
      map = list(Z = scale(Z, scale = FALSE), sender = a, receiver = b, beta = B, beta.var = sigma2_B,
                 sender.var = sigma2_a, receiver.var = sigma2_b, Z.var = sigma2_z, #or just recalculate these given other params
                 diff_Z = diff_z, diff_sender = diff_a, diff_receiver = diff_b, diff_Z2 = diff_z2,
                 stepsize.Z = stepsize.z, llik = maxllik)
    }
    if (currentllik < threshold) {cat("Took a wrong step...try again with a different seed");
      return(NULL)}
    # ending tasks ####
    r = r+1
  } #shift positions to origin mean before returning
  return(list(last = list(Z = scale(Z, scale = FALSE), sender = a, receiver = b, beta = B,
                          sender.var = sigma2_a, receiver.var = sigma2_b, Z.var = sigma2_z,
                          diff_Z = diff_z, diff_sender = diff_a, diff_receiver = diff_b, diff_Z2 = diff_z2,
                          stepsize.Z = stepsize.z, llik = currentllik),
              map = map,
              prior = list(beta.var = sigma2_B, sender.var.df = v_a, receiver.var.df = v_b, Z.var.df = v_z,
                           prior.sender.var = s2_a, prior.receiver.var = s2_b, prior.Z.var = s2_z))
  )
}
# quasi-Newton prediction ####
# predict network based on quasi-Newton fit
# either based on point estimate (non-random) or random draw|MAP
# Predict the network implied by a quasi-Newton (lsqn) fit.
#
# model : list as returned by lsqn(); only the MAP iterate (model$map) is used.
# type  : "Y" returns the matrix of expected counts (Poisson means);
#         "rpois" returns one random network drawn from those means.
# names : optional node names applied to the rows of the result.
#
# The diagonal is NA (self-edges are not modeled); in the "rpois" branch those
# NA means yield NA draws (with a warning from rpois). Any other `type`
# returns NULL invisibly.
predict.lsqn <- function(model, type = "Y", names = NULL) {
  N = nrow(model$map$Z)
  Z_dist = as.matrix(dist(model$map$Z, upper = TRUE))  # extra N, N args to as.matrix removed (they were ignored)
  # lambda[i, j] = exp(beta + sender_i + receiver_j - ||z_i - z_j||)
  lambda = exp(t(model$map$receiver + t(model$map$sender - Z_dist)) + model$map$beta)
  diag(lambda) = NA  # self-edges not modeled
  if (!is.null(names)) {row.names(lambda) = names}
  if (type == "Y") {
    return(lambda)
  }
  if (type == "rpois") {
    draw = matrix(rpois(N^2, lambda), N, N)
    # BUG FIX: matrix() drops dimnames, so names must be applied to the draw
    # itself (previously they were re-applied to lambda and then lost)
    if (!is.null(names)) {row.names(draw) = names}
    return(draw)
  }
}
| /old/ls_quasi_newton.R | no_license | jcarlen/latent_ranking | R | false | false | 20,517 | r | # quick quasi-Newton for Latent Space Ranking Model
# no longer using this algo for estimation (b/cworse than optim's)
# but still using the predict.lsqn function at the bottom for prediction based on params
#
# make sure diag of Y is 0 if not counting self-edges
#
# Implementation notes/questions:
# better to update after each z or all z's at once? (seems better all z's at once, i.e. simultaneous updates, they all move from old z location together)
# stata j sometimes sneaking away. probably because it has far lowest citation count
# likelihood levels off after certain #runs, why? yet in terms of correlation to mcmc value fit may be getting better
# improve ad-hoc tuning/Z search to be more dynamic for any input data set
# Bayesian priors mean we shouldn't have to center paramters, but could it speed fitting?
# without bayesian, non-identifiability between sender, receiver, beta (+- constant)
# how sensitivite to hyperpriors?
# note behavior of low connectivity nodes, e.g. if something only sends to 1 other things in network,
# positions aren't reliable, can tail away from rest of nodes and make visualization worse
# it's sender and/or receiver coef is conflated with position#
# To DO:
# Allow for different variances for z's in different dimensions? Seems realistic.
# Long term: write the *binomial* quasi-Newton algo
# library ####
library(ergm)
library(latentnet)
library(geoR) # for divnchisq
library(gtools) #for logit function
#Source the new llik function(s)
source("~/Documents/citation/latent_ranking/latent_ranking_repo/llik.R")
# This is the OLD log-likelihood function: ####
# if est is "MAP" return l(Y|theta) + l(theta) ; if "Y" return l(Y|theta) ; if "theta" return l(theta)
# if object has parameter values they will be used (not including hyperparameters)
# family = poisson fits the normal poisson latent space model
# family = binomial fits a quasi-Stiglery (quasi-symmetric) type model with positions -- off by a constant
# [removed] family = poisson.c fits a constricted poisson position model (T_ij = ~Y_ij + ~Y_ji) by fitting upper tri and letting lower tri be the remainder. Note this is odd because it doesn't demand that E(Cij) = T - E(Cji) given the parameters (positions + random effects + intercept), just that E(C_ji) is a reminder not a glm prediction. This also means the last sender param has no info.
# better way to do this with penalty for missing the total instread of sharp constraint (which probably can't be realized )]
# Log-likelihood, log-prior, or unnormalized log-posterior for the latent
# space ranking model, depending on `est`:
#   "Y" -> log p(Y | theta); "theta" -> log p(theta); "MAP" -> their sum.
# Parameters present in `object` (an ergmm-style fit) take precedence over the
# explicit arguments. family "poisson" is the standard latent space count
# model; "binomial" is the quasi-symmetric variant (Y_ij out of Y_ij + Y_ji).
llik_old <- function(object=NULL, Y=NULL, sender=NULL, receiver=NULL, beta=NULL,
                     Z=NULL, sender.var = 10, receiver.var = 10, Z.var = 10,
                     beta.var = 9, sender.var.df = 3, receiver.var.df = 3, Z.var.df = NULL, #N = number of nodes
                     prior.sender.var = 1, prior.receiver.var = 1, prior.Z.var = NULL,
                     est = "MAP", family = "poisson") {
  # Fall back to the fitted object's values for anything not supplied
  if (is.null(Y)) Y <- object$model$Ym
  if (is.null(Z)) Z <- object$Z
  if (is.null(sender)) sender <- object$sender
  if (is.null(receiver)) receiver <- object$receiver
  if (is.null(beta)) beta <- object$beta
  n_nodes <- nrow(Y)
  # Data-dependent hyperprior defaults for the position variance
  if (is.null(Z.var.df)) Z.var.df <- sqrt(n_nodes)
  if (is.null(prior.Z.var)) prior.Z.var <- n_nodes/8
  # Variance components stored in the object always win
  if (!is.null(object$sender.var)) sender.var <- object$sender.var
  if (!is.null(object$receiver.var)) receiver.var <- object$receiver.var
  if (!is.null(object$Z.var)) Z.var <- object$Z.var
  if (!is.null(object$beta.var)) beta.var <- object$beta.var
  pairwise_dist <- as.matrix(dist(Z, upper = TRUE))
  # log-rate[i, j] = beta + sender_i + receiver_j - distance(z_i, z_j)
  log_rate <- t(receiver + t(sender - pairwise_dist)) + beta
  if (family == "poisson") {
    norm_const <- sum(lgamma(as.vector(Y+1)), na.rm = TRUE)
    rate <- exp(log_rate); diag(rate) <- 0  # no self-edge intensity
    loglik_Y <- sum( Y * log_rate - rate, na.rm = TRUE) - norm_const
  }
  if (family == "binomial") {
    p <- inv.logit(log_rate)
    trials <- Y + t(Y); diag(trials) <- 0  # trials per dyad
    loglik_Y <- sum( Y * log(p), na.rm = TRUE) + sum((trials - Y)*log(1-p), na.rm = TRUE)
  }
  if (est == "Y") return(loglik_Y)
  # Gaussian log-priors plus scaled-inverse-chi^2 log-priors on the variances
  log_prior <- log(exp(-beta^2/(2*beta.var)) / sqrt(2*pi*beta.var)) +
    sum(log(exp(-sender^2/(2*sender.var)) / sqrt(2*pi*sender.var))) +
    sum(log(exp(-receiver^2/(2*receiver.var)) / sqrt(2*pi*receiver.var))) +
    sum(log(exp(-Z^2/(2*Z.var)) / sqrt(2*pi*Z.var))) +
    log(dinvchisq(sender.var, sender.var.df, prior.sender.var)) +
    log(dinvchisq(receiver.var, receiver.var.df, prior.receiver.var)) +
    log(dinvchisq(Z.var, Z.var.df, prior.Z.var))
  if (est == "theta") return(log_prior)
  if (est == "MAP") return(loglik_Y + log_prior)
}
# quasi-Newton ####
lsqn <- function(Y, N=nrow(Y), D = 2, runs = 10, tol = .01, #Y is graph, N = number of nodes
# hyperparameters - using defaults from ergmm
v_a = 3, v_b = 3, v_z = sqrt(N),
s2_a = 1, s2_b = 1, s2_z = N/8,
sigma2_B = 9,
# prior initial values (will be updated)
Z.init = "MDS",
RE.init = "rnorm",
user.start = list(), #If user supplies values they will be used, override [].init
sigma2_a = 10, sigma2_b = 10, sigma2_z = 10,
stepsize.init.a = 1, stepsize.init.b = 1,
stepsize.init.B = 1, stepsize.init.z = 1,
noSelfEdges = 1,
jmax = 50, epsilon = 1e-10) {
r = 0
if (noSelfEdges) {diag(Y) = 0}
while (r <= runs) {
while (r == 0) {
# initialize postions (Z) ####
# random normal (default), multidimenstional scaling (MDS), or user-specified
if (Z.init == "user" | !is.null(user.start$Z)) {
Z = user.start$Z
} else {
if (Z.init == "rnorm") {
Z = matrix(rnorm(D*N), ncol = D)
} else {
if (Z.init == "MDS") {
Z_dist = as.matrix(dist(Y))
Z = cmdscale(Z_dist, k = D)
}
}
# Standardize Z to center at origin, [0,1] range
Z = scale(Z, scale = F) #translate to origin
Z = Z/max(abs(Z)) # shrink - improve this?
}
z = t(Z)
Z_dist = as.matrix(dist(Z), upper = T)
# initialize a, b, B ####
# latentnet type initialization
if (RE.init == "latentnet") {
a = logit( (rowSums(Y!=0) + 1)/(N-1+2) ) - (1/N) * sum(logit( (rowSums(Y!=0) + 1) / (N - 1 + 2)) )
b = logit( (colSums(Y!=0) + 1)/(N-1+2) ) - (1/N) * sum(logit( (colSums(Y!=0) + 1) / (N - 1 + 2)) )
sigma2_a = var(a)
sigma2_b = var(b)
B = ( 1/(N*(N-1)) * sum(Y>mean(Y)) + mean(Z_dist))
sigma2_z = var(as.vector(z))
} else {
if (!is.null(user.start$sender)) {a = user.start$sender} else {a = rnorm(N)}
if (!is.null(user.start$receiver)) {b = user.start$receiver} else {b = rnorm(N)}
B = 0 #intercept
if (!is.null(user.start$sender.var)) {sigma2_a = user.start$sender.var} #else default 10
if (!is.null(user.start$receiver.var)) {sigma2_b = user.start$receiver.var} #else default 10
if (!is.null(user.start$Z.var)) {sigma2_z = user.start$Z.var} #else default 10
}
r = r+1
}
#print(r)
# Update Z, sigma2_z, B, a, sigma2_a, b, sigma2_b ####
# sigma updates are closed form. Others by coordinate ascent
# Z ####
# - init ####
Z_dist = as.matrix(dist(Z, upper = T)) #just in case
stepsize.z = matrix(stepsize.init.z, D, N)
zid_zjd = lapply(1:N, function(x) {t(Z[x,] - z)}) # N, N x d matrices
dist_inv = 1/Z_dist; diag(dist_inv) = rep(0, N) #term1 = cbind(term1, term1)
yij_yji = Y + t(Y) # each entry is y_ij + y_ji
tmp1 = yij_yji * dist_inv
exp_term = exp(sweep(t(b - Z_dist + B), 1, a, "+")); # = t(b + t(a-Z_dist) +B)
exp_term = exp_term + t(exp_term) # each entry is ij + ji
diag(exp_term) = 0
tmp2 = dist_inv * exp_term
#first deriv wrt z_id:
diff_z = sapply(1:N, function(i) {colSums(tmp2[i,]*zid_zjd[[i]])}) - #d x N
sapply(1:N, function(i) {colSums(tmp1[i,]*zid_zjd[[i]])}) - #d x N
z/sigma2_z # d x N
zsign = sign(diff_z)
#second deriv wrt z_id:
tmp3 = dist_inv^(3) * (yij_yji - exp_term*(1 + Z_dist))
diff_z2 = t(t(sapply(1:N, function(i) {colSums(tmp3[i,]*(zid_zjd[[i]])^2)})) + #N x d
colSums(dist_inv * (-yij_yji + exp_term)) #N, added to above by column
- 1/sigma2_z)
zsign2 = sign(diff_z2)
# - update ####
for (i in sample(N)) { #update one dimension at a time? randomize order? fix first point to prevent translations?
for (d in sample(D)) {
j = 0
while(abs(diff_z[d,i]) > tol & j <= jmax) { #
br = FALSE
#consider a new zid:
znew_i = z[,i]; znew_i[d]= znew_i[d] + stepsize.z[d,i]*zsign[d,i]*-zsign2[d,i]
tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
#recalculate zdiff for that zid
diff_z[d,i] = - sum(yij_yji[i,] * 1/tmpdist * (znew_i[d] - Z[,d])) +
sum(1/tmpdist * (znew_i[d] - Z[,d]) *
(exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist))) -
znew_i[d]/sigma2_z #yij_yji is zero if i = j so don't worry about Z not updated to znew on that line
#compre to z orig, did we cross a 0?
if (!is.na(diff_z[d,i]) && sign(diff_z[d,i]) != zsign[d,i]) {
s = z[d,i] + seq(0, 1, length.out = 11) * stepsize.z[d,i]*zsign[d,i]*-zsign2[d,i]
tmpdiff = sapply(s, function(x) {
znew_i[d]= x
tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
return(- sum(yij_yji[i,] * 1/tmpdist * (znew_i[d] - Z[,d])) +
sum(1/tmpdist * (znew_i[d] - Z[,d]) *
(exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist))) -
znew_i[d]/sigma2_z)})
#update lower case z (not upper case Z yet)
z[d,i] = s[which.max(sign(tmpdiff)!=zsign[d,i])]
#Z[i,d] = z[d,i]
stepsize.z[d,i] = stepsize.z[d,i]/10
if (stepsize.z[d,i] < epsilon) {br = TRUE}
diff_z[d,i] = tmpdiff[which.max(sign(tmpdiff)!=zsign[d,i])]
zsign[d,i] = sign(diff_z[d,i])
znew_i = z[,i]
tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
tmpexp = exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist)
tmpexp[i] = 0
tmp4 = 1/tmpdist^(3) * (yij_yji[i,] - tmpexp*(1 + tmpdist))
diff_z2[d,i] = sum(tmp4*(znew_i[d] - t(Z)[d,])^2) +
sum(1/tmpdist * (-yij_yji[i,] + tmpexp)) - 1/sigma2_z
zsign2[d,i] = sign(diff_z2[d,i])
} else {
if (stepsize.z[d,i] <= max(abs(z))) { stepsize.z[d,i] = stepsize.z[d,i] * 2
} else {
#print("had to look in both directions")
#look in both directions
s = z[d,i] + seq(-1, 1, length.out = 101) * stepsize.z[d,i]
s = s[-51]
tmpdiff = sapply(s, function(x) {
znew_i[d]= x
tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
return(- sum(yij_yji[i,] * 1/tmpdist * (znew_i[d] - Z[,d])) +
sum(1/tmpdist * (znew_i[d] - Z[,d]) *
(exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist))) -
znew_i[d]/sigma2_z)})
stepsize.z[d,i] = abs(s[which.min(abs(tmpdiff))] - z[d,i]) #don't let it pick itself
z[d,i] = s[which.min(abs(tmpdiff))] #update lower case z (not upper case Z yet)
diff_z[d,i] = tmpdiff[which.min(abs(tmpdiff))]
zsign[d,i] = sign(diff_z[d,i])
znew_i = z[,i]
tmpdist = sqrt(colSums((znew_i - t(Z))^2)); if (noSelfEdges) {tmpdist[i] = 1}
tmpexp = exp(B + a[i] + b - tmpdist) + exp(B + a + b[i] - tmpdist)
tmpexp[i] = 0
tmp4 = 1/tmpdist^(3) * (yij_yji[i,] - tmpexp*(1 + tmpdist))
diff_z2[d,i] = sum(tmp4*(znew_i[d] - t(Z)[d,])^2) +
sum(1/tmpdist * (-yij_yji[i,] + tmpexp)) - 1/sigma2_z
zsign2[d,i] = sign(diff_z2[d,i])
}
}
if(br) {break}
j = j+1
}
}
}
z = z - rowMeans(z)
Z = t(z)
Z_dist = as.matrix(dist(Z, upper = T))
# sigma2_z ####
sigma2_z = (sum(z^2) + v_z*s2_z) / (N*d + 2 + v_z) #z has length N*d
#B: ####
stepsize.B = stepsize.init.B
lambdamat = exp(sweep(t(b - Z_dist + B), 1, a, "+"))
diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
lambdamat[is.na(Y)] = NA
diff_B = sum( Y - lambdamat) - B/sigma2_B #in
#second deriv always negative -> concave
Bsign = sign(diff_B)
while (abs(diff_B) > tol) {
Bnew = B + stepsize.B*Bsign
lambdamat = exp(sweep(t(b - Z_dist + Bnew), 1, a, "+"))
diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
lambdamat[is.na(Y)] = NA
diff_B = sum( Y - lambdamat) - Bnew/sigma2_B
if (sign(diff_B) != Bsign) { #look in this range
s = B + seq(0, 1, length.out = 11) * stepsize.B*Bsign
tmp = sapply(s, function(B) {
lambdamat = exp(sweep(t(b - Z_dist + B), 1, a, "+"))
diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
sum( Y - lambdamat) - B/sigma2_B
})
B = s[which.max(sign(tmp)!=Bsign)]
stepsize.B = stepsize.B/10
diff_B = tmp[which.max(sign(tmp)!=Bsign)]
Bsign = sign(diff_B)
} else {stepsize.B = stepsize.B * 2}
}
# a ####
#init
stepsize.a = rep(stepsize.init.a, N)
lambdamat = exp(sweep(t(b - Z_dist + B), 1, a, "+"))
diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
diff_a = rowSums(Y) - rowSums(lambdamat) + - a/sigma2_a #i,j entry is i to j
asign = sign(diff_a)
#go
while (max(abs(diff_a)) > tol) {
for (i in 1:N) {
while (abs(diff_a[i]) > tol) {
anew = a[i] + stepsize.a[i]*asign[i]
lambdavec = exp(B + anew + b - Z_dist[i,])
lambdavec[i] = lambdavec[i]*(1-noSelfEdges) #if necessary, remove self edge
lambdavec[is.na(Y[i,])] = NA #remove missing edges
diff_a[i] = sum( Y[i,]) - sum(lambdavec) - anew/sigma2_a
if (sign(diff_a[i]) != asign[i]) {
s = a[i] + seq(0, 1, length.out = 11) * stepsize.a[i]*asign[i]
tmp = sum( Y[i,]) - sapply(s, function(a) {
lambda = exp(B + a + b - Z_dist[i,])
if (noSelfEdges) {lambda[i] = 0}
return( sum(lambda) + a/sigma2_a)
})
a[i] = s[which.max(sign(tmp)!=asign[i])]
stepsize.a[i] = stepsize.a[i]/10
diff_a[i] = tmp[which.max(sign(tmp)!=asign[i])]
asign[i] = sign(diff_a[i])
} else {stepsize.a[i] = stepsize.a[i] * 2}
}
}
}
#sigma2_a ####
sigma2_a = (sum(a^2) + v_a*s2_a) / (N + 2 + v_a)
# b ####
#init
stepsize.b = rep(stepsize.init.b, N)
lambdamat = exp(sweep(t(b - Z_dist + B), 1, a, "+"))
diag(lambdamat) = diag(lambdamat)*(1-noSelfEdges)
lambdamat[is.na(Y)] = NA
diff_b = colSums(Y) - colSums(lambdamat) - b/sigma2_b #i,j entry is i to j
bsign = sign(diff_b)
#go
while (max(abs(diff_b)) > tol) {
for (i in 1:N) {
while (abs(diff_b[i]) > tol) {
bnew = b[i] + stepsize.b[i]*bsign[i]
lambdavec = exp(B + bnew + a - Z_dist[,i])
lambdavec[i] = lambdavec[i]*(1-noSelfEdges) #if necessary, remove self edge
diff_b[i] = sum(Y[,i]) - sum(lambdavec) - bnew/sigma2_b
if (sign(diff_b[i]) != bsign[i]) {
s = b[i] + seq(0, 1, length.out = 11) * stepsize.b[i]*bsign[i]
tmp = sum( Y[,i] ) - sapply(s, function(b) {
lambda = exp(B + a + b - Z_dist[i,])
if (noSelfEdges) {lambda[i] = 0}
return( sum(lambda) + b/sigma2_b)
})
b[i] = s[which.max(sign(tmp)!=bsign[i])]
stepsize.b[i] = stepsize.b[i]/10
diff_b[i] = tmp[which.max(sign(tmp)!=bsign[i])]
bsign[i] = sign(diff_b[i])
} else {stepsize.b[i] = stepsize.b[i] * 2}
}
}
}
# sigma2_b ####
sigma2_b = (sum(b^2) + v_b*s2_b) / (N + 2 + v_b)
# likelihood ####
currentllik = llik(Y=Y, sender = a, receiver = b, beta = B, Z = t(z), sender.var = sigma2_a,
receiver.var = sigma2_b, Z.var = sigma2_z, beta.var = sigma2_B,
prior.sender.var = s2_a, prior.receiver.var = s2_b, prior.Z.var = s2_z,
sender.var.df = v_a, receiver.var.df = v_b, Z.var.df = v_z)
if (r ==1) {threshold = currentllik; maxllik = currentllik}
print(currentllik)
if (currentllik > maxllik) {
maxllik = currentllik
map = list(Z = scale(Z, scale =F), sender = a, receiver = b, beta = B, beta.var = sigma2_B,
sender.var = sigma2_a, receiver.var = sigma2_b, Z.var = sigma2_z, #or just recalculate these given other params
diff_Z = diff_z, diff_sender = diff_a, diff_receiver = diff_b, diff_Z2 = diff_z2,
stepsize.Z = stepsize.z, llik = maxllik)
}
if (currentllik < threshold) {cat("Took a wrong step...try again with a different seed");
return(NULL)}
# ending tasks ####
r = r+1
} #shift positions to origin mean before returning
return(list(last = list(Z = scale(Z, scale = F), sender = a, receiver = b, beta = B,
sender.var = sigma2_a, receiver.var = sigma2_b, Z.var = sigma2_z,
diff_Z = diff_z, diff_sender = diff_a, diff_receiver = diff_b, diff_Z2 = diff_z2,
stepsize.Z = stepsize.z, llik = currentllik),
map = map,
prior = list(beta.var = sigma2_B, sender.var.df = v_a, receiver.var.df = v_b, Z.var.df = v_z,
prior.sender.var = s2_a, prior.receiver.var = s2_b, prior.Z.var = s2_z))
)
}
# quasi-Newton prediction ####
# predict network based on quasi-Newton fit
# either based on point estimate (non-random) or random draw|MAP
predict.lsqn <-function(model, type = "Y", names = NULL) {
  ## Predict the network implied by the MAP estimate of a latent space model.
  ##
  ## Args:
  ##   model: fit object holding a `map` list with latent positions `Z`,
  ##          `sender` and `receiver` effects, and the intercept `beta`.
  ##   type:  "Y" returns the Poisson rate matrix lambda (point estimate);
  ##          "rpois" returns one random network drawn from those rates.
  ##   names: optional row names for the returned matrix.
  ##
  ## Returns an N x N matrix. The diagonal (self edges) is NA in the rate
  ## matrix; rpois() on those NA rates yields NA draws (with a warning),
  ## matching the original behavior. Any other `type` returns NULL invisibly.
  N <- nrow(model$map$Z)
  ## Pairwise Euclidean distances between latent positions.
  ## (The original passed extra N, N arguments to as.matrix(); they were
  ## silently ignored for dist objects, so they are dropped here.)
  Z_dist <- as.matrix(dist(model$map$Z, upper = TRUE))
  ## lambda[i, j] = exp(beta + sender[i] + receiver[j] - dist(i, j)):
  ## the inner t() recycles sender over rows, the outer t() restores
  ## orientation after adding receiver over what were columns.
  lambda <- exp(t(model$map$receiver + t(model$map$sender - Z_dist)) + model$map$beta)
  diag(lambda) <- NA  # self edges are undefined
  if (!is.null(names)) {row.names(lambda) <- names}
  if (type == "Y") {
    return(lambda)
  }
  if (type == "rpois") {
    Ysim <- matrix(rpois(N^2, lambda), N, N)
    ## Bug fix: names used to be re-set on `lambda`, which is discarded
    ## here; apply them to the simulated matrix that is actually returned.
    if (!is.null(names)) {row.names(Ysim) <- names}
    return(Ysim)
  }
  ## Unknown type: fall through, returning NULL invisibly as before.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parsnip-seasonal_reg.R
\name{seasonal_reg}
\alias{seasonal_reg}
\title{General Interface for Multiple Seasonality Regression Models (TBATS, STLM)}
\usage{
seasonal_reg(
mode = "regression",
seasonal_period_1 = NULL,
seasonal_period_2 = NULL,
seasonal_period_3 = NULL
)
}
\arguments{
\item{mode}{A single character string for the type of model.
The only possible value for this model is "regression".}
\item{seasonal_period_1}{(required) The primary seasonal frequency.
Uses \code{"auto"} by default.
A character phrase of "auto" or time-based phrase of "2 weeks"
can be used if a date or date-time variable is provided.
See Fit Details below.}
\item{seasonal_period_2}{(optional) A second seasonal frequency.
Is \code{NULL} by default.
A character phrase of "auto" or time-based phrase of "2 weeks"
can be used if a date or date-time variable is provided.
See Fit Details below.}
\item{seasonal_period_3}{(optional) A third seasonal frequency.
Is \code{NULL} by default.
A character phrase of "auto" or time-based phrase of "2 weeks"
can be used if a date or date-time variable is provided.
See Fit Details below.}
}
\description{
\code{seasonal_reg()} is a way to generate a \emph{specification} of a
Seasonal Decomposition model
before fitting and allows the model to be created using
different packages. Currently the only package is \code{forecast}.
}
\details{
The data given to the function are not saved and are only used
to determine the \emph{mode} of the model. For \code{seasonal_reg()}, the
mode will always be "regression".
The model can be created using the \code{fit()} function using the
following \emph{engines}:
\itemize{
\item "tbats" - Connects to \code{forecast::tbats()}
\item "stlm_ets" - Connects to \code{forecast::stlm()}, \code{method = "ets"}
\item "stlm_arima" - Connects to \code{forecast::stlm()}, \code{method = "arima"}
}
}
\section{Engine Details}{
The standardized parameter names in \code{modeltime} can be mapped to their original
names in each engine:\tabular{lll}{
modeltime \tab forecast::stlm \tab forecast::tbats \cr
seasonal_period_1, seasonal_period_2, seasonal_period_3 \tab msts(seasonal.periods) \tab msts(seasonal.periods) \cr
}
Other options can be set using \code{set_engine()}.
The engines use \code{forecast::stlm()}.
Function Parameters:\preformatted{## function (y, s.window = 7 + 4 * seq(6), robust = FALSE, method = c("ets",
## "arima"), modelfunction = NULL, model = NULL, etsmodel = "ZZN", lambda = NULL,
## biasadj = FALSE, xreg = NULL, allow.multiplicative.trend = FALSE, x = y,
## ...)
}
\strong{tbats}
\itemize{
\item \strong{Method:} Uses \code{method = "tbats"}, which by default is auto-TBATS.
\item \strong{Xregs:} Univariate. Cannot accept Exogenous Regressors (xregs). Xregs are ignored.
}
\strong{stlm_ets}
\itemize{
\item \strong{Method:} Uses \code{method = "stlm_ets"}, which by default is auto-ETS.
\item \strong{Xregs:} Univariate. Cannot accept Exogenous Regressors (xregs). Xregs are ignored.
}
\strong{stlm_arima}
\itemize{
\item \strong{Method:} Uses \code{method = "stlm_arima"}, which by default is auto-ARIMA.
\item \strong{Xregs:} Multivariate. Can accept Exogenous Regressors (xregs).
}
}
\section{Fit Details}{
\strong{Date and Date-Time Variable}
It's a requirement to have a date or date-time variable as a predictor.
The \code{fit()} interface accepts date and date-time features and handles them internally.
\itemize{
\item \code{fit(y ~ date)}
}
\emph{Seasonal Period Specification}
The period can be non-seasonal (\verb{seasonal_period = 1 or "none"}) or
yearly seasonal (e.g. For monthly time stamps, \code{seasonal_period = 12}, \code{seasonal_period = "12 months"}, or \code{seasonal_period = "yearly"}).
There are 3 ways to specify:
\enumerate{
\item \code{seasonal_period = "auto"}: A seasonal period is selected based on the periodicity of the data (e.g. 12 if monthly)
\item \code{seasonal_period = 12}: A numeric frequency. For example, 12 is common for monthly data
\item \code{seasonal_period = "1 year"}: A time-based phrase. For example, "1 year" would convert to 12 for monthly data.
}
\strong{Univariate (No xregs, Exogenous Regressors):}
For univariate analysis, you must include a date or date-time feature. Simply use:
\itemize{
\item Formula Interface (recommended): \code{fit(y ~ date)} will ignore xreg's.
\item XY Interface: \code{fit_xy(x = data[,"date"], y = data$y)} will ignore xreg's.
}
\strong{Multivariate (xregs, Exogenous Regressors)}
\itemize{
\item The \code{tbats} engine \emph{cannot} accept Xregs.
\item The \code{stlm_ets} engine \emph{cannot} accept Xregs.
\item The \code{stlm_arima} engine \emph{can} accept Xregs
}
The \code{xreg} parameter is populated using the \code{fit()} or \code{fit_xy()} function:
\itemize{
\item Only \code{factor}, \verb{ordered factor}, and \code{numeric} data will be used as xregs.
\item Date and Date-time variables are not used as xregs
\item \code{character} data should be converted to factor.
}
\emph{Xreg Example:} Suppose you have 3 features:
\enumerate{
\item \code{y} (target)
\item \code{date} (time stamp),
\item \code{month.lbl} (labeled month as an ordered factor).
}
The \code{month.lbl} is an exogenous regressor that can be passed to the \code{seasonal_reg()} using
\code{fit()}:
\itemize{
\item \code{fit(y ~ date + month.lbl)} will pass \code{month.lbl} on as an exogenous regressor.
\item \code{fit_xy(data[,c("date", "month.lbl")], y = data$y)} will pass x, where x is a data frame containing \code{month.lbl}
and the \code{date} feature. Only \code{month.lbl} will be used as an exogenous regressor.
}
Note that date or date-time class values are excluded from \code{xreg}.
}
\examples{
library(dplyr)
library(parsnip)
library(rsample)
library(timetk)
library(modeltime)
# Data
taylor_30_min
# Split Data 80/20
splits <- initial_time_split(taylor_30_min, prop = 0.8)
# ---- STLM ETS ----
# Model Spec
model_spec <- seasonal_reg() \%>\%
set_engine("stlm_ets")
# Fit Spec
model_fit <- model_spec \%>\%
fit(log(value) ~ date, data = training(splits))
model_fit
# ---- STLM ARIMA ----
# Model Spec
model_spec <- seasonal_reg() \%>\%
set_engine("stlm_arima")
# Fit Spec
model_fit <- model_spec \%>\%
fit(log(value) ~ date, data = training(splits))
model_fit
}
\seealso{
\code{\link[=fit.model_spec]{fit.model_spec()}}, \code{\link[=set_engine]{set_engine()}}
}
| /man/seasonal_reg.Rd | permissive | silverf62/modeltime | R | false | true | 6,497 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parsnip-seasonal_reg.R
\name{seasonal_reg}
\alias{seasonal_reg}
\title{General Interface for Multiple Seasonality Regression Models (TBATS, STLM)}
\usage{
seasonal_reg(
mode = "regression",
seasonal_period_1 = NULL,
seasonal_period_2 = NULL,
seasonal_period_3 = NULL
)
}
\arguments{
\item{mode}{A single character string for the type of model.
The only possible value for this model is "regression".}
\item{seasonal_period_1}{(required) The primary seasonal frequency.
Uses \code{"auto"} by default.
A character phrase of "auto" or time-based phrase of "2 weeks"
can be used if a date or date-time variable is provided.
See Fit Details below.}
\item{seasonal_period_2}{(optional) A second seasonal frequency.
Is \code{NULL} by default.
A character phrase of "auto" or time-based phrase of "2 weeks"
can be used if a date or date-time variable is provided.
See Fit Details below.}
\item{seasonal_period_3}{(optional) A third seasonal frequency.
Is \code{NULL} by default.
A character phrase of "auto" or time-based phrase of "2 weeks"
can be used if a date or date-time variable is provided.
See Fit Details below.}
}
\description{
\code{seasonal_reg()} is a way to generate a \emph{specification} of a
Seasonal Decomposition model
before fitting and allows the model to be created using
different packages. Currently the only package is \code{forecast}.
}
\details{
The data given to the function are not saved and are only used
to determine the \emph{mode} of the model. For \code{seasonal_reg()}, the
mode will always be "regression".
The model can be created using the \code{fit()} function using the
following \emph{engines}:
\itemize{
\item "tbats" - Connects to \code{forecast::tbats()}
\item "stlm_ets" - Connects to \code{forecast::stlm()}, \code{method = "ets"}
\item "stlm_arima" - Connects to \code{forecast::stlm()}, \code{method = "arima"}
}
}
\section{Engine Details}{
The standardized parameter names in \code{modeltime} can be mapped to their original
names in each engine:\tabular{lll}{
modeltime \tab forecast::stlm \tab forecast::tbats \cr
seasonal_period_1, seasonal_period_2, seasonal_period_3 \tab msts(seasonal.periods) \tab msts(seasonal.periods) \cr
}
Other options can be set using \code{set_engine()}.
The engines use \code{forecast::stlm()}.
Function Parameters:\preformatted{## function (y, s.window = 7 + 4 * seq(6), robust = FALSE, method = c("ets",
## "arima"), modelfunction = NULL, model = NULL, etsmodel = "ZZN", lambda = NULL,
## biasadj = FALSE, xreg = NULL, allow.multiplicative.trend = FALSE, x = y,
## ...)
}
\strong{tbats}
\itemize{
\item \strong{Method:} Uses \code{method = "tbats"}, which by default is auto-TBATS.
\item \strong{Xregs:} Univariate. Cannot accept Exogenous Regressors (xregs). Xregs are ignored.
}
\strong{stlm_ets}
\itemize{
\item \strong{Method:} Uses \code{method = "stlm_ets"}, which by default is auto-ETS.
\item \strong{Xregs:} Univariate. Cannot accept Exogenous Regressors (xregs). Xregs are ignored.
}
\strong{stlm_arima}
\itemize{
\item \strong{Method:} Uses \code{method = "stlm_arima"}, which by default is auto-ARIMA.
\item \strong{Xregs:} Multivariate. Can accept Exogenous Regressors (xregs).
}
}
\section{Fit Details}{
\strong{Date and Date-Time Variable}
It's a requirement to have a date or date-time variable as a predictor.
The \code{fit()} interface accepts date and date-time features and handles them internally.
\itemize{
\item \code{fit(y ~ date)}
}
\emph{Seasonal Period Specification}
The period can be non-seasonal (\verb{seasonal_period = 1 or "none"}) or
yearly seasonal (e.g. For monthly time stamps, \code{seasonal_period = 12}, \code{seasonal_period = "12 months"}, or \code{seasonal_period = "yearly"}).
There are 3 ways to specify:
\enumerate{
\item \code{seasonal_period = "auto"}: A seasonal period is selected based on the periodicity of the data (e.g. 12 if monthly)
\item \code{seasonal_period = 12}: A numeric frequency. For example, 12 is common for monthly data
\item \code{seasonal_period = "1 year"}: A time-based phrase. For example, "1 year" would convert to 12 for monthly data.
}
\strong{Univariate (No xregs, Exogenous Regressors):}
For univariate analysis, you must include a date or date-time feature. Simply use:
\itemize{
\item Formula Interface (recommended): \code{fit(y ~ date)} will ignore xreg's.
\item XY Interface: \code{fit_xy(x = data[,"date"], y = data$y)} will ignore xreg's.
}
\strong{Multivariate (xregs, Exogenous Regressors)}
\itemize{
\item The \code{tbats} engine \emph{cannot} accept Xregs.
\item The \code{stlm_ets} engine \emph{cannot} accept Xregs.
\item The \code{stlm_arima} engine \emph{can} accept Xregs
}
The \code{xreg} parameter is populated using the \code{fit()} or \code{fit_xy()} function:
\itemize{
\item Only \code{factor}, \verb{ordered factor}, and \code{numeric} data will be used as xregs.
\item Date and Date-time variables are not used as xregs
\item \code{character} data should be converted to factor.
}
\emph{Xreg Example:} Suppose you have 3 features:
\enumerate{
\item \code{y} (target)
\item \code{date} (time stamp),
\item \code{month.lbl} (labeled month as an ordered factor).
}
The \code{month.lbl} is an exogenous regressor that can be passed to the \code{seasonal_reg()} using
\code{fit()}:
\itemize{
\item \code{fit(y ~ date + month.lbl)} will pass \code{month.lbl} on as an exogenous regressor.
\item \code{fit_xy(data[,c("date", "month.lbl")], y = data$y)} will pass x, where x is a data frame containing \code{month.lbl}
and the \code{date} feature. Only \code{month.lbl} will be used as an exogenous regressor.
}
Note that date or date-time class values are excluded from \code{xreg}.
}
\examples{
library(dplyr)
library(parsnip)
library(rsample)
library(timetk)
library(modeltime)
# Data
taylor_30_min
# Split Data 80/20
splits <- initial_time_split(taylor_30_min, prop = 0.8)
# ---- STLM ETS ----
# Model Spec
model_spec <- seasonal_reg() \%>\%
set_engine("stlm_ets")
# Fit Spec
model_fit <- model_spec \%>\%
fit(log(value) ~ date, data = training(splits))
model_fit
# ---- STLM ARIMA ----
# Model Spec
model_spec <- seasonal_reg() \%>\%
set_engine("stlm_arima")
# Fit Spec
model_fit <- model_spec \%>\%
fit(log(value) ~ date, data = training(splits))
model_fit
}
\seealso{
\code{\link[=fit.model_spec]{fit.model_spec()}}, \code{\link[=set_engine]{set_engine()}}
}
|
# Fuzzer-generated regression input: extreme, denormal, and out-of-range
# numeric values intended to stress the evapotranspiration routine.
# NOTE(review): the values are auto-generated, not physically meaningful.
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61630111770444e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Call the internal (non-exported) meteor routine with the generated arguments.
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615828427-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 734 | r | testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61630111770444e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
#' @title A Real Experiment Dose Data
#' @description A group of real experiment data based on up-and-down method.
#' @docType data
#' @keywords datasets
#' @name groupSN
#' @usage groupSN
#' @format A data frame with 38 samples and 2 variables:
#' \describe{
#' \item{responseSequence}{A value of 0 or 1 indicating the experiment outcome.
#' 0 refers to a failure outcome while 1 refers to a success.}
#' \item{doseSequence}{The dose given in each experiment.}
#' }
#' @source The data is from the article in the references below.
#' @references Niu B, Xiao JY, Fang Y, et al. Sevoflurane-induced isoelectric EEG and burst suppression: differential and
#' antagonistic effect of added nitrous oxide. Anaesthesia 2017; 72: 570-9.
NULL
| /R/groupSN.R | no_license | cran/ed50 | R | false | false | 744 | r | #' @title A Real Experiment Dose Data
#' @description A group of real experiment data based on up-and-down method.
#' @docType data
#' @keywords datasets
#' @name groupSN
#' @usage groupSN
#' @format A data frame with 38 samples and 2 variables:
#' \describe{
#' \item{responseSequence}{A value of 0 or 1 indicating the experiment outcome.
#' 0 refers to a failure outcome while 1 refers to a success.}
#' \item{doseSequence}{The dose given in each experiment.}
#' }
#' @source The data is from the article in the references below.
#' @references Niu B, Xiao JY, Fang Y, et al. Sevoflurane-induced isoelectric EEG and burst suppression: differential and
#' antagonistic effect of added nitrous oxide. Anaesthesia 2017; 72: 570-9.
NULL
|
# Render every R Markdown document found under Rmd/ into output/,
# knitting with the current working directory as the root.
input_files <- list.files("Rmd/", full.names = TRUE)
rmarkdown::render(
  input = input_files,
  output_dir = "output/",
  knit_root_dir = getwd()
)
| /example/basic-R-Markdown/render.R | permissive | GiuseppeTT/R-Project-Structure | R | false | false | 128 | r | rmarkdown::render(
input = list.files("Rmd/", full.names = TRUE),
output_dir = "output/",
knit_root_dir = getwd()
)
|
library("lattice")
library(gridExtra)
## Example data
data=matrix(FractionEradicated[rev(1:length(c2range)),] , length(c1range) , length(c2range))
rownames(data)=paste( rep("",length(c1range)) , rev(c1range) , sep=" ")
colnames(data)= paste( rep("",length(c2range)), c2range , sep=" ")
## Try it out
## Try it out
p1<-levelplot(t(data[c(nrow(data):1) , ]), xlab="Empathy constant (c2)",ylab="Risk aversion constant (c1)",main="Beta=0.1")
p2<-levelplot(t(data[c(nrow(data):1) , ]), xlab="Empathy constant (c2)",ylab="Risk aversion constant (c1)",main="Beta=0.2")
p3<-levelplot(t(data[c(nrow(data):1) , ]), xlab="Empathy constant (c2)",ylab="Risk aversion constant (c1)",main="Beta=0.3, No Jerks")
grid.arrange(p3, nrow = 1) | /Heatmap_code.R | no_license | marlinfiggins/DiseaseNetworkGamesSIS | R | false | false | 732 | r | library("lattice")
library(gridExtra)
## Example data
data=matrix(FractionEradicated[rev(1:length(c2range)),] , length(c1range) , length(c2range))
rownames(data)=paste( rep("",length(c1range)) , rev(c1range) , sep=" ")
colnames(data)= paste( rep("",length(c2range)), c2range , sep=" ")
## Try it out
## Try it out
p1<-levelplot(t(data[c(nrow(data):1) , ]), xlab="Empathy constant (c2)",ylab="Risk aversion constant (c1)",main="Beta=0.1")
p2<-levelplot(t(data[c(nrow(data):1) , ]), xlab="Empathy constant (c2)",ylab="Risk aversion constant (c1)",main="Beta=0.2")
p3<-levelplot(t(data[c(nrow(data):1) , ]), xlab="Empathy constant (c2)",ylab="Risk aversion constant (c1)",main="Beta=0.3, No Jerks")
grid.arrange(p3, nrow = 1) |
##' Tabulate the results of a generalized linear regression analysis.
##'
##' The table shows changes in mean for linear regression and
##' odds ratios for logistic regression (family = binomial).
##' @title Tabulate regression coefficients with confidence intervals and p-values.
##' @export
##' @param object A \code{glm} object.
##' @param confint.method See \code{regressionTable}.
##' @param pvalue.method See \code{regressionTable}.
##' @param digits A vector of two integer values. These determine how to round
##' numbers (first value) and p-values (second value). E.g., c(1,3) would
##' mean 1 digit for all numbers and 3 digits for p-values.
##' The actual rounding is done by \code{summary.regressionTable}.
##' @param print If \code{FALSE} do not print results.
##' @param factor.reference Style for showing results for categorical. See \code{regressionTable}.
##' @param intercept See \code{regressionTable}.
##' @param units See \code{regressionTable}.
##' @param ... passed to \code{summary.regressionTable} and also
##' to \code{labelUnits}.
##' @param reference Style for showing results for categorical
##' variables. If \code{"extraline"} show an additional line for the
##' reference category.
##' @return Table with regression coefficients, confidence intervals and p-values.
##' @author Thomas Alexander Gerds <tag@@biostat.ku.dk>
##' @examples
##' data(Diabetes)
##' ## Linear regression
##' f = glm(bp.2s~frame+gender+age,data=Diabetes)
##' publish(f)
##' publish(f,factor.reference="inline")
##' publish(f,pvalue.stars=TRUE)
##' publish(f,ci.format="(l,u)")
##'
##' ### interaction
##' fit = glm(bp.2s~frame+gender*age,data=Diabetes)
##' summary(fit)
##' publish(fit)
##'
##' Fit = glm(bp.2s~frame*gender+age,data=Diabetes)
##' publish(Fit)
##'
##' ## Logistic regression
##' Diabetes$hyper1 <- factor(1*(Diabetes$bp.1s>140))
##' lrfit <- glm(hyper1~frame+gender+age,data=Diabetes,family=binomial)
##' publish(lrfit)
##'
##' ### interaction
##' lrfit1 <- glm(hyper1~frame+gender*age,data=Diabetes,family=binomial)
##' publish(lrfit1)
##'
##' lrfit2 <- glm(hyper1~frame*gender+age,data=Diabetes,family=binomial)
##' publish(lrfit2)
##'
##' ## Poisson regression
##' data(trace)
##' trace <- Units(trace,list("age"="years"))
##' fit <- glm(dead ~ smoking+sex+age+Time+offset(log(ObsTime)), family="poisson",data=trace)
##' rtf <- regressionTable(fit,factor.reference = "inline")
##' summary(rtf)
##' publish(fit)
##'
##' ## gls regression
##' if (requireNamespace("nlme",quietly=TRUE)){
##' requireNamespace("lava",quietly=TRUE)
##' library(lava)
##' library(nlme)
##' m <- lvm(Y ~ X1 + gender + group + Interaction)
##' distribution(m, ~gender) <- binomial.lvm()
##' distribution(m, ~group) <- binomial.lvm(size = 2)
##' constrain(m, Interaction ~ gender + group) <- function(x){x[,1]*x[,2]}
##' d <- sim(m, 1e2)
##' d$gender <- factor(d$gender, labels = letters[1:2])
##' d$group <- factor(d$group)
##'
##' e.gls <- gls(Y ~ X1 + gender*group, data = d,
##' weights = varIdent(form = ~1|group))
##' publish(e.gls)
##'
##' ## lme
##' fm1 <- lme(distance ~ age*Sex,
##' random = ~1|Subject,
##' data = Orthodont)
##' res <- publish(fm1)
##' }
##' @export
publish.glm <- function(object,
                        confint.method,
                        pvalue.method,
                        digits=c(2,4),
                        print=TRUE,
                        factor.reference="extraline",
                        intercept=ifelse((is.null(object$family)||object$family$family=="gaussian"),1L,0L),
                        units=NULL,
                        ...){
    ## Fall back to the default confidence interval method when none is given.
    if (missing(confint.method)) confint.method <- "default"
    ## Keep the p-value method in sync with the confidence interval method:
    ## "robust" and "simultaneous" carry over, anything else uses "default".
    if (missing(pvalue.method)) {
        if (confint.method %in% c("robust","simultaneous")) {
            pvalue.method <- confint.method
        } else {
            pvalue.method <- "default"
        }
    }
    ## Build the raw regression table from the fitted model ...
    reg.table <- regressionTable(object,
                                 confint.method=confint.method,
                                 pvalue.method=pvalue.method,
                                 factor.reference=factor.reference,
                                 intercept=intercept,
                                 units=units)
    ## ... then format it (rounding, labels) without printing yet.
    formatted <- summary.regressionTable(reg.table,
                                         digits=digits,
                                         print=FALSE,...)
    if (print==TRUE)
        publish(formatted$regressionTable,...)
    ## Return the formatted table invisibly so the function pipes cleanly.
    invisible(formatted)
}
## The publish methods for lm, gls, lme and geeglm model objects reuse the
## glm implementation above; each alias is exported as its own S3 method.
##' @export
publish.lm <- publish.glm
##' @export
publish.gls <- publish.glm
##' @export
publish.lme <- publish.glm
##' @export
publish.geeglm <- publish.glm
| /R/publish.glm.R | no_license | tagteam/Publish | R | false | false | 4,614 | r | ##' Tabulate the results of a generalized linear regression analysis.
##'
##' The table shows changes in mean for linear regression and
##' odds ratios for logistic regression (family = binomial).
##' @title Tabulate regression coefficients with confidence intervals and p-values.
##' @export
##' @param object A \code{glm} object.
##' @param confint.method See \code{regressionTable}.
##' @param pvalue.method See \code{regressionTable}.
##' @param digits A vector of two integer values. These determine how to round
##' numbers (first value) and p-values (second value). E.g., c(1,3) would
##' mean 1 digit for all numbers and 3 digits for p-values.
##' The actual rounding is done by \code{summary.regressionTable}.
##' @param print If \code{FALSE} do not print results.
##' @param factor.reference Style for showing results for categorical. See \code{regressionTable}.
##' @param intercept See \code{regressionTable}.
##' @param units See \code{regressionTable}.
##' @param ... passed to \code{summary.regressionTable} and also
##' to \code{labelUnits}.
##' @param reference Style for showing results for categorical
##' variables. If \code{"extraline"} show an additional line for the
##' reference category.
##' @return Table with regression coefficients, confidence intervals and p-values.
##' @author Thomas Alexander Gerds <tag@@biostat.ku.dk>
##' @examples
##' data(Diabetes)
##' ## Linear regression
##' f = glm(bp.2s~frame+gender+age,data=Diabetes)
##' publish(f)
##' publish(f,factor.reference="inline")
##' publish(f,pvalue.stars=TRUE)
##' publish(f,ci.format="(l,u)")
##'
##' ### interaction
##' fit = glm(bp.2s~frame+gender*age,data=Diabetes)
##' summary(fit)
##' publish(fit)
##'
##' Fit = glm(bp.2s~frame*gender+age,data=Diabetes)
##' publish(Fit)
##'
##' ## Logistic regression
##' Diabetes$hyper1 <- factor(1*(Diabetes$bp.1s>140))
##' lrfit <- glm(hyper1~frame+gender+age,data=Diabetes,family=binomial)
##' publish(lrfit)
##'
##' ### interaction
##' lrfit1 <- glm(hyper1~frame+gender*age,data=Diabetes,family=binomial)
##' publish(lrfit1)
##'
##' lrfit2 <- glm(hyper1~frame*gender+age,data=Diabetes,family=binomial)
##' publish(lrfit2)
##'
##' ## Poisson regression
##' data(trace)
##' trace <- Units(trace,list("age"="years"))
##' fit <- glm(dead ~ smoking+sex+age+Time+offset(log(ObsTime)), family="poisson",data=trace)
##' rtf <- regressionTable(fit,factor.reference = "inline")
##' summary(rtf)
##' publish(fit)
##'
##' ## gls regression
##' if (requireNamespace("nlme",quietly=TRUE)){
##' requireNamespace("lava",quietly=TRUE)
##' library(lava)
##' library(nlme)
##' m <- lvm(Y ~ X1 + gender + group + Interaction)
##' distribution(m, ~gender) <- binomial.lvm()
##' distribution(m, ~group) <- binomial.lvm(size = 2)
##' constrain(m, Interaction ~ gender + group) <- function(x){x[,1]*x[,2]}
##' d <- sim(m, 1e2)
##' d$gender <- factor(d$gender, labels = letters[1:2])
##' d$group <- factor(d$group)
##'
##' e.gls <- gls(Y ~ X1 + gender*group, data = d,
##' weights = varIdent(form = ~1|group))
##' publish(e.gls)
##'
##' ## lme
##' fm1 <- lme(distance ~ age*Sex,
##' random = ~1|Subject,
##' data = Orthodont)
##' res <- publish(fm1)
##' }
##' @export
publish.glm <- function(object,
confint.method,
pvalue.method,
digits=c(2,4),
print=TRUE,
factor.reference="extraline",
intercept=ifelse((is.null(object$family)||object$family$family=="gaussian"),1L,0L),
units=NULL,
...){
if (missing(confint.method)) confint.method="default"
if (missing(pvalue.method))
pvalue.method=switch(confint.method,
"robust"={"robust"},
"simultaneous"={"simultaneous"},
"default")
rt <- regressionTable(object,
confint.method=confint.method,
pvalue.method=pvalue.method,
factor.reference=factor.reference,
intercept=intercept,
units=units)
srt <- summary.regressionTable(rt,
digits=digits,
print=FALSE,...)
if (print==TRUE)
publish(srt$regressionTable,...)
invisible(srt)
}
##' @export
publish.lm <- publish.glm
##' @export
publish.gls <- publish.glm
##' @export
publish.lme <- publish.glm
##' @export
publish.geeglm <- publish.glm
|
#' Sample Means Ns
#'
#' Draw repeated random sample means from a vector for several sample sizes,
#' returning them in long format (one row per simulated sample).
#'
#' @param vec a numeric vector, to be selected from
#' @param reps integer, quantity of times to sample for each value of ns
#' @param ns vector of integers, sample sizes
#'
#' @return A data.frame with \code{reps * length(ns)} rows and two columns:
#'   \code{sample_mean}, the mean of one random sample, and \code{n}, the
#'   sample size that produced it.
#' @export
#'
#' @examples
#' \dontrun{
#' sample_means_ns(rnorm(1000), reps = 100, ns = c(10, 50, 100))
#' }
sample_means_ns <- function(vec, reps, ns){
  # For each sample size, many_sample_means() is assumed to return `reps`
  # sample means (TODO confirm against its definition); unlist() flattens
  # them so each mean pairs with its generating size via rep(ns, each = reps).
  data.frame(
    sample_mean = unlist(map(ns, function(x) many_sample_means(vec, x, reps))),
    n = rep(ns, each=reps)
  )
}
| /R/sample_means_ns.R | no_license | GenghisKhandybar/meansRepo | R | false | false | 406 | r | #' Sample Means Ns
#'
#' @param vec a numeric vector, to be selected from
#' @param reps integer, quantity of times to sample for each value of ns
#' @param ns vector of integers, sample sizes
#'
#' @return
#' @export
#'
#' @examples
sample_means_ns <- function(vec, reps, ns){
data.frame(
sample_mean = unlist(map(ns, function(x) many_sample_means(vec, x, reps))),
n = rep(ns, each=reps)
)
}
|
## ----------------------------------------------------------------------------------------
# Phylogenetic regression (PGLS) of parrot call fundamental frequency on body mass.
library(ape)
library(phytools)
library(caper)       # provides comparative.data() and pgls(); was never attached before
library(tidyverse)
#tree and data
treeparrot<-read.nexus("../data/ptree2")

## ----------------------------------------------------------------------------------------
# Body mass: average the published per-species means from the Dunning measurements.
Body_mass <- read.csv(("../../humm-swift/data/dunning_part1_measures.csv"))
Body_mass <- plyr::ddply(
  Body_mass,
  plyr::.(Species.name),
  function(i) {
    data.frame(
      mass_mean= mean(i$mean, na.rm = TRUE)
    )
  })
library(tidyverse)
Body_mass <- rename(Body_mass,Species= Species.name)
# Match the tree's tip-label convention "Genus_species".
Body_mass <- mutate(Body_mass, Species= str_replace(Species, " ", "_"))
# Append species missing from the main table, then drop bookkeeping columns.
extraparrot <- read.csv("../data/unique parrots.csv")
Body_mass <- bind_rows(Body_mass,extraparrot) %>%
  dplyr::select(-c(notes, reference))

## ----------------------------------------------------------------------------------------
parrotdata <- read.table("../data/formatparrotforphy.txt", header = TRUE)
parrotdata <- left_join(parrotdata, Body_mass)

## ----------------------------------------------------------------------------------------
# Prune the tree to species with data, then bundle tree + data for caper.
newparrottree<-drop.tip(treeparrot, setdiff(treeparrot$tip.label, parrotdata$Species))
plotTree(newparrottree)
setdiff(parrotdata$Species, newparrottree$tip.label)  # sanity check: should be empty
compdata<-comparative.data(newparrottree, parrotdata, names.col="Species")

## ----------------------------------------------------------------------------------------
# PGLS of log fundamental frequency on log body mass.
PGLS<-pgls(log(f0_mean) ~ log(mass_mean), data=compdata)
summary(PGLS)

## ----------------------------------------------------------------------------------------
library(ggplot2)
# The plot: species points with the fitted PGLS line overlaid.
ggplot(parrotdata, aes(log(mass_mean), log(f0_mean))) +
  geom_point(size=5) +
  # geom_text(aes(label=Species),hjust=-0.1, vjust=0.4) +
  xlab("ln Body mass (g)") +
  ylab("ln Fundamental frequency (Hz)") +  # axis plots log(f0_mean); label fixed to say so
  geom_abline(
    intercept=PGLS$model$coef[1],
    slope=PGLS$model$coef[2],
    colour="red",
    size=1.3
  )
library(ape)      # read.nexus(), drop.tip()
library(phytools) # plotTree()
library(caper)    # comparative.data() and pgls() — was never loaded in the original
library(tidyverse)

# Tree and data --------------------------------------------------------------
treeparrot <- read.nexus("../data/ptree2")

## ----------------------------------------------------------------------------------------
# Body mass: average the reported per-study means within each species.
Body_mass <- read.csv("../../humm-swift/data/dunning_part1_measures.csv")
Body_mass <- plyr::ddply(
  Body_mass,
  plyr::.(Species.name),
  function(i) {
    data.frame(
      mass_mean = mean(i$mean, na.rm = TRUE)
    )
  })

# Match the tree's tip-label format: "Genus_species"
Body_mass <- rename(Body_mass, Species = Species.name)
Body_mass <- mutate(Body_mass, Species = str_replace(Species, " ", "_"))

# Add species missing from the Dunning data set, then drop bookkeeping columns
extraparrot <- read.csv("../data/unique parrots.csv")
Body_mass <- bind_rows(Body_mass, extraparrot) %>%
  dplyr::select(-c(notes, reference))

## ----------------------------------------------------------------------------------------
parrotdata <- read.table("../data/formatparrotforphy.txt", header = TRUE)
parrotdata <- left_join(parrotdata, Body_mass)

## ----------------------------------------------------------------------------------------
# Prune the tree to the species present in the data; the setdiff() call is a
# visual check that no data species is missing from the pruned tree.
newparrottree <- drop.tip(treeparrot, setdiff(treeparrot$tip.label, parrotdata$Species))
plotTree(newparrottree)
setdiff(parrotdata$Species, newparrottree$tip.label)
compdata <- comparative.data(newparrottree, parrotdata, names.col = "Species")

## ----------------------------------------------------------------------------------------
# PGLS regression of log fundamental frequency (f0) on log body mass
PGLS <- pgls(log(f0_mean) ~ log(mass_mean), data = compdata)
summary(PGLS)

## ----------------------------------------------------------------------------------------
library(ggplot2)
# The plot: both axes are on the natural-log scale, so label them accordingly
ggplot(parrotdata, aes(log(mass_mean), log(f0_mean))) +
  geom_point(size = 5) +
  # geom_text(aes(label = Species), hjust = -0.1, vjust = 0.4) +
  xlab("ln Body mass (g)") +
  ylab("ln Fundamental frequency (Hz)") +
  geom_abline(
    intercept = PGLS$model$coef[1],
    slope = PGLS$model$coef[2],
    colour = "red",
    size = 1.3
  )
|
set.seed(555)
possible.ns <- seq(from = 100, to = 2000, by = 50) # The sample sizes we'll be considering
powers_1 <- rep(NA, length(possible.ns))           # Simulated power, full effect size
alpha <- 0.05                                      # Standard significance level
sims <- 500                                        # Number of simulations per N
# ==================================================
# Effect size 1: full effect from Hainmueller et al. (2017)
# ==================================================
tau_full <- 0.02 - (-0.17) # Hypothesized treatment effect (loop-invariant, hoisted)
#### Outer loop to vary the number of subjects ####
for (j in seq_along(possible.ns)) {
  N <- possible.ns[j]                              # Pick the jth value for N
  significant.experiments <- rep(NA, sims)         # Collect significance indicators
  #### Inner loop to conduct experiments "sims" times over for each N ####
  for (i in seq_len(sims)) {
    Y0 <- rnorm(n = N, mean = -0.17, sd = 0.49)    # control potential outcome
    Y1 <- Y0 + tau_full                            # treatment potential outcome
    Z.sim <- rbinom(n = N, size = 1, prob = .5)    # random assignment
    Y.sim <- Y1 * Z.sim + Y0 * (1 - Z.sim)         # reveal outcomes per assignment
    fit.sim <- lm(Y.sim ~ Z.sim)                   # simple regression
    p.value <- summary(fit.sim)$coefficients[2, 4] # p-value of treatment indicator
    significant.experiments[i] <- (p.value <= alpha)
  }
  powers_1[j] <- mean(significant.experiments)     # rejection rate = power for this N
}
# ==================================================
# Effect size 2: half the effect from Hainmueller et al. (2017)
# ==================================================
set.seed(555)
powers_2 <- rep(NA, length(possible.ns))           # Simulated power, half effect size
tau_half <- (0.02 - (-0.17)) / 2                   # Hypothesized treatment effect
#### Outer loop to vary the number of subjects ####
for (j in seq_along(possible.ns)) {
  N <- possible.ns[j]
  significant.experiments <- rep(NA, sims)
  #### Inner loop to conduct experiments "sims" times over for each N ####
  for (i in seq_len(sims)) {
    Y0 <- rnorm(n = N, mean = -0.17, sd = 0.49)    # control potential outcome
    Y1 <- Y0 + tau_half                            # treatment potential outcome
    Z.sim <- rbinom(n = N, size = 1, prob = .5)    # random assignment
    Y.sim <- Y1 * Z.sim + Y0 * (1 - Z.sim)         # reveal outcomes per assignment
    fit.sim <- lm(Y.sim ~ Z.sim)                   # simple regression
    p.value <- summary(fit.sim)$coefficients[2, 4] # p-value of treatment indicator
    significant.experiments[i] <- (p.value <= alpha)
  }
  powers_2[j] <- mean(significant.experiments)
}
# ==================================================
# Gather in datasets for plotting
# ==================================================
# Long format with both effect sizes, for the two-series plot. NOTE: the
# original code overwrote this object before plotting, so the first plot
# subset on a 'group' column that no longer existed and failed. Keeping two
# separately named data frames fixes that.
power_df_grouped <- data.frame(
  sample_size = rep(possible.ns, 2),
  power = c(powers_1, powers_2),
  group = factor(c(
    rep("Fuld effektstørrelse fra Hainmueller et al. (2017)", length(possible.ns)),
    rep("Halv effektstørrelse fra Hainmueller et al. (2017)", length(possible.ns))
  ))
)
# Single-series data (full effect size only) for the second plot
power_df <- data.frame(sample_size = possible.ns,
                       power = powers_1)
scaleFUN <- function(x) sprintf("%.1f", x) # one-decimal y-axis labels
# ==================================================
# Plot it
# ==================================================
library(ggplot2)
# Both effect sizes, with a dashed reference line at power = 0.9
ggplot(power_df_grouped, aes(x = sample_size, y = power)) +
  geom_segment(aes(x = 0, xend = 2000, y = 0.9, yend = 0.9), linetype = 2, size = 0.5) +
  geom_line(data = subset(power_df_grouped, group == "Fuld effektstørrelse fra Hainmueller et al. (2017)")) +
  geom_line(data = subset(power_df_grouped, group == "Halv effektstørrelse fra Hainmueller et al. (2017)")) +
  geom_point(aes(shape = group), size = 3, fill = "white", color = "black",
             alpha = 1) +
  scale_shape_manual(values = c(21, 24)) +
  labs(x = "Stikprøvestørrelse",
       y = "Power") +
  scale_x_continuous(breaks = seq(0, 2000, 250),
                     labels = c("0", "250", "500", "750", "1.000", "1.250", "1.500", "1.750", "2.000"),
                     limits = c(0, 2000)) +
  scale_y_continuous(breaks = seq(0.1, 1, .1),
                     labels = scaleFUN,
                     limits = c(0.1, 1)) +
  theme_bw() +
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        legend.direction = "vertical")
# Full effect size only
ggplot(power_df, aes(x = sample_size, y = power)) +
  geom_segment(aes(x = 0, xend = 2000, y = 0.9, yend = 0.9), linetype = 2, size = 0.5) +
  geom_line() +
  geom_point(size = 3, fill = "white", color = "black",
             shape = 21,
             alpha = 1) +
  labs(x = "Stikprøvestørrelse",
       y = "Power") +
  scale_x_continuous(breaks = seq(0, 2000, 250),
                     labels = c("0", "250", "500", "750", "1.000", "1.250", "1.500", "1.750", "2.000"),
                     limits = c(0, 2000)) +
  scale_y_continuous(breaks = seq(0.1, 1, .1),
                     labels = scaleFUN,
                     limits = c(0.1, 1)) +
  theme_bw() +
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        legend.direction = "vertical")
| /kgb.R | no_license | jvieroe/snippets | R | false | false | 5,574 | r | set.seed(555)
possible.ns <- seq(from = 100, to = 2000, by = 50) # The sample sizes we'll be considering
powers_1 <- rep(NA, length(possible.ns))           # Simulated power, full effect size
alpha <- 0.05                                      # Standard significance level
sims <- 500                                        # Number of simulations per N
# ==================================================
# Effect size 1: full effect from Hainmueller et al. (2017)
# ==================================================
tau_full <- 0.02 - (-0.17) # Hypothesized treatment effect (loop-invariant, hoisted)
#### Outer loop to vary the number of subjects ####
for (j in seq_along(possible.ns)) {
  N <- possible.ns[j]                              # Pick the jth value for N
  significant.experiments <- rep(NA, sims)         # Collect significance indicators
  #### Inner loop to conduct experiments "sims" times over for each N ####
  for (i in seq_len(sims)) {
    Y0 <- rnorm(n = N, mean = -0.17, sd = 0.49)    # control potential outcome
    Y1 <- Y0 + tau_full                            # treatment potential outcome
    Z.sim <- rbinom(n = N, size = 1, prob = .5)    # random assignment
    Y.sim <- Y1 * Z.sim + Y0 * (1 - Z.sim)         # reveal outcomes per assignment
    fit.sim <- lm(Y.sim ~ Z.sim)                   # simple regression
    p.value <- summary(fit.sim)$coefficients[2, 4] # p-value of treatment indicator
    significant.experiments[i] <- (p.value <= alpha)
  }
  powers_1[j] <- mean(significant.experiments)     # rejection rate = power for this N
}
# ==================================================
# Effect size 2: half the effect from Hainmueller et al. (2017)
# ==================================================
set.seed(555)
powers_2 <- rep(NA, length(possible.ns))           # Simulated power, half effect size
tau_half <- (0.02 - (-0.17)) / 2                   # Hypothesized treatment effect
#### Outer loop to vary the number of subjects ####
for (j in seq_along(possible.ns)) {
  N <- possible.ns[j]
  significant.experiments <- rep(NA, sims)
  #### Inner loop to conduct experiments "sims" times over for each N ####
  for (i in seq_len(sims)) {
    Y0 <- rnorm(n = N, mean = -0.17, sd = 0.49)    # control potential outcome
    Y1 <- Y0 + tau_half                            # treatment potential outcome
    Z.sim <- rbinom(n = N, size = 1, prob = .5)    # random assignment
    Y.sim <- Y1 * Z.sim + Y0 * (1 - Z.sim)         # reveal outcomes per assignment
    fit.sim <- lm(Y.sim ~ Z.sim)                   # simple regression
    p.value <- summary(fit.sim)$coefficients[2, 4] # p-value of treatment indicator
    significant.experiments[i] <- (p.value <= alpha)
  }
  powers_2[j] <- mean(significant.experiments)
}
# ==================================================
# Gather in datasets for plotting
# ==================================================
# Long format with both effect sizes, for the two-series plot. NOTE: the
# original code overwrote this object before plotting, so the first plot
# subset on a 'group' column that no longer existed and failed. Keeping two
# separately named data frames fixes that.
power_df_grouped <- data.frame(
  sample_size = rep(possible.ns, 2),
  power = c(powers_1, powers_2),
  group = factor(c(
    rep("Fuld effektstørrelse fra Hainmueller et al. (2017)", length(possible.ns)),
    rep("Halv effektstørrelse fra Hainmueller et al. (2017)", length(possible.ns))
  ))
)
# Single-series data (full effect size only) for the second plot
power_df <- data.frame(sample_size = possible.ns,
                       power = powers_1)
scaleFUN <- function(x) sprintf("%.1f", x) # one-decimal y-axis labels
# ==================================================
# Plot it
# ==================================================
library(ggplot2)
# Both effect sizes, with a dashed reference line at power = 0.9
ggplot(power_df_grouped, aes(x = sample_size, y = power)) +
  geom_segment(aes(x = 0, xend = 2000, y = 0.9, yend = 0.9), linetype = 2, size = 0.5) +
  geom_line(data = subset(power_df_grouped, group == "Fuld effektstørrelse fra Hainmueller et al. (2017)")) +
  geom_line(data = subset(power_df_grouped, group == "Halv effektstørrelse fra Hainmueller et al. (2017)")) +
  geom_point(aes(shape = group), size = 3, fill = "white", color = "black",
             alpha = 1) +
  scale_shape_manual(values = c(21, 24)) +
  labs(x = "Stikprøvestørrelse",
       y = "Power") +
  scale_x_continuous(breaks = seq(0, 2000, 250),
                     labels = c("0", "250", "500", "750", "1.000", "1.250", "1.500", "1.750", "2.000"),
                     limits = c(0, 2000)) +
  scale_y_continuous(breaks = seq(0.1, 1, .1),
                     labels = scaleFUN,
                     limits = c(0.1, 1)) +
  theme_bw() +
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        legend.direction = "vertical")
# Full effect size only
ggplot(power_df, aes(x = sample_size, y = power)) +
  geom_segment(aes(x = 0, xend = 2000, y = 0.9, yend = 0.9), linetype = 2, size = 0.5) +
  geom_line() +
  geom_point(size = 3, fill = "white", color = "black",
             shape = 21,
             alpha = 1) +
  labs(x = "Stikprøvestørrelse",
       y = "Power") +
  scale_x_continuous(breaks = seq(0, 2000, 250),
                     labels = c("0", "250", "500", "750", "1.000", "1.250", "1.500", "1.750", "2.000"),
                     limits = c(0, 2000)) +
  scale_y_continuous(breaks = seq(0.1, 1, .1),
                     labels = scaleFUN,
                     limits = c(0.1, 1)) +
  theme_bw() +
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        legend.direction = "vertical")
|
\name{adaptiveDesign_binomial}
\alias{adaptiveDesign_binomial}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Simulate adaptive design where control sample size is adjusted according to ESS for binomial outcome
}
\description{
Simulate adaptive design where control sample size is adapted according to prior effective sample size for binomial outcome as in Schmidli et al. (2014).
}
\usage{
adaptiveDesign_binomial(ctl.prior, treat.prior,
N1, Ntarget, Nmin, M,
pc, pt,
discard.prior = TRUE,
vague = mixbeta(c(1, 1, 1)),
ess = "ecss",
ehss.method = "mix.moment", subtractESSofVague=TRUE,
min.ecss, D=MSE,
decision)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ctl.prior}{
RBesT betaMix object (or powerprior object created by \code{\link{as.powerprior}}) as prior for the control group}
\item{treat.prior}{
RBesT betaMix object (or powerprior object created by \code{\link{as.powerprior}}) as prior for the treatment group
}
\item{N1}{
Sample size in each group at interim
}
\item{Ntarget}{
Target sample size in control group
}
\item{Nmin}{
Minimum number of samples in control group after interim analysis
}
\item{M}{
Final sample size in treatment group
}
\item{pc}{
True control rate
}
\item{pt}{
True treatment rate
}
\item{discard.prior}{
Replace prior by vague prior if ESS<0?
}
\item{vague}{
RBesT betaMix object (single component mixture prior) serving as baseline vague prior
}
\item{ess}{
either "ecss" or "ehss" for effective current or historical sample size, respectively.
}
\item{ehss.method}{
if ess=="ehss". Specify version of EHSS as in \code{\link{ehss}}.
}
\item{subtractESSofVague}{
Removes prior ESS of vague component from interim EHSS
}
\item{min.ecss}{
if ess=="ecss". Minimal ECSS of interest (negative). A large absolute value of min.ecss is computational expensive, could be set to -1 if \code{discard.prior=TRUE} and no interest in the ECSS estimate itself.
}
\item{D}{
A function that measures informativeness, e.g. \code{\link{MSE}} or a user-specified function
}
\item{decision}{
function created by \code{\link[RBesT]{decision2S}}.
}
}
\details{
The traditional approach to prior effective sample size (prior ESS) is aimed at quantifying prior informativeness, but is not aimed at detecting potential prior-data conflict.
The ECSS computes the prior effective sample size in terms of samples from the current data model (i.e., samples with characteristics consistent with the current trial). Under extreme prior-data conflict, the prior may account for a negative number of samples, showing that information is subtracted, rather than added, by the elicited prior. The ECSS quantifies the number of current samples to be added or subtracted to the likelihood in order to obtain a posterior inference equivalent to that of a baseline prior model (e.g. in terms of mean squared error, MSE). For further details, see Wiesenfarth and Calderazzo (2019).
Standard approach uses effective historical sample size (\code{ess="ehss"}), while Wiesenfarth and Calderazzo (2019) use the effective current sample size (\code{ess="ecss"}).
When the ECSS is negative, the design provides the option of discarding the prior (discard.prior=TRUE).
Extensive documentation is given in the vignette.
}
%\value{
%}
\references{
Schmidli, H., Gsteiger, S., Roychoudhury, S., O'Hagan, A., Spiegelhalter, D., and Neuenschwander, B. (2014). Robust meta-analytic-predictive priors in clinical trials with historical control
information. Biometrics, 70(4):1023-103
Wiesenfarth, M., Calderazzo, S. (2019). Quantification of Prior Impact in Terms of Effective Current Sample Size. Submitted.
}
\author{
Manuel Wiesenfarth
}
%\note{
%}
\seealso{
\code{vignette("robustMAP",package="RBesT")}
}
\examples{
# see
# vignette("vignetteDesign", package = "ESS")
}
| /man/adaptiveDesign_binomial.Rd | no_license | DKFZ-biostats/ESS | R | false | false | 4,010 | rd | \name{adaptiveDesign_binomial}
\alias{adaptiveDesign_binomial}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Simulate adaptive design where control sample size is adjusted according to ESS for binomial outcome
}
\description{
Simulate adaptive design where control sample size is adapted according to prior effective sample size for binomial outcome as in Schmidli et al. (2014).
}
\usage{
adaptiveDesign_binomial(ctl.prior, treat.prior,
N1, Ntarget, Nmin, M,
pc, pt,
discard.prior = TRUE,
vague = mixbeta(c(1, 1, 1)),
ess = "ecss",
ehss.method = "mix.moment", subtractESSofVague=TRUE,
min.ecss, D=MSE,
decision)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ctl.prior}{
RBesT betaMix object (or powerprior object created by \code{\link{as.powerprior}}) as prior for the control group}
\item{treat.prior}{
RBesT betaMix object (or powerprior object created by \code{\link{as.powerprior}}) as prior for the treatment group
}
\item{N1}{
Sample size in each group at interim
}
\item{Ntarget}{
Target sample size in control group
}
\item{Nmin}{
Minimum number of samples in control group after interim analysis
}
\item{M}{
Final sample size in treatment group
}
\item{pc}{
True control rate
}
\item{pt}{
True treatment rate
}
\item{discard.prior}{
Replace prior by vague prior if ESS<0?
}
\item{vague}{
RBesT betaMix object (single component mixture prior) serving as baseline vague prior
}
\item{ess}{
either "ecss" or "ehss" for effective current or historical sample size, respectively.
}
\item{ehss.method}{
if ess=="ehss". Specify version of EHSS as in \code{\link{ehss}}.
}
\item{subtractESSofVague}{
Removes prior ESS of vague component from interim EHSS
}
\item{min.ecss}{
if ess=="ecss". Minimal ECSS of interest (negative). A large absolute value of min.ecss is computational expensive, could be set to -1 if \code{discard.prior=TRUE} and no interest in the ECSS estimate itself.
}
\item{D}{
A function that measures informativeness, e.g. \code{\link{MSE}} or a user-specified function
}
\item{decision}{
function created by \code{\link[RBesT]{decision2S}}.
}
}
\details{
The traditional approach to prior effective sample size (prior ESS) is aimed at quantifying prior informativeness, but is not aimed at detecting potential prior-data conflict.
The ECSS computes the prior effective sample size in terms of samples from the current data model (i.e., samples with characteristics consistent with the current trial). Under extreme prior-data conflict, the prior may account for a negative number of samples, showing that information is subtracted, rather than added, by the elicited prior. The ECSS quantifies the number of current samples to be added or subtracted to the likelihood in order to obtain a posterior inference equivalent to that of a baseline prior model (e.g. in terms of mean squared error, MSE). For further details, see Wiesenfarth and Calderazzo (2019).
Standard approach uses effective historical sample size (\code{ess="ehss"}), while Wiesenfarth and Calderazzo (2019) use the effective current sample size (\code{ess="ecss"}).
When the ECSS is negative, the design provides the option of discarding the prior (discard.prior=TRUE).
Extensive documentation is given in the vignette.
}
%\value{
%}
\references{
Schmidli, H., Gsteiger, S., Roychoudhury, S., O'Hagan, A., Spiegelhalter, D., and Neuenschwander, B. (2014). Robust meta-analytic-predictive priors in clinical trials with historical control
information. Biometrics, 70(4):1023-103
Wiesenfarth, M., Calderazzo, S. (2019). Quantification of Prior Impact in Terms of Effective Current Sample Size. Submitted.
}
\author{
Manuel Wiesenfarth
}
%\note{
%}
\seealso{
\code{vignette("robustMAP",package="RBesT")}
}
\examples{
# see
# vignette("vignetteDesign", package = "ESS")
}
|
# Unroot a Newick tree with ape. The file path suggests this prepares input
# for codeml (PAML) — TODO confirm against the pipeline that consumes it.
library(ape)
testtree <- read.tree("10635_0.txt")   # parse the Newick-format tree
unrooted_tr <- unroot(testtree)        # collapse the root node
write.tree(unrooted_tr, file="10635_0_unrooted.txt") | /codeml_files/newick_trees_processed/10635_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
testtree <- read.tree("10635_0.txt")   # parse the Newick-format tree
unrooted_tr <- unroot(testtree)        # collapse the root node
write.tree(unrooted_tr, file="10635_0_unrooted.txt") |
\name{difMH}
\alias{difMH}
\alias{print.MH}
\alias{plot.MH}
\title{Mantel-Haenszel DIF method}
\description{
Performs DIF detection using Mantel-Haenszel method.
}
\usage{
difMH(Data, group, focal.name , anchor = NULL, match = "score", MHstat = "MHChisq",
correct = TRUE, exact = FALSE, alpha = 0.05, purify = FALSE, nrIter = 10,
p.adjust.method = NULL, save.output = FALSE, output = c("out", "default"))
\method{print}{MH}(x, ...)
\method{plot}{MH}(x, pch = 8, number = TRUE, col = "red", save.plot = FALSE,
save.options = c("plot", "default", "pdf"), ...)
}
\arguments{
\item{Data}{numeric: either the data matrix only, or the data matrix plus the vector of group membership. See \bold{Details}.}
\item{group}{numeric or character: either the vector of group membership or the column indicator (within \code{data}) of group membership. See \bold{Details}.}
\item{focal.name}{numeric or character indicating the level of \code{group} which corresponds to the focal group.}
\item{anchor}{either \code{NULL} (default) or a vector of item names (or identifiers) to specify the anchor items. See \bold{Details}.}
\item{match}{specifies the type of matching criterion. Can be either \code{"score"} (default) to compute the test score, or any continuous or discrete variable with the same length as the number of rows of \code{Data}. See \bold{Details}.}
\item{MHstat}{character: specifies the DIF statistic to be used for DIF identification. Possible values are \code{"MHChisq"} (default) and \code{"logOR"}.
See \bold{Details }.}
\item{correct}{logical: should the continuity correction be used? (default is \code{TRUE})}
\item{exact}{logical: should an exact test be computed? (default is \code{FALSE}).}
\item{alpha}{numeric: significance level (default is 0.05).}
\item{purify}{logical: should the method be used iteratively to purify the set of anchor items? (default is FALSE).}
\item{nrIter}{numeric: the maximal number of iterations in the item purification process (default is 10).}
\item{p.adjust.method}{either \code{NULL} (default) or the acronym of the method for p-value adjustment for multiple comparisons. See \bold{Details}.}
\item{save.output}{logical: should the output be saved into a text file? (Default is \code{FALSE}).}
\item{output}{character: a vector of two components. The first component is the name of the output file, the second component is either the file path or
\code{"default"} (default value). See \bold{Details}.}
\item{x}{the result from a \code{MH} class object.}
\item{pch, col}{type of usual \code{pch} and \code{col} graphical options.}
\item{number}{logical: should the item number identification be printed (default is \code{TRUE}).}
\item{save.plot}{logical: should the plot be saved into a separate file? (default is \code{FALSE}).}
\item{save.options}{character: a vector of three components. The first component is the name of the output file, the second component is either the file path or
\code{"default"} (default value), and the third component is the file extension, either \code{"pdf"} (default) or \code{"jpeg"}.
See \bold{Details}.}
\item{...}{other generic parameters for the \code{plot} or the \code{print} functions.}
}
\value{
A list of class "MH" with the following arguments:
\item{MH}{the values of the Mantel-Haenszel DIF statistics (either exact or asymptotic).}
\item{p.value}{the p-values for the Mantel-Haenszel statistics (either exact or asymptotic).}
\item{alphaMH}{the values of the Mantel-Haenszel estimates of common odds ratios. Returned only if \code{exact} is \code{FALSE}.}
\item{varLambda}{the values of the variances of the log odds-ratio statistics. Returned only if \code{exact} is \code{FALSE}.}
\item{MHstat}{the value of the \code{MHstat} argument. Returned only if \code{exact} is \code{FALSE}.}
\item{alpha}{the value of \code{alpha} argument.}
\item{thr}{the threshold (cut-score) for DIF detection. Returned only if \code{exact} is \code{FALSE}.}
\item{DIFitems}{either the column indicators of the items which were detected as DIF items, or "No DIF item detected".}
\item{correct}{the value of \code{correct} option.}
\item{exact}{the value of \code{exact} option.}
\item{match}{a character string, either \code{"score"} or \code{"matching variable"} depending on the \code{match} argument.}
\item{p.adjust.method}{the value of the \code{p.adjust.method} argument.}
\item{adjusted.p}{either \code{NULL} or the vector of adjusted p-values for multiple comparisons.}
\item{purification}{the value of \code{purify} option.}
\item{nrPur}{the number of iterations in the item purification process. Returned only if \code{purify} is \code{TRUE}.}
\item{difPur}{a binary matrix with one row per iteration in the item purification process and one column per item. Zeros and ones in the \emph{i}-th
row refer to items which were classified respectively as non-DIF and DIF items at the (\emph{i}-1)-th step. The first row corresponds to the initial
classification of the items. Returned only if \code{purify} is \code{TRUE}.}
\item{convergence}{logical indicating whether the iterative item purification process stopped before the maximal number \code{nrIter} of allowed iterations.
Returned only if \code{purify} is \code{TRUE}.}
\item{names}{the names of the items.}
\item{anchor.names}{the value of the \code{anchor} argument.}
\item{save.output}{the value of the \code{save.output} argument.}
\item{output}{the value of the \code{output} argument.}
}
\details{
The method of Mantel-Haenszel (1959) allows for detecting uniform differential item functioning without requiring an item response model approach.
The \code{Data} is a matrix whose rows correspond to the subjects and columns to the items. In addition, \code{Data} can hold the vector of group membership.
If so, \code{group} indicates the column of \code{Data} which corresponds to the group membership, either by specifying its name or by giving the column number.
Otherwise, \code{group} must be a vector of same length as \code{nrow(Data)}.
Missing values are allowed for item responses (not for group membership) but must be coded as \code{NA} values. They are discarded from sum-score computation.
The vector of group membership must hold only two different values, either as numeric or character. The focal group is defined by the value of the argument
\code{focal.name}.
The matching criterion can be either the test score or any other continuous or discrete variable to be passed in the \code{\link{mantelHaenszel}} function. This is specified by the \code{match} argument. By default, it takes the value \code{"score"} and the test score (i.e. raw score) is computed. The second option is to assign to \code{match} a vector of continuous or discrete numeric values, which acts as the matching criterion. Note that for consistency this vector should not belong to the \code{Data} matrix.
The DIF statistic is specified by the \code{MHstat} argument. By default, \code{MHstat} takes the value \code{"MHChisq"} and the Mantel-Haenszel chi-square
statistic is used. The other optional value is \code{"logOR"}, and the log odds-ratio statistic (that is, the log of \code{alphaMH} divided by the square root
of \code{varLambda}) is used. See Penfield and Camilli (2007), Philips and Holland (1987) and \code{\link{mantelHaenszel}} help file.
By default, the asymptotic Mantel-Haenszel statistic is computed. However, the exact statistics and related P-values can
be obtained by specifying the logical argument \code{exact} to \code{TRUE}. See Agresti (1990, 1992) for further
details about exact inference.
The threshold (or cut-score) for classifying items as DIF depends on the DIF statistic. With the Mantel-Haenszel chi-squared statistic (\code{MHstat=="MHChisq"}),
it is computed as the quantile of the chi-square distribution with lower-tail probability of one minus \code{alpha} and with one degree of freedom. With
the log odds-ratio statistic (\code{MHstat=="logOR"}), it is computed as the quantile of the standard normal distribution with lower-tail probability of
1-\code{alpha}/2. With exact inference, it is simply the \code{alpha} level since exact P-values are returned.
By default, the continuity correction factor -0.5 is used (Holland and Thayer, 1988). One can nevertheless remove it by specifying \code{correct=FALSE}.
In addition, the Mantel-Haenszel estimates of the common odds ratios \eqn{\alpha_{MH}} are used to measure the effect sizes of the items. These are obtained by
\eqn{\Delta_{MH} = -2.35 \log \alpha_{MH}} (Holland and Thayer, 1985). According to the ETS delta scale, the effect size of an item is classified as negligible
if \eqn{|\Delta_{MH}| \leq 1}, moderate if \eqn{1 \leq |\Delta_{MH}| \leq 1.5}, and large if \eqn{|\Delta_{MH}| \geq 1.5}. The values of the effect sizes,
together with the ETS classification, are printed with the output. Note that this is returned only for asymptotic tests, i.e. when \code{exact} is \code{FALSE}.
Item purification can be performed by setting \code{purify} to \code{TRUE}. Purification works as follows: if at least one item was detected as functioning
differently at some step of the process, then the data set of the next step consists in all items that are currently anchor (DIF free) items, plus the
tested item (if necessary). The process stops when either two successive applications of the method yield the same classifications of the items (Clauser and
Mazor, 1998), or when \code{nrIter} iterations are run without obtaining two successive identical classifications. In the latter case a warning message is printed.
Adjustment for multiple comparisons is possible with the argument \code{p.adjust.method}. The latter must be an acronym of one of the available adjustment methods of the \code{\link{p.adjust}} function. According to Kim and Oshima (2013), Holm and Benjamini-Hochberg adjustments (set respectively by \code{"Holm"} and \code{"BH"}) perform best for DIF purposes. See \code{\link{p.adjust}} function for further details. Note that item purification is performed on original statistics and p-values; in case of adjustment for multiple comparisons this is performed \emph{after} item purification.
A pre-specified set of anchor items can be provided through the \code{anchor} argument. It must be a vector of either item names (which must match exactly the column names of \code{Data} argument) or integer values (specifying the column numbers for item identification). In case anchor items are provided, they are used to compute the test score (matching criterion), including also the tested item. None of the anchor items are tested for DIF: the output separates anchor items and tested items and DIF results are returned only for the latter. Note also that item purification is not activated when anchor items are provided (even if \code{purify} is set to \code{TRUE}). By default it is \code{NULL} so that no anchor item is specified.
The output of the \code{difMH}, as displayed by the \code{print.MH} function, can be stored in a text file provided that \code{save.output} is set to \code{TRUE}
(the default value \code{FALSE} does not execute the storage). In this case, the name of the text file must be given as a character string into the first component
of the \code{output} argument (default name is \code{"out"}), and the path for saving the text file can be given through the second component of \code{output}. The
default value is \code{"default"}, meaning that the file will be saved in the current working directory. Any other path can be specified as a character string:
see the \bold{Examples} section for an illustration.
The \code{plot.MH} function displays the DIF statistics in a plot, with each item on the X axis. The type of point and the color are fixed by the usual \code{pch}
and \code{col} arguments. Option \code{number} permits to display the item numbers instead. Also, the plot can be stored in a figure file, either in PDF or JPEG
format. Fixing \code{save.plot} to \code{TRUE} allows this process. The figure is defined through the components of \code{save.options}. The first two components
perform similarly as those of the \code{output} argument. The third component is the figure format, with allowed values \code{"pdf"} (default) for PDF file and
\code{"jpeg"} for JPEG file. Note that no plot is returned for exact inference.
}
\references{
Agresti, A. (1990). \emph{Categorical data analysis}. New York: Wiley.
Agresti, A. (1992). A survey of exact inference for contingency tables. \emph{Statistical Science, 7}, 131-177. \doi{10.1214/ss/1177011454}
Holland, P. W. and Thayer, D. T. (1985). An alternative definition of the ETS delta scale of item difficulty. \emph{Research Report RR-85-43}. Princeton, NJ:
Educational Testing Service.
Holland, P. W. and Thayer, D. T. (1988). Differential item performance and the Mantel-Haenszel procedure. In H. Wainer and H. I. Braun (Ed.), \emph{Test validity}. Hillsdale, NJ: Lawrence Erlbaum Associates.
Kim, J., and Oshima, T. C. (2013). Effect of multiple testing adjustment in differential item functioning detection. \emph{Educational and Psychological Measurement, 73}, 458--470. \doi{10.1177/0013164412467033}
Magis, D., Beland, S., Tuerlinckx, F. and De Boeck, P. (2010). A general framework and an R package for the detection
of dichotomous differential item functioning. \emph{Behavior Research Methods, 42}, 847-862. \doi{10.3758/BRM.42.3.847}
Mantel, N. and Haenszel, W. (1959). Statistical aspects of the analysis of data from retrospective studies of disease. \emph{Journal of the National Cancer Institute, 22}, 719-748.
Penfield, R. D., and Camilli, G. (2007). Differential item functioning and item bias. In C. R. Rao and S. Sinharray (Eds.), \emph{Handbook of Statistics 26: Psychometrics} (pp. 125-167). Amsterdam, The Netherlands: Elsevier.
Philips, A., and Holland, P. W. (1987). Estimators of the Mantel-Haenszel log odds-ratio estimate. \emph{Biometrics, 43}, 425-431. \doi{10.2307/2531824}
Raju, N. S., Bode, R. K. and Larsen, V. S. (1989). An empirical assessment of the Mantel-Haenszel statistic to detect differential item functioning. \emph{Applied Measurement in Education, 2}, 1-13. \doi{10.1207/s15324818ame0201_1}
Uttaro, T. and Millsap, R. E. (1994). Factors influencing the Mantel-Haenszel procedure in the detection of differential item functioning. \emph{Applied Psychological Measurement, 18}, 15-25. \doi{10.1177/014662169401800102}
}
\author{
Sebastien Beland \cr
Collectif pour le Developpement et les Applications en Mesure et Evaluation (Cdame) \cr
Universite du Quebec a Montreal \cr
\email{sebastien.beland.1@hotmail.com}, \url{http://www.cdame.uqam.ca/} \cr
David Magis \cr
Department of Psychology, University of Liege \cr
Research Group of Quantitative Psychology and Individual Differences, KU Leuven \cr
\email{David.Magis@uliege.be}, \url{http://ppw.kuleuven.be/okp/home/} \cr
Gilles Raiche \cr
Collectif pour le Developpement et les Applications en Mesure et Evaluation (Cdame) \cr
Universite du Quebec a Montreal \cr
\email{raiche.gilles@uqam.ca}, \url{http://www.cdame.uqam.ca/} \cr
}
\seealso{
\code{\link{mantelHaenszel}}, \code{\link{dichoDif}}, \code{\link{p.adjust}}
}
\examples{
\dontrun{
# Loading of the verbal data
data(verbal)
# Excluding the "Anger" variable
verbal <- verbal[colnames(verbal) != "Anger"]
# Three equivalent settings of the data matrix and the group membership
r <- difMH(verbal, group = 25, focal.name = 1)
difMH(verbal, group = "Gender", focal.name = 1)
difMH(verbal[,1:24], group = verbal[,25], focal.name = 1)
# With log odds-ratio statistic
r2 <- difMH(verbal, group = 25, focal.name = 1, MHstat = "logOR")
# With exact inference
difMH(verbal, group = 25, focal.name = 1, exact = TRUE)
# Multiple comparisons adjustment using Benjamini-Hochberg method
difMH(verbal, group = 25, focal.name = 1, p.adjust.method = "BH")
# With item purification
difMH(verbal, group = "Gender", focal.name = 1, purify = TRUE)
difMH(verbal, group = "Gender", focal.name = 1, purify = TRUE, nrIter = 5)
# Without continuity correction and with 0.01 significance level
difMH(verbal, group = "Gender", focal.name = 1, alpha = 0.01, correct = FALSE)
# With items 1 to 5 set as anchor items
difMH(verbal, group = "Gender", focal.name = 1, anchor = 1:5)
difMH(verbal, group = "Gender", focal.name = 1, anchor = 1:5, purify = TRUE)
# Saving the output into the "MHresults.txt" file (and default path)
r <- difMH(verbal, group = 25, focal.name = 1, save.output = TRUE,
output = c("MHresults","default"))
# Graphical devices
plot(r)
plot(r2)
# Plotting results and saving it in a PDF figure
plot(r, save.plot = TRUE, save.options = c("plot", "default", "pdf"))
# Changing the path, JPEG figure
path <- "c:/Program Files/"
plot(r, save.plot = TRUE, save.options = c("plot", path, "jpeg"))
}
}
| /man/difMH.rd | no_license | cran/difR | R | false | false | 17,389 | rd | \name{difMH}
\alias{difMH}
\alias{print.MH}
\alias{plot.MH}
\title{Mantel-Haenszel DIF method}
\description{
Performs DIF detection using Mantel-Haenszel method.
}
\usage{
difMH(Data, group, focal.name , anchor = NULL, match = "score", MHstat = "MHChisq",
correct = TRUE, exact = FALSE, alpha = 0.05, purify = FALSE, nrIter = 10,
p.adjust.method = NULL, save.output = FALSE, output = c("out", "default"))
\method{print}{MH}(x, ...)
\method{plot}{MH}(x, pch = 8, number = TRUE, col = "red", save.plot = FALSE,
save.options = c("plot", "default", "pdf"), ...)
}
\arguments{
\item{Data}{numeric: either the data matrix only, or the data matrix plus the vector of group membership. See \bold{Details}.}
\item{group}{numeric or character: either the vector of group membership or the column indicator (within \code{data}) of group membership. See \bold{Details}.}
\item{focal.name}{numeric or character indicating the level of \code{group} which corresponds to the focal group.}
\item{anchor}{either \code{NULL} (default) or a vector of item names (or identifiers) to specify the anchor items. See \bold{Details}.}
\item{match}{specifies the type of matching criterion. Can be either \code{"score"} (default) to compute the test score, or any continuous or discrete variable with the same length as the number of rows of \code{Data}. See \bold{Details}.}
\item{MHstat}{character: specifies the DIF statistic to be used for DIF identification. Possible values are \code{"MHChisq"} (default) and \code{"logOR"}.
See \bold{Details }.}
\item{correct}{logical: should the continuity correction be used? (default is \code{TRUE})}
\item{exact}{logical: should an exact test be computed? (default is \code{FALSE}).}
\item{alpha}{numeric: significance level (default is 0.05).}
\item{purify}{logical: should the method be used iteratively to purify the set of anchor items? (default is \code{FALSE}).}
\item{nrIter}{numeric: the maximal number of iterations in the item purification process (default is 10).}
\item{p.adjust.method}{either \code{NULL} (default) or the acronym of the method for p-value adjustment for multiple comparisons. See \bold{Details}.}
\item{save.output}{logical: should the output be saved into a text file? (Default is \code{FALSE}).}
\item{output}{character: a vector of two components. The first component is the name of the output file, the second component is either the file path or
\code{"default"} (default value). See \bold{Details}.}
\item{x}{the result from a \code{MH} class object.}
\item{pch, col}{type of usual \code{pch} and \code{col} graphical options.}
\item{number}{logical: should the item number identification be printed (default is \code{TRUE}).}
\item{save.plot}{logical: should the plot be saved into a separate file? (default is \code{FALSE}).}
\item{save.options}{character: a vector of three components. The first component is the name of the output file, the second component is either the file path or
\code{"default"} (default value), and the third component is the file extension, either \code{"pdf"} (default) or \code{"jpeg"}.
See \bold{Details}.}
\item{...}{other generic parameters for the \code{plot} or the \code{print} functions.}
}
\value{
A list of class "MH" with the following arguments:
\item{MH}{the values of the Mantel-Haenszel DIF statistics (either exact or asymptotic).}
\item{p.value}{the p-values for the Mantel-Haenszel statistics (either exact or asymptotic).}
\item{alphaMH}{the values of the Mantel-Haenszel estimates of common odds ratios. Returned only if \code{exact} is \code{FALSE}.}
\item{varLambda}{the values of the variances of the log odds-ratio statistics. Returned only if \code{exact} is \code{FALSE}.}
\item{MHstat}{the value of the \code{MHstat} argument. Returned only if \code{exact} is \code{FALSE}.}
\item{alpha}{the value of \code{alpha} argument.}
\item{thr}{the threshold (cut-score) for DIF detection. Returned only if \code{exact} is \code{FALSE}.}
\item{DIFitems}{either the column indicators of the items which were detected as DIF items, or "No DIF item detected".}
\item{correct}{the value of \code{correct} option.}
\item{exact}{the value of \code{exact} option.}
\item{match}{a character string, either \code{"score"} or \code{"matching variable"} depending on the \code{match} argument.}
\item{p.adjust.method}{the value of the \code{p.adjust.method} argument.}
\item{adjusted.p}{either \code{NULL} or the vector of adjusted p-values for multiple comparisons.}
\item{purification}{the value of \code{purify} option.}
\item{nrPur}{the number of iterations in the item purification process. Returned only if \code{purify} is \code{TRUE}.}
\item{difPur}{a binary matrix with one row per iteration in the item purification process and one column per item. Zeros and ones in the \emph{i}-th
row refer to items which were classified respectively as non-DIF and DIF items at the (\emph{i}-1)-th step. The first row corresponds to the initial
classification of the items. Returned only if \code{purify} is \code{TRUE}.}
\item{convergence}{logical indicating whether the iterative item purification process stopped before the maximal number \code{nrIter} of allowed iterations.
Returned only if \code{purify} is \code{TRUE}.}
\item{names}{the names of the items.}
\item{anchor.names}{the value of the \code{anchor} argument.}
\item{save.output}{the value of the \code{save.output} argument.}
\item{output}{the value of the \code{output} argument.}
}
\details{
The method of Mantel-Haenszel (1959) allows for detecting uniform differential item functioning without requiring an item response model approach.
The \code{Data} is a matrix whose rows correspond to the subjects and columns to the items. In addition, \code{Data} can hold the vector of group membership.
If so, \code{group} indicates the column of \code{Data} which corresponds to the group membership, either by specifying its name or by giving the column number.
Otherwise, \code{group} must be a vector of same length as \code{nrow(Data)}.
Missing values are allowed for item responses (not for group membership) but must be coded as \code{NA} values. They are discarded from sum-score computation.
The vector of group membership must hold only two different values, either as numeric or character. The focal group is defined by the value of the argument
\code{focal.name}.
The matching criterion can be either the test score or any other continuous or discrete variable to be passed in the \code{\link{mantelHaenszel}} function. This is specified by the \code{match} argument. By default, it takes the value \code{"score"} and the test score (i.e. raw score) is computed. The second option is to assign to \code{match} a vector of continuous or discrete numeric values, which acts as the matching criterion. Note that for consistency this vector should not belong to the \code{Data} matrix.
The DIF statistic is specified by the \code{MHstat} argument. By default, \code{MHstat} takes the value \code{"MHChisq"} and the Mantel-Haenszel chi-square
statistic is used. The other optional value is \code{"logOR"}, and the log odds-ratio statistic (that is, the log of \code{alphaMH} divided by the square root
of \code{varLambda}) is used. See Penfield and Camilli (2007), Philips and Holland (1987) and \code{\link{mantelHaenszel}} help file.
By default, the asymptotic Mantel-Haenszel statistic is computed. However, the exact statistics and related P-values can
be obtained by specifying the logical argument \code{exact} to \code{TRUE}. See Agresti (1990, 1992) for further
details about exact inference.
The threshold (or cut-score) for classifying items as DIF depends on the DIF statistic. With the Mantel-Haenszel chi-squared statistic (\code{MHstat=="MHChisq"}),
it is computed as the quantile of the chi-square distribution with lower-tail probability of one minus \code{alpha} and with one degree of freedom. With
the log odds-ratio statistic (\code{MHstat=="logOR"}), it is computed as the quantile of the standard normal distribution with lower-tail probability of
1-\code{alpha}/2. With exact inference, it is simply the \code{alpha} level since exact P-values are returned.
By default, the continuity correction factor -0.5 is used (Holland and Thayer, 1988). One can nevertheless remove it by specifying \code{correct=FALSE}.
In addition, the Mantel-Haenszel estimates of the common odds ratios \eqn{\alpha_{MH}} are used to measure the effect sizes of the items. These are obtained by
\eqn{\Delta_{MH} = -2.35 \log \alpha_{MH}} (Holland and Thayer, 1985). According to the ETS delta scale, the effect size of an item is classified as negligible
if \eqn{|\Delta_{MH}| \leq 1}, moderate if \eqn{1 \leq |\Delta_{MH}| \leq 1.5}, and large if \eqn{|\Delta_{MH}| \geq 1.5}. The values of the effect sizes,
together with the ETS classification, are printed with the output. Note that this is returned only for asymptotic tests, i.e. when \code{exact} is \code{FALSE}.
Item purification can be performed by setting \code{purify} to \code{TRUE}. Purification works as follows: if at least one item was detected as functioning
differently at some step of the process, then the data set of the next step consists in all items that are currently anchor (DIF free) items, plus the
tested item (if necessary). The process stops when either two successive applications of the method yield the same classifications of the items (Clauser and
Mazor, 1998), or when \code{nrIter} iterations are run without obtaining two successive identical classifications. In the latter case a warning message is printed.
Adjustment for multiple comparisons is possible with the argument \code{p.adjust.method}. The latter must be an acronym of one of the available adjustment methods of the \code{\link{p.adjust}} function. According to Kim and Oshima (2013), Holm and Benjamini-Hochberg adjustments (set respectively by \code{"Holm"} and \code{"BH"}) perform best for DIF purposes. See \code{\link{p.adjust}} function for further details. Note that item purification is performed on original statistics and p-values; in case of adjustment for multiple comparisons this is performed \emph{after} item purification.
A pre-specified set of anchor items can be provided through the \code{anchor} argument. It must be a vector of either item names (which must match exactly the column names of \code{Data} argument) or integer values (specifying the column numbers for item identification). In case anchor items are provided, they are used to compute the test score (matching criterion), including also the tested item. None of the anchor items are tested for DIF: the output separates anchor items and tested items and DIF results are returned only for the latter. Note also that item purification is not activated when anchor items are provided (even if \code{purify} is set to \code{TRUE}). By default it is \code{NULL} so that no anchor item is specified.
The output of the \code{difMH}, as displayed by the \code{print.MH} function, can be stored in a text file provided that \code{save.output} is set to \code{TRUE}
(the default value \code{FALSE} does not execute the storage). In this case, the name of the text file must be given as a character string into the first component
of the \code{output} argument (default name is \code{"out"}), and the path for saving the text file can be given through the second component of \code{output}. The
default value is \code{"default"}, meaning that the file will be saved in the current working directory. Any other path can be specified as a character string:
see the \bold{Examples} section for an illustration.
The \code{plot.MH} function displays the DIF statistics in a plot, with each item on the X axis. The type of point and the color are fixed by the usual \code{pch}
and \code{col} arguments. Option \code{number} permits to display the item numbers instead. Also, the plot can be stored in a figure file, either in PDF or JPEG
format. Fixing \code{save.plot} to \code{TRUE} allows this process. The figure is defined through the components of \code{save.options}. The first two components
perform similarly as those of the \code{output} argument. The third component is the figure format, with allowed values \code{"pdf"} (default) for PDF file and
\code{"jpeg"} for JPEG file. Note that no plot is returned for exact inference.
}
\references{
Agresti, A. (1990). \emph{Categorical data analysis}. New York: Wiley.
Agresti, A. (1992). A survey of exact inference for contingency tables. \emph{Statistical Science, 7}, 131-177. \doi{10.1214/ss/1177011454}
Holland, P. W. and Thayer, D. T. (1985). An alternative definition of the ETS delta scale of item difficulty. \emph{Research Report RR-85-43}. Princeton, NJ:
Educational Testing Service.
Holland, P. W. and Thayer, D. T. (1988). Differential item performance and the Mantel-Haenszel procedure. In H. Wainer and H. I. Braun (Ed.), \emph{Test validity}. Hillsdale, NJ: Lawrence Erlbaum Associates.
Kim, J., and Oshima, T. C. (2013). Effect of multiple testing adjustment in differential item functioning detection. \emph{Educational and Psychological Measurement, 73}, 458--470. \doi{10.1177/0013164412467033}
Magis, D., Beland, S., Tuerlinckx, F. and De Boeck, P. (2010). A general framework and an R package for the detection
of dichotomous differential item functioning. \emph{Behavior Research Methods, 42}, 847-862. \doi{10.3758/BRM.42.3.847}
Mantel, N. and Haenszel, W. (1959). Statistical aspects of the analysis of data from retrospective studies of disease. \emph{Journal of the National Cancer Institute, 22}, 719-748.
Penfield, R. D., and Camilli, G. (2007). Differential item functioning and item bias. In C. R. Rao and S. Sinharray (Eds.), \emph{Handbook of Statistics 26: Psychometrics} (pp. 125-167). Amsterdam, The Netherlands: Elsevier.
Philips, A., and Holland, P. W. (1987). Estimators of the Mantel-Haenszel log odds-ratio estimate. \emph{Biometrics, 43}, 425-431. \doi{10.2307/2531824}
Raju, N. S., Bode, R. K. and Larsen, V. S. (1989). An empirical assessment of the Mantel-Haenszel statistic to detect differential item functioning. \emph{Applied Measurement in Education, 2}, 1-13. \doi{10.1207/s15324818ame0201_1}
Uttaro, T. and Millsap, R. E. (1994). Factors influencing the Mantel-Haenszel procedure in the detection of differential item functioning. \emph{Applied Psychological Measurement, 18}, 15-25. \doi{10.1177/014662169401800102}
}
\author{
Sebastien Beland \cr
Collectif pour le Developpement et les Applications en Mesure et Evaluation (Cdame) \cr
Universite du Quebec a Montreal \cr
\email{sebastien.beland.1@hotmail.com}, \url{http://www.cdame.uqam.ca/} \cr
David Magis \cr
Department of Psychology, University of Liege \cr
Research Group of Quantitative Psychology and Individual Differences, KU Leuven \cr
\email{David.Magis@uliege.be}, \url{http://ppw.kuleuven.be/okp/home/} \cr
Gilles Raiche \cr
Collectif pour le Developpement et les Applications en Mesure et Evaluation (Cdame) \cr
Universite du Quebec a Montreal \cr
\email{raiche.gilles@uqam.ca}, \url{http://www.cdame.uqam.ca/} \cr
}
\seealso{
\code{\link{mantelHaenszel}}, \code{\link{dichoDif}}, \code{\link{p.adjust}}
}
\examples{
\dontrun{
# Loading of the verbal data
data(verbal)
# Excluding the "Anger" variable
verbal <- verbal[colnames(verbal) != "Anger"]
# Three equivalent settings of the data matrix and the group membership
r <- difMH(verbal, group = 25, focal.name = 1)
difMH(verbal, group = "Gender", focal.name = 1)
difMH(verbal[,1:24], group = verbal[,25], focal.name = 1)
# With log odds-ratio statistic
r2 <- difMH(verbal, group = 25, focal.name = 1, MHstat = "logOR")
# With exact inference
difMH(verbal, group = 25, focal.name = 1, exact = TRUE)
# Multiple comparisons adjustment using Benjamini-Hochberg method
difMH(verbal, group = 25, focal.name = 1, p.adjust.method = "BH")
# With item purification
difMH(verbal, group = "Gender", focal.name = 1, purify = TRUE)
difMH(verbal, group = "Gender", focal.name = 1, purify = TRUE, nrIter = 5)
# Without continuity correction and with 0.01 significance level
difMH(verbal, group = "Gender", focal.name = 1, alpha = 0.01, correct = FALSE)
# With items 1 to 5 set as anchor items
difMH(verbal, group = "Gender", focal.name = 1, anchor = 1:5)
difMH(verbal, group = "Gender", focal.name = 1, anchor = 1:5, purify = TRUE)
# Saving the output into the "MHresults.txt" file (and default path)
r <- difMH(verbal, group = 25, focal.name = 1, save.output = TRUE,
output = c("MHresults","default"))
# Graphical devices
plot(r)
plot(r2)
# Plotting results and saving it in a PDF figure
plot(r, save.plot = TRUE, save.options = c("plot", "default", "pdf"))
# Changing the path, JPEG figure
path <- "c:/Program Files/"
plot(r, save.plot = TRUE, save.options = c("plot", path, "jpeg"))
}
}
|
# Auto-generated libFuzzer/valgrind regression input for the unexported
# diffrprojects:::dist_mat_absolute routine: a 56-element integer vector `x`
# (mostly zeros plus two extreme values) paired with an empty `y` -- an edge
# case recorded by the fuzzer.
testlist <- list(x = c(1284964172L, -720896L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
# Replay the recorded call; the test passes if this does not crash.
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962361-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 360 | r | testlist <- list(x = c(1284964172L, -720896L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
# Draw the pattern graph `g` over two touching grey circles and save the
# figure to "ivypi.pdf".
# NOTE(review): `g` (the graph) and `ll` (its fixed layout coordinate matrix)
# are defined earlier in the original file and are not visible in this chunk;
# the plotting arguments and graph.adjacency() below indicate igraph is in use.
pdf( file="ivypi.pdf", height=5, width=5 )
# Narrow margins (half a line) on all four sides.
par( mar=rep(1/2,4) )
# First pass: draw the graph in its fixed (unrescaled) layout.
plot( g, layout=ll ,
vertex.size=15, edge.arrow.size=1/2, edge.arrow.width=2,
edge.lty=1, edge.color="blue",
edge.width=4, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
rescale=FALSE)
# Two grey circles of radius 1/2, centred at (-1/2, 0) and (1/2, 0), in the
# same (data) coordinate system as the layout.
symbols( -1/2, 0, circles=1/2, add=TRUE, lwd=1/4 , inches=FALSE, col="grey" )
symbols( 1/2, 0, circles=1/2, add=TRUE , lwd=1/4, inches=FALSE, col="grey" )
# Second pass: redraw the graph on top so the edges are not hidden by the
# circles.
plot( g, layout=ll ,
vertex.size=15, edge.arrow.size=1/2, edge.arrow.width=2,
edge.lty=1, edge.color="blue",
edge.width=4, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
rescale=FALSE, add=TRUE )
dev.off()
# Adjacency matrix of the "pass" edges: 10 positions with symmetric
# connections 1-7, 2-6, 4-8 and 3-9.
A2 <- matrix(0, 10, 10 ) ## 10 positions
A2[1,7] <- 1; A2[7,1] <- 1
A2[6,2] <- 1; A2[2,6] <- 1
A2[4,8] <- 1; A2[8,4] <- 1
A2[3,9] <- 1; A2[9,3] <- 1
g2 <- graph.adjacency(A2)
# Same two-pass construction as above, but for the pass graph g2 with green
# edges, saved to "ivypi-pass.pdf". Reuses the layout `ll`.
pdf( file="ivypi-pass.pdf", height=5, width=5 )
par( mar=rep(1/2,4) )
plot( g2, layout=ll ,
vertex.size=15, edge.arrow.size=1/2, edge.arrow.width=2,
edge.lty=1, edge.color="green",
edge.width=4, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
rescale=FALSE)
symbols( -1/2, 0, circles=1/2, add=TRUE, lwd=1/4 , inches=FALSE, col="grey" )
symbols( 1/2, 0, circles=1/2, add=TRUE , lwd=1/4, inches=FALSE, col="grey" )
plot( g2, layout=ll ,
vertex.size=15, edge.arrow.size=1/2, edge.arrow.width=2,
edge.lty=1, edge.color="green",
edge.width=4, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
rescale=FALSE, add=TRUE )
dev.off()
| /docxRkey/twopionthree/foo.R | no_license | madjugglers/pattern-book | R | false | false | 1,569 | r |
# Draw the pattern graph `g` over two touching grey circles and save the
# figure to "ivypi.pdf".
# NOTE(review): `g` (the graph) and `ll` (its fixed layout coordinate matrix)
# are defined earlier in the original file and are not visible in this chunk;
# the plotting arguments and graph.adjacency() below indicate igraph is in use.
pdf( file="ivypi.pdf", height=5, width=5 )
# Narrow margins (half a line) on all four sides.
par( mar=rep(1/2,4) )
# First pass: draw the graph in its fixed (unrescaled) layout.
plot( g, layout=ll ,
vertex.size=15, edge.arrow.size=1/2, edge.arrow.width=2,
edge.lty=1, edge.color="blue",
edge.width=4, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
rescale=FALSE)
# Two grey circles of radius 1/2, centred at (-1/2, 0) and (1/2, 0), in the
# same (data) coordinate system as the layout.
symbols( -1/2, 0, circles=1/2, add=TRUE, lwd=1/4 , inches=FALSE, col="grey" )
symbols( 1/2, 0, circles=1/2, add=TRUE , lwd=1/4, inches=FALSE, col="grey" )
# Second pass: redraw the graph on top so the edges are not hidden by the
# circles.
plot( g, layout=ll ,
vertex.size=15, edge.arrow.size=1/2, edge.arrow.width=2,
edge.lty=1, edge.color="blue",
edge.width=4, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
rescale=FALSE, add=TRUE )
dev.off()
# Adjacency matrix of the "pass" edges: 10 positions with symmetric
# connections 1-7, 2-6, 4-8 and 3-9.
A2 <- matrix(0, 10, 10 ) ## 10 positions
A2[1,7] <- 1; A2[7,1] <- 1
A2[6,2] <- 1; A2[2,6] <- 1
A2[4,8] <- 1; A2[8,4] <- 1
A2[3,9] <- 1; A2[9,3] <- 1
g2 <- graph.adjacency(A2)
# Same two-pass construction as above, but for the pass graph g2 with green
# edges, saved to "ivypi-pass.pdf". Reuses the layout `ll`.
pdf( file="ivypi-pass.pdf", height=5, width=5 )
par( mar=rep(1/2,4) )
plot( g2, layout=ll ,
vertex.size=15, edge.arrow.size=1/2, edge.arrow.width=2,
edge.lty=1, edge.color="green",
edge.width=4, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
rescale=FALSE)
symbols( -1/2, 0, circles=1/2, add=TRUE, lwd=1/4 , inches=FALSE, col="grey" )
symbols( 1/2, 0, circles=1/2, add=TRUE , lwd=1/4, inches=FALSE, col="grey" )
plot( g2, layout=ll ,
vertex.size=15, edge.arrow.size=1/2, edge.arrow.width=2,
edge.lty=1, edge.color="green",
edge.width=4, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
rescale=FALSE, add=TRUE )
dev.off()
|
# Pointwise CDF of degradation model "M1" plus a logit-transformed
# (1 - alpha) confidence interval, built from the observed-information (OI)
# covariance matrix via the delta method.
#
# Arguments:
#   Par.M1           : parameter vector c(Eta, sEta, sE); sE is unpacked but
#                      never enters the CDF here, so its derivative entry
#                      stays 0 -- presumably intentional for this "D" variant,
#                      TODO confirm against the other model variants.
#   t                : vector of time points at which to evaluate the CDF.
#   Cov.Matrix.OI.M1 : 3x3 OI covariance matrix of the parameter estimates,
#                      ordered to match the rows of diff.cdf.M1 below.
#   alpha            : two-sided significance level for the interval.
#   W                : threshold appearing in the CDF argument (presumably
#                      the critical degradation level -- verify with callers).
#
# Returns a list with components cdf.M1, lcl.cdf.M1 and ucl.cdf.M1, each a
# vector of length length(t).
cdf.OICI.logit.M1.D <-
function( Par.M1, t, Cov.Matrix.OI.M1, alpha, W, ... ) {
Eta.M1 <- Par.M1[1]
sEta.M1 <- Par.M1[2]
sE.M1 <- Par.M1[3]
# Scalar worker: evaluates the CDF and its CI at one time point and returns
# c(cdf, lower, upper). It reads Eta.M1, sEta.M1, Cov.Matrix.OI.M1, alpha
# and W from the enclosing environment.
cdf.OICI.logit.M1.cal <- function(t){
cdf.M1 <- pnorm( ( Eta.M1 * t - W ) / ( sEta.M1 * t ) )
sEta2.M1 <- sEta.M1^2
# Symbolic form of the CDF, parameterised in (Eta.M1, sEta2.M1) so that D()
# can differentiate it by name; the eval() calls below pick up Eta.M1,
# sEta2.M1, t and W from this function's environment.
Ft.M1 <- expression( pnorm( ( Eta.M1 * t - W ) / ( sqrt(sEta2.M1) * t ) ) )
# Gradient of the CDF w.r.t. (Eta, sEta^2, third parameter); row 3 is left
# at 0 because Ft.M1 does not involve sE.
diff.cdf.M1 <- matrix( 0, 3, 1 )
diff.cdf.M1[1,1] <- eval(D(Ft.M1, "Eta.M1"))
diff.cdf.M1[2,1] <- eval(D(Ft.M1, "sEta2.M1"))
# Delta-method variance of the estimated CDF at this t.
var.cdf.OI.M1 <- c( t( diff.cdf.M1 ) %*% Cov.Matrix.OI.M1 %*% diff.cdf.M1 )
# Half-width on the logit scale; back-transforming keeps both limits in (0, 1).
W.OI.M1 <- exp( qnorm( 1 - alpha / 2 ) * sqrt( var.cdf.OI.M1 ) / ( cdf.M1 * ( 1 - cdf.M1 ) ) )
OICI.lower.cdf.M1 <- cdf.M1 / ( cdf.M1 + ( 1 - cdf.M1 ) * W.OI.M1 )
OICI.upper.cdf.M1 <- cdf.M1 / ( cdf.M1 + ( 1 - cdf.M1 ) / W.OI.M1 )
return( c( cdf.M1, OICI.lower.cdf.M1, OICI.upper.cdf.M1 ) )
}
# Apply the worker to every time point; each column of M corresponds to one t
# (row 1 = CDF, row 2 = lower limit, row 3 = upper limit).
M <- matrix( apply( as.matrix(t), 1, cdf.OICI.logit.M1.cal ), 3, length(t), byrow=FALSE )
cdf.M1.out <- M[1,]
OICI.lower.cdf.M1.out <- M[2,]
OICI.upper.cdf.M1.out <- M[3,]
list( cdf.M1 = cdf.M1.out, lcl.cdf.M1 = OICI.lower.cdf.M1.out, ucl.cdf.M1 = OICI.upper.cdf.M1.out )
}
| /R/cdf.OICI.logit.M1.D.R | no_license | cran/iDEMO | R | false | false | 1,235 | r | cdf.OICI.logit.M1.D <-
function( Par.M1, t, Cov.Matrix.OI.M1, alpha, W, ... ) {
Eta.M1 <- Par.M1[1]
sEta.M1 <- Par.M1[2]
sE.M1 <- Par.M1[3]
cdf.OICI.logit.M1.cal <- function(t){
cdf.M1 <- pnorm( ( Eta.M1 * t - W ) / ( sEta.M1 * t ) )
sEta2.M1 <- sEta.M1^2
Ft.M1 <- expression( pnorm( ( Eta.M1 * t - W ) / ( sqrt(sEta2.M1) * t ) ) )
diff.cdf.M1 <- matrix( 0, 3, 1 )
diff.cdf.M1[1,1] <- eval(D(Ft.M1, "Eta.M1"))
diff.cdf.M1[2,1] <- eval(D(Ft.M1, "sEta2.M1"))
var.cdf.OI.M1 <- c( t( diff.cdf.M1 ) %*% Cov.Matrix.OI.M1 %*% diff.cdf.M1 )
W.OI.M1 <- exp( qnorm( 1 - alpha / 2 ) * sqrt( var.cdf.OI.M1 ) / ( cdf.M1 * ( 1 - cdf.M1 ) ) )
OICI.lower.cdf.M1 <- cdf.M1 / ( cdf.M1 + ( 1 - cdf.M1 ) * W.OI.M1 )
OICI.upper.cdf.M1 <- cdf.M1 / ( cdf.M1 + ( 1 - cdf.M1 ) / W.OI.M1 )
return( c( cdf.M1, OICI.lower.cdf.M1, OICI.upper.cdf.M1 ) )
}
M <- matrix( apply( as.matrix(t), 1, cdf.OICI.logit.M1.cal ), 3, length(t), byrow=FALSE )
cdf.M1.out <- M[1,]
OICI.lower.cdf.M1.out <- M[2,]
OICI.upper.cdf.M1.out <- M[3,]
list( cdf.M1 = cdf.M1.out, lcl.cdf.M1 = OICI.lower.cdf.M1.out, ucl.cdf.M1 = OICI.upper.cdf.M1.out )
}
|
# Evaluates RF performance on a common set of genes attained by running
# variable selection on all samples.
#
# Workflow:
#   1. load the vita-filtered expression matrix (samples x genes),
#   2. split samples by vintage year (2017 vs 2019) and derive the `site`
#      label from the sample rownames,
#   3. tune + train a ranger random forest on one year and validate on the
#      other year's samples (both directions),
#   4. compare permutation variable importance between the two models.
library(readr)
library(dplyr)
library(ggplot2)
library(ranger)
library(mlr)
library(tuneRanger)
library(tibble)
# NOTE(review): evaluate_model() used below is defined in these sourced
# snakemake inputs; source them (or equivalents) before running interactively.
# source(snakemake@input[['eval_model']])
# source(snakemake@input[['ggconfusion']])
set.seed(1)

# Sample identifiers live in column "X1"; promote them to rownames.
# FIX: the original chained `as.data.frame(filt)`, which -- because magrittr
# inserts the LHS as the *first* argument -- passed the not-yet-created
# object `filt` as the `row.names` argument of as.data.frame(). A bare
# as.data.frame() is the intended conversion.
filt <- read_csv("rna_seq/site/site_vita_all_filt.csv") %>%
  as.data.frame() %>%
  column_to_rownames("X1")

## separate years (rownames look like "<year>_<site>_..." -- TODO confirm)
filt19 <- grepl(pattern = "2019", x = rownames(filt))
filt19 <- filt[filt19, ]
filt17 <- grepl(pattern = "2017", x = rownames(filt))
filt17 <- filt[filt17, ]

# Derive the site label by stripping the year prefix and then everything
# after the next underscore.
filt19$site <- gsub("2019_", "", rownames(filt19))
filt19$site <- gsub("_.*", "", filt19$site)
filt17$site <- gsub("2017_", "", rownames(filt17))
filt17$site <- gsub("_.*", "", filt17$site)

# train 2017, validate on 2019 --------------------------------------------
# tune: mlr tasks require syntactic column names, so tune on a renamed copy;
# the final model is still fit on the original column names below.
tmp <- filt17
colnames(tmp) <- make.names(colnames(tmp)) # tmp change names to make compatible with tuning
task <- makeClassifTask(data = tmp, target = "site") # make an mlr task with filt17
res <- tuneRanger(task, num.threads = 3) # run tuning process
write_tsv(res$recommended.pars, "rna_seq/site/site_2017train_rec_pars.tsv") # write model parameters to a file

# extract model parameters and use to build an optimal RF
filt17$site <- as.factor(filt17$site) # convert response to a factor
optimal_rf17 <- ranger(
  dependent.variable.name = "site",
  mtry = res$recommended.pars$mtry,
  num.trees = 10000,
  data = filt17,
  sample.fraction = res$recommended.pars$sample.fraction,
  min.node.size = res$recommended.pars$min.node.size,
  seed = 1,
  importance = 'permutation',
  local.importance = TRUE
)
# View(optimal_rf17$variable.importance.local)
saveRDS(optimal_rf17, file = "rna_seq/site/site_2017train_optimal_rf.RDS")

# evaluate the accuracy of the model and generate a confusion matrix
evaluate_model(optimal_ranger = optimal_rf17, data = filt17, reference_class = filt17$site,
               plt_title = "2017 Training Performance") # eval training data -- 2017
# validation data
evaluate_model(optimal_ranger = optimal_rf17, data = filt19, reference_class = filt19$site,
               plt_title = "2019 Validation Performance") # eval 2017 model on 2019 data
# 46.66% acc

# train 2019, validate on 2017 --------------------------------------------
# tune (same procedure as above, with the years swapped)
tmp <- filt19
colnames(tmp) <- make.names(colnames(tmp)) # tmp change names to make compatible with tuning
task <- makeClassifTask(data = tmp, target = "site") # make an mlr task with filt19
res <- tuneRanger(task, num.threads = 3) # run tuning process
write_tsv(res$recommended.pars, "rna_seq/site/site_2019train_rec_pars.tsv") # write model parameters to a file

# extract model parameters and use to build an optimal RF
filt19$site <- as.factor(filt19$site) # convert response to a factor
optimal_rf19 <- ranger(
  dependent.variable.name = "site",
  mtry = res$recommended.pars$mtry,
  num.trees = 10000,
  data = filt19,
  sample.fraction = res$recommended.pars$sample.fraction,
  min.node.size = res$recommended.pars$min.node.size,
  seed = 1,
  importance = 'permutation',
  local.importance = TRUE
)
saveRDS(optimal_rf19, file = "rna_seq/site/site_2019train_optimal_rf.RDS")

# evaluate the accuracy of the model and generate a confusion matrix
evaluate_model(optimal_ranger = optimal_rf19, data = filt19, reference_class = filt19$site,
               plt_title = "2019 Training Performance") # eval training data -- 2019
# validation data
evaluate_model(optimal_ranger = optimal_rf19, data = filt17, reference_class = filt17$site,
               plt_title = "2017 Validation Performance") # eval 2019 model on 2017 data
# 43% acc

# eval variable importance/local importance -------------------------------
# Which gene drives the casewise (local) importance for two example samples.
which.max(optimal_rf17$variable.importance.local[1, ])
which.max(optimal_rf17$variable.importance.local[26, ])

# Rank genes by global permutation importance, keeping positive scores only.
var17 <- data.frame(gene = names(optimal_rf17$variable.importance), imp = optimal_rf17$variable.importance)
var17 <- var17 %>%
  filter(imp > 0) %>%
  arrange(desc(imp))
var19 <- data.frame(gene = names(optimal_rf19$variable.importance), imp = optimal_rf19$variable.importance)
var19 <- var19 %>%
  filter(imp > 0) %>%
  arrange(desc(imp))

# Overlap between the two years' importance rankings (top 100 and overall).
table(var19$gene[1:100] %in% var17$gene[1:100])
table(var19$gene %in% var17$gene)
| /rna_seq/site/rf_year.R | no_license | montpetitlab/Reiter_et_al_2020_SigofSite | R | false | false | 4,491 | r | # Evaluates RF performance on common set of genes attained by running variable
# selection on all samples.
library(readr)
library(dplyr)
library(ggplot2)
library(ranger)
library(mlr)
library(tuneRanger)
library(tibble)
# source(snakemake@input[['eval_model']])
# source(snakemake@input[['ggconfusion']])
set.seed(1)
filt <- read_csv("rna_seq/site/site_vita_all_filt.csv") %>%
as.data.frame(filt) %>%
column_to_rownames("X1")
## separate years
filt19 <- grepl(pattern = "2019", x = rownames(filt))
filt19 <- filt[filt19, ]
filt17 <- grepl(pattern = "2017", x = rownames(filt))
filt17 <- filt[filt17, ]
filt19$site <- gsub("2019_", "", rownames(filt19))
filt19$site <- gsub("_.*", "", filt19$site)
filt17$site <- gsub("2017_", "", rownames(filt17))
filt17$site <- gsub("_.*", "", filt17$site)
# train 2017, validate on 2019 --------------------------------------------
# tune
# tuneRanger/mlr require syntactic column names, so tune on a renamed copy;
# the final model is still fit on the original filt17 columns.
tmp <- filt17
colnames(tmp) <- make.names(colnames(tmp)) # tmp change names to make compatible with tuning
task <- makeClassifTask(data = tmp, target = "site") # make an mlr task with filt17
res <- tuneRanger(task, num.threads = 3) # run tuning process
write_tsv(res$recommended.pars, "rna_seq/site/site_2017train_rec_pars.tsv") # write model parameters to a file
# extract model parameters and use to build an optimal RF
filt17$site <- as.factor(filt17$site) # convert response to a factor
# Fit the final 2017 model with the tuned hyperparameters; permutation
# importance (incl. casewise/local) is kept for the downstream gene ranking.
optimal_rf17 <- ranger(
dependent.variable.name = "site",
mtry = res$recommended.pars$mtry,
num.trees = 10000,
data = filt17,
sample.fraction = res$recommended.pars$sample.fraction,
min.node.size = res$recommended.pars$min.node.size,
seed = 1,
importance = 'permutation',
local.importance = T
)
# View(optimal_rf17$variable.importance.local)
saveRDS(optimal_rf17, file = "rna_seq/site/site_2017train_optimal_rf.RDS")
# evaluate the accuracy of the model and generate a confusion matrix
# (evaluate_model() comes from the sourced helper scripts, commented out above)
evaluate_model(optimal_ranger = optimal_rf17, data = filt17, reference_class = filt17$site,
plt_title = "2017 Training Performance") # eval training data -- 2017
# validation data
evaluate_model(optimal_ranger = optimal_rf17, data = filt19, reference_class = filt19$site,
plt_title = "2019 Validation Performance") # eval 2017 model on 2019 data
# 46.66% acc
# train 2019, validate on 2017 --------------------------------------------
# tune
# Same tune-then-fit procedure as above, with the seasons swapped.
tmp <- filt19
colnames(tmp) <- make.names(colnames(tmp)) # tmp change names to make compatible with tuning
task <- makeClassifTask(data = tmp, target = "site") # make an mlr task with filt19
res <- tuneRanger(task, num.threads = 3) # run tuning process
write_tsv(res$recommended.pars, "rna_seq/site/site_2019train_rec_pars.tsv") # write model parameters to a file
# extract model parameters and use to build an optimal RF
filt19$site <- as.factor(filt19$site) # convert response to a factor
optimal_rf19 <- ranger(
dependent.variable.name = "site",
mtry = res$recommended.pars$mtry,
num.trees = 10000,
data = filt19,
sample.fraction = res$recommended.pars$sample.fraction,
min.node.size = res$recommended.pars$min.node.size,
seed = 1,
importance = 'permutation',
local.importance = T
)
saveRDS(optimal_rf19, file = "rna_seq/site/site_2019train_optimal_rf.RDS")
# evaluate the accuracy of the model and generate a confusion matrix
evaluate_model(optimal_ranger = optimal_rf19, data = filt19, reference_class = filt19$site,
plt_title = "2019 Training Performance") # eval training data -- 2019
# validation data
evaluate_model(optimal_ranger = optimal_rf19, data = filt17, reference_class = filt17$site,
plt_title = "2017 Validation Performance") # eval 2019 model on 2017 data
# 43% acc
# eval variable importance/local importance -------------------------------
# Which single gene has the largest local (casewise) permutation importance
# for two example samples (rows 1 and 26) of the 2017 model.
which.max(optimal_rf17$variable.importance.local[1, ])
which.max(optimal_rf17$variable.importance.local[26, ])
# Rank genes by global permutation importance for each season's model,
# dropping genes with non-positive importance.
var17 <- data.frame(gene = names(optimal_rf17$variable.importance), imp = optimal_rf17$variable.importance)
var17 <- var17 %>%
filter(imp >0) %>%
arrange(desc(imp))
var19 <- data.frame(gene = names(optimal_rf19$variable.importance), imp = optimal_rf19$variable.importance)
var19 <- var19 %>%
filter(imp >0) %>%
arrange(desc(imp))
# Agreement between the two rankings: overlap of the top-100 genes, then of
# all positively-important genes.
table(var19$gene[1:100] %in% var17$gene[1:100])
table(var19$gene %in% var17$gene)
|
\name{tegarch}
\alias{tegarch}
\title{ Estimate first order Beta-Skew-t-EGARCH models }
\description{
Fits a first order Beta-Skew-t-EGARCH model to a univariate time-series by exact Maximum Likelihood (ML) estimation. Estimation is via the \code{\link{nlminb}} function
}
\usage{
tegarch(y, asym = TRUE, skew = TRUE, components = 1, initial.values = NULL,
lower = NULL, upper = NULL, hessian = TRUE, lambda.initial = NULL,
c.code = TRUE, logl.penalty = NULL, aux = NULL, ...)
}
\arguments{
\item{y}{numeric vector, typically a financial return series.}
\item{asym}{logical. TRUE (default) includes leverage or volatility asymmetry in the log-scale specification}
\item{skew}{logical. TRUE (default) enables and estimates the skewness in conditional density (epsilon). The skewness method is that of Fernandez and Steel (1998)}
\item{components}{Numeric value, either 1 (default) or 2. The former estimates a 1-component model, the latter a 2-component model}
\item{initial.values}{NULL (default) or a vector with the initial values. If NULL, then the values are automatically chosen according to model (with or without skewness, 1 or 2 components, etc.)}
\item{lower}{NULL (default) or a vector with the lower bounds of the parameter space. If NULL, then the values are automatically chosen}
\item{upper}{NULL (default) or a vector with the upper bounds of the parameter space. If NULL, then the values are automatically chosen}
\item{hessian}{logical. If TRUE (default) then the Hessian is computed numerically via the optimHess function. Setting hessian=FALSE speeds up estimation, which might be particularly useful in simulation. However, it also slows down the extraction of the variance-covariance matrix by means of the vcov method.}
\item{lambda.initial}{NULL (default) or a vector with the initial value(s) of the recursion for lambda and lambdadagger. If NULL then the values are chosen automatically}
\item{c.code}{logical. TRUE (default) is faster since it makes use of compiled C-code}
\item{logl.penalty}{NULL (default) or a numeric value. If NULL then the log-likelihood value associated with the initial values is used. Sometimes estimation can result in NA and/or +/-Inf values, which are fatal for simulations. The value logl.penalty is the value returned by the log-likelihood function in the presence of NA or +/-Inf values}
\item{aux}{NULL (default) or a list, see code. Useful for simulations (speeds them up)}
\item{\dots}{further arguments passed to the nlminb function}
}
\value{
Returns a list of class 'tegarch' with the following elements:
\item{y}{the series used for estimation.}
\item{date}{date and time of estimation.}
\item{initial.values}{initial values used in estimation.}
\item{lower}{lower bounds used in estimation.}
\item{upper}{upper bounds used in estimation.}
\item{lambda.initial}{initial values of lambda provided by the user, if any.}
\item{model}{type of model estimated.}
\item{hessian}{the numerically estimated Hessian.}
\item{sic}{the value of the Schwarz (1978) information criterion.}
\item{par}{parameter estimates.}
\item{objective}{value of the log-likelihood at the maximum.}
\item{convergence}{an integer code. 0 indicates successful convergence, see the documentation of nlminb.}
\item{iterations}{number of iterations, see the documentation of nlminb.}
\item{evaluations}{number of evaluations of the objective and gradient functions, see the documentation of nlminb.}
\item{message}{a character string giving any additional information returned by the optimizer, or NULL. For details, see PORT documentation and the nlminb documentation.}
\item{NOTE}{an additional message returned if one tries to estimate a 2-component model without leverage.}
}
\references{
Fernandez and Steel (1998), 'On Bayesian Modeling of Fat Tails and Skewness', Journal of the American Statistical Association 93, pp. 359-371.\cr
Nelson, Daniel B. (1991): 'Conditional Heteroskedasticity in Asset Returns: A New Approach', Econometrica 59, pp. 347-370.\cr
Harvey and Sucarrat (2013), 'EGARCH models with fat tails, skewness and leverage', forthcoming in Computational Statistics and Data Analysis. Working paper version: Cambridge Working Papers in Economics 1236, Faculty of Economics, University of Cambridge.\cr
Schwarz (1978), 'Estimating the Dimension of a Model', The Annals of Statistics 6, pp. 461-464.\cr
}
\author{Genaro Sucarrat, \url{http://www.sucarrat.net/}}
\note{Empty}
\seealso{
\code{\link{tegarchSim}}, \code{\link{coef.tegarch}}, \code{\link{fitted.tegarch}}, \code{\link{logLik.tegarch}}, \code{\link{predict.tegarch}}, \code{\link{print.tegarch}}, \code{\link{residuals.tegarch}}, \code{\link{summary.tegarch}}, \code{\link{vcov.tegarch}}
}
\examples{
##simulate series with 500 observations:
set.seed(123)
y <- tegarchSim(500, omega=0.01, phi1=0.9, kappa1=0.1, kappastar=0.05,
df=10, skew=0.8)
##estimate a 1st. order Beta-t-EGARCH model and store the output in mymod:
mymod <- tegarch(y)
#print estimates and standard errors:
print(mymod)
#graph of fitted volatility (conditional standard deviation):
plot(fitted(mymod))
#graph of fitted volatility and more:
plot(fitted(mymod, verbose=TRUE))
#plot forecasts of volatility 1-step ahead up to 20-steps ahead:
plot(predict(mymod, n.ahead=20))
#full variance-covariance matrix:
vcov(mymod)
}
\keyword{Statistical Models}
| /man/tegarch.Rd | no_license | paulhendricks/betategarch | R | false | false | 5,507 | rd | \name{tegarch}
\alias{tegarch}
\title{ Estimate first order Beta-Skew-t-EGARCH models }
\description{
Fits a first order Beta-Skew-t-EGARCH model to a univariate time-series by exact Maximum Likelihood (ML) estimation. Estimation is via the \code{\link{nlminb}} function
}
\usage{
tegarch(y, asym = TRUE, skew = TRUE, components = 1, initial.values = NULL,
lower = NULL, upper = NULL, hessian = TRUE, lambda.initial = NULL,
c.code = TRUE, logl.penalty = NULL, aux = NULL, ...)
}
\arguments{
\item{y}{numeric vector, typically a financial return series.}
\item{asym}{logical. TRUE (default) includes leverage or volatility asymmetry in the log-scale specification}
\item{skew}{logical. TRUE (default) enables and estimates the skewness in conditional density (epsilon). The skewness method is that of Fernandez and Steel (1998)}
\item{components}{Numeric value, either 1 (default) or 2. The former estimates a 1-component model, the latter a 2-component model}
\item{initial.values}{NULL (default) or a vector with the initial values. If NULL, then the values are automatically chosen according to model (with or without skewness, 1 or 2 components, etc.)}
\item{lower}{NULL (default) or a vector with the lower bounds of the parameter space. If NULL, then the values are automatically chosen}
\item{upper}{NULL (default) or a vector with the upper bounds of the parameter space. If NULL, then the values are automatically chosen}
\item{hessian}{logical. If TRUE (default) then the Hessian is computed numerically via the optimHess function. Setting hessian=FALSE speeds up estimation, which might be particularly useful in simulation. However, it also slows down the extraction of the variance-covariance matrix by means of the vcov method.}
\item{lambda.initial}{NULL (default) or a vector with the initial value(s) of the recursion for lambda and lambdadagger. If NULL then the values are chosen automatically}
\item{c.code}{logical. TRUE (default) is faster since it makes use of compiled C-code}
\item{logl.penalty}{NULL (default) or a numeric value. If NULL then the log-likelihood value associated with the initial values is used. Sometimes estimation can result in NA and/or +/-Inf values, which are fatal for simulations. The value logl.penalty is the value returned by the log-likelihood function in the presence of NA or +/-Inf values}
\item{aux}{NULL (default) or a list, see code. Useful for simulations (speeds them up)}
\item{\dots}{further arguments passed to the nlminb function}
}
\value{
Returns a list of class 'tegarch' with the following elements:
\item{y}{the series used for estimation.}
\item{date}{date and time of estimation.}
\item{initial.values}{initial values used in estimation.}
\item{lower}{lower bounds used in estimation.}
\item{upper}{upper bounds used in estimation.}
\item{lambda.initial}{initial values of lambda provided by the user, if any.}
\item{model}{type of model estimated.}
\item{hessian}{the numerically estimated Hessian.}
\item{sic}{the value of the Schwarz (1978) information criterion.}
\item{par}{parameter estimates.}
\item{objective}{value of the log-likelihood at the maximum.}
\item{convergence}{an integer code. 0 indicates successful convergence, see the documentation of nlminb.}
\item{iterations}{number of iterations, see the documentation of nlminb.}
\item{evaluations}{number of evaluations of the objective and gradient functions, see the documentation of nlminb.}
\item{message}{a character string giving any additional information returned by the optimizer, or NULL. For details, see PORT documentation and the nlminb documentation.}
\item{NOTE}{an additional message returned if one tries to estimate a 2-component model without leverage.}
}
\references{
Fernandez and Steel (1998), 'On Bayesian Modeling of Fat Tails and Skewness', Journal of the American Statistical Association 93, pp. 359-371.\cr
Nelson, Daniel B. (1991): 'Conditional Heteroskedasticity in Asset Returns: A New Approach', Econometrica 59, pp. 347-370.\cr
Harvey and Sucarrat (2013), 'EGARCH models with fat tails, skewness and leverage', forthcoming in Computational Statistics and Data Analysis. Working paper version: Cambridge Working Papers in Economics 1236, Faculty of Economics, University of Cambridge.\cr
Schwarz (1978), 'Estimating the Dimension of a Model', The Annals of Statistics 6, pp. 461-464.\cr
}
\author{Genaro Sucarrat, \url{http://www.sucarrat.net/}}
\note{Empty}
\seealso{
\code{\link{tegarchSim}}, \code{\link{coef.tegarch}}, \code{\link{fitted.tegarch}}, \code{\link{logLik.tegarch}}, \code{\link{predict.tegarch}}, \code{\link{print.tegarch}}, \code{\link{residuals.tegarch}}, \code{\link{summary.tegarch}}, \code{\link{vcov.tegarch}}
}
\examples{
##simulate series with 500 observations:
set.seed(123)
y <- tegarchSim(500, omega=0.01, phi1=0.9, kappa1=0.1, kappastar=0.05,
df=10, skew=0.8)
##estimate a 1st. order Beta-t-EGARCH model and store the output in mymod:
mymod <- tegarch(y)
#print estimates and standard errors:
print(mymod)
#graph of fitted volatility (conditional standard deviation):
plot(fitted(mymod))
#graph of fitted volatility and more:
plot(fitted(mymod, verbose=TRUE))
#plot forecasts of volatility 1-step ahead up to 20-steps ahead:
plot(predict(mymod, n.ahead=20))
#full variance-covariance matrix:
vcov(mymod)
}
\keyword{Statistical Models}
|
########################
# TaR Malaria (Simple)
#
# Author: Alec Georgoff
#
# Purpose: Solve for equilibrium prevalence values given R values in a system with one village and
# one forest
########################
# Clears the entire workspace -- convenient interactively, but note this is a
# known anti-pattern in sourced scripts (it wipes any caller-defined objects).
rm(list = ls())
# Install any missing dependencies, then attach them.
list.of.packages <- c("rootSolve", "data.table", "plotly", "ggplot2")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(rootSolve)
library(data.table)
library(plotly)
library(ggplot2)
###################################
#
# Choose options
#
###################################
# p = fraction of time forest-goers spend in the forest (used to build the
# time-at-risk matrix Psi below).
p <- 0.5
# Grid of R values to sweep for the village (v) and the forest (f).
R_v_min <- 0
R_v_max <- 3
R_v_step_size <- 0.03
R_f_min <- 0
R_f_max <- 3
R_f_step_size <- 0.03
# Which figures to draw. (Style note: T/F are reassignable; TRUE/FALSE is safer.)
make_surface <- T
make_binary_heatmap <- T
make_continuous_heatmap <- T
###################################
#
# Set parameters
#
###################################
# Ross-Macdonald-style transmission parameters.
a <- 0.88 # human blood feeding rate
b <- 0.55 # proportion of bites by infectious mosquitoes that cause an infection
# NOTE(review): this shadows base::c for variable lookup; calls like c(...)
# still resolve to the function because R skips non-functions in call position.
c <- 0.15 # proportion of mosquitoes infected after biting infectious human
g <- 0.1 # per capita death rate of mosquitoes
r <- 1/200 # rate that humans recover from an infection
n <- 12 # time for sporogonic cycle
S <- a/g # stability index
###################################
#
# Establish matrices of variables
#
###################################
# Two groups throughout: index 1 = villagers, index 2 = forest-goers.
# set number of villagers and forest-goers, respectively:
H <- as.vector(c(5000,2000))
# these don't need to be changed:
V <- as.vector(c(100,500)) # mosquito populations (only feed into calculate_R)
X <- as.vector(c(0,0)) # infected humans (placeholder)
Y <- as.vector(c(0,0)) # infected mosquitoes (appears unused below)
# Compute the basic reproduction number R0 per patch (Ross-Macdonald form).
#   V: mosquito population;        a: human blood feeding rate
#   b: mosquito->human infection efficiency; c: human->mosquito efficiency
#   g: mosquito death rate;        n: sporogonic cycle length (days)
#   H: human population;           r: human recovery rate
# All arguments vectorize elementwise, so patch vectors give one R per patch.
# (Cleaned up: use the value of the expression directly instead of
# `R = ...; return(R)`, and avoid `=` for assignment.)
calculate_R <- function(V, a, b, c, g, n, H, r) {
  (V * a^2 * b * c * exp(-g * n)) / (H * g * r)
}
# calculate R values:
R <- calculate_R(V, a, b, c, g, n, H, r)
# Time-at-risk matrix: rows = residency group (villager, forest-goer),
# cols = location (village, forest). Filled column-wise, so villagers are
# (1, 0) and forest-goers split their time (1-p, p).
Psi <- matrix(c(1,1-p,0,p), nrow=2)
# Population and infecteds re-weighted by time spent at each location.
H_psi <- t(Psi) %*% H
X_psi <- t(Psi) %*% X
# choose starting point for root solver:
theta_start <- c(0.9, 0.9) # initial prevalence guess per group
# convert to number of humans:
X_start <- theta_start * H
###################################
#
# Set up the equations as a function
#
###################################
# Equilibrium system for the time-at-risk malaria model. Returns the residual
# of the balance equations; a root (residual == 0) is an equilibrium in
# numbers of infected humans X.
#   X:     infected humans per residency group
#   Psi:   time-at-risk matrix (rows: group, cols: location)
#   R:     basic reproduction number per location
#   c_val: human-to-mosquito transmission efficiency
#   S_val: stability index (a / g)
#   H:     human population per residency group
model <- function(X, Psi, R, c_val, S_val, H) {
  # prevalence experienced at each location, weighted by time at risk
  prev_at_location <- (t(Psi) %*% X) / (t(Psi) %*% H)
  # saturating force of infection per location, mapped back onto residents
  foi <- Psi %*% (R * prev_at_location / (c_val * S_val * prev_at_location + 1))
  # new infections among susceptibles minus standing infections: zero at equilibrium
  foi * (H - X) - X
}
###################################
#
# Solve for roots
#
###################################
# Numerically solve the equilibrium system model() for a given pair of R
# values, starting from X_start.. Dotted defaults pull in the script-level
# globals defined above (note: p_val is accepted for symmetry but is not
# forwarded to the solver). Returns the full rootSolve::multiroot() result
# (root, f.root, iter, estim.precis).
find_roots <- function(R,
                       Psi. = Psi,
                       H. = H,
                       S. = S,
                       c_val = c,
                       p_val = p,
                       X_start. = X_start) {
  # delegate to the multiroot solver, constrained to non-negative roots
  multiroot(
    f = model,
    start = X_start.,
    positive = TRUE,
    maxiter = 1000,
    ctol = 1e-20,
    Psi = Psi.,
    R = R,
    c_val = c_val,
    S_val = S.,
    H = H.
  )
}
# set R values to cycle through:
R_0_v_values <- seq(R_v_min, R_v_max, R_v_step_size)
R_0_f_values <- seq(R_f_min, R_f_max, R_f_step_size)
# create data table to store results:
# Preallocated to the full grid size; one row per (R_v, R_f) combination,
# filled in place via data.table's := below.
results <- data.table(R_0_v = rep(0, times = length(R_0_f_values) * length(R_0_v_values)), R_0_f = 0,
theta_v = 0, theta_f = 0,
X_v = 0, X_f = 0,
X_psi_v = 0, X_psi_f = 0,
root_f_value_v = 0, root_f_value_f = 0,
iter = 0, estim.precis = 0)
# Sweep the grid: solve the equilibrium system at every (village, forest)
# R pair and record the root plus solver diagnostics in row i.
i <- 1
for (v in R_0_v_values) {
for (f in R_0_f_values) {
# record current R values:
results[i, R_0_v := as.numeric(v)]
results[i, R_0_f := as.numeric(f)]
# solve for roots at those R values:
these_roots <- find_roots(R = c(v,f))
# add X values to results:
results[i, X_v := these_roots$root[1]]
results[i, X_f := these_roots$root[2]]
# add scaled X values to results:
# X_psi_* = infecteds present at each location; forest-goers contribute
# a (1-p) share to the village and a p share to the forest.
results[i, X_psi_v := X_v + (1-p)*X_f]
results[i, X_psi_f := p*X_f]
# add prevalence values to results:
results[i, theta_v := X_psi_v / H_psi[1]]
results[i, theta_f := X_f / H[2]]
# add value of equations at root to results:
results[i, root_f_value_v := these_roots$f.root[1,]]
results[i, root_f_value_f := these_roots$f.root[2,]]
# add # of iterations and estimated precision to results:
results[i, iter := these_roots$iter]
results[i, estim.precis := these_roots$estim.precis]
# print progress:
cat("R_0_v =", v, ", R_0_f =", f, " \r", file = "", sep = " ")
flush.console()
i <- i + 1
}
}
# create binary results variable:
# 1 if malaria persists in the village at equilibrium (prevalence above a
# small numerical tolerance), 0 otherwise.
results$theta_v_binary <- 0
results[theta_v > 0.0001, theta_v_binary := 1]
# Binary heatmap: does malaria persist in the village for each (R_v, R_f)?
if (make_binary_heatmap) {
heatmap <- plot_ly(x = results$R_0_v,
y = results$R_0_f,
z = results$theta_v_binary,
type = "heatmap",
colors = colorRamp(c("#56B4E9", "#D55E00")),
height = 800, width = 960) %>%
layout(title = paste0("Equilibrium Prevalence in Village as a Function of R in Village and Forest p = ", p),
titlefont = list(size = 16),
xaxis = list(title = "R Value, Village",
titlefont = list(size = 20)),
yaxis = list(title = "R Value, Forest",
titlefont = list(size = 20)),
showlegend = FALSE)
heatmap
}
# Continuous heatmap: equilibrium village prevalence over the same grid.
if (make_continuous_heatmap) {
heatmap <- plot_ly(x = results$R_0_v,
y = results$R_0_f,
z = results$theta_v,
type = "heatmap",
colors = colorRamp(c("#56B4E9", "#D55E00")),
height = 800, width = 960) %>%
layout(title = paste0("Equilibrium Prevalence in Village as a Function of R in Village and Forest p = ", p),
titlefont = list(size = 16),
xaxis = list(title = "R Value, Village",
titlefont = list(size = 20)),
yaxis = list(title = "R Value, Forest",
titlefont = list(size = 20)))
heatmap
}
# 3-D scatter: prevalence surface, colored by persistence category.
if (make_surface) {
results$thresh <- "Malaria in Village"
results$thresh[which(results$theta_v < 0.00001)] <- "No Malaria in Village"
# NOTE(review): the condition tests R < 1 but the label says "Below 0" --
# the label text looks like a typo ("Below 1" was presumably intended).
results$thresh[which(results$R_0_v < 1 & results$R_0_f < 1)] <- "Both R Values Below 0"
surface <- plot_ly(data = results,
x = ~R_0_v,
y = ~R_0_f,
z = ~theta_v,
color = ~thresh,
colors = c("purple", "red", "blue"),
type = "scatter3d") %>%
add_markers() %>%
layout(
title = paste0("Malaria Prevalence in the Village as a Function of R\np = ", p),
scene = list(
xaxis = list(title = "R, Village"),
yaxis = list(title = "R, Forest"),
zaxis = list(title = "Village Malaria Prevalence")
)
)
surface
}
#
# my_plot <- ggplot(data = results) +
# geom_raster(aes(x = R_0_v, y = R_0_f, fill = theta_v)) +
# scale_fill_gradientn(colours = c("blue", "red")) +
#
# # geom_point(data = results[theta_v > 0.01 & theta_v < 0.1],
# # aes(x = R_0_v, y = R_0_f)) +
#
# geom_segment(aes(x = 0, xend = 1, y = 1, yend = 1, color = "yellow")) +
#
# labs(title = paste0("Equilibrium Village Prevalence of Malaria \n", "p = ", p),
# x = "R_0 Value in Village",
# y = "R_0 Value in Forest")
#
# my_plot
| /forest_malaria/scripts/figure_generation_scripts/p-05-figures.R | no_license | georgoff/PBF | R | false | false | 7,486 | r | ########################
# TaR Malaria (Simple)
#
# Author: Alec Georgoff
#
# Purpose: Solve for equilibrium prevalence values given R values in a system with one village and
# one forest
########################
rm(list = ls())
list.of.packages <- c("rootSolve", "data.table", "plotly", "ggplot2")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(rootSolve)
library(data.table)
library(plotly)
library(ggplot2)
###################################
#
# Choose options
#
###################################
p <- 0.5
R_v_min <- 0
R_v_max <- 3
R_v_step_size <- 0.03
R_f_min <- 0
R_f_max <- 3
R_f_step_size <- 0.03
make_surface <- T
make_binary_heatmap <- T
make_continuous_heatmap <- T
###################################
#
# Set parameters
#
###################################
a <- 0.88 # human blood feeding rate
b <- 0.55 # proportion of bites by infectious mosquitoes that cause an infection
c <- 0.15 # proportion of mosquitoes infected after biting infectious human
g <- 0.1 # per capita death rate of mosquitoes
r <- 1/200 # rate that humans recover from an infection
n <- 12 # time for sporogonic cycle
S <- a/g # stability index
###################################
#
# Establish matrices of variables
#
###################################
# set number of villagers and forest-goers, respectively:
H <- as.vector(c(5000,2000))
# these don't need to be changed:
V <- as.vector(c(100,500))
X <- as.vector(c(0,0))
Y <- as.vector(c(0,0))
# Compute the basic reproduction number R0 per patch (Ross-Macdonald form).
#   V: mosquito population;        a: human blood feeding rate
#   b: mosquito->human infection efficiency; c: human->mosquito efficiency
#   g: mosquito death rate;        n: sporogonic cycle length (days)
#   H: human population;           r: human recovery rate
# All arguments vectorize elementwise, so patch vectors give one R per patch.
# (Cleaned up: use the value of the expression directly instead of
# `R = ...; return(R)`, and avoid `=` for assignment.)
calculate_R <- function(V, a, b, c, g, n, H, r) {
  (V * a^2 * b * c * exp(-g * n)) / (H * g * r)
}
# calculate R values:
R <- calculate_R(V, a, b, c, g, n, H, r)
Psi <- matrix(c(1,1-p,0,p), nrow=2)
H_psi <- t(Psi) %*% H
X_psi <- t(Psi) %*% X
# choose starting point for root solver:
theta_start <- c(0.9, 0.9)
# convert to number of humans:
X_start <- theta_start * H
###################################
#
# Set up the equations as a function
#
###################################
# Equilibrium system for the time-at-risk malaria model. Returns the residual
# of the balance equations; a root (residual == 0) is an equilibrium in
# numbers of infected humans X.
#   X:     infected humans per residency group
#   Psi:   time-at-risk matrix (rows: group, cols: location)
#   R:     basic reproduction number per location
#   c_val: human-to-mosquito transmission efficiency
#   S_val: stability index (a / g)
#   H:     human population per residency group
model <- function(X, Psi, R, c_val, S_val, H) {
  # prevalence experienced at each location, weighted by time at risk
  prev_at_location <- (t(Psi) %*% X) / (t(Psi) %*% H)
  # saturating force of infection per location, mapped back onto residents
  foi <- Psi %*% (R * prev_at_location / (c_val * S_val * prev_at_location + 1))
  # new infections among susceptibles minus standing infections: zero at equilibrium
  foi * (H - X) - X
}
###################################
#
# Solve for roots
#
###################################
# Numerically solve the equilibrium system model() for a given pair of R
# values, starting from X_start.. Dotted defaults pull in the script-level
# globals defined above (note: p_val is accepted for symmetry but is not
# forwarded to the solver). Returns the full rootSolve::multiroot() result
# (root, f.root, iter, estim.precis).
find_roots <- function(R,
                       Psi. = Psi,
                       H. = H,
                       S. = S,
                       c_val = c,
                       p_val = p,
                       X_start. = X_start) {
  # delegate to the multiroot solver, constrained to non-negative roots
  multiroot(
    f = model,
    start = X_start.,
    positive = TRUE,
    maxiter = 1000,
    ctol = 1e-20,
    Psi = Psi.,
    R = R,
    c_val = c_val,
    S_val = S.,
    H = H.
  )
}
}
# set R values to cycle through:
R_0_v_values <- seq(R_v_min, R_v_max, R_v_step_size)
R_0_f_values <- seq(R_f_min, R_f_max, R_f_step_size)
# create data table to store results:
results <- data.table(R_0_v = rep(0, times = length(R_0_f_values) * length(R_0_v_values)), R_0_f = 0,
theta_v = 0, theta_f = 0,
X_v = 0, X_f = 0,
X_psi_v = 0, X_psi_f = 0,
root_f_value_v = 0, root_f_value_f = 0,
iter = 0, estim.precis = 0)
i <- 1
for (v in R_0_v_values) {
for (f in R_0_f_values) {
# record current R values:
results[i, R_0_v := as.numeric(v)]
results[i, R_0_f := as.numeric(f)]
# solve for roots at those R values:
these_roots <- find_roots(R = c(v,f))
# add X values to results:
results[i, X_v := these_roots$root[1]]
results[i, X_f := these_roots$root[2]]
# add scaled X values to results:
results[i, X_psi_v := X_v + (1-p)*X_f]
results[i, X_psi_f := p*X_f]
# add prevalence values to results:
results[i, theta_v := X_psi_v / H_psi[1]]
results[i, theta_f := X_f / H[2]]
# add value of equations at root to results:
results[i, root_f_value_v := these_roots$f.root[1,]]
results[i, root_f_value_f := these_roots$f.root[2,]]
# add # of iterations and estimated precision to results:
results[i, iter := these_roots$iter]
results[i, estim.precis := these_roots$estim.precis]
# print progress:
cat("R_0_v =", v, ", R_0_f =", f, " \r", file = "", sep = " ")
flush.console()
i <- i + 1
}
}
# create binary results variable:
results$theta_v_binary <- 0
results[theta_v > 0.0001, theta_v_binary := 1]
if (make_binary_heatmap) {
heatmap <- plot_ly(x = results$R_0_v,
y = results$R_0_f,
z = results$theta_v_binary,
type = "heatmap",
colors = colorRamp(c("#56B4E9", "#D55E00")),
height = 800, width = 960) %>%
layout(title = paste0("Equilibrium Prevalence in Village as a Function of R in Village and Forest p = ", p),
titlefont = list(size = 16),
xaxis = list(title = "R Value, Village",
titlefont = list(size = 20)),
yaxis = list(title = "R Value, Forest",
titlefont = list(size = 20)),
showlegend = FALSE)
heatmap
}
if (make_continuous_heatmap) {
heatmap <- plot_ly(x = results$R_0_v,
y = results$R_0_f,
z = results$theta_v,
type = "heatmap",
colors = colorRamp(c("#56B4E9", "#D55E00")),
height = 800, width = 960) %>%
layout(title = paste0("Equilibrium Prevalence in Village as a Function of R in Village and Forest p = ", p),
titlefont = list(size = 16),
xaxis = list(title = "R Value, Village",
titlefont = list(size = 20)),
yaxis = list(title = "R Value, Forest",
titlefont = list(size = 20)))
heatmap
}
if (make_surface) {
  # Classify each grid point for coloring the 3-D scatter:
  #   default:                 malaria persists in the village
  #   theta_v ~ 0:             disease-free in the village
  #   both R values below 1:   below the epidemic threshold
  # NOTE(review): the cutoff here (1e-5) differs from the 1e-4 used for
  # theta_v_binary above -- confirm which threshold is intended.
  # which() is kept deliberately: it drops NA comparisons, whereas a raw
  # logical index containing NA would error on assignment.
  results$thresh <- "Malaria in Village"
  results$thresh[which(results$theta_v < 0.00001)] <- "No Malaria in Village"
  # Fixed label: the condition tests R values below 1 (the epidemic
  # threshold), not below 0. The new label still sorts first
  # alphabetically, so the purple/red/blue color assignment is unchanged.
  results$thresh[which(results$R_0_v < 1 & results$R_0_f < 1)] <- "Both R Values Below 1"
  surface <- plot_ly(data = results,
                     x = ~R_0_v,
                     y = ~R_0_f,
                     z = ~theta_v,
                     color = ~thresh,
                     colors = c("purple", "red", "blue"),
                     type = "scatter3d") %>%
    add_markers() %>%
    layout(
      title = paste0("Malaria Prevalence in the Village as a Function of R\np = ", p),
      scene = list(
        xaxis = list(title = "R, Village"),
        yaxis = list(title = "R, Forest"),
        zaxis = list(title = "Village Malaria Prevalence")
      )
    )
  surface
}
#
# my_plot <- ggplot(data = results) +
# geom_raster(aes(x = R_0_v, y = R_0_f, fill = theta_v)) +
# scale_fill_gradientn(colours = c("blue", "red")) +
#
# # geom_point(data = results[theta_v > 0.01 & theta_v < 0.1],
# # aes(x = R_0_v, y = R_0_f)) +
#
# geom_segment(aes(x = 0, xend = 1, y = 1, yend = 1, color = "yellow")) +
#
# labs(title = paste0("Equilibrium Village Prevalence of Malaria \n", "p = ", p),
# x = "R_0 Value in Village",
# y = "R_0 Value in Forest")
#
# my_plot
|
# Auto-generated fuzz/valgrind regression input for meteor's internal
# ET0_Makkink (Makkink reference evapotranspiration): zero-length
# Rs/atmp/relh vectors combined with a 56-element zero temperature vector
# probe the function's handling of mismatched/empty inputs.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Call the unexported function with the fuzz arguments.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615856975-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 313 | r | testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the internal Makkink ET0 routine with the fuzz inputs built above.
result <- do.call(meteor:::ET0_Makkink,testlist)
# Inspect the structure of whatever the call returned.
str(result)
## Activity-monitoring analysis: steps recorded in 5-minute intervals.
##setwd("./RepData_PeerAssessment1")
steps <- read.csv("./activity/activity.csv")
## Total steps per day; days whose intervals are all NA total to NA.
stepdays <- tapply(steps$steps, steps$date, sum)
stepdays
hist(stepdays, main = "Steps per day", xlab = "steps", ylab = "number of days")
mean(stepdays, na.rm = TRUE)
median(stepdays, na.rm = TRUE)
## For comparison: without na.rm the all-missing days make both NA.
mean(stepdays)
median(stepdays)
## Average daily activity pattern: mean steps per interval across days.
stepmin <- aggregate(steps$steps, list(steps$interval), mean, na.rm = TRUE)
names(stepmin) <- c("interval", "steps")
plot(stepmin$interval, stepmin$steps, type = "l")
## Interval with the highest average step count (which.max replaces the
## fragile hard-coded row index [288,] used previously).
stepmin$interval[which.max(stepmin$steps)]
## Count of missing step values.
length(which(is.na(steps$steps)))
## Impute missing steps from a linear model on 3-hour periods of the day.
steps$hour <- floor(steps$interval/100)
steps$period <- floor(steps$hour/3)
steps$period <- factor(steps$period)
levels(steps$period) <- c("0-2", "3-5", "6-8", "9-11", "12-14", "15-17", "18-20", "21-23")
mod <- lm(steps ~ period, data = steps)
mod
steps$stepsi <- steps$steps
## Fill NAs with fitted period means (predictions can be fractional).
steps$stepsi[is.na(steps$steps)] <- predict(mod, newdata = steps[is.na(steps$steps), ])
stepdaysi <- tapply(steps$stepsi, steps$date, sum, na.rm = TRUE)
stepdaysi
hist(stepdaysi, main = "Steps per day (with imputed data)", xlab = "steps", ylab = "number of days", col = "#ff99ff")
mean(stepdaysi, na.rm = TRUE)
median(stepdaysi, na.rm = TRUE)
## Are there differences in activity patterns between weekdays and weekends?
steps$ddate <- as.Date(as.character(steps$date), format = "%Y-%m-%d")
steps$weekday <- weekdays(steps$ddate)
## NOTE(review): weekdays() is locale-dependent; "Saturday"/"Sunday"
## assume an English locale.
steps$weekend <- steps$weekday %in% c("Saturday", "Sunday")
stepmin.i.weekdays <- aggregate(steps$stepsi[!steps$weekend], list(steps$interval[!steps$weekend]), mean, na.rm = TRUE)
stepmin.i.weekends <- aggregate(steps$stepsi[steps$weekend], list(steps$interval[steps$weekend]), mean, na.rm = TRUE)
names(stepmin.i.weekdays) <- c("interval", "steps")
names(stepmin.i.weekends) <- c("interval", "steps")
par(mfrow = c(2, 1), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
plot(stepmin.i.weekends$interval, stepmin.i.weekends$steps, ylab = "Steps", xlab = "", main = "weekend", type = "l", ylim = c(0, 220), col = "blue")
plot(stepmin.i.weekdays$interval, stepmin.i.weekdays$steps, ylab = "Steps", xlab = "", main = "weekday", type = "l", ylim = c(0, 220), col = "darkred")
| /PA1_template.R | no_license | meleswujira/RepData_PeerAssessment1 | R | false | false | 2,135 | r |
## Activity-monitoring analysis: steps recorded in 5-minute intervals.
##setwd("./RepData_PeerAssessment1")
steps <- read.csv("./activity/activity.csv")
## Total steps per day; days whose intervals are all NA total to NA.
stepdays <- tapply(steps$steps, steps$date, sum)
stepdays
hist(stepdays, main = "Steps per day", xlab = "steps", ylab = "number of days")
mean(stepdays, na.rm = TRUE)
median(stepdays, na.rm = TRUE)
## For comparison: without na.rm the all-missing days make both NA.
mean(stepdays)
median(stepdays)
## Average daily activity pattern: mean steps per interval across days.
stepmin <- aggregate(steps$steps, list(steps$interval), mean, na.rm = TRUE)
names(stepmin) <- c("interval", "steps")
plot(stepmin$interval, stepmin$steps, type = "l")
## Interval with the highest average step count (which.max replaces the
## fragile hard-coded row index [288,] used previously).
stepmin$interval[which.max(stepmin$steps)]
## Count of missing step values.
length(which(is.na(steps$steps)))
## Impute missing steps from a linear model on 3-hour periods of the day.
steps$hour <- floor(steps$interval/100)
steps$period <- floor(steps$hour/3)
steps$period <- factor(steps$period)
levels(steps$period) <- c("0-2", "3-5", "6-8", "9-11", "12-14", "15-17", "18-20", "21-23")
mod <- lm(steps ~ period, data = steps)
mod
steps$stepsi <- steps$steps
## Fill NAs with fitted period means (predictions can be fractional).
steps$stepsi[is.na(steps$steps)] <- predict(mod, newdata = steps[is.na(steps$steps), ])
stepdaysi <- tapply(steps$stepsi, steps$date, sum, na.rm = TRUE)
stepdaysi
hist(stepdaysi, main = "Steps per day (with imputed data)", xlab = "steps", ylab = "number of days", col = "#ff99ff")
mean(stepdaysi, na.rm = TRUE)
median(stepdaysi, na.rm = TRUE)
## Are there differences in activity patterns between weekdays and weekends?
steps$ddate <- as.Date(as.character(steps$date), format = "%Y-%m-%d")
steps$weekday <- weekdays(steps$ddate)
## NOTE(review): weekdays() is locale-dependent; "Saturday"/"Sunday"
## assume an English locale.
steps$weekend <- steps$weekday %in% c("Saturday", "Sunday")
stepmin.i.weekdays <- aggregate(steps$stepsi[!steps$weekend], list(steps$interval[!steps$weekend]), mean, na.rm = TRUE)
stepmin.i.weekends <- aggregate(steps$stepsi[steps$weekend], list(steps$interval[steps$weekend]), mean, na.rm = TRUE)
names(stepmin.i.weekdays) <- c("interval", "steps")
names(stepmin.i.weekends) <- c("interval", "steps")
par(mfrow = c(2, 1), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
plot(stepmin.i.weekends$interval, stepmin.i.weekends$steps, ylab = "Steps", xlab = "", main = "weekend", type = "l", ylim = c(0, 220), col = "blue")
plot(stepmin.i.weekdays$interval, stepmin.i.weekdays$steps, ylab = "Steps", xlab = "", main = "weekday", type = "l", ylim = c(0, 220), col = "darkred")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lm.R
\name{vcovCR.lm}
\alias{vcovCR.lm}
\title{Cluster-robust variance-covariance matrix for an lm object.}
\usage{
\method{vcovCR}{lm}(obj, cluster, type, target = NULL, inverse_var = NULL,
form = "sandwich", ...)
}
\arguments{
\item{obj}{Fitted model for which to calculate the variance-covariance matrix}
\item{cluster}{Expression or vector indicating which observations belong to
the same cluster. Required for \code{lm} objects.}
\item{type}{Character string specifying which small-sample adjustment should
be used.}
\item{target}{Optional matrix or vector describing the working
variance-covariance model used to calculate the \code{CR2} and \code{CR4}
adjustment matrices. If a vector, the target matrix is assumed to be
diagonal. If not specified, the target is taken to be an identity matrix.}
\item{inverse_var}{Optional logical indicating whether the weights used in
fitting the model are inverse-variance. If not specified, \code{vcovCR}
will attempt to infer a value.}
\item{form}{Controls the form of the returned matrix. The default
\code{"sandwich"} will return the sandwich variance-covariance matrix.
Alternately, setting \code{form = "meat"} will return only the meat of the
sandwich and setting \code{form = B}, where \code{B} is a matrix of
appropriate dimension, will return the sandwich variance-covariance matrix
calculated using \code{B} as the bread.}
\item{...}{Additional arguments available for some classes of objects.}
}
\value{
An object of class \code{c("vcovCR","clubSandwich")}, which consists
of a matrix of the estimated variance of and covariances between the
regression coefficient estimates.
}
\description{
\code{vcovCR} returns a sandwich estimate of the variance-covariance matrix
of a set of regression coefficient estimates from an \code{\link{lm}} object.
}
\seealso{
\code{\link{vcovCR}}
}
| /man/vcovCR.lm.Rd | no_license | windshield999/clubSandwich | R | false | true | 1,937 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lm.R
\name{vcovCR.lm}
\alias{vcovCR.lm}
\title{Cluster-robust variance-covariance matrix for an lm object.}
\usage{
\method{vcovCR}{lm}(obj, cluster, type, target = NULL, inverse_var = NULL,
form = "sandwich", ...)
}
\arguments{
\item{obj}{Fitted model for which to calculate the variance-covariance matrix}
\item{cluster}{Expression or vector indicating which observations belong to
the same cluster. Required for \code{lm} objects.}
\item{type}{Character string specifying which small-sample adjustment should
be used.}
\item{target}{Optional matrix or vector describing the working
variance-covariance model used to calculate the \code{CR2} and \code{CR4}
adjustment matrices. If a vector, the target matrix is assumed to be
diagonal. If not specified, the target is taken to be an identity matrix.}
\item{inverse_var}{Optional logical indicating whether the weights used in
fitting the model are inverse-variance. If not specified, \code{vcovCR}
will attempt to infer a value.}
\item{form}{Controls the form of the returned matrix. The default
\code{"sandwich"} will return the sandwich variance-covariance matrix.
Alternately, setting \code{form = "meat"} will return only the meat of the
sandwich and setting \code{form = B}, where \code{B} is a matrix of
appropriate dimension, will return the sandwich variance-covariance matrix
calculated using \code{B} as the bread.}
\item{...}{Additional arguments available for some classes of objects.}
}
\value{
An object of class \code{c("vcovCR","clubSandwich")}, which consists
of a matrix of the estimated variance of and covariances between the
regression coefficient estimates.
}
\description{
\code{vcovCR} returns a sandwich estimate of the variance-covariance matrix
of a set of regression coefficient estimates from an \code{\link{lm}} object.
}
\seealso{
\code{\link{vcovCR}}
}
|
#:# libraries
library(digest)    # digest(): hash of the task/learner pair for the audit
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "mbagrade")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# Gaussian-process regression (RBF kernel) on grade_point_average.
task <- makeRegrTask(id = "task", data = dataset$data, target = "grade_point_average")
lrn <- makeLearner("regr.gausspr", par.vals = list(kernel = "rbfdot"))
#:# hash
#:# 621e7b2da46bd2ec963dd88434fd8fa9
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with standard regression metrics.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(mse, rmse, mae, rsq))
ACC <- r$aggr
ACC
#:# session info
sink("sessionInfo.txt")   # paste0() around a single literal was a no-op
sessionInfo()
sink()
| /models/openml_mbagrade/regression_grade_point_average/621e7b2da46bd2ec963dd88434fd8fa9/code.R | no_license | lukaszbrzozowski/CaseStudies2019S | R | false | false | 677 | r | #:# libraries
library(digest)    # digest(): hash of the task/learner pair for the audit
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "mbagrade")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# Gaussian-process regression (RBF kernel) on grade_point_average.
task <- makeRegrTask(id = "task", data = dataset$data, target = "grade_point_average")
lrn <- makeLearner("regr.gausspr", par.vals = list(kernel = "rbfdot"))
#:# hash
#:# 621e7b2da46bd2ec963dd88434fd8fa9
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with standard regression metrics.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(mse, rmse, mae, rsq))
ACC <- r$aggr
ACC
#:# session info
sink("sessionInfo.txt")   # paste0() around a single literal was a no-op
sessionInfo()
sink()
|
### The Easterlin Paradox and Happiness U-curve in Georgia
# Survey-weighted models of self-reported happiness, UN Women Georgia 2018.
library(haven)      # read_dta: import the Stata microdata file
library(stringr)
library(tidyverse)
library(ggeffects)  # ggpredict: marginal predictions from fitted models
library(survey)     # svydesign / svyglm: complex-survey estimation
library(MASS)
library(effects)
library(descr)      # freq / crosstab: weighted frequency tables
nothap <- read_dta("UN_Women_Geo_2018_14.05.18.dta")
###Setting up the survey data
# Complex survey design: PSU clusters within strata, individual weights.
UNWsvy_Sep5 <- svydesign(id=~psu, strata=~stratum,
                         weights=~indwt,
                         data=nothap)
####Economic data
##Unemployment
UNWsvy_Sep5$variables$q12_1_r<-UNWsvy_Sep5$variables$q12_1
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==1]<-100
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==2]<-200
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==3]<-300
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==4]<-400
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==5]<-500
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==6]<-600
table(UNWsvy_Sep5$variables$q12_1_r)
UNWsvy_Sep5$variables$primarystatus<-ifelse(UNWsvy_Sep5$variables$q12_1==-7, UNWsvy_Sep5$variables$q13_1,UNWsvy_Sep5$variables$q12_1_r)
table(UNWsvy_Sep5$variables$primarystatus)
UNWsvy_Sep5$variables$primarystatus_r<-UNWsvy_Sep5$variables$primarystatus
UNWsvy_Sep5$variables$primarystatus_r[UNWsvy_Sep5$variables$primarystatus_r<=-1]<-NA
table(UNWsvy_Sep5$variables$primarystatus_r)
#Household working status
table(UNWsvy_Sep5$variables$q1)
table(UNWsvy_Sep5$variables$q8_1)
UNWsvy_Sep5$variables$q8_1_r<-UNWsvy_Sep5$variables$q8_1
UNWsvy_Sep5$variables$q8_1_r[UNWsvy_Sep5$variables$q8_1_r!=1]<-0
table(UNWsvy_Sep5$variables$q8_1_r)
UNWsvy_Sep5$variables$householdworkertwo<-(UNWsvy_Sep5$variables$q8_1_r+UNWsvy_Sep5$variables$q1)
UNWsvy_Sep5$variables$householdworkertwo[UNWsvy_Sep5$variables$householdworkertwo!=2]<-0
UNWsvy_Sep5$variables$householdworkertwo[UNWsvy_Sep5$variables$householdworkertwo==2]<-1
table(UNWsvy_Sep5$variables$householdworkertwo)
UNWsvy_Sep5$variables$primarystatus<-ifelse(UNWsvy_Sep5$variables$q12_1==-7, UNWsvy_Sep5$variables$q13_1,UNWsvy_Sep5$variables$q12_1_r)
table(UNWsvy_Sep5$variables$primarystatus)
UNWsvy_Sep5$variables$primarystatus_r<-UNWsvy_Sep5$variables$primarystatus
UNWsvy_Sep5$variables$primarystatus_r[UNWsvy_Sep5$variables$primarystatus_r<=-1]<-NA
table(UNWsvy_Sep5$variables$primarystatus_r)
UNWsvy_Sep5$variables$primarystatus_r<-ifelse(UNWsvy_Sep5$variables$householdworkertwo==1,1000,UNWsvy_Sep5$variables$primarystatus)
table(UNWsvy_Sep5$variables$primarystatus_r)
UNWsvy_Sep5$variables$primarystatus_r_r <- factor(UNWsvy_Sep5$variables$primarystatus_r,
levels = c(-7,-3,1,2,3,4,5,100,200,300,400,500,600,1000),
labels = c("notapplicable",
"interviewer error",
"Employee with contract",
"Employee without a contract",
"Self-employed formal",
"Self-employed informal",
"Other Employed",
"Student not working",
"Homemaker and not working",
"Retired and not working",
"Disabled and unable to work",
"Unemployed",
"Other Unemployed",
"Contributing Household Worker"))
table(UNWsvy_Sep5$variables$primarystatus_r_r)
UNWsvy_Sep5$variables$primarystatus_r_r[UNWsvy_Sep5$variables$primarystatus_r_r=="notapplicable"]<-NA
UNWsvy_Sep5$variables$primarystatus_r_r[UNWsvy_Sep5$variables$primarystatus_r_r=="interviewer error"]<-NA
#Wants a job
UNWsvy_Sep5$variables$q9_1_r<-UNWsvy_Sep5$variables$q9_1
UNWsvy_Sep5$variables$q9_1_r[UNWsvy_Sep5$variables$q9_1_r<=-1]<-0
table(UNWsvy_Sep5$variables$q9_1_r)
#Sought job
table(UNWsvy_Sep5$variables$q10_1)
UNWsvy_Sep5$variables$q10_1_r<-UNWsvy_Sep5$variables$q10_1
UNWsvy_Sep5$variables$q10_1_r[UNWsvy_Sep5$variables$q10_1_r<=-1]<-0
table(UNWsvy_Sep5$variables$q10_1_r)
#Can start working
table(UNWsvy_Sep5$variables$q11_1)
UNWsvy_Sep5$variables$q11_1_r<-UNWsvy_Sep5$variables$q11_1
UNWsvy_Sep5$variables$q11_1_r[UNWsvy_Sep5$variables$q11_1_r<=-1]<-0
table(UNWsvy_Sep5$variables$q11_1_r)
#Unemployment calculation
UNWsvy_Sep5$variables$seekingwork<-(UNWsvy_Sep5$variables$q11_1_r+UNWsvy_Sep5$variables$q10_1_r+UNWsvy_Sep5$variables$q9_1_r)
table(UNWsvy_Sep5$variables$seekingwork)
UNWsvy_Sep5$variables$seekingwork[UNWsvy_Sep5$variables$seekingwork<=2]<-0
UNWsvy_Sep5$variables$seekingwork[UNWsvy_Sep5$variables$seekingwork==3]<-100
UNWsvy_Sep5$variables$tocalculateunemployment<-(as.numeric(UNWsvy_Sep5$variables$primarystatus_r_r)+UNWsvy_Sep5$variables$seekingwork)
table(UNWsvy_Sep5$variables$tocalculateunemployment)
table(UNWsvy_Sep5$variables$primarystatus_r_r)
table(as.numeric(UNWsvy_Sep5$variables$primarystatus_r_r))
UNWsvy_Sep5$variables$laborforcebreakdown<-UNWsvy_Sep5$variables$tocalculateunemployment
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==3]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==4]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==5]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==6]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==7]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==8]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==9]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==10]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==11]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==12]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==13]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==14]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==108]<-2
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==109]<-2
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==110]<-2
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==112]<-2
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==114]<-3
freq(UNWsvy_Sep5$variables$laborforcebreakdown, UNWsvy_Sep5$variables$indwt)
freq(UNWsvy_Sep5$variables$laborforcebreakdown, UNWsvy_Sep5$variables$indwt)
crosstab(UNWsvy_Sep5$variables$laborforcebreakdown, UNWsvy_Sep5$variables$sex, UNWsvy_Sep5$variables$indwt, prop.c=TRUE)
##0 out of labor force
##2 unemployed
##3 employed
UNWsvy_Sep5$variables$laborforceparticipation<-UNWsvy_Sep5$variables$laborforcebreakdown
UNWsvy_Sep5$variables$laborforceparticipation[UNWsvy_Sep5$variables$laborforceparticipation<=1]<-0
UNWsvy_Sep5$variables$laborforceparticipation[UNWsvy_Sep5$variables$laborforceparticipation>=2]<-1
table(UNWsvy_Sep5$variables$laborforcebreakdown)
table(UNWsvy_Sep5$variables$laborforceparticipation)
crosstab(UNWsvy_Sep5$variables$laborforceparticipation, UNWsvy_Sep5$variables$sex, w=UNWsvy_Sep5$variables$indwt, prop.c = TRUE)
workingage<-subset(UNWsvy_Sep5, UNWsvy_Sep5$variables$age<=64)
crosstab(workingage$laborforceparticipation, workingage$sex, w=workingage$indwt, prop.c = TRUE)
UNWsvy_Sep5$variables$employedorunemployed<-UNWsvy_Sep5$variables$laborforcebreakdown
UNWsvy_Sep5$variables$employedorunemployed[UNWsvy_Sep5$variables$employedorunemployed<=1]<-NA
UNWsvy_Sep5$variables$employedorunemployed[UNWsvy_Sep5$variables$employedorunemployed==2]<-0
UNWsvy_Sep5$variables$employedorunemployed[UNWsvy_Sep5$variables$employedorunemployed==3]<-1
table(UNWsvy_Sep5$variables$employedorunemployed)
freq(UNWsvy_Sep5$variables$employedorunemployed, w=UNWsvy_Sep5$variables$indwt)
crosstab(UNWsvy_Sep5$variables$employedorunemployed, UNWsvy_Sep5$variables$sex, w=UNWsvy_Sep5$variables$indwt, prop.c = TRUE)
##0 out of labor force
##2 unemployed
##3 employed
##DISPCON
#Sum
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$q86)
table(UNWsvy_Sep5$variables$q86)
#Cleaning
UNWsvy_Sep5$variables$DISPCON<-UNWsvy_Sep5$variables$q86
UNWsvy_Sep5$variables$DISPCON[UNWsvy_Sep5$variables$DISPCON==-3]<-NA
table(UNWsvy_Sep5$variables$DISPCON)
###SELFCON
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$q80)
table(UNWsvy_Sep5$variables$q80)
#Cleaning (0-3 no sc, 4-6 some sc, 7-8 sc, 9-10 very sc)
UNWsvy_Sep5$variables$SELFCON<-UNWsvy_Sep5$variables$q80
UNWsvy_Sep5$variables$SELFCON[UNWsvy_Sep5$variables$SELFCON==-1]<-NA
UNWsvy_Sep5$variables$SELFCON[UNWsvy_Sep5$variables$SELFCON==-2]<-NA
table(UNWsvy_Sep5$variables$SELFCON)
#####OWN SERIES
# Recode negative response codes (DK/refusal) to NA for each of the eleven
# asset-ownership items q90_1 ... q90_11.
# Fixed: the original `table(UNWsvy_Sep5$variables$q90_)` relied on `$`
# partial matching, but "q90_" is an ambiguous prefix of q90_1..q90_11, so
# it returned NULL and table(NULL) stopped the script with
# "nothing to tabulate"; the call is removed.
for (own_item in paste0("q90_", 1:11)) {
  vals <- UNWsvy_Sep5$variables[[own_item]]
  vals[vals <= -1] <- NA
  UNWsvy_Sep5$variables[[own_item]] <- vals
}
###Own as new category
UNWsvy_Sep5$variables$OWN <- (UNWsvy_Sep5$variables$q90_1+
UNWsvy_Sep5$variables$q90_2+
UNWsvy_Sep5$variables$q90_3+
UNWsvy_Sep5$variables$q90_4+
UNWsvy_Sep5$variables$q90_5+
UNWsvy_Sep5$variables$q90_6+
UNWsvy_Sep5$variables$q90_7+
UNWsvy_Sep5$variables$q90_8+
UNWsvy_Sep5$variables$q90_9+
UNWsvy_Sep5$variables$q90_10+
UNWsvy_Sep5$variables$q90_11)
UNWsvy_Sep5$variables$OWN_f<-as.factor(UNWsvy_Sep5$variables$OWN)
hist(UNWsvy_Sep5$variables$OWN)
##Children or not in household
# Number of children = household size (n4) minus n5 -- presumably adults;
# TODO confirm the n4/n5 definitions against the survey codebook.
UNWsvy_Sep5$variables$childdummy<-(UNWsvy_Sep5$variables$n4-UNWsvy_Sep5$variables$n5)
table(UNWsvy_Sep5$variables$childdummy)
# Collapse to a 0/1 indicator: any child present in the household.
UNWsvy_Sep5$variables$childdummy[UNWsvy_Sep5$variables$childdummy>=1]<-1
table(UNWsvy_Sep5$variables$childdummy)
#HAPPMEA
table(UNWsvy_Sep5$variables$q81)
# Happiness measure from q81; negative codes (presumably DK/refusal) -> NA.
UNWsvy_Sep5$variables$HAPPMEA<-UNWsvy_Sep5$variables$q81
UNWsvy_Sep5$variables$HAPPMEA[UNWsvy_Sep5$variables$HAPPMEA<=-1]<-NA
table(UNWsvy_Sep5$variables$HAPPMEA)
#AGEGRO
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$age)
table(UNWsvy_Sep5$variables$age)
# Age groups actually used: 0 = 18-35, 1 = 36-55, 2 = 56+.
# NOTE(review): the original comment said 18-34 / 35-54 / 55+, which does
# not match the cut points below -- confirm which grouping was intended.
UNWsvy_Sep5$variables$AGEGRO<-UNWsvy_Sep5$variables$age
UNWsvy_Sep5$variables$AGEGRO[UNWsvy_Sep5$variables$AGEGRO >=18 & UNWsvy_Sep5$variables$AGEGRO <= 35]<-0
UNWsvy_Sep5$variables$AGEGRO[UNWsvy_Sep5$variables$AGEGRO >=36 & UNWsvy_Sep5$variables$AGEGRO <= 55]<-1
UNWsvy_Sep5$variables$AGEGRO[UNWsvy_Sep5$variables$AGEGRO >=56]<-2
table(UNWsvy_Sep5$variables$AGEGRO)
#EDUGRO
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$q14)
table(UNWsvy_Sep5$variables$q14)
#cleaning
UNWsvy_Sep5$variables$EDUGRO<-UNWsvy_Sep5$variables$q14
UNWsvy_Sep5$variables$EDUGRO[UNWsvy_Sep5$variables$EDUGRO==-3]<-NA
UNWsvy_Sep5$variables$EDUGRO[UNWsvy_Sep5$variables$EDUGRO >=1 & UNWsvy_Sep5$variables$EDUGRO <= 4]<-0
UNWsvy_Sep5$variables$EDUGRO[UNWsvy_Sep5$variables$EDUGRO >=5 & UNWsvy_Sep5$variables$EDUGRO <= 6]<-1
UNWsvy_Sep5$variables$EDUGRO[UNWsvy_Sep5$variables$EDUGRO >=7]<-2
table(UNWsvy_Sep5$variables$EDUGRO)
#MARAGE
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$q87)
table(UNWsvy_Sep5$variables$q87)
# Cleaning: collapsed to an ever-married indicator (code -5 "never" -> 0,
# any reported marriage age >= 1 -> 1).
# NOTE(review): the original plan (14-17=0, 18-25=1, 26-35=2, 36+=3,
# never=4) was not implemented, and other negative codes (e.g. DK/refusal)
# are left untouched -- confirm this is intended.
UNWsvy_Sep5$variables$MARAGE<-UNWsvy_Sep5$variables$q87
freq(UNWsvy_Sep5$variables$MARAGE)
UNWsvy_Sep5$variables$MARAGE[UNWsvy_Sep5$variables$MARAGE==-5]<-0
UNWsvy_Sep5$variables$MARAGE[UNWsvy_Sep5$variables$MARAGE >=1]<-1
table(UNWsvy_Sep5$variables$MARAGE)
####Factor
UNWsvy_Sep5$variables$OWN_f <-as.factor(UNWsvy_Sep5$variables$OWN)
UNWsvy_Sep5$variables$AGEGRO_f <-as.factor(UNWsvy_Sep5$variables$AGEGRO)
UNWsvy_Sep5$variables$EDUGRO_f <-as.factor(UNWsvy_Sep5$variables$EDUGRO)
# Own and happiness: survey-weighted regression of happiness on asset
# ownership (factor), controlling for sex, economic condition, an
# age-by-children interaction, stratum, adults in household, and education.
happ_sep5_own <- svyglm(
  HAPPMEA ~ OWN_f + sex + DISPCON + age * childdummy + stratum + nadhh + EDUGRO_f,
  design = UNWsvy_Sep5
)
summary(happ_sep5_own)
# Predicted happiness at each level of asset ownership.
own_pred <- ggpredict(happ_sep5_own, terms = c("OWN_f"))
own_pred$predicted
# Predicted happiness over age, separately by presence of children.
age_child_pred <- ggpredict(happ_sep5_own, terms = c("age", "childdummy"))
plot(age_child_pred)
### Age groups and children
# Variant of the happiness model: categorical age (AGEGRO_f) interacted
# with having children, using the ownership count (OWN) rather than the
# factor version.
happ_a21 <- svyglm(
  HAPPMEA ~ OWN + sex + DISPCON + AGEGRO_f * childdummy + stratum + nadhh + EDUGRO_f,
  design = UNWsvy_Sep5
)
summary(happ_a21)
# Predicted happiness by age group.
plot(ggpredict(happ_a21, terms = c("AGEGRO_f")))
| /Replication Code Happiness.R | no_license | crrcgeorgia/happiness | R | false | false | 13,848 | r | ###The Easterlin Paradox and Happiness U-curve in Georgia
library(haven)
library(stringr)
library(tidyverse)
library(ggeffects)
library(survey)
library(MASS)
library(effects)
library(descr)
nothap <- read_dta("UN_Women_Geo_2018_14.05.18.dta")
###Setting up the survey data
UNWsvy_Sep5 <- svydesign(id=~psu, strata=~stratum,
weights=~indwt,
data=nothap)
####Economic data
##Unemployment
UNWsvy_Sep5$variables$q12_1_r<-UNWsvy_Sep5$variables$q12_1
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==1]<-100
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==2]<-200
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==3]<-300
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==4]<-400
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==5]<-500
UNWsvy_Sep5$variables$q12_1_r[UNWsvy_Sep5$variables$q12_1_r==6]<-600
table(UNWsvy_Sep5$variables$q12_1_r)
UNWsvy_Sep5$variables$primarystatus<-ifelse(UNWsvy_Sep5$variables$q12_1==-7, UNWsvy_Sep5$variables$q13_1,UNWsvy_Sep5$variables$q12_1_r)
table(UNWsvy_Sep5$variables$primarystatus)
UNWsvy_Sep5$variables$primarystatus_r<-UNWsvy_Sep5$variables$primarystatus
UNWsvy_Sep5$variables$primarystatus_r[UNWsvy_Sep5$variables$primarystatus_r<=-1]<-NA
table(UNWsvy_Sep5$variables$primarystatus_r)
#Household working status
table(UNWsvy_Sep5$variables$q1)
table(UNWsvy_Sep5$variables$q8_1)
UNWsvy_Sep5$variables$q8_1_r<-UNWsvy_Sep5$variables$q8_1
UNWsvy_Sep5$variables$q8_1_r[UNWsvy_Sep5$variables$q8_1_r!=1]<-0
table(UNWsvy_Sep5$variables$q8_1_r)
UNWsvy_Sep5$variables$householdworkertwo<-(UNWsvy_Sep5$variables$q8_1_r+UNWsvy_Sep5$variables$q1)
UNWsvy_Sep5$variables$householdworkertwo[UNWsvy_Sep5$variables$householdworkertwo!=2]<-0
UNWsvy_Sep5$variables$householdworkertwo[UNWsvy_Sep5$variables$householdworkertwo==2]<-1
table(UNWsvy_Sep5$variables$householdworkertwo)
UNWsvy_Sep5$variables$primarystatus<-ifelse(UNWsvy_Sep5$variables$q12_1==-7, UNWsvy_Sep5$variables$q13_1,UNWsvy_Sep5$variables$q12_1_r)
table(UNWsvy_Sep5$variables$primarystatus)
UNWsvy_Sep5$variables$primarystatus_r<-UNWsvy_Sep5$variables$primarystatus
UNWsvy_Sep5$variables$primarystatus_r[UNWsvy_Sep5$variables$primarystatus_r<=-1]<-NA
table(UNWsvy_Sep5$variables$primarystatus_r)
UNWsvy_Sep5$variables$primarystatus_r<-ifelse(UNWsvy_Sep5$variables$householdworkertwo==1,1000,UNWsvy_Sep5$variables$primarystatus)
table(UNWsvy_Sep5$variables$primarystatus_r)
UNWsvy_Sep5$variables$primarystatus_r_r <- factor(UNWsvy_Sep5$variables$primarystatus_r,
levels = c(-7,-3,1,2,3,4,5,100,200,300,400,500,600,1000),
labels = c("notapplicable",
"interviewer error",
"Employee with contract",
"Employee without a contract",
"Self-employed formal",
"Self-employed informal",
"Other Employed",
"Student not working",
"Homemaker and not working",
"Retired and not working",
"Disabled and unable to work",
"Unemployed",
"Other Unemployed",
"Contributing Household Worker"))
table(UNWsvy_Sep5$variables$primarystatus_r_r)
UNWsvy_Sep5$variables$primarystatus_r_r[UNWsvy_Sep5$variables$primarystatus_r_r=="notapplicable"]<-NA
UNWsvy_Sep5$variables$primarystatus_r_r[UNWsvy_Sep5$variables$primarystatus_r_r=="interviewer error"]<-NA
#Wants a job
UNWsvy_Sep5$variables$q9_1_r<-UNWsvy_Sep5$variables$q9_1
UNWsvy_Sep5$variables$q9_1_r[UNWsvy_Sep5$variables$q9_1_r<=-1]<-0
table(UNWsvy_Sep5$variables$q9_1_r)
#Sought job
table(UNWsvy_Sep5$variables$q10_1)
UNWsvy_Sep5$variables$q10_1_r<-UNWsvy_Sep5$variables$q10_1
UNWsvy_Sep5$variables$q10_1_r[UNWsvy_Sep5$variables$q10_1_r<=-1]<-0
table(UNWsvy_Sep5$variables$q10_1_r)
#Can start working
table(UNWsvy_Sep5$variables$q11_1)
UNWsvy_Sep5$variables$q11_1_r<-UNWsvy_Sep5$variables$q11_1
UNWsvy_Sep5$variables$q11_1_r[UNWsvy_Sep5$variables$q11_1_r<=-1]<-0
table(UNWsvy_Sep5$variables$q11_1_r)
#Unemployment calculation
UNWsvy_Sep5$variables$seekingwork<-(UNWsvy_Sep5$variables$q11_1_r+UNWsvy_Sep5$variables$q10_1_r+UNWsvy_Sep5$variables$q9_1_r)
table(UNWsvy_Sep5$variables$seekingwork)
UNWsvy_Sep5$variables$seekingwork[UNWsvy_Sep5$variables$seekingwork<=2]<-0
UNWsvy_Sep5$variables$seekingwork[UNWsvy_Sep5$variables$seekingwork==3]<-100
UNWsvy_Sep5$variables$tocalculateunemployment<-(as.numeric(UNWsvy_Sep5$variables$primarystatus_r_r)+UNWsvy_Sep5$variables$seekingwork)
table(UNWsvy_Sep5$variables$tocalculateunemployment)
table(UNWsvy_Sep5$variables$primarystatus_r_r)
table(as.numeric(UNWsvy_Sep5$variables$primarystatus_r_r))
UNWsvy_Sep5$variables$laborforcebreakdown<-UNWsvy_Sep5$variables$tocalculateunemployment
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==3]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==4]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==5]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==6]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==7]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==8]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==9]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==10]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==11]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==12]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==13]<-0
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==14]<-3
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==108]<-2
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==109]<-2
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==110]<-2
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==112]<-2
UNWsvy_Sep5$variables$laborforcebreakdown[UNWsvy_Sep5$variables$laborforcebreakdown==114]<-3
# Survey recodes and models on the UNW Sep-5 design object.
# NOTE(review): UNWsvy_Sep5 is assumed to be a survey/srvyr design created
# earlier in this file (raw variables under $variables, weights in indwt).

# ---- Labor force status ----
# Weighted frequency of the raw labor-force breakdown (call is repeated).
freq(UNWsvy_Sep5$variables$laborforcebreakdown, UNWsvy_Sep5$variables$indwt)
freq(UNWsvy_Sep5$variables$laborforcebreakdown, UNWsvy_Sep5$variables$indwt)
crosstab(UNWsvy_Sep5$variables$laborforcebreakdown, UNWsvy_Sep5$variables$sex, UNWsvy_Sep5$variables$indwt, prop.c=TRUE)
##0 out of labor force
##2 unemployed
##3 employed
# Participation dummy: 0 = out of the labor force (codes <= 1),
# 1 = in the labor force (codes >= 2, i.e. unemployed or employed).
UNWsvy_Sep5$variables$laborforceparticipation<-UNWsvy_Sep5$variables$laborforcebreakdown
UNWsvy_Sep5$variables$laborforceparticipation[UNWsvy_Sep5$variables$laborforceparticipation<=1]<-0
UNWsvy_Sep5$variables$laborforceparticipation[UNWsvy_Sep5$variables$laborforceparticipation>=2]<-1
table(UNWsvy_Sep5$variables$laborforcebreakdown)
table(UNWsvy_Sep5$variables$laborforceparticipation)
crosstab(UNWsvy_Sep5$variables$laborforceparticipation, UNWsvy_Sep5$variables$sex, w=UNWsvy_Sep5$variables$indwt, prop.c = TRUE)
# Same tabulation restricted to working-age respondents (age <= 64).
workingage<-subset(UNWsvy_Sep5, UNWsvy_Sep5$variables$age<=64)
crosstab(workingage$laborforceparticipation, workingage$sex, w=workingage$indwt, prop.c = TRUE)
# Among labor-force participants only: 0 = unemployed, 1 = employed;
# out-of-labor-force codes become NA.
UNWsvy_Sep5$variables$employedorunemployed<-UNWsvy_Sep5$variables$laborforcebreakdown
UNWsvy_Sep5$variables$employedorunemployed[UNWsvy_Sep5$variables$employedorunemployed<=1]<-NA
UNWsvy_Sep5$variables$employedorunemployed[UNWsvy_Sep5$variables$employedorunemployed==2]<-0
UNWsvy_Sep5$variables$employedorunemployed[UNWsvy_Sep5$variables$employedorunemployed==3]<-1
table(UNWsvy_Sep5$variables$employedorunemployed)
freq(UNWsvy_Sep5$variables$employedorunemployed, w=UNWsvy_Sep5$variables$indwt)
crosstab(UNWsvy_Sep5$variables$employedorunemployed, UNWsvy_Sep5$variables$sex, w=UNWsvy_Sep5$variables$indwt, prop.c = TRUE)
##0 out of labor force
##2 unemployed
##3 employed
##DISPCON
#Sum
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$q86)
table(UNWsvy_Sep5$variables$q86)
#Cleaning
# DISPCON: copy of q86 with the -3 code recoded to NA.
UNWsvy_Sep5$variables$DISPCON<-UNWsvy_Sep5$variables$q86
UNWsvy_Sep5$variables$DISPCON[UNWsvy_Sep5$variables$DISPCON==-3]<-NA
table(UNWsvy_Sep5$variables$DISPCON)
###SELFCON
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$q80)
table(UNWsvy_Sep5$variables$q80)
#Cleaning (0-3 no sc, 4-6 some sc, 7-8 sc, 9-10 very sc)
# NOTE(review): the banding described above is NOT applied here; only the
# -1/-2 codes are recoded to NA and the 0-10 scale is kept as-is.
UNWsvy_Sep5$variables$SELFCON<-UNWsvy_Sep5$variables$q80
UNWsvy_Sep5$variables$SELFCON[UNWsvy_Sep5$variables$SELFCON==-1]<-NA
UNWsvy_Sep5$variables$SELFCON[UNWsvy_Sep5$variables$SELFCON==-2]<-NA
table(UNWsvy_Sep5$variables$SELFCON)
#####OWN SERIES
# q90_1..q90_11: recode any negative code to NA on each item.
table(UNWsvy_Sep5$variables$q90_)
UNWsvy_Sep5$variables$q90_1[UNWsvy_Sep5$variables$q90_1<=-1]<-NA
UNWsvy_Sep5$variables$q90_2[UNWsvy_Sep5$variables$q90_2<=-1]<-NA
UNWsvy_Sep5$variables$q90_3[UNWsvy_Sep5$variables$q90_3<=-1]<-NA
UNWsvy_Sep5$variables$q90_4[UNWsvy_Sep5$variables$q90_4<=-1]<-NA
UNWsvy_Sep5$variables$q90_5[UNWsvy_Sep5$variables$q90_5<=-1]<-NA
UNWsvy_Sep5$variables$q90_6[UNWsvy_Sep5$variables$q90_6<=-1]<-NA
UNWsvy_Sep5$variables$q90_7[UNWsvy_Sep5$variables$q90_7<=-1]<-NA
UNWsvy_Sep5$variables$q90_8[UNWsvy_Sep5$variables$q90_8<=-1]<-NA
UNWsvy_Sep5$variables$q90_9[UNWsvy_Sep5$variables$q90_9<=-1]<-NA
UNWsvy_Sep5$variables$q90_10[UNWsvy_Sep5$variables$q90_10<=-1]<-NA
UNWsvy_Sep5$variables$q90_11[UNWsvy_Sep5$variables$q90_11<=-1]<-NA
###Own as new category
# OWN = sum of the 11 items; note a single NA item makes OWN NA
# (no na.rm), which is the behavior relied on below.
UNWsvy_Sep5$variables$OWN <- (UNWsvy_Sep5$variables$q90_1+
                              UNWsvy_Sep5$variables$q90_2+
                              UNWsvy_Sep5$variables$q90_3+
                              UNWsvy_Sep5$variables$q90_4+
                              UNWsvy_Sep5$variables$q90_5+
                              UNWsvy_Sep5$variables$q90_6+
                              UNWsvy_Sep5$variables$q90_7+
                              UNWsvy_Sep5$variables$q90_8+
                              UNWsvy_Sep5$variables$q90_9+
                              UNWsvy_Sep5$variables$q90_10+
                              UNWsvy_Sep5$variables$q90_11)
UNWsvy_Sep5$variables$OWN_f<-as.factor(UNWsvy_Sep5$variables$OWN)
hist(UNWsvy_Sep5$variables$OWN)
##Children or not in household
# childdummy: 1 if n4 - n5 >= 1, else left at 0 (or negative).
# NOTE(review): assumes n4 = household size and n5 = number of adults, so the
# difference counts children — confirm against the questionnaire.
UNWsvy_Sep5$variables$childdummy<-(UNWsvy_Sep5$variables$n4-UNWsvy_Sep5$variables$n5)
table(UNWsvy_Sep5$variables$childdummy)
UNWsvy_Sep5$variables$childdummy[UNWsvy_Sep5$variables$childdummy>=1]<-1
table(UNWsvy_Sep5$variables$childdummy)
#HAPPMEA
# HAPPMEA: happiness measure from q81; negative codes -> NA.
table(UNWsvy_Sep5$variables$q81)
UNWsvy_Sep5$variables$HAPPMEA<-UNWsvy_Sep5$variables$q81
UNWsvy_Sep5$variables$HAPPMEA[UNWsvy_Sep5$variables$HAPPMEA<=-1]<-NA
table(UNWsvy_Sep5$variables$HAPPMEA)
#AGEGRO
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$age)
table(UNWsvy_Sep5$variables$age)
#Cleaning (0: 18-35, 1: 36-55, 2: 56+)
UNWsvy_Sep5$variables$AGEGRO<-UNWsvy_Sep5$variables$age
UNWsvy_Sep5$variables$AGEGRO[UNWsvy_Sep5$variables$AGEGRO >=18 & UNWsvy_Sep5$variables$AGEGRO <= 35]<-0
UNWsvy_Sep5$variables$AGEGRO[UNWsvy_Sep5$variables$AGEGRO >=36 & UNWsvy_Sep5$variables$AGEGRO <= 55]<-1
UNWsvy_Sep5$variables$AGEGRO[UNWsvy_Sep5$variables$AGEGRO >=56]<-2
table(UNWsvy_Sep5$variables$AGEGRO)
#EDUGRO
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$q14)
table(UNWsvy_Sep5$variables$q14)
#cleaning
# EDUGRO: 0 = codes 1-4, 1 = codes 5-6, 2 = codes 7+; -3 -> NA.
UNWsvy_Sep5$variables$EDUGRO<-UNWsvy_Sep5$variables$q14
UNWsvy_Sep5$variables$EDUGRO[UNWsvy_Sep5$variables$EDUGRO==-3]<-NA
UNWsvy_Sep5$variables$EDUGRO[UNWsvy_Sep5$variables$EDUGRO >=1 & UNWsvy_Sep5$variables$EDUGRO <= 4]<-0
UNWsvy_Sep5$variables$EDUGRO[UNWsvy_Sep5$variables$EDUGRO >=5 & UNWsvy_Sep5$variables$EDUGRO <= 6]<-1
UNWsvy_Sep5$variables$EDUGRO[UNWsvy_Sep5$variables$EDUGRO >=7]<-2
table(UNWsvy_Sep5$variables$EDUGRO)
#MARAGE
names(UNWsvy_Sep5)
summary(UNWsvy_Sep5$variables$q87)
table(UNWsvy_Sep5$variables$q87)
#cleaning (14 - 17 = 0, 18,25 = 1, 26,35=2, 36+=3, never=4)
# NOTE(review): the 5-way banding above is NOT what the code does — MARAGE is
# a dummy (-5 -> 0, any positive value -> 1); other negative codes (e.g.
# -1/-2) are left untouched and are NOT set to NA — confirm this is intended.
UNWsvy_Sep5$variables$MARAGE<-UNWsvy_Sep5$variables$q87
freq(UNWsvy_Sep5$variables$MARAGE)
UNWsvy_Sep5$variables$MARAGE[UNWsvy_Sep5$variables$MARAGE==-5]<-0
UNWsvy_Sep5$variables$MARAGE[UNWsvy_Sep5$variables$MARAGE >=1]<-1
table(UNWsvy_Sep5$variables$MARAGE)
####Factor
# Factor versions for use as categorical regressors.
UNWsvy_Sep5$variables$OWN_f <-as.factor(UNWsvy_Sep5$variables$OWN)
UNWsvy_Sep5$variables$AGEGRO_f <-as.factor(UNWsvy_Sep5$variables$AGEGRO)
UNWsvy_Sep5$variables$EDUGRO_f <-as.factor(UNWsvy_Sep5$variables$EDUGRO)
#Own and happiness
# Survey-weighted GLM of happiness on ownership (as factor), controls, and
# an age x children interaction; marginal effects plotted with ggpredict.
happ_sep5_own= svyglm(HAPPMEA ~ OWN_f +
                      sex +
                      DISPCON +
                      age*childdummy +
                      stratum +
                      nadhh +
                      EDUGRO_f, design=UNWsvy_Sep5)
summary(happ_sep5_own)
x<-ggpredict(happ_sep5_own, terms = c("OWN_f"))
x$predicted
y<-ggpredict(happ_sep5_own, terms = c("age", "childdummy"))
plot(y)
###age and children
# Same model with OWN numeric and grouped age interacted with children.
happ_a21= svyglm(HAPPMEA ~ OWN +
                 sex +
                 DISPCON +
                 AGEGRO_f*childdummy +
                 stratum +
                 nadhh +
                 EDUGRO_f, design=UNWsvy_Sep5)
summary(happ_a21)
plot(ggpredict(happ_a21, terms = c("AGEGRO_f")))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/call_counts.R
\name{convertCallCountsToHashTable}
\alias{convertCallCountsToHashTable}
\title{convert call counts to hash table}
\usage{
convertCallCountsToHashTable(call_counts_hash_table, time = NULL)
}
\arguments{
\item{call_counts_hash_table}{A call counts hash table ( like the one you would
get from getCallCountsHashTable() )}
\item{time}{The current time, so that the hash table can record the correct
time since you last reviewed.}
}
\description{
Helper method for parsing the call_counts_hash_table environment and presenting it as a data frame
}
| /man/convertCallCountsToHashTable.Rd | no_license | djacobs7/remembr | R | false | true | 635 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/call_counts.R
\name{convertCallCountsToHashTable}
\alias{convertCallCountsToHashTable}
\title{convert call counts to hash table}
\usage{
convertCallCountsToHashTable(call_counts_hash_table, time = NULL)
}
\arguments{
\item{call_counts_hash_table}{A call counts hash table ( like the one you would
get from getCallCountsHashTable() )}
\item{time}{The current time, so that the hash table can record the correct
time since you last reviewed.}
}
\description{
Helper method for parsing the call_counts_hash_table environment and presenting it as a data frame
}
|
#' ExerciseGym
#'
#' A gym.
#'
#'
#' @param id identifier for the object (URI)
#' @param priceRange (Text type.) The price range of the business, for example ```$$$```.
#' @param paymentAccepted (Text type.) Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.
#' @param openingHours (Text or Text type.) The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 time. For example, 3pm is specified as ```15:00```. * Here is an example: <code><time itemprop="openingHours" datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time></code>.* If a business is open 7 days a week, then it can be specified as <code><time itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all day</time></code>.
#' @param currenciesAccepted (Text type.) The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR".
#' @param branchOf (Organization type.) The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical)[[branch]].
#' @param telephone (Text or Text or Text or Text type.) The telephone number.
#' @param specialOpeningHoursSpecification (OpeningHoursSpecification type.) The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].
#' @param smokingAllowed (Boolean type.) Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room.
#' @param reviews (Review or Review or Review or Review or Review type.) Review of the item.
#' @param review (Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.
#' @param publicAccess (Boolean type.) A flag to signal that the [[Place]] is open to public visitors. If this property is omitted there is no assumed default boolean value
#' @param photos (Photograph or ImageObject type.) Photographs of this place.
#' @param photo (Photograph or ImageObject type.) A photograph of this place.
#' @param openingHoursSpecification (OpeningHoursSpecification type.) The opening hours of a certain place.
#' @param maximumAttendeeCapacity (Integer or Integer type.) The total number of individuals that may attend an event or venue.
#' @param maps (URL type.) A URL to a map of the place.
#' @param map (URL type.) A URL to a map of the place.
#' @param logo (URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject type.) An associated logo.
#' @param isicV4 (Text or Text or Text type.) The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.
#' @param isAccessibleForFree (Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.
#' @param hasMap (URL or Map type.) A URL to a map of the place.
#' @param globalLocationNumber (Text or Text or Text type.) The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.
#' @param geo (GeoShape or GeoCoordinates type.) The geo coordinates of the place.
#' @param faxNumber (Text or Text or Text or Text type.) The fax number.
#' @param events (Event or Event type.) Upcoming or past events associated with this place or organization.
#' @param event (Event or Event or Event or Event or Event or Event or Event type.) Upcoming or past event associated with this place, organization, or action.
#' @param containsPlace (Place type.) The basic containment relation between a place and another that it contains.
#' @param containedInPlace (Place type.) The basic containment relation between a place and one that contains it.
#' @param containedIn (Place type.) The basic containment relation between a place and one that contains it.
#' @param branchCode (Text type.) A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch.
#' @param amenityFeature (LocationFeatureSpecification or LocationFeatureSpecification or LocationFeatureSpecification type.) An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs.
#' @param aggregateRating (AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.
#' @param address (Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress type.) Physical address of the item.
#' @param additionalProperty (PropertyValue or PropertyValue or PropertyValue or PropertyValue type.) A property-value pair representing an additional characteristics of the entitity, e.g. a product feature or another characteristic for which there is no matching property in schema.org.Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. http://schema.org/width, http://schema.org/color, http://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#'
#' @return a list object corresponding to a schema:ExerciseGym
#'
#' @export
ExerciseGym <- function(id = NULL,
                        priceRange = NULL,
                        paymentAccepted = NULL,
                        openingHours = NULL,
                        currenciesAccepted = NULL,
                        branchOf = NULL,
                        telephone = NULL,
                        specialOpeningHoursSpecification = NULL,
                        smokingAllowed = NULL,
                        reviews = NULL,
                        review = NULL,
                        publicAccess = NULL,
                        photos = NULL,
                        photo = NULL,
                        openingHoursSpecification = NULL,
                        maximumAttendeeCapacity = NULL,
                        maps = NULL,
                        map = NULL,
                        logo = NULL,
                        isicV4 = NULL,
                        isAccessibleForFree = NULL,
                        hasMap = NULL,
                        globalLocationNumber = NULL,
                        geo = NULL,
                        faxNumber = NULL,
                        events = NULL,
                        event = NULL,
                        containsPlace = NULL,
                        containedInPlace = NULL,
                        containedIn = NULL,
                        branchCode = NULL,
                        amenityFeature = NULL,
                        aggregateRating = NULL,
                        address = NULL,
                        additionalProperty = NULL,
                        url = NULL,
                        sameAs = NULL,
                        potentialAction = NULL,
                        name = NULL,
                        mainEntityOfPage = NULL,
                        image = NULL,
                        identifier = NULL,
                        disambiguatingDescription = NULL,
                        description = NULL,
                        alternateName = NULL,
                        additionalType = NULL) {
  # Collect the fixed schema.org type tag plus every property, in
  # declaration order; entries left at their NULL default are dropped
  # below, so the returned list carries only the supplied fields.
  props <- list(
    type = "ExerciseGym",
    id = id,
    priceRange = priceRange,
    paymentAccepted = paymentAccepted,
    openingHours = openingHours,
    currenciesAccepted = currenciesAccepted,
    branchOf = branchOf,
    telephone = telephone,
    specialOpeningHoursSpecification = specialOpeningHoursSpecification,
    smokingAllowed = smokingAllowed,
    reviews = reviews,
    review = review,
    publicAccess = publicAccess,
    photos = photos,
    photo = photo,
    openingHoursSpecification = openingHoursSpecification,
    maximumAttendeeCapacity = maximumAttendeeCapacity,
    maps = maps,
    map = map,
    logo = logo,
    isicV4 = isicV4,
    isAccessibleForFree = isAccessibleForFree,
    hasMap = hasMap,
    globalLocationNumber = globalLocationNumber,
    geo = geo,
    faxNumber = faxNumber,
    events = events,
    event = event,
    containsPlace = containsPlace,
    containedInPlace = containedInPlace,
    containedIn = containedIn,
    branchCode = branchCode,
    amenityFeature = amenityFeature,
    aggregateRating = aggregateRating,
    address = address,
    additionalProperty = additionalProperty,
    url = url,
    sameAs = sameAs,
    potentialAction = potentialAction,
    name = name,
    mainEntityOfPage = mainEntityOfPage,
    image = image,
    identifier = identifier,
    disambiguatingDescription = disambiguatingDescription,
    description = description,
    alternateName = alternateName,
    additionalType = additionalType)
  # Keep only non-NULL entries (equivalent to Filter(Negate(is.null), props)).
  props[!vapply(props, is.null, logical(1))]
}
| /R/ExerciseGym.R | no_license | cboettig/schemar | R | false | false | 10,938 | r | #' ExerciseGym
#'
#' A gym.
#'
#'
#' @param id identifier for the object (URI)
#' @param priceRange (Text type.) The price range of the business, for example ```$$$```.
#' @param paymentAccepted (Text type.) Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.
#' @param openingHours (Text or Text type.) The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 time. For example, 3pm is specified as ```15:00```. * Here is an example: <code><time itemprop="openingHours" datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time></code>.* If a business is open 7 days a week, then it can be specified as <code><time itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all day</time></code>.
#' @param currenciesAccepted (Text type.) The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR".
#' @param branchOf (Organization type.) The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical)[[branch]].
#' @param telephone (Text or Text or Text or Text type.) The telephone number.
#' @param specialOpeningHoursSpecification (OpeningHoursSpecification type.) The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].
#' @param smokingAllowed (Boolean type.) Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room.
#' @param reviews (Review or Review or Review or Review or Review type.) Review of the item.
#' @param review (Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.
#' @param publicAccess (Boolean type.) A flag to signal that the [[Place]] is open to public visitors. If this property is omitted there is no assumed default boolean value
#' @param photos (Photograph or ImageObject type.) Photographs of this place.
#' @param photo (Photograph or ImageObject type.) A photograph of this place.
#' @param openingHoursSpecification (OpeningHoursSpecification type.) The opening hours of a certain place.
#' @param maximumAttendeeCapacity (Integer or Integer type.) The total number of individuals that may attend an event or venue.
#' @param maps (URL type.) A URL to a map of the place.
#' @param map (URL type.) A URL to a map of the place.
#' @param logo (URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject type.) An associated logo.
#' @param isicV4 (Text or Text or Text type.) The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.
#' @param isAccessibleForFree (Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.
#' @param hasMap (URL or Map type.) A URL to a map of the place.
#' @param globalLocationNumber (Text or Text or Text type.) The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.
#' @param geo (GeoShape or GeoCoordinates type.) The geo coordinates of the place.
#' @param faxNumber (Text or Text or Text or Text type.) The fax number.
#' @param events (Event or Event type.) Upcoming or past events associated with this place or organization.
#' @param event (Event or Event or Event or Event or Event or Event or Event type.) Upcoming or past event associated with this place, organization, or action.
#' @param containsPlace (Place type.) The basic containment relation between a place and another that it contains.
#' @param containedInPlace (Place type.) The basic containment relation between a place and one that contains it.
#' @param containedIn (Place type.) The basic containment relation between a place and one that contains it.
#' @param branchCode (Text type.) A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch.
#' @param amenityFeature (LocationFeatureSpecification or LocationFeatureSpecification or LocationFeatureSpecification type.) An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs.
#' @param aggregateRating (AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.
#' @param address (Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress type.) Physical address of the item.
#' @param additionalProperty (PropertyValue or PropertyValue or PropertyValue or PropertyValue type.) A property-value pair representing an additional characteristics of the entitity, e.g. a product feature or another characteristic for which there is no matching property in schema.org.Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. http://schema.org/width, http://schema.org/color, http://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#'
#' @return a list object corresponding to a schema:ExerciseGym
#'
#' @export
ExerciseGym <- function(id = NULL,
                        priceRange = NULL,
                        paymentAccepted = NULL,
                        openingHours = NULL,
                        currenciesAccepted = NULL,
                        branchOf = NULL,
                        telephone = NULL,
                        specialOpeningHoursSpecification = NULL,
                        smokingAllowed = NULL,
                        reviews = NULL,
                        review = NULL,
                        publicAccess = NULL,
                        photos = NULL,
                        photo = NULL,
                        openingHoursSpecification = NULL,
                        maximumAttendeeCapacity = NULL,
                        maps = NULL,
                        map = NULL,
                        logo = NULL,
                        isicV4 = NULL,
                        isAccessibleForFree = NULL,
                        hasMap = NULL,
                        globalLocationNumber = NULL,
                        geo = NULL,
                        faxNumber = NULL,
                        events = NULL,
                        event = NULL,
                        containsPlace = NULL,
                        containedInPlace = NULL,
                        containedIn = NULL,
                        branchCode = NULL,
                        amenityFeature = NULL,
                        aggregateRating = NULL,
                        address = NULL,
                        additionalProperty = NULL,
                        url = NULL,
                        sameAs = NULL,
                        potentialAction = NULL,
                        name = NULL,
                        mainEntityOfPage = NULL,
                        image = NULL,
                        identifier = NULL,
                        disambiguatingDescription = NULL,
                        description = NULL,
                        alternateName = NULL,
                        additionalType = NULL) {
  # Collect the fixed schema.org type tag plus every property, in
  # declaration order; entries left at their NULL default are dropped
  # below, so the returned list carries only the supplied fields.
  props <- list(
    type = "ExerciseGym",
    id = id,
    priceRange = priceRange,
    paymentAccepted = paymentAccepted,
    openingHours = openingHours,
    currenciesAccepted = currenciesAccepted,
    branchOf = branchOf,
    telephone = telephone,
    specialOpeningHoursSpecification = specialOpeningHoursSpecification,
    smokingAllowed = smokingAllowed,
    reviews = reviews,
    review = review,
    publicAccess = publicAccess,
    photos = photos,
    photo = photo,
    openingHoursSpecification = openingHoursSpecification,
    maximumAttendeeCapacity = maximumAttendeeCapacity,
    maps = maps,
    map = map,
    logo = logo,
    isicV4 = isicV4,
    isAccessibleForFree = isAccessibleForFree,
    hasMap = hasMap,
    globalLocationNumber = globalLocationNumber,
    geo = geo,
    faxNumber = faxNumber,
    events = events,
    event = event,
    containsPlace = containsPlace,
    containedInPlace = containedInPlace,
    containedIn = containedIn,
    branchCode = branchCode,
    amenityFeature = amenityFeature,
    aggregateRating = aggregateRating,
    address = address,
    additionalProperty = additionalProperty,
    url = url,
    sameAs = sameAs,
    potentialAction = potentialAction,
    name = name,
    mainEntityOfPage = mainEntityOfPage,
    image = image,
    identifier = identifier,
    disambiguatingDescription = disambiguatingDescription,
    description = description,
    alternateName = alternateName,
    additionalType = additionalType)
  # Keep only non-NULL entries (equivalent to Filter(Negate(is.null), props)).
  props[!vapply(props, is.null, logical(1))]
}
|
directions_to_transit_details <- function(json) {
  # Strip the trailing status field so only the route payload is parsed.
  # NOTE(review): assumes Google-Directions-style JSON (routes/legs/steps,
  # travel_mode, duration/distance values) — confirm with the caller.
  payload <- str_replace(json, ',\\W*"status" : "OK"', '')

  # Flatten routes -> legs -> steps into one row per step, pulling out the
  # step duration (seconds), distance (metres) and travel mode.
  steps <- payload %>%
    as.tbl_json() %>%
    enter_object('routes') %>% gather_array() %>% select(-array.index) %>%
    enter_object('legs') %>% gather_array() %>% select(-array.index) %>%
    enter_object('steps') %>% gather_array() %>% select(-array.index) %>%
    spread_values(
      duration = jnumber("duration", "value"),
      distance = jnumber("distance", 'value'),
      mode = jstring("travel_mode"))

  # Aggregate per travel mode, then pivot to a single wide row with
  # duration_<mode>, distance_<mode> and n_segments_<mode> columns.
  steps %>%
    group_by(mode) %>%
    summarise(duration = sum(duration), distance = sum(distance),
              n_segments = n(), .groups = 'drop') %>%
    mutate(mode = str_to_lower(mode)) %>%
    pivot_wider(names_from = 'mode',
                values_from = c('duration', 'distance', 'n_segments'))
}
| /R/directions_to_transit_details.R | no_license | dewoller/greenspace_1km | R | false | false | 840 | r | directions_to_transit_details = function( json ) {
  # Body of directions_to_transit_details(json) (header is on the preceding
  # line): flatten directions-API JSON to one wide row of per-mode totals.
  json %>%
    # drop the trailing "status" : "OK" field before parsing
    str_replace(',\\W*"status" : "OK"','') %>%
    as.tbl_json() %>%
    # descend routes -> legs -> steps, one row per step
    enter_object('routes') %>%
    gather_array() %>%
    select(-array.index ) %>%
    enter_object('legs') %>%
    gather_array() %>%
    select(-array.index ) %>%
    enter_object('steps') %>%
    gather_array() %>%
    select(-array.index ) %>%
    # pull each step's duration/distance values and its travel mode
    spread_values(
      duration = jnumber("duration", "value"),
      distance = jnumber("distance", 'value'),
      mode = jstring("travel_mode")
    ) %>%
    # totals per travel mode, then one wide row
    group_by(mode) %>%
    summarise( duration = sum(duration), distance=sum(distance), n_segments = n(), .groups='drop') %>%
    mutate( mode = str_to_lower(mode)) %>%
    pivot_wider( names_from = 'mode', values_from=c('duration','distance','n_segments') )
}
|
################################################################################
context("PCA_PROJECT")
################################################################################
# Case 1: .bed file WITH missing genotypes.
# Build a reference PCA on a random row subset and common variants.
obj.bed <- bed(system.file("extdata", "example-missing.bed", package = "bigsnpr"))
ind.row <- sample(nrow(obj.bed), 100)
ind.col <- which(bed_MAF(obj.bed, ind.row)$mac > 5)
obj.svd <- bed_randomSVD(obj.bed, ind.row = ind.row, ind.col = ind.col)
ind.test <- setdiff(rows_along(obj.bed), ind.row)
# Omitting ind.col must raise a dimension-mismatch error.
expect_error(bed_projectSelfPCA(obj.svd, obj.bed, ind.row = ind.test),
             "Incompatibility between dimensions.")
# Projecting the reference rows must reproduce their own PC scores.
proj <- bed_projectSelfPCA(obj.svd, obj.bed,
                           ind.row = rows_along(obj.bed),
                           ind.col = ind.col)
expect_equal(proj$simple_proj[ind.row, ], predict(obj.svd), tolerance = 1e-4)
# The one-call wrapper must match the manual autoSVD + self-projection route
# when given the same roll.size / thr.r2 settings.
proj2 <- bed_projectPCA(obj.bed, obj.bed,
                        ind.row.new = ind.test,
                        ind.row.ref = ind.row,
                        strand_flip = FALSE,
                        roll.size = 10,
                        thr.r2 = 0.8,
                        verbose = FALSE)
obj.svd2 <- bed_autoSVD(obj.bed, ind.row = ind.row,
                        roll.size = 10, thr.r2 = 0.8, verbose = FALSE)
proj3 <- bed_projectSelfPCA(obj.svd2, obj.bed, ind.row = ind.test)
expect_equal(proj2, proj3)
################################################################################
# Case 2: .bed file without missing values; also compare simple vs OADP
# projection against known population structure.
obj.bed <- bed(system.file("extdata", "example.bed", package = "bigsnpr"))
ind.row <- sample(nrow(obj.bed), 400)
obj.svd <- bed_randomSVD(obj.bed, ind.row = ind.row)
ind.test <- setdiff(rows_along(obj.bed), ind.row)
# Omitting ind.col must raise a dimension-mismatch error.
expect_error(bed_projectSelfPCA(obj.svd, obj.bed, ind.row = ind.test),
             "Incompatibility between dimensions.")
proj <- bed_projectSelfPCA(obj.svd, obj.bed,
                           ind.row = rows_along(obj.bed),
                           ind.col = cols_along(obj.bed))
expect_equal(proj$simple_proj[ind.row, ], predict(obj.svd), tolerance = 1e-4)
# Known population labels for the example data (3 groups).
pop <- rep(1:3, c(143, 167, 207))
# Per-group medians of PC2/PC3 scores.
colMedians <- function(x) apply(x, 2, median)
ref <- unlist(by(predict(obj.svd)[, 2:3], pop[ind.row], colMedians))
pred1 <- unlist(by(proj$simple_proj[ind.test, 2:3], pop[ind.test], colMedians))
pred2 <- unlist(by(proj$OADP_proj[ind.test, 2:3], pop[ind.test], colMedians))
# Simple projection shrinks scores toward 0; OADP should correct that,
# landing closer to the reference medians.
expect_gt(sum(ref^2), sum(pred1^2))
expect_lt(sum((ref - pred2)^2), sum((ref - pred1)^2))
proj2 <- bed_projectPCA(obj.bed, obj.bed,
                        ind.row.new = ind.test,
                        ind.row.ref = ind.row,
                        strand_flip = FALSE,
                        roll.size = 10,
                        thr.r2 = 0.8,
                        verbose = FALSE)
obj.svd2 <- bed_autoSVD(obj.bed, ind.row = ind.row,
                        roll.size = 10, thr.r2 = 0.8, verbose = FALSE)
proj3 <- bed_projectSelfPCA(obj.svd2, obj.bed, ind.row = ind.test)
expect_equal(proj2, proj3, tolerance = 1e-6)
################################################################################
| /tests/testthat/test-2-pca-project.R | no_license | pythseq/bigsnpr | R | false | false | 3,047 | r | ################################################################################
context("PCA_PROJECT")
################################################################################
obj.bed <- bed(system.file("extdata", "example-missing.bed", package = "bigsnpr"))
ind.row <- sample(nrow(obj.bed), 100)
ind.col <- which(bed_MAF(obj.bed, ind.row)$mac > 5)
obj.svd <- bed_randomSVD(obj.bed, ind.row = ind.row, ind.col = ind.col)
ind.test <- setdiff(rows_along(obj.bed), ind.row)
expect_error(bed_projectSelfPCA(obj.svd, obj.bed, ind.row = ind.test),
"Incompatibility between dimensions.")
proj <- bed_projectSelfPCA(obj.svd, obj.bed,
ind.row = rows_along(obj.bed),
ind.col = ind.col)
expect_equal(proj$simple_proj[ind.row, ], predict(obj.svd), tolerance = 1e-4)
proj2 <- bed_projectPCA(obj.bed, obj.bed,
ind.row.new = ind.test,
ind.row.ref = ind.row,
strand_flip = FALSE,
roll.size = 10,
thr.r2 = 0.8,
verbose = FALSE)
obj.svd2 <- bed_autoSVD(obj.bed, ind.row = ind.row,
roll.size = 10, thr.r2 = 0.8, verbose = FALSE)
proj3 <- bed_projectSelfPCA(obj.svd2, obj.bed, ind.row = ind.test)
expect_equal(proj2, proj3)
################################################################################
obj.bed <- bed(system.file("extdata", "example.bed", package = "bigsnpr"))
ind.row <- sample(nrow(obj.bed), 400)
obj.svd <- bed_randomSVD(obj.bed, ind.row = ind.row)
ind.test <- setdiff(rows_along(obj.bed), ind.row)
expect_error(bed_projectSelfPCA(obj.svd, obj.bed, ind.row = ind.test),
"Incompatibility between dimensions.")
proj <- bed_projectSelfPCA(obj.svd, obj.bed,
ind.row = rows_along(obj.bed),
ind.col = cols_along(obj.bed))
expect_equal(proj$simple_proj[ind.row, ], predict(obj.svd), tolerance = 1e-4)
pop <- rep(1:3, c(143, 167, 207))
colMedians <- function(x) apply(x, 2, median)
ref <- unlist(by(predict(obj.svd)[, 2:3], pop[ind.row], colMedians))
pred1 <- unlist(by(proj$simple_proj[ind.test, 2:3], pop[ind.test], colMedians))
pred2 <- unlist(by(proj$OADP_proj[ind.test, 2:3], pop[ind.test], colMedians))
expect_gt(sum(ref^2), sum(pred1^2))
expect_lt(sum((ref - pred2)^2), sum((ref - pred1)^2))
proj2 <- bed_projectPCA(obj.bed, obj.bed,
ind.row.new = ind.test,
ind.row.ref = ind.row,
strand_flip = FALSE,
roll.size = 10,
thr.r2 = 0.8,
verbose = FALSE)
obj.svd2 <- bed_autoSVD(obj.bed, ind.row = ind.row,
roll.size = 10, thr.r2 = 0.8, verbose = FALSE)
proj3 <- bed_projectSelfPCA(obj.svd2, obj.bed, ind.row = ind.test)
expect_equal(proj2, proj3, tolerance = 1e-6)
################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_classes.r
\name{aquap_cube}
\alias{aquap_cube}
\title{Class 'aquap_cube'}
\description{
Holds all the statistical models / calculations that were
performed on the split-variations of the dataset by the function
\code{\link{gdmm}} in a list.
}
\details{
Each element of the list in aquap_cube is an object of class
\code{\link{aquap_set}}.
}
\section{Slots}{
\describe{
\item{\code{.Data}}{A list with one object of class \code{\link{aquap_set}} in each
element.}
\item{\code{metadata}}{An object of class 'aquap_md' (which is a list)}
\item{\code{anproc}}{An object of class 'aquap_ap' (which is a list)}
\item{\code{cp}}{A data frame with the 'comparison pattern', i.e. a description of the
split-variations of the dataset in a well-readable form. This slot gets printed
to the screen when you just type the name of a cube-object. (method 'show')}
\item{\code{cpt}}{An object of class \code{\link{aquap_cpt}}, which is basically just
another version of the data in 'cp' for internal use.}
}}
\seealso{
\code{\link{gdmm}}
Other Class documentations: \code{\link{aquap_cpt}},
\code{\link{aquap_set}}
}
| /man/aquap_cube.Rd | no_license | harpreetaqua/aquap2 | R | false | true | 1,196 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_classes.r
\name{aquap_cube}
\alias{aquap_cube}
\title{Class 'aquap_cube'}
\description{
Holds all the statistical models / calculations that were
performed on the split-variations of the dataset by the function
\code{\link{gdmm}} in a list.
}
\details{
Each element of the list in aquap_cube is an object of class
\code{\link{aquap_set}}.
}
\section{Slots}{
\describe{
\item{\code{.Data}}{A list with one object of class \code{\link{aquap_set}} in each
element.}
\item{\code{metadata}}{An object of class 'aquap_md' (which is a list)}
\item{\code{anproc}}{An object of class 'aquap_ap' (which is a list)}
\item{\code{cp}}{A data frame with the 'comparison pattern', i.e. a description of the
split-variations of the dataset in a well-readable form. This slot gets printed
to the screen when you just type the name of a cube-object. (method 'show')}
\item{\code{cpt}}{An object of class \code{\link{aquap_cpt}}, which is basically just
another version of the data in 'cp' for internal use.}
}}
\seealso{
\code{\link{gdmm}}
Other Class documentations: \code{\link{aquap_cpt}},
\code{\link{aquap_set}}
}
|
# Perseus-like proteomics analysis:
#   1) build an annotation list,
#   2) log2-transform -> impute (MNAR) -> subtract column medians, as in Perseus.
################################################################################
#if (!requireNamespace("BiocManager", quietly = TRUE))
#  install.packages("BiocManager")
#BiocManager::install(c("org.Hs.eg.db", "org.Mm.eg.db", "mouse4302.db","GO.db",
#                       "PANTHER.db", "biomaRt"))
################################################################################
setwd("/home/rstudio/project")
getwd()
# NOTE(review): rm(list = ls()) and setwd() inside a script are discouraged;
# they clobber the user's session and assume a fixed directory layout.
rm(list = ls(all = TRUE))
# NOTE(review): detach_all() is called here but is only defined much further
# down in this file; running the script top-to-bottom will fail on this line.
detach_all()
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("DEP")
################################################################################
# Ask the user for a folder via an AppleScript dialog (macOS/RStudio only).
# (https://qiita.com/h398qy988q5/items/7e0052b29ec876407f5d)
# Prompt the user for a folder via an AppleScript dialog (macOS + RStudio only)
# and return its POSIX path, or NA_character_ if nothing was chosen.
dir.choose <- function() {
  # The dialog writes the chosen folder path into a temp file.
  system("osascript -e 'tell app \"RStudio\" to POSIX path of (choose folder with prompt \"Choose Folder:\")' > /tmp/R_folder",
         intern = FALSE, ignore.stderr = TRUE)
  # Read the path back and clean up the temp file.
  p <- system("cat /tmp/R_folder && rm -f /tmp/R_folder", intern = TRUE)
  # BUG FIX: the original used return(ifelse(length(p), p, NA)). ifelse() is
  # vectorized and length(p) is not a logical scalar; use a scalar if/else
  # and a typed NA so the return type is always character.
  if (length(p) > 0) p else NA_character_
}
# NOTE(review): "cat" is not a CRAN package (cat() is a base R function);
# this install call is unnecessary and will fail.
install.packages("cat")
dirname = dir.choose()
#filename = file.choose()
################################################################################
# Build the annotation table from the SWATH peak names.
#setwd("~/Dropbox/0_Work/R/Perseus_Like_Analysis/Heart")
# setwd("C:/Users/user/Dropbox/My PC (DESKTOP-HJ2V1AA)/Desktop/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Heart")
setwd("/Users/ay/Dropbox/GitHub/local/Docker/SWATHR/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Heart")
dat_heart <- read_excel("SWATH.xlsx", 2)
setwd("C:/Users/user/Dropbox/My PC (DESKTOP-HJ2V1AA)/Desktop/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Other2")
#setwd("~/Dropbox/0_Work/R/Perseus_Like_Analysis/Other2")
getwd()
dir()
t(colnames(dat_heart))
# Columns holding the UniProt-style peak name and the description line.
num <- grep("(Peak Name|Group)",colnames(dat_heart))
### NOTE: the original comment here was mojibake ("needs updating").
x <- dat_heart[,num]
# NOTE(review): dat_h, dat_n, dat_p and dat_s are never defined in this
# script; this rbind() will error, and its result is discarded anyway.
rbind(dat_heart[,num],dat_h[,num],dat_n[,num],dat_p[,num],dat_s[,num])
# Split "sp|<accession>|<GENE>_<SPECIES>" into its parts.
split_pn <- data.frame(str_split(x$`Peak Name`, pattern = "\\|", simplify = TRUE))
colnames(split_pn) <- c("sp", "Protein.IDs", "GeneName") # rename columns
Protein.IDs <- data.frame(str_sub(split_pn$`Protein.IDs`, start = 1, end = 6)) # chars 1-6: UniProt accession
Gene.names <- data.frame(str_sub(split_pn$`GeneName`, start = 1, end = -7)) # drop trailing "_SPECIES"
Species <- data.frame(str_sub(split_pn$`GeneName`, start = -5, end = -1)) # last 5 chars: species tag
split_pn2 <- cbind(Protein.IDs, Gene.names, Species)
colnames(split_pn2) <- c("Protein.IDs", "GeneName", "Species") # rename columns
# Split the description line on the OS=/GN=/PE=/SV= tags.
split_gr <- data.frame(str_split(x$`Group`, pattern = ".OS=|.GN=|.PE=|.SV=", simplify = TRUE))
colnames(split_gr) <- c("Description", "OS", "GN", "PE", "SV") # rename columns
xx <- cbind(x, split_pn2, split_gr)
# Remove duplicated accessions.
xxx <- xx %>% distinct(Protein.IDs,.keep_all=TRUE)
# Any duplicates left?
xxx$Protein.IDs %>% duplicated() %>% any()
# Table of remaining duplicates (should be empty).
xxx %>% group_by(Protein.IDs) %>% summarize(frequency = n()) %>% arrange(desc(frequency)) %>% filter(frequency > 1)
# Write the annotation table.
#write_xlsx(xxx, "anno.xlsx", format_headers = FALSE)
write_xlsx(xxx, "anno3.xlsx", format_headers = FALSE)
################################################################################
# Add EntrezID etc. to the SWATH annotation (species-level OrgDb lookup).
anno <- xxx
# Look up by UniProt accession and, separately, by gene symbol.
id <- anno$`Protein.IDs`
GN <- anno$GN
#GeneName <- anno$GeneName
res_id <- select(org.Mm.eg.db, keys = id, keytype = "UNIPROT",
columns = c("ENSEMBL", "ENTREZID", "GENENAME", "MGI", "SYMBOL", "UNIPROT"))
res_GN <- select(org.Mm.eg.db, keys = GN, keytype = "SYMBOL",
columns = c("ENSEMBL", "ENTREZID", "GENENAME", "MGI", "SYMBOL", "UNIPROT"))
res_GN <- res_GN[,c(6,2,3,4,5,1)]
# Stack both lookups.
res_id_GN <- rbind(res_id, res_GN)
# Remove duplicates per key. NOTE(review): T is used for TRUE below; prefer TRUE.
ex_id <- res_id_GN %>% distinct(UNIPROT, .keep_all = T)
ex_GN <- res_id_GN %>% distinct(SYMBOL, .keep_all = T)
ex_res_id_GN <- rbind(ex_id, ex_GN) %>% filter(!is.na(ENTREZID)) %>% filter(!is.na(UNIPROT)) %>% distinct(UNIPROT, .keep_all = T)
ex_res_id_GN_Other <- rbind(ex_id, ex_GN) %>% filter(!is.na(ENTREZID)) %>% filter(is.na(UNIPROT)) %>% distinct(SYMBOL, .keep_all = T)
# Join the lookup results back onto the annotation, by accession and by symbol.
anno_id <- left_join(anno, ex_res_id_GN, by = c("Protein.IDs" = "UNIPROT"))
anno_GN <- left_join(anno, ex_res_id_GN, by = c("GN" = "SYMBOL"))
anno_id_GN <- rbind(anno_id[1:14], anno_GN[-11]) %>% filter(!is.na(ENTREZID)) %>% distinct(Protein.IDs, .keep_all = T)
anno_id_Other <- left_join(anno, ex_res_id_GN_Other, by = c("Protein.IDs" = "UNIPROT"))
anno_GN_Other <- left_join(anno, ex_res_id_GN_Other, by = c("GN" = "SYMBOL"))
anno_id_GN_Other <- rbind(anno_id_Other[1:14], anno_GN_Other[-11]) %>% filter(!is.na(ENTREZID)) %>% distinct(Protein.IDs, .keep_all = T)
anno2 <- left_join(anno, rbind(anno_id_GN[,c(3,11:14)], anno_id_GN_Other[,c(3,11:14)]), by = "Protein.IDs")
# Rows that got an Entrez ID ...
anno2_notNA <- anno2 %>% filter(!is.na(ENTREZID))
# ... and rows that did not, split by species.
anno2_NA <- anno2 %>% filter(is.na(ENTREZID))
anno2_NA_Mm <- anno2_NA %>% filter(Species == "MOUSE")
anno2_NA_Other <- anno2_NA %>% filter(Species != "MOUSE")
# Reload only what is needed for the manual lookup below.
detach_all()
library(org.Mm.eg.db)
# Entrez IDs found manually (internet search) for the unresolved mouse rows.
ent <- c("18563", "234695", "14467", "14070")
res_ent <- select(org.Mm.eg.db, keys = ent, keytype = "ENTREZID",
columns = c("ENSEMBL", "ENTREZID", "GENENAME", "MGI", "SYMBOL", "UNIPROT"))
# Remove duplicates.
library(tidyverse)
res_ent <- res_ent %>% filter(!is.na(ENTREZID)) %>% distinct(ENTREZID, .keep_all = T)
res_ent[1,]
res_ent <- res_ent[,c(2,1,3,4,5)]
# Attach the manual lookups to the unresolved mouse rows.
# NOTE(review): this cbind assumes res_ent rows line up 1:1 with anno2_NA_Mm
# rows -- confirm the order before trusting the result.
anno2_NA_Mm <- cbind(anno2_NA_Mm[,1:10], res_ent[,1:4])
t(colnames(anno2))
t(colnames(anno2_notNA))
t(colnames(anno2_NA_Mm))
t(colnames(anno2_NA_Other))
# Recombine all rows.
anno3 <- rbind(anno2_notNA, anno2_NA_Mm, anno2_NA_Other)
anno3_NA <- anno3%>% filter(is.na(Protein.IDs)) # check for NA accessions
# Restore the original row order via a join on the accession.
t(colnames(anno3))
anno_final <- left_join(anno,anno3[,c(3,11:14)],by = "Protein.IDs")
# Write both the enriched and the original annotation.
library(openxlsx) # I/O (write.xlsx)
smp <- list("anno_new"=anno_final,"anno"=anno)
write.xlsx(smp, "anno.xlsx")
################################################################################
################################################################################
# Perseus-like analysis (main part): statistics on the preprocessed data.
################################################################################
################################################################################
# NOTE(review): clearing the workspace mid-script discards everything computed
# above (including `anno_final`); the annotation is re-read from disk below.
rm(list = ls(all.names = TRUE))
# Detach every attached non-base package from the search path, keeping the
# base packages (stats, graphics, ...) so the session stays usable.
# Called only for its side effect; returns the detach results invisibly.
detach_all <- function() {
  basic.pkg <- c("package:stats", "package:graphics", "package:grDevices",
                 "package:utils", "package:datasets", "package:methods", "package:base")
  # Attached packages appear on the search path as "package:<name>". The
  # original `search()[ifelse(unlist(gregexpr("package:", search())) == 1, TRUE, FALSE)]`
  # is exactly an anchored grep -- simpler and clearer.
  pkg.list <- grep("^package:", search(), value = TRUE)
  pkg.list <- setdiff(pkg.list, basic.pkg)
  invisible(lapply(pkg.list, detach, character.only = TRUE))
}
detach_all()
library(DEP)
library(tidyverse) # ggplot2, dplyr, tidyr, stringr, ...
library(dplyr)
library(readxl) # input (read_excel)
library(xlsx) # input
library(openxlsx) # input/output (write.xlsx)
library(writexl) # output
library(multcomp)
################################################################################
setwd("C:/Users/user/Dropbox/My PC (DESKTOP-HJ2V1AA)/Desktop/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Other2")
#setwd("/Users/user/Dropbox/0_Work/R/Perseus_Like_Analysis/Other")
anno <- read_excel("anno.xlsx", 1) # read sheet 1 (annotation table built above)
################################################################################
# Statistical analysis function (2 arguments):
# log2-transform, imputation (MNAR), subtraction (median), 1-way ANOVA,
# 2-way ANOVA, Tukey HSD.
# fun2: Perseus-like statistical pipeline for one tissue/dataset.
#
# Arguments:
#   x - SWATH intensity table (one row per peak; has a `Peak Name` column and
#       sample intensity columns whose names contain SAL/PCP).
#   y - experimental-design sheet for DEP::make_se (label, condition, replicate).
#
# Side effects: writes "data.xlsx", "stat.xlsx" and "DEPtwANOVA.xlsx" into the
# current working directory; also reads "SWATH.xlsx" (sheet 4) and uses the
# global `anno` annotation table.
#
# Pipeline: log2-transform -> impute (MNAR) -> subtract column medians ->
# 1-way / 2-way ANOVA (+ BH-FDR) -> Tukey HSD -> annotated Excel output.
fun2 <- function(x,y){
data <- x
ExpDesign <- y
# Split "sp|<accession>|<GENE>_<SPECIES>" from the peak names.
split <- str_split(data$`Peak Name`, pattern = "\\|", simplify = TRUE)
colnames(split) <- c("sp", "Protein.IDs", "GeneName") # rename columns
class(split)
x <- data.frame(split)
# Extract accession / gene name / species tag from the split pieces.
Protein.IDs <- str_sub(x$`Protein.IDs`, start = 1, end = 6) # chars 1-6: UniProt accession
Gene.names <- str_sub(x$`GeneName`, start = 1, end = -7) # drop trailing "_SPECIES"
Species <- str_sub(x$`GeneName`, start = -5, end = -1) # last 5 chars: species tag
# Bind the derived columns onto the data.
data <- cbind(data, Protein.IDs, Gene.names, Species)
# Check for duplicates (results are printed, not stored).
data$Protein.IDs %>% duplicated() %>% any()
data$Gene.names %>% duplicated() %>% any()
data$Species %>% duplicated() %>% any()
# Tables of duplicated keys.
data %>% group_by(Protein.IDs) %>% summarize(frequency = n()) %>% arrange(desc(frequency)) %>% filter(frequency > 1)
data %>% group_by(Gene.names) %>% summarize(frequency = n()) %>% arrange(desc(frequency)) %>% filter(frequency > 1)
data %>% group_by(Species) %>% summarize(frequency = n()) %>% arrange(desc(frequency)) %>% filter(frequency > 1)
# Make names unique (required by DEP).
data_unique <- make_unique(data, "Gene.names", "Protein.IDs", delim = ";")
data_unique$Protein.IDs %>% duplicated() %>% any() # Are there any duplicated names?
# Build the SummarizedExperiment.
Sample_columns <- grep("(SAL|PCP)", colnames(data_unique)) # get Sample column numbers
experimental_design <- ExpDesign # ExperimentalDesignSheet(label,condition,replicate)
###############################################################################
# Log2-transform.
data_se <- make_se(data_unique, Sample_columns, experimental_design) # log2-transformation
data1 <- data.frame(data_se@assays@data) # log2
# Impute with a left-shifted Gaussian distribution (for MNAR), as in Perseus.
data_imp_man <- impute(data_se, fun = "man", shift = 1.8, scale = 0.3) # Perseus-style imputation
data2 <- data.frame(data_imp_man@assays@data) # log2 + imputed, before subtraction
# Subtract the per-sample median (Perseus "subtract median" normalisation).
standardize <- function(z) {
colmed <- apply(z, 2, median) # median of each sample's protein expression level
colmad <- apply(z, 2, mad) # median absolute deviation (computed but unused)
rv <- sweep(z, 2, colmed,"-") # subtracting median expression
#rv <- sweep(rv, 2, colmad, "/") # dividing by median absolute deviation
return(rv)
}
data3 <- data2 # copy of the log2+imputed data before subtraction
Sample_columns <- grep("(SC|PC)", colnames(data3)) # get Sample column numbers
data3[Sample_columns] <- standardize(data3[Sample_columns]) # subtract(median): log2+imp+sub
#############################################################
dat1 <- cbind(rownames(data1),data1) # log2
dat2 <- cbind(rownames(data2),data2) # log2+imp
dat3 <- cbind(rownames(data3),data3) # log2+imp+sub
# Integrate raw and processed tables by gene name.
dat <- cbind(data$Gene.names,data) # add a row-name column
dat4 <- left_join(dat, dat1, by = c("Gene.names" = "rownames(data1)")) # raw+log2
dat4 <- left_join(dat4, dat2, by = c("Gene.names" = "rownames(data2)")) # raw+log2+log2imp
dat4 <- left_join(dat4, dat3, by = c("Gene.names" = "rownames(data3)")) # raw+log2+log2imp+log2impsub
# Write all intermediate tables into one workbook.
smp <- list("raw"=dat,"log2"=dat1,"log2imp"=dat2,"log2impsub"=dat3,"integ"=dat4,"anno"=anno) # raw, log2, imputed, subtracted, integrated, annotation
write.xlsx(smp, "data.xlsx")
#############################################################
# Descriptive statistics per condition.
data_rm <- data3
data_rm[,1:2] <- NULL # drop the two non-sample columns
# Transpose to samples-in-rows for grouping.
tdata_rm <- t(data_rm)
tdata_rm <- cbind(as.data.frame(rownames(tdata_rm)),tdata_rm)
colnames(tdata_rm)[1] <- "ID"
# Grouping factors come from sheet 4 of SWATH.xlsx.
group <- read_excel("SWATH.xlsx", 4)
PC <- factor(group$PC, levels = c("SC0", "SC10", "SC30", "PC0", "PC10", "PC30"))
P <- factor(group$P, levels = c("S", "P"))
C <- factor(group$C, levels = c("C0", "C10", "C30"))
g <- cbind(PC,P,C)
# Attach the condition label to each sample row.
ganno <- group[,grep("condition|ID", colnames(group))]
tdata_rm2 <- left_join(ganno, tdata_rm, by = "ID")
tdata_rm3 <- tdata_rm2[,-grep("ID", colnames(tdata_rm2))]
# Summary statistics per condition x protein.
# NOTE(review): gather()/summarise_each()/funs() are superseded; consider
# pivot_longer() + summarise(across(...)) in current tidyverse.
statv <- tdata_rm3 %>% gather(key = GeneName, value = expression, -condition) %>%
group_by(condition, GeneName) %>%
summarise_each(funs(N = length, mean = mean, sd = sd, se = sd/sqrt(n()),
min = min, Q1 = quantile(.,0.25, na.rm=TRUE),
Q2 = quantile(.,0.5, na.rm=TRUE), #med = median,
Q3 = quantile(., 0.75, na.rm=TRUE),
max = max, IQR = IQR))
# One table per condition, column names prefixed, then joined wide.
statSC0 <- statv %>% filter(condition == "SC0")
statSC10 <- statv %>% filter(condition == "SC10")
statSC30 <- statv %>% filter(condition == "SC30")
statPC0 <- statv %>% filter(condition == "PC0")
statPC10 <- statv %>% filter(condition == "PC10")
statPC30 <- statv %>% filter(condition == "PC30")
# Prefix every column with its condition.
colnames(statSC0) <- str_c("SC0", colnames(statSC0), sep="_")
colnames(statSC10) <- str_c("SC10", colnames(statSC10), sep="_")
colnames(statSC30) <- str_c("SC30", colnames(statSC30), sep="_")
colnames(statPC0) <- str_c("PC0", colnames(statPC0), sep="_")
colnames(statPC10) <- str_c("PC10", colnames(statPC10), sep="_")
colnames(statPC30) <- str_c("PC30", colnames(statPC30), sep="_")
colnames(statSC0)[c(1,2)] <- c("condition","GeneName")
colnames(statSC10)[c(1,2)] <- c("condition","GeneName")
colnames(statSC30)[c(1,2)] <- c("condition","GeneName")
colnames(statPC0)[c(1,2)] <- c("condition","GeneName")
colnames(statPC10)[c(1,2)] <- c("condition","GeneName")
colnames(statPC30)[c(1,2)] <- c("condition","GeneName")
# Drop the condition column and join everything by GeneName.
statSC0 <- statSC0[,-1]
statSC10 <- statSC10[,-1]
statSC30 <- statSC30[,-1]
statPC0 <- statPC0[,-1]
statPC10 <- statPC10[,-1]
statPC30 <- statPC30[,-1]
statv2 <- left_join(statSC0, statSC10, by = "GeneName")
statv2 <- left_join(statv2, statSC30, by = "GeneName")
statv2 <- left_join(statv2, statPC0, by = "GeneName")
statv2 <- left_join(statv2, statPC10, by = "GeneName")
statv2 <- left_join(statv2, statPC30, by = "GeneName")
#############################################################
# One-way ANOVA across the six PC groups, per protein (row).
aof <- function(x) {
m <- data.frame(PC, x);
anova(aov(x ~ PC, m))
}
# apply analysis to the data and get the pvalues.
onewayANOVA <- apply(data_rm, 1, aof)
onewayANOVAp <- data.frame(lapply(onewayANOVA, function(x) { x["Pr(>F)"][1,] }))
onewayANOVAp2 <- data.frame(t(onewayANOVAp))
colnames(onewayANOVAp2) <- "p_PC" # rename
#############################################################
# Two-way ANOVA (P, C and their interaction), per protein.
aof2 <- function(x) {
n <- data.frame(P,C, x);
anova(aov(x ~ P + C + P*C, n))
}
# apply analysis to the data and get the pvalues
twowayANOVA <- apply(data_rm, 1, aof2)
twowayANOVAp <- data.frame(lapply(twowayANOVA, function(x) { x["Pr(>F)"][1:3,] }))
twowayANOVAp2 <- data.frame(t(twowayANOVAp))
colnames(twowayANOVAp2) <- c("p_P","p_C","p_PxC") # rename
sdata <- cbind(data_rm, onewayANOVAp2, twowayANOVAp2)
#############################################################
# BH-FDR adjustment of the ANOVA p-values.
# p-values
p_PC <- sdata$p_PC
p_P <- sdata$p_P
p_C <- sdata$p_C
p_PxC <- sdata$p_PxC
checkP <- data.frame(cbind(p_PC, p_P, p_C, p_PxC))
rownames(checkP) <- rownames(data3)
checkPr <- cbind(rownames(checkP),checkP)
names(checkPr)[1] <- "GeneName"
# q-values (Benjamini-Hochberg)
q_PC <- data.frame(p.adjust(p_PC, method = "BH"))
q_P <- data.frame(p.adjust(p_P, method = "BH"))
q_C <- data.frame(p.adjust(p_C, method = "BH"))
q_PxC <- data.frame(p.adjust(p_PxC, method = "BH"))
checkQ <- data.frame(cbind(q_PC, q_P, q_C, q_PxC))
colnames(checkQ) <- c("q_PC", "q_P", "q_C","q_PxC") # rename
rownames(checkQ) <- rownames(data3)
checkQr <- cbind(rownames(checkQ),checkQ)
names(checkQr)[1] <- "GeneName"
sdata <- cbind(sdata, checkQ)
#############################################################
# Tukey HSD post-hoc test.
# Output columns: diff = difference of group means (e.g. B-A); lwr/upr = lower
# and upper bounds of the confidence interval. If the interval does not
# contain 0 (equivalently p adj < 0.05), the two groups differ significantly.
#############################################################
THSD <- function(x) {
nn <- data.frame(P,C, x);
TukeyHSD(aov(x ~ P + C + P*C, nn))
}
THSDresults <- apply(data_rm, 1, THSD)
THSD_PC <- data.frame(lapply(THSDresults, function(x) {x["P:C"]}))
#THSDp_PC <- select(THSD_PC, ends_with("p.adj")) # extract p-values
THSDp_PC <- THSD_PC[,grep("p.adj$",colnames(THSD_PC))] # extract p-values
#THSDd_PC <- select(THSD_PC, ends_with(".diff")) # extract diff values
# NOTE(review): the "." in ".diff$" is an unescaped regex wildcard; "\\.diff$"
# would match a literal dot only.
THSDd_PC <- THSD_PC[,grep(".diff$",colnames(THSD_PC))] # extract diff values
# Transpose to proteins-in-rows.
THSDp_PC2 <- data.frame(t(THSDp_PC))
THSDd_PC2 <- data.frame(t(THSDd_PC))
# Prefix the column names.
colnames(THSDp_PC2) <- str_c("THSDp", colnames(THSDp_PC2), sep="_")
colnames(THSDd_PC2) <- str_c("diff", colnames(THSDd_PC2), sep="_")
# Combine p-values and diffs.
THSDpd <- cbind(rownames(data3), THSDp_PC2, THSDd_PC2)
names(THSDpd)[1] <- "GeneName"
#############################################################
# Annotate the statistics tables with the global `anno` table.
sdata2 <- cbind(rownames(sdata),sdata)
names(sdata2)[1] <- "GeneName"
sdata2 <- left_join(sdata2, statv2, by = "GeneName")
sdata2 <- left_join(sdata2, THSDpd, by = "GeneName")
sdata3 <- left_join(sdata2, anno, by = "GeneName")
checkPr2 <- left_join(checkPr, anno, by = "GeneName")
checkQr2 <- left_join(checkQr, anno, by = "GeneName")
THSDpd2 <- left_join(THSDpd, anno, by = "GeneName")
#############################################################
# Write the statistics workbook.
sheets <- list("integ" = sdata3, "anovap" = checkPr2,
"anovaq" = checkQr2, "THSDpd" = THSDpd2,
"statvalue" = statv2) #assume sheet1-4 are data frames
write_xlsx(sheets, "stat.xlsx", format_headers = FALSE)
#############################################################
# DEP lists: mouse proteins significant (q < 0.05) for each 2-way ANOVA term.
twANOVA_Pq005 <- sdata3 %>% filter(Species == "MOUSE") %>% filter(q_P < 0.05)
twANOVA_Cq005 <- sdata3 %>% filter(Species == "MOUSE") %>% filter(q_C < 0.05)
twANOVA_PxCq005 <- sdata3 %>% filter(Species == "MOUSE") %>% filter(q_PxC < 0.05)
sheets2 <- list("Pq005"=twANOVA_Pq005[,grep("(GeneName|p_P$|p_C$|p_PxC$|q_P$|q_C$|q_PxC$|Protein.IDs|Description|GN)", colnames(twANOVA_Pq005))],
"Cq005"=twANOVA_Cq005[,grep("(GeneName|p_P$|p_C$|p_PxC$|q_P$|q_C$|q_PxC$|Protein.IDs|Description|GN)", colnames(twANOVA_Cq005))],
"PxCq005"=twANOVA_PxCq005[,grep("(GeneName|p_P$|p_C$|p_PxC$|q_P$|q_C$|q_PxC$|Protein.IDs|Description|GN)", colnames(twANOVA_PxCq005))])
write_xlsx(sheets2, "DEPtwANOVA.xlsx", format_headers = FALSE)
}
################################################################################
# Heart dataset: run the full pipeline.
setwd("C:/Users/user/Dropbox/My PC (DESKTOP-HJ2V1AA)/Desktop/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Heart")
#setwd("/Users/user/Dropbox/0_Work/R/Perseus_Like_Analysis/Heart")
data <- read_excel("SWATH.xlsx", 2) # SWATH intensity data (sheet 2)
ExpDesign <- read_excel("SWATH.xlsx", 3) # DEP-package experimental-design sheet (sheet 3)
fun2(data, ExpDesign)
| /script/archive/Perseus_Like_Analysis(Heart)20211003.R | permissive | achiral/rbioc | R | false | false | 19,233 | r | #Perseus_Like_Analysis
# Build an annotation list, then run the Perseus-like pipeline:
# log2-transform -> impute (MNAR) -> subtract column medians, as in Perseus.
################################################################################
#if (!requireNamespace("BiocManager", quietly = TRUE))
#  install.packages("BiocManager")
#BiocManager::install(c("org.Hs.eg.db", "org.Mm.eg.db", "mouse4302.db","GO.db",
#                       "PANTHER.db", "biomaRt"))
################################################################################
setwd("/home/rstudio/project")
getwd()
# NOTE(review): rm(list = ls()) and setwd() inside a script are discouraged;
# they clobber the user's session and assume a fixed directory layout.
rm(list = ls(all = TRUE))
# NOTE(review): detach_all() is called here but is only defined much further
# down in this file; running the script top-to-bottom will fail on this line.
detach_all()
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("DEP")
################################################################################
# Ask the user for a folder via an AppleScript dialog (macOS/RStudio only).
# (https://qiita.com/h398qy988q5/items/7e0052b29ec876407f5d)
# Prompt the user for a folder via an AppleScript dialog (macOS + RStudio only)
# and return its POSIX path, or NA_character_ if nothing was chosen.
dir.choose <- function() {
  # The dialog writes the chosen folder path into a temp file.
  system("osascript -e 'tell app \"RStudio\" to POSIX path of (choose folder with prompt \"Choose Folder:\")' > /tmp/R_folder",
         intern = FALSE, ignore.stderr = TRUE)
  # Read the path back and clean up the temp file.
  p <- system("cat /tmp/R_folder && rm -f /tmp/R_folder", intern = TRUE)
  # BUG FIX: the original used return(ifelse(length(p), p, NA)). ifelse() is
  # vectorized and length(p) is not a logical scalar; use a scalar if/else
  # and a typed NA so the return type is always character.
  if (length(p) > 0) p else NA_character_
}
# NOTE(review): "cat" is not a CRAN package (cat() is a base R function);
# this install call is unnecessary and will fail.
install.packages("cat")
dirname = dir.choose()
#filename = file.choose()
################################################################################
# Build the annotation table from the SWATH peak names.
#setwd("~/Dropbox/0_Work/R/Perseus_Like_Analysis/Heart")
# setwd("C:/Users/user/Dropbox/My PC (DESKTOP-HJ2V1AA)/Desktop/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Heart")
setwd("/Users/ay/Dropbox/GitHub/local/Docker/SWATHR/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Heart")
dat_heart <- read_excel("SWATH.xlsx", 2)
setwd("C:/Users/user/Dropbox/My PC (DESKTOP-HJ2V1AA)/Desktop/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Other2")
#setwd("~/Dropbox/0_Work/R/Perseus_Like_Analysis/Other2")
getwd()
dir()
t(colnames(dat_heart))
# Columns holding the UniProt-style peak name and the description line.
num <- grep("(Peak Name|Group)",colnames(dat_heart))
### NOTE: the original comment here was mojibake ("needs updating").
x <- dat_heart[,num]
# NOTE(review): dat_h, dat_n, dat_p and dat_s are never defined in this
# script; this rbind() will error, and its result is discarded anyway.
rbind(dat_heart[,num],dat_h[,num],dat_n[,num],dat_p[,num],dat_s[,num])
# Split "sp|<accession>|<GENE>_<SPECIES>" into its parts.
split_pn <- data.frame(str_split(x$`Peak Name`, pattern = "\\|", simplify = TRUE))
colnames(split_pn) <- c("sp", "Protein.IDs", "GeneName") # rename columns
Protein.IDs <- data.frame(str_sub(split_pn$`Protein.IDs`, start = 1, end = 6)) # chars 1-6: UniProt accession
Gene.names <- data.frame(str_sub(split_pn$`GeneName`, start = 1, end = -7)) # drop trailing "_SPECIES"
Species <- data.frame(str_sub(split_pn$`GeneName`, start = -5, end = -1)) # last 5 chars: species tag
split_pn2 <- cbind(Protein.IDs, Gene.names, Species)
colnames(split_pn2) <- c("Protein.IDs", "GeneName", "Species") # rename columns
# Split the description line on the OS=/GN=/PE=/SV= tags.
split_gr <- data.frame(str_split(x$`Group`, pattern = ".OS=|.GN=|.PE=|.SV=", simplify = TRUE))
colnames(split_gr) <- c("Description", "OS", "GN", "PE", "SV") # rename columns
xx <- cbind(x, split_pn2, split_gr)
# Remove duplicated accessions.
xxx <- xx %>% distinct(Protein.IDs,.keep_all=TRUE)
# Any duplicates left?
xxx$Protein.IDs %>% duplicated() %>% any()
# Table of remaining duplicates (should be empty).
xxx %>% group_by(Protein.IDs) %>% summarize(frequency = n()) %>% arrange(desc(frequency)) %>% filter(frequency > 1)
# Write the annotation table.
#write_xlsx(xxx, "anno.xlsx", format_headers = FALSE)
write_xlsx(xxx, "anno3.xlsx", format_headers = FALSE)
################################################################################
# Add EntrezID etc. to the SWATH annotation (species-level OrgDb lookup).
anno <- xxx
# Look up by UniProt accession and, separately, by gene symbol.
id <- anno$`Protein.IDs`
GN <- anno$GN
#GeneName <- anno$GeneName
res_id <- select(org.Mm.eg.db, keys = id, keytype = "UNIPROT",
columns = c("ENSEMBL", "ENTREZID", "GENENAME", "MGI", "SYMBOL", "UNIPROT"))
res_GN <- select(org.Mm.eg.db, keys = GN, keytype = "SYMBOL",
columns = c("ENSEMBL", "ENTREZID", "GENENAME", "MGI", "SYMBOL", "UNIPROT"))
res_GN <- res_GN[,c(6,2,3,4,5,1)]
# Stack both lookups.
res_id_GN <- rbind(res_id, res_GN)
# Remove duplicates per key. NOTE(review): T is used for TRUE below; prefer TRUE.
ex_id <- res_id_GN %>% distinct(UNIPROT, .keep_all = T)
ex_GN <- res_id_GN %>% distinct(SYMBOL, .keep_all = T)
ex_res_id_GN <- rbind(ex_id, ex_GN) %>% filter(!is.na(ENTREZID)) %>% filter(!is.na(UNIPROT)) %>% distinct(UNIPROT, .keep_all = T)
ex_res_id_GN_Other <- rbind(ex_id, ex_GN) %>% filter(!is.na(ENTREZID)) %>% filter(is.na(UNIPROT)) %>% distinct(SYMBOL, .keep_all = T)
# Join the lookup results back onto the annotation, by accession and by symbol.
anno_id <- left_join(anno, ex_res_id_GN, by = c("Protein.IDs" = "UNIPROT"))
anno_GN <- left_join(anno, ex_res_id_GN, by = c("GN" = "SYMBOL"))
anno_id_GN <- rbind(anno_id[1:14], anno_GN[-11]) %>% filter(!is.na(ENTREZID)) %>% distinct(Protein.IDs, .keep_all = T)
anno_id_Other <- left_join(anno, ex_res_id_GN_Other, by = c("Protein.IDs" = "UNIPROT"))
anno_GN_Other <- left_join(anno, ex_res_id_GN_Other, by = c("GN" = "SYMBOL"))
anno_id_GN_Other <- rbind(anno_id_Other[1:14], anno_GN_Other[-11]) %>% filter(!is.na(ENTREZID)) %>% distinct(Protein.IDs, .keep_all = T)
anno2 <- left_join(anno, rbind(anno_id_GN[,c(3,11:14)], anno_id_GN_Other[,c(3,11:14)]), by = "Protein.IDs")
# Rows that got an Entrez ID ...
anno2_notNA <- anno2 %>% filter(!is.na(ENTREZID))
# ... and rows that did not, split by species.
anno2_NA <- anno2 %>% filter(is.na(ENTREZID))
anno2_NA_Mm <- anno2_NA %>% filter(Species == "MOUSE")
anno2_NA_Other <- anno2_NA %>% filter(Species != "MOUSE")
# Reload only what is needed for the manual lookup below.
detach_all()
library(org.Mm.eg.db)
# Entrez IDs found manually (internet search) for the unresolved mouse rows.
ent <- c("18563", "234695", "14467", "14070")
res_ent <- select(org.Mm.eg.db, keys = ent, keytype = "ENTREZID",
columns = c("ENSEMBL", "ENTREZID", "GENENAME", "MGI", "SYMBOL", "UNIPROT"))
# Remove duplicates.
library(tidyverse)
res_ent <- res_ent %>% filter(!is.na(ENTREZID)) %>% distinct(ENTREZID, .keep_all = T)
res_ent[1,]
res_ent <- res_ent[,c(2,1,3,4,5)]
# Attach the manual lookups to the unresolved mouse rows.
# NOTE(review): this cbind assumes res_ent rows line up 1:1 with anno2_NA_Mm
# rows -- confirm the order before trusting the result.
anno2_NA_Mm <- cbind(anno2_NA_Mm[,1:10], res_ent[,1:4])
t(colnames(anno2))
t(colnames(anno2_notNA))
t(colnames(anno2_NA_Mm))
t(colnames(anno2_NA_Other))
# Recombine all rows.
anno3 <- rbind(anno2_notNA, anno2_NA_Mm, anno2_NA_Other)
anno3_NA <- anno3%>% filter(is.na(Protein.IDs)) # check for NA accessions
# Restore the original row order via a join on the accession.
t(colnames(anno3))
anno_final <- left_join(anno,anno3[,c(3,11:14)],by = "Protein.IDs")
# Write both the enriched and the original annotation.
library(openxlsx) # I/O (write.xlsx)
smp <- list("anno_new"=anno_final,"anno"=anno)
write.xlsx(smp, "anno.xlsx")
################################################################################
################################################################################
# Perseus-like analysis (main part): statistics on the preprocessed data.
################################################################################
################################################################################
# NOTE(review): clearing the workspace mid-script discards everything computed
# above (including `anno_final`); the annotation is re-read from disk below.
rm(list = ls(all.names = TRUE))
# Detach every attached non-base package from the search path, keeping the
# base packages (stats, graphics, ...) so the session stays usable.
# Called only for its side effect; returns the detach results invisibly.
detach_all <- function() {
  basic.pkg <- c("package:stats", "package:graphics", "package:grDevices",
                 "package:utils", "package:datasets", "package:methods", "package:base")
  # Attached packages appear on the search path as "package:<name>". The
  # original `search()[ifelse(unlist(gregexpr("package:", search())) == 1, TRUE, FALSE)]`
  # is exactly an anchored grep -- simpler and clearer.
  pkg.list <- grep("^package:", search(), value = TRUE)
  pkg.list <- setdiff(pkg.list, basic.pkg)
  invisible(lapply(pkg.list, detach, character.only = TRUE))
}
detach_all()
library(DEP)
library(tidyverse) # ggplot2, dplyr, tidyr, stringr, ...
library(dplyr)
library(readxl) # input (read_excel)
library(xlsx) # input
library(openxlsx) # input/output (write.xlsx)
library(writexl) # output
library(multcomp)
################################################################################
setwd("C:/Users/user/Dropbox/My PC (DESKTOP-HJ2V1AA)/Desktop/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Other2")
#setwd("/Users/user/Dropbox/0_Work/R/Perseus_Like_Analysis/Other")
anno <- read_excel("anno.xlsx", 1) # read sheet 1 (annotation table built above)
################################################################################
# Statistical analysis function (2 arguments):
# log2-transform, imputation (MNAR), subtraction (median), 1-way ANOVA,
# 2-way ANOVA, Tukey HSD.
fun2 <- function(x,y){
data <- x
ExpDesign <- y
#split
split <- str_split(data$`Peak Name`, pattern = "\\|", simplify = TRUE)
colnames(split) <- c("sp", "Protein.IDs", "GeneName") #列名変更
class(split)
x <- data.frame(split)
#extract
Protein.IDs <- str_sub(x$`Protein.IDs`, start = 1, end = 6) #`Peak Name`列�?�1-6�?字目(Protein.IDs)抽出
Gene.names <- str_sub(x$`GeneName`, start = 1, end = -7) #`GeneName`列�?�1�?字目�?-7�?字目(GeneName)抽出
Species <- str_sub(x$`GeneName`, start = -5, end = -1) #`GeneName`列�?�-5�?-1�?字目(Species)抽出
#bind
data <- cbind(data, Protein.IDs, Gene.names, Species) #data, Protein.IDs, Gene.names, Speciesを�?��?�クトル単位で結合
#Search Duplication
data$Protein.IDs %>% duplicated() %>% any()
data$Gene.names %>% duplicated() %>% any()
data$Species %>% duplicated() %>% any()
#Duplication table
data %>% group_by(Protein.IDs) %>% summarize(frequency = n()) %>% arrange(desc(frequency)) %>% filter(frequency > 1)
data %>% group_by(Gene.names) %>% summarize(frequency = n()) %>% arrange(desc(frequency)) %>% filter(frequency > 1)
data %>% group_by(Species) %>% summarize(frequency = n()) %>% arrange(desc(frequency)) %>% filter(frequency > 1)
#Unique Uniprot ID
data_unique <- make_unique(data, "Gene.names", "Protein.IDs", delim = ";")
data_unique$Protein.IDs %>% duplicated() %>% any() # Are there any duplicated names?
#SummarizedExperiment
Sample_columns <- grep("(SAL|PCP)", colnames(data_unique)) # get Sample column numbers
experimental_design <- ExpDesign #ExperimentalDesignSheet(label,condition,replicate)
###############################################################################
#Log2-transform
data_se <- make_se(data_unique, Sample_columns, experimental_design) #columns=�?ータ数, #Log2-transformation
data1 <- data.frame(data_se@assays@data) #log2
#Impute:left-shifted Gaussian distribution (for MNAR)
data_imp_man <- impute(data_se, fun = "man", shift = 1.8, scale = 0.3) #Perseus,imputation
data2 <- data.frame(data_imp_man@assays@data) #Subtract前log2imp
#Subtract(Median):Perseus
standardize <- function(z) {
colmed <- apply(z, 2, median) #Median of Each Sample's Protein Expression level
colmad <- apply(z, 2, mad) # median absolute deviation
rv <- sweep(z, 2, colmed,"-") #subtracting median expression
#rv <- sweep(rv, 2, colmad, "/") # dividing by median absolute deviation
return(rv)
}
data3 <- data2 #Subtract前log2impをコピ�?�
Sample_columns <- grep("(SC|PC)", colnames(data3)) # get Sample column numbers
data3[Sample_columns] <- standardize(data3[Sample_columns]) #Subtract(Median),log2impsub
#############################################################
dat1 <- cbind(rownames(data1),data1) #log2
dat2 <- cbind(rownames(data2),data2) #log2imp
dat3 <- cbind(rownames(data3),data3) #log2impsub
#integration
dat <- cbind(data$Gene.names,data) #行名追�?
dat4 <- left_join(dat, dat1, by = c("Gene.names" = "rownames(data1)")) #raw+log2
dat4 <- left_join(dat4, dat2, by = c("Gene.names" = "rownames(data2)")) #raw+log2+log2imp
dat4 <- left_join(dat4, dat3, by = c("Gene.names" = "rownames(data3)")) #raw+log2+log2imp+log2impsub
#output xlsx
smp <- list("raw"=dat,"log2"=dat1,"log2imp"=dat2,"log2impsub"=dat3,"integ"=dat4,"anno"=anno) #リスト作�??,rawdata,log2,imputation,subtract,integration
write.xlsx(smp, "data.xlsx")
#############################################################
#statistic summary
data_rm <- data3
data_rm[,1:2] <- NULL #列削除
#transpose
tdata_rm <- t(data_rm)
tdata_rm <- cbind(as.data.frame(rownames(tdata_rm)),tdata_rm)
colnames(tdata_rm)[1] <- "ID"
#grouping
group <- read_excel("SWATH.xlsx", 4) #シー�?4(G)入�?
PC <- factor(group$PC, levels = c("SC0", "SC10", "SC30", "PC0", "PC10", "PC30"))
P <- factor(group$P, levels = c("S", "P"))
C <- factor(group$C, levels = c("C0", "C10", "C30"))
g <- cbind(PC,P,C)
#annotation
ganno <- group[,grep("condition|ID", colnames(group))]
tdata_rm2 <- left_join(ganno, tdata_rm, by = "ID")
tdata_rm3 <- tdata_rm2[,-grep("ID", colnames(tdata_rm2))]
#statistic summary
statv <- tdata_rm3 %>% gather(key = GeneName, value = expression, -condition) %>%
group_by(condition, GeneName) %>%
summarise_each(funs(N = length, mean = mean, sd = sd, se = sd/sqrt(n()),
min = min, Q1 = quantile(.,0.25, na.rm=TRUE),
Q2 = quantile(.,0.5, na.rm=TRUE), #med = median,
Q3 = quantile(., 0.75, na.rm=TRUE),
max = max, IQR = IQR))
statSC0 <- statv %>% filter(condition == "SC0")
statSC10 <- statv %>% filter(condition == "SC10")
statSC30 <- statv %>% filter(condition == "SC30")
statPC0 <- statv %>% filter(condition == "PC0")
statPC10 <- statv %>% filter(condition == "PC10")
statPC30 <- statv %>% filter(condition == "PC30")
#colnames
colnames(statSC0) <- str_c("SC0", colnames(statSC0), sep="_")
colnames(statSC10) <- str_c("SC10", colnames(statSC10), sep="_")
colnames(statSC30) <- str_c("SC30", colnames(statSC30), sep="_")
colnames(statPC0) <- str_c("PC0", colnames(statPC0), sep="_")
colnames(statPC10) <- str_c("PC10", colnames(statPC10), sep="_")
colnames(statPC30) <- str_c("PC30", colnames(statPC30), sep="_")
colnames(statSC0)[c(1,2)] <- c("condition","GeneName")
colnames(statSC10)[c(1,2)] <- c("condition","GeneName")
colnames(statSC30)[c(1,2)] <- c("condition","GeneName")
colnames(statPC0)[c(1,2)] <- c("condition","GeneName")
colnames(statPC10)[c(1,2)] <- c("condition","GeneName")
colnames(statPC30)[c(1,2)] <- c("condition","GeneName")
#bind
statSC0 <- statSC0[,-1]
statSC10 <- statSC10[,-1]
statSC30 <- statSC30[,-1]
statPC0 <- statPC0[,-1]
statPC10 <- statPC10[,-1]
statPC30 <- statPC30[,-1]
statv2 <- left_join(statSC0, statSC10, by = "GeneName")
statv2 <- left_join(statv2, statSC30, by = "GeneName")
statv2 <- left_join(statv2, statPC0, by = "GeneName")
statv2 <- left_join(statv2, statPC10, by = "GeneName")
statv2 <- left_join(statv2, statPC30, by = "GeneName")
#############################################################
#multcomp
#1wANOVA function
aof <- function(x) {
m <- data.frame(PC, x);
anova(aov(x ~ PC, m))
}
# apply analysis to the data and get the pvalues.
onewayANOVA <- apply(data_rm, 1, aof)
onewayANOVAp <- data.frame(lapply(onewayANOVA, function(x) { x["Pr(>F)"][1,] }))
onewayANOVAp2 <- data.frame(t(onewayANOVAp))
colnames(onewayANOVAp2) <- "p_PC" #rename
#############################################################
#2wANOVA function
aof2 <- function(x) {
n <- data.frame(P,C, x);
anova(aov(x ~ P + C + P*C, n))
}
# apply analysis to the data and get the pvalues
twowayANOVA <- apply(data_rm, 1, aof2)
twowayANOVAp <- data.frame(lapply(twowayANOVA, function(x) { x["Pr(>F)"][1:3,] }))
twowayANOVAp2 <- data.frame(t(twowayANOVAp))
colnames(twowayANOVAp2) <- c("p_P","p_C","p_PxC") #rename
sdata <- cbind(data_rm, onewayANOVAp2, twowayANOVAp2)
#############################################################
#2wANOVA BH-FDR
#p値
p_PC <- sdata$p_PC
p_P <- sdata$p_P
p_C <- sdata$p_C
p_PxC <- sdata$p_PxC
checkP <- data.frame(cbind(p_PC, p_P, p_C, p_PxC))
rownames(checkP) <- rownames(data3)
checkPr <- cbind(rownames(checkP),checkP)
names(checkPr)[1] <- "GeneName"
#q値
q_PC <- data.frame(p.adjust(p_PC, method = "BH"))
q_P <- data.frame(p.adjust(p_P, method = "BH"))
q_C <- data.frame(p.adjust(p_C, method = "BH"))
q_PxC <- data.frame(p.adjust(p_PxC, method = "BH"))
checkQ <- data.frame(cbind(q_PC, q_P, q_C, q_PxC))
colnames(checkQ) <- c("q_PC", "q_P", "q_C","q_PxC") #rename
rownames(checkQ) <- rownames(data3)
checkQr <- cbind(rownames(checkQ),checkQ)
names(checkQr)[1] <- "GeneName"
sdata <- cbind(sdata, checkQ)
#############################################################
#TukeyHSD function
#diff: difference between the two group means; e.g. if B-A is -127.3, the mean of B is -127.3 larger (i.e. 127.3 smaller) than the mean of A
#lwr, upr: lower and upper confidence limits (the bounds of the confidence interval)
#If the interval does not contain 0 (e.g. B-A excludes 0 but D-A includes 0), the pairwise difference is not 0, i.e. significant
#p.adj < 0.05: significant difference between the two groups (the confidence interval does not contain 0)
#############################################################
THSD <- function(x) {
nn <- data.frame(P,C, x);
TukeyHSD(aov(x ~ P + C + P*C, nn))
}
THSDresults <- apply(data_rm, 1, THSD)
THSD_PC <- data.frame(lapply(THSDresults, function(x) {x["P:C"]}))
#THSDp_PC <- select(THSD_PC, ends_with("p.adj")) #p値抽出
THSDp_PC <- THSD_PC[,grep("p.adj$",colnames(THSD_PC))] #p値抽出
#THSDd_PC <- select(THSD_PC, ends_with(".diff")) #diff値抽出
THSDd_PC <- THSD_PC[,grep(".diff$",colnames(THSD_PC))] #diff値抽出
#transpose
THSDp_PC2 <- data.frame(t(THSDp_PC))
THSDd_PC2 <- data.frame(t(THSDd_PC))
#rename
colnames(THSDp_PC2) <- str_c("THSDp", colnames(THSDp_PC2), sep="_")
colnames(THSDd_PC2) <- str_c("diff", colnames(THSDd_PC2), sep="_")
#bind
THSDpd <- cbind(rownames(data3), THSDp_PC2, THSDd_PC2)
names(THSDpd)[1] <- "GeneName"
#############################################################
#Annotation
sdata2 <- cbind(rownames(sdata),sdata)
names(sdata2)[1] <- "GeneName"
sdata2 <- left_join(sdata2, statv2, by = "GeneName")
sdata2 <- left_join(sdata2, THSDpd, by = "GeneName")
sdata3 <- left_join(sdata2, anno, by = "GeneName")
checkPr2 <- left_join(checkPr, anno, by = "GeneName")
checkQr2 <- left_join(checkQr, anno, by = "GeneName")
THSDpd2 <- left_join(THSDpd, anno, by = "GeneName")
#############################################################
#output xlsx
sheets <- list("integ" = sdata3, "anovap" = checkPr2,
"anovaq" = checkQr2, "THSDpd" = THSDpd2,
"statvalue" = statv2) #assume sheet1-4 are data frames
write_xlsx(sheets, "stat.xlsx", format_headers = FALSE)
#############################################################
#DEP list
twANOVA_Pq005 <- sdata3 %>% filter(Species == "MOUSE") %>% filter(q_P < 0.05)
twANOVA_Cq005 <- sdata3 %>% filter(Species == "MOUSE") %>% filter(q_C < 0.05)
twANOVA_PxCq005 <- sdata3 %>% filter(Species == "MOUSE") %>% filter(q_PxC < 0.05)
sheets2 <- list("Pq005"=twANOVA_Pq005[,grep("(GeneName|p_P$|p_C$|p_PxC$|q_P$|q_C$|q_PxC$|Protein.IDs|Description|GN)", colnames(twANOVA_Pq005))],
"Cq005"=twANOVA_Cq005[,grep("(GeneName|p_P$|p_C$|p_PxC$|q_P$|q_C$|q_PxC$|Protein.IDs|Description|GN)", colnames(twANOVA_Cq005))],
"PxCq005"=twANOVA_PxCq005[,grep("(GeneName|p_P$|p_C$|p_PxC$|q_P$|q_C$|q_PxC$|Protein.IDs|Description|GN)", colnames(twANOVA_PxCq005))])
write_xlsx(sheets2, "DEPtwANOVA.xlsx", format_headers = FALSE)
}
################################################################################
# Heart tissue: run the Perseus-like analysis pipeline (fun2) on the Heart data.
# NOTE(review): setwd() to an absolute user path makes this script machine-specific.
#Heart
setwd("C:/Users/user/Dropbox/My PC (DESKTOP-HJ2V1AA)/Desktop/PCPCLZ_SWATH/R/Perseus_Like_Analysis20210908/Heart")
#setwd("/Users/user/Dropbox/0_Work/R/Perseus_Like_Analysis/Heart")  # alternative (macOS) path
data <- read_excel("SWATH.xlsx", 2)      # sheet 2: SWATH quantification data
ExpDesign <- read_excel("SWATH.xlsx", 3) # sheet 3: experimental design (label, condition, replicate) for the DEP-package SummarizedExperiment
fun2(data, ExpDesign)                    # runs the full pipeline; writes data.xlsx, stat.xlsx and DEPtwANOVA.xlsx
|
\name{getGeneCount}
\alias{getGeneCount}
\title{
Calculate read counts of genes from a ReadCountSet object
}
\description{
Calculate read counts of genes from a ReadCountSet object
}
\usage{
getGeneCount(RCS)
}
\arguments{
\item{RCS}{a ReadCountSet object}
}
\details{
This function can be used to get gene read counts from exon read counts.
}
\value{
a matrix of gene read counts for each gene (row) and each sample (col).
}
\author{
Xi Wang, xi.wang@newcastle.edu.au
}
\seealso{
\code{\link{loadExonCountData}},
\code{\link{runDESeq}}
}
\examples{
data(RCS_example, package="SeqGSEA")
geneCounts <- getGeneCount(RCS_example)
}
| /man/getGeneCount.Rd | no_license | sunlightwang/SeqGSEA | R | false | false | 632 | rd | \name{getGeneCount}
\alias{getGeneCount}
\title{
Calculate read counts of genes from a ReadCountSet object
}
\description{
Calculate read counts of genes from a ReadCountSet object
}
\usage{
getGeneCount(RCS)
}
\arguments{
\item{RCS}{a ReadCountSet object}
}
\details{
This function can be used to get gene read counts from exon read counts.
}
\value{
a matrix of gene read counts for each gene (row) and each sample (col).
}
\author{
Xi Wang, xi.wang@newcastle.edu.au
}
\seealso{
\code{\link{loadExonCountData}},
\code{\link{runDESeq}}
}
\examples{
data(RCS_example, package="SeqGSEA")
geneCounts <- getGeneCount(RCS_example)
}
|
# Auto-extracted example script for treeman::getNdsLng.
library(treeman)
### Name: getNdsLng
### Title: Get lineage for multiple nodes
### Aliases: getNdsLng
### ** Examples
library(treeman)  # loaded again by the example block itself
data(mammals)     # example mammal tree shipped with the treeman package
# return human and gorilla lineages (one lineage per requested tip ID)
getNdsLng(mammals, id=c('Homo_sapiens', 'Gorilla_gorilla'))
| /data/genthat_extracted_code/treeman/examples/getNdsLng.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 252 | r | library(treeman)
### Name: getNdsLng
### Title: Get lineage for multiple nodes
### Aliases: getNdsLng
### ** Examples
library(treeman)
data(mammals)
# return human and gorilla lineages
getNdsLng(mammals, id=c('Homo_sapiens', 'Gorilla_gorilla'))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Step4_MonteCarloValuation.R
\name{calcMortFactors}
\alias{calcMortFactors}
\title{Calculates the mortality factors (t - 1)px q(x + t - 1) and tpx}
\usage{
calcMortFactors(inPolicy, mortTable, dT = 1/12)
}
\arguments{
\item{inPolicy}{A vector containing 45 attributes of a VA policy,
usually a row of a VA portfolio dataframe.}
\item{mortTable}{A dataframe with three columns of doubles representing the
mortality table.}
\item{dT}{A double giving the time step size in years; dT = 1 / 12 corresponds to monthly steps.}
}
\value{
Outputs a two-column data frame of doubles of mortFactors (t - 1)px
q(x + t - 1) and tpx.
}
\description{
Calculates the mortality factors (t - 1)px q(x + t - 1) and tpx required to
valuate the inPolicy. Extract gender, age (birth date & current date),
valuation date (current date), and maturity date from inPolicy, mortality
rates from mortTable.
}
\examples{
exPolicy <- VAPort[1, ]
calcMortFactors(exPolicy, mortTable, dT = 1 / 12)
}
| /man/calcMortFactors.Rd | no_license | h343li/vamc-r | R | false | true | 1,023 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Step4_MonteCarloValuation.R
\name{calcMortFactors}
\alias{calcMortFactors}
\title{Calculates the mortality factors (t - 1)px q(x + t - 1) and tpx}
\usage{
calcMortFactors(inPolicy, mortTable, dT = 1/12)
}
\arguments{
\item{inPolicy}{A vector containing 45 attributes of a VA policy,
usually a row of a VA portfolio dataframe.}
\item{mortTable}{A dataframe with three columns of doubles representing the
mortality table.}
\item{dT}{A double of stepsize in years; dT = 1 / 12 would be monthly.}
}
\value{
Outputs a two-column data frame of doubles of mortFactors (t - 1)px
q(x + t - 1) and tpx.
}
\description{
Calculates the mortality factors (t - 1)px q(x + t - 1) and tpx required to
valuate the inPolicy. Extract gender, age (birth date & current date),
valuation date (current date), and maturity date from inPolicy, mortality
rates from mortTable.
}
\examples{
exPolicy <- VAPort[1, ]
calcMortFactors(exPolicy, mortTable, dT = 1 / 12)
}
|
#####################################################
# Plot #1
# Author: Adrian Chavarria
# Date: 2020-07-09
#####################################################
# Read the power-consumption data. The file is ";"-separated; the header row
# is skipped and replaced with explicit names below. The dataset uses "?" to
# mark missing values, so na.strings is declared to keep the measurement
# columns numeric instead of character/factor (avoids the
# as.numeric(as.character(...)) round-trip and silent NA coercion warnings).
power <- read.table("household_power_consumption.txt", skip = 1, sep = ";",
                    na.strings = "?")
# Name the columns (the original header row was skipped above)
names(power) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                  "Voltage", "Global_intensity", "Sub_metering_1",
                  "Sub_metering_2", "Sub_metering_3")
# Keep only the two days of interest: 2007-02-01 and 2007-02-02
subpower <- subset(power, Date == "1/2/2007" | Date == "2/2/2007")
# Histogram of global active power. hist() already draws the title via
# `main`; the original script additionally called title(main=...), which
# overplots the same text a second time, so that call is removed.
hist(as.numeric(subpower$Global_active_power), col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power(kilowatts)")
| /Plot1.R | no_license | Adrichavamo/ExData_Plotting1 | R | false | false | 763 | r | #####################################################
# Plot #1
# Author: Adrian Chavarria
# Date: 2020-07-09
#####################################################
#Read data file
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
#Naming columns
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
#Subsetting power consumption data
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
#calling the basic plot function
hist(as.numeric(as.character(subpower$Global_active_power)),col="red",main="Global Active Power",xlab="Global Active Power(kilowatts)")
# annotating graph
title(main="Global Active Power")
|
%% File Name: tamaan.Rd
%% File Version: 0.56
\name{tamaan}
\alias{tamaan}
\alias{summary.tamaan}
\alias{print.tamaan}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Wrapper Function for \pkg{TAM} Language
}
\description{
This function is a convenience wrapper function for
several item response models in \pkg{TAM}. Using the
\code{\link{tamaanify}} framework, multidimensional item response models,
latent class models, located and ordered latent class models
and mixture item response models can be estimated.
}
\usage{
tamaan(tammodel, resp, tam.method=NULL, control=list(), doparse=TRUE, ...)
\method{summary}{tamaan}(object,file=NULL,\dots)
\method{print}{tamaan}(x,\dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tammodel}{
String for specification in \pkg{TAM}, see also \code{\link{tamaanify}}.
}
\item{resp}{
Dataset with item responses
}
\item{tam.method}{
One of the \pkg{TAM} methods \code{tam.mml}, \code{tam.mml.2pl}
or \code{tam.mml.3pl}.
}
\item{control}{
List with control arguments. See \code{\link{tam.mml}}.
}
\item{doparse}{Optional logical indicating whether \code{lavmodel}
should be parsed for \code{DO} statements, see \code{\link{doparse}}.
}
\item{\dots}{
Further arguments to be passed to
\code{tam.mml}, \code{tam.mml.2pl}
or \code{tam.mml.3pl}.
}
\item{object}{
Object of class \code{tamaan}
}
\item{file}{
A file name in which the summary output will be written
}
\item{x}{Object of class \code{tamaan}}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
Values generated by \code{tam.mml}, \code{tam.mml.2pl}
or \code{tam.mml.3pl}. In addition, the list also contains the (optional) entries
\item{tamaanify}{Output produced by \code{\link{tamaanify}}}
\item{lcaprobs}{Matrix with probabilities for latent class models}
\item{locs}{Matrix with cluster locations (for \code{TYPE="LOCLCA"})
}
\item{probs_MIXTURE}{Class probabilities (for \code{TYPE="MIXTURE"})}
\item{moments_MIXTURE}{Distribution parameters (for \code{TYPE="MIXTURE"})}
\item{itempartable_MIXTURE}{Item parameters (for \code{TYPE="MIXTURE"})}
\item{ind_classprobs}{Individual posterior probabilities for
latent classes (for \code{TYPE="MIXTURE"})}
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\author{
%% ~~who you are~~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See \code{\link{tamaanify}} for more details about model specification
using \code{tammodel}.
See \code{\link{tam.mml}} or \code{\link{tam.mml.3pl}}
for more examples.
}
\examples{
#############################################################################
# EXAMPLE 1: Examples dichotomous data data.read
#############################################################################
library(sirt)
data(data.read,package="sirt")
dat <- data.read
#*********************************************************************
#*** Model 1: Rasch model
tammodel <- "
LAVAAN MODEL:
F1=~ A1__C4
F1 ~~ F1
ITEM TYPE:
ALL(Rasch);
"
# estimate model
mod1 <- TAM::tamaan( tammodel, resp=dat)
summary(mod1)
\dontrun{
#*********************************************************************
#*** Model 2: 2PL model with some selected items
tammodel <- "
LAVAAN MODEL:
F1=~ A1__B1 + B3 + C1__C3
F1 ~~ F1
"
mod2 <- TAM::tamaan( tammodel, resp=dat)
summary(mod2)
#*********************************************************************
#*** Model 3: Multidimensional IRT model
tammodel <- "
LAVAAN MODEL:
G=~ A1__C4
F1=~ A1__B4
F2=~ C1__C4
F1 ~~ F2
# specify fixed entries in covariance matrix
F1 ~~ 1*F1
F2 ~~ 1*F2
G ~~ 0*F1
G ~~ 0.3*F2
G ~~ 0.7*G
"
mod3 <- TAM::tamaan( tammodel, resp=dat, control=list(maxiter=30))
summary(mod3)
#*********************************************************************
#*** Model 4: Some linear constraints for item slopes and intercepts
tammodel <- "
LAVAAN MODEL:
F=~ lam1__lam10*A1__C2
F=~ 0.78*C3
F ~~ F
A1 | a1*t1
A2 | a2*t1
A3 | a3*t1
A4 | a4*t1
B1 | b1*t1
B2 | b2*t1
B3 | b3*t1
C1 | t1
MODEL CONSTRAINT:
# defined parameters
# only linear combinations are permitted
b2==1.3*b1 + (-0.6)*b3
a1==q1
a2==q2 + t
a3==q1 + 2*t
a4==q2 + 3*t
# linear constraints for loadings
lam2==1.1*lam1
lam3==0.9*lam1 + (-.1)*lam0
lam8==lam0
lam9==lam0
"
mod4 <- TAM::tamaan( tammodel, resp=dat, control=list(maxiter=5) )
summary(mod4)
#*********************************************************************
#*** Model 5: Latent class analysis with three classes
tammodel <- "
ANALYSIS:
TYPE=LCA;
NCLASSES(3); # 3 classes
NSTARTS(5,20); # 5 random starts with 20 iterations
LAVAAN MODEL:
F=~ A1__C4
"
mod5 <- TAM::tamaan( tammodel, resp=dat, control=list(maxiter=100) )
summary(mod5)
#*********************************************************************
#*** Model 6: Ordered latent class analysis with three classes
tammodel <- "
ANALYSIS:
TYPE=OLCA;
NCLASSES(3); # 3 classes
NSTARTS(20,40); # 20 random starts with 40 iterations
LAVAAN MODEL:
F=~ A1__C4
"
mod6 <- TAM::tamaan( tammodel, dat )
summary(mod6)
#*********************************************************************
#*** Model 7: Unidimensional located latent class model with three classes
tammodel <- "
ANALYSIS:
TYPE=LOCLCA;
NCLASSES(3)
NSTARTS(10,40)
LAVAAN MODEL:
F=~ A1__C4
B2 | 0*t1
"
mod7 <- TAM::tamaan( tammodel, resp=dat)
summary(mod7)
#*********************************************************************
#*** Model 8: Two-dimensional located latent class analysis with some
# priors and equality constraints among thresholds
tammodel <- "
ANALYSIS:
TYPE=LOCLCA;
NCLASSES(4);
NSTARTS(10,20);
LAVAAN MODEL:
AB=~ A1__B4
C=~ C1__C4
A1 | a1diff*t1
B2 | 0*t1
C2 | 0*t1
B1 | a1diff*t1
MODEL PRIOR:
# prior distributions for cluster locations
DO2(1,4,1,1,2,1)
Cl\%1_Dim\%2 ~ N(0,2);
DOEND
"
# estimate model
mod8 <- TAM::tamaan( tammodel, resp=dat )
summary(mod8)
#*********************************************************************
#*** Model 9: Two-dimensional model with constraints on parameters
tammodel <- "
LAVAAN MODEL:
FA=~ A1+b*A2+A3+d*A4
FB=~ B1+b*B2+B3+d*B4
FA ~~ 1*FA
FA ~~ FB
FB ~~ 1*FB
A1 | c*t1
B1 | c*t1
A2 | .7*t1
"
# estimate model
mod9 <- TAM::tamaan( tammodel, resp=dat, control=list(maxiter=30) )
summary(mod9)
#############################################################################
# EXAMPLE 2: Examples polytomous data | data.Students
#############################################################################
library(CDM)
data( data.Students, package="CDM")
dat <- data.Students[,3:13]
## > colnames(dat)
## [1] "act1" "act2" "act3" "act4" "act5" "sc1" "sc2" "sc3" "sc4" "mj1" "mj2"
#*********************************************************************
#*** Model 1: Two-dimensional generalized partial credit model
tammodel <- "
LAVAAN MODEL:
FA=~ act1__act5
FS=~ sc1__sc4
FA ~~ 1*FA
FS ~~ 1*FS
FA ~~ FS
"
# estimate model
mod1 <- TAM::tamaan( tammodel, dat, control=list(maxiter=10) )
summary(mod1)
#*********************************************************************
#*** Model 2: Two-dimensional model, some constraints
tammodel <- "
LAVAAN MODEL:
FA=~ a1__a4*act1__act4 + 0.89*act5
FS=~ 1*sc1 + sc2__sc4
FA ~~ FA
FS ~~ FS
FA ~~ FS
# some equality constraints
act1 + act3 | a13_t1 * t1
act1 + act3 | a13_t2 * t2
"
# only create design matrices with tamaanify
mod2 <- TAM::tamaanify( tammodel, dat )
mod2$lavpartable
# estimate model (only few iterations as a test)
mod2 <- TAM::tamaan( tammodel, dat, control=list(maxiter=10) )
summary(mod2)
#*********************************************************************
#*** Model 3: Two-dimensional model, some more linear constraints
tammodel <- "
LAVAAN MODEL:
FA=~ a1__a5*act1__act5
FS=~ b1__b4*sc1__sc4
FA ~~ 1*FA
FA ~~ FS
FS ~~ 1*FS
act1 + act3 | a13_t1 * t1
act1 + act3 | a13_t2 * t2
MODEL CONSTRAINT:
a1==q0
a2==q0
a3==q0 + q1
a4==q2
a5==q2 + q1
"
# estimate
mod3 <- TAM::tamaan( tammodel, dat, control=list(maxiter=300 ) )
summary(mod3)
#*********************************************************************
#*** Model 4: Latent class analysis with three latent classes
tammodel <- "
ANALYSIS:
TYPE=LCA;
NCLASSES(3); # 3 classes
NSTARTS(10,30); # 10 random starts with 30 iterations
LAVAAN MODEL:
F=~ act1__act5
"
# estimate model
mod4 <- TAM::tamaan( tammodel, resp=dat)
summary(mod4)
#*********************************************************************
#*** Model 5: Partial credit model with "PCM2" parametrization
# select data
dat1 <- dat[, paste0("act",1:5) ]
# specify tamaan model
tammodel <- "
LAVAAN MODEL:
F=~ act1__act5
F ~~ F
# use the DO statement as a shorthand
DO(1,5,1)
act\% | b\%_1 * t1
act\% | b\%_2 * t2
DOEND
MODEL CONSTRAINT:
DO(1,5,1)
b\%_1==delta\% + tau\%_1
b\%_2==2*delta\%
DOEND
ITEM TYPE:
ALL(PCM)
"
# estimate model
mod5 <- TAM::tamaan( tammodel, dat1 )
summary(mod5)
# compare with PCM2 parametrization in tam.mml
mod5b <- TAM::tam.mml( dat1, irtmodel="PCM2" )
summary(mod5b)
#*********************************************************************
#*** Model 6: Rating scale model
# select data
dat1 <- dat[, paste0("sc",1:4) ]
psych::describe(dat1)
# specify tamaan model
tammodel <- "
LAVAAN MODEL:
F=~ sc1__sc4
F ~~ F
# use the DO statement as a shorthand
DO(1,4,1)
sc\% | b\%_1 * t1
sc\% | b\%_2 * t2
sc\% | b\%_3 * t3
DOEND
MODEL CONSTRAINT:
DO(1,4,1)
b\%_1==delta\% + step1
b\%_2==2*delta\% + step1 + step2
b\%_3==3*delta\%
DOEND
ITEM TYPE:
ALL(PCM)
"
# estimate model
mod6 <- TAM::tamaan( tammodel, dat1 )
summary(mod6)
# compare with RSM in tam.mml
mod6b <- TAM::tam.mml( dat1, irtmodel="RSM" )
summary(mod6b)
#*********************************************************************
#*** Model 7: Partial credit model with Fourier basis for
# item intercepts (Thissen, Cai & Bock, 2010)
# see ?tamaanify manual
# define tamaan model
tammodel <- "
LAVAAN MODEL:
mj=~ mj1__mj4
mj ~~ 1*mj
ITEM TYPE:
mj1(PCM,2)
mj2(PCM,3)
mj3(PCM)
mj4(PCM,1)
"
# estimate model
mod7 <- TAM::tamaan( tammodel, dat )
summary(mod7)
# -> This function can also be applied for the generalized partial credit
# model (GPCM).
#############################################################################
# EXAMPLE 3: Rasch model and mixture Rasch model (Geiser & Eid, 2010)
#############################################################################
data(data.geiser, package="TAM")
dat <- data.geiser
#*********************************************************************
#*** Model 1: Rasch model
tammodel <- "
LAVAAN MODEL:
F=~ mrt1__mrt6
F ~~ F
ITEM TYPE:
ALL(Rasch);
"
mod1 <- TAM::tamaan( tammodel, resp=dat )
summary(mod1)
#*********************************************************************
#*** Model 2: Mixed Rasch model with two classes
tammodel <- "
ANALYSIS:
TYPE=MIXTURE ;
NCLASSES(2);
NSTARTS(20,25);
LAVAAN MODEL:
F=~ mrt1__mrt6
F ~~ F
ITEM TYPE:
ALL(Rasch);
"
mod2 <- TAM::tamaan( tammodel, resp=dat )
summary(mod2)
# plot item parameters
ipars <- mod2$itempartable_MIXTURE[ 1:6, ]
plot( 1:6, ipars[,3], type="o", ylim=c(-3,2), pch=16,
xlab="Item", ylab="Item difficulty")
lines( 1:6, ipars[,4], type="l", col=2, lty=2)
points( 1:6, ipars[,4], col=2, pch=2)
# extract individual posterior distribution
post2 <- IRT.posterior(mod2)
str(post2)
# num [1:519, 1:30] 0.000105 0.000105 0.000105 0.000105 0.000105 ...
# - attr(*, "theta")=num [1:30, 1:30] 1 0 0 0 0 0 0 0 0 0 ...
# - attr(*, "prob.theta")=num [1:30, 1] 1.21e-05 2.20e-04 2.29e-03 1.37e-02 4.68e-02 ...
# - attr(*, "G")=num 1
# There are 2 classes and 15 theta grid points for each class
# The loadings of the theta grid on items are as follows
mod2$E[1,2,,"mrt1_F_load_Cl1"]
mod2$E[1,2,,"mrt1_F_load_Cl2"]
# compute individual posterior probability for class 1 (first 15 columns)
round( rowSums( post2[, 1:15] ), 3 )
# columns 16 to 30 refer to class 2
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{Model specification}
%% \keyword{TAM language}% __ONLY ONE__ keyword per line
| /TAM/man/tamaan.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 13,059 | rd | %% File Name: tamaan.Rd
%% File Version: 0.56
\name{tamaan}
\alias{tamaan}
\alias{summary.tamaan}
\alias{print.tamaan}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Wrapper Function for \pkg{TAM} Language
}
\description{
This function is a convenience wrapper function for
several item response models in \pkg{TAM}. Using the
\code{\link{tamaanify}} framework, multidimensional item response models,
latent class models, located and ordered latent class models
and mixture item response models can be estimated.
}
\usage{
tamaan(tammodel, resp, tam.method=NULL, control=list(), doparse=TRUE, ...)
\method{summary}{tamaan}(object,file=NULL,\dots)
\method{print}{tamaan}(x,\dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tammodel}{
String for specification in \pkg{TAM}, see also \code{\link{tamaanify}}.
}
\item{resp}{
Dataset with item responses
}
\item{tam.method}{
One of the \pkg{TAM} methods \code{tam.mml}, \code{tam.mml.2pl}
or \code{tam.mml.3pl}.
}
\item{control}{
List with control arguments. See \code{\link{tam.mml}}.
}
\item{doparse}{Optional logical indicating whether \code{lavmodel}
should be parsed for \code{DO} statements, see \code{\link{doparse}}.
}
\item{\dots}{
Further arguments to be passed to
\code{tam.mml}, \code{tam.mml.2pl}
or \code{tam.mml.3pl}.
}
\item{object}{
Object of class \code{tamaan}
}
\item{file}{
A file name in which the summary output will be written
}
\item{x}{Object of class \code{tamaan}}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
Values generated by \code{tam.mml}, \code{tam.mml.2pl}
or \code{tam.mml.3pl}. In addition, the list also contains the (optional) entries
\item{tamaanify}{Output produced by \code{\link{tamaanify}}}
\item{lcaprobs}{Matrix with probabilities for latent class models}
\item{locs}{Matrix with cluster locations (for \code{TYPE="LOCLCA"})
}
\item{probs_MIXTURE}{Class probabilities (for \code{TYPE="MIXTURE"})}
\item{moments_MIXTURE}{Distribution parameters (for \code{TYPE="MIXTURE"})}
\item{itempartable_MIXTURE}{Item parameters (for \code{TYPE="MIXTURE"})}
\item{ind_classprobs}{Individual posterior probabilities for
latent classes (for \code{TYPE="MIXTURE"})}
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\author{
%% ~~who you are~~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See \code{\link{tamaanify}} for more details about model specification
using \code{tammodel}.
See \code{\link{tam.mml}} or \code{\link{tam.mml.3pl}}
for more examples.
}
\examples{
#############################################################################
# EXAMPLE 1: Examples dichotomous data data.read
#############################################################################
library(sirt)
data(data.read,package="sirt")
dat <- data.read
#*********************************************************************
#*** Model 1: Rasch model
tammodel <- "
LAVAAN MODEL:
F1=~ A1__C4
F1 ~~ F1
ITEM TYPE:
ALL(Rasch);
"
# estimate model
mod1 <- TAM::tamaan( tammodel, resp=dat)
summary(mod1)
\dontrun{
#*********************************************************************
#*** Model 2: 2PL model with some selected items
tammodel <- "
LAVAAN MODEL:
F1=~ A1__B1 + B3 + C1__C3
F1 ~~ F1
"
mod2 <- TAM::tamaan( tammodel, resp=dat)
summary(mod2)
#*********************************************************************
#*** Model 3: Multidimensional IRT model
tammodel <- "
LAVAAN MODEL:
G=~ A1__C4
F1=~ A1__B4
F2=~ C1__C4
F1 ~~ F2
# specify fixed entries in covariance matrix
F1 ~~ 1*F1
F2 ~~ 1*F2
G ~~ 0*F1
G ~~ 0.3*F2
G ~~ 0.7*G
"
mod3 <- TAM::tamaan( tammodel, resp=dat, control=list(maxiter=30))
summary(mod3)
#*********************************************************************
#*** Model 4: Some linear constraints for item slopes and intercepts
tammodel <- "
LAVAAN MODEL:
F=~ lam1__lam10*A1__C2
F=~ 0.78*C3
F ~~ F
A1 | a1*t1
A2 | a2*t1
A3 | a3*t1
A4 | a4*t1
B1 | b1*t1
B2 | b2*t1
B3 | b3*t1
C1 | t1
MODEL CONSTRAINT:
# defined parameters
# only linear combinations are permitted
b2==1.3*b1 + (-0.6)*b3
a1==q1
a2==q2 + t
a3==q1 + 2*t
a4==q2 + 3*t
# linear constraints for loadings
lam2==1.1*lam1
lam3==0.9*lam1 + (-.1)*lam0
lam8==lam0
lam9==lam0
"
mod4 <- TAM::tamaan( tammodel, resp=dat, control=list(maxiter=5) )
summary(mod4)
#*********************************************************************
#*** Model 5: Latent class analysis with three classes
tammodel <- "
ANALYSIS:
TYPE=LCA;
NCLASSES(3); # 3 classes
NSTARTS(5,20); # 5 random starts with 20 iterations
LAVAAN MODEL:
F=~ A1__C4
"
mod5 <- TAM::tamaan( tammodel, resp=dat, control=list(maxiter=100) )
summary(mod5)
#*********************************************************************
#*** Model 6: Ordered latent class analysis with three classes
tammodel <- "
ANALYSIS:
TYPE=OLCA;
NCLASSES(3); # 3 classes
NSTARTS(20,40); # 20 random starts with 40 iterations
LAVAAN MODEL:
F=~ A1__C4
"
mod6 <- TAM::tamaan( tammodel, dat )
summary(mod6)
#*********************************************************************
#*** Model 7: Unidimensional located latent class model with three classes
tammodel <- "
ANALYSIS:
TYPE=LOCLCA;
NCLASSES(3)
NSTARTS(10,40)
LAVAAN MODEL:
F=~ A1__C4
B2 | 0*t1
"
mod7 <- TAM::tamaan( tammodel, resp=dat)
summary(mod7)
#*********************************************************************
#*** Model 8: Two-dimensional located latent class analysis with some
# priors and equality constraints among thresholds
tammodel <- "
ANALYSIS:
TYPE=LOCLCA;
NCLASSES(4);
NSTARTS(10,20);
LAVAAN MODEL:
AB=~ A1__B4
C=~ C1__C4
A1 | a1diff*t1
B2 | 0*t1
C2 | 0*t1
B1 | a1diff*t1
MODEL PRIOR:
# prior distributions for cluster locations
DO2(1,4,1,1,2,1)
Cl\%1_Dim\%2 ~ N(0,2);
DOEND
"
# estimate model
mod8 <- TAM::tamaan( tammodel, resp=dat )
summary(mod8)
#*********************************************************************
#*** Model 9: Two-dimensional model with constraints on parameters
tammodel <- "
LAVAAN MODEL:
FA=~ A1+b*A2+A3+d*A4
FB=~ B1+b*B2+B3+d*B4
FA ~~ 1*FA
FA ~~ FB
FB ~~ 1*FB
A1 | c*t1
B1 | c*t1
A2 | .7*t1
"
# estimate model
mod9 <- TAM::tamaan( tammodel, resp=dat, control=list(maxiter=30) )
summary(mod9)
#############################################################################
# EXAMPLE 2: Examples polytomous data | data.Students
#############################################################################
library(CDM)
data( data.Students, package="CDM")
dat <- data.Students[,3:13]
## > colnames(dat)
## [1] "act1" "act2" "act3" "act4" "act5" "sc1" "sc2" "sc3" "sc4" "mj1" "mj2"
#*********************************************************************
#*** Model 1: Two-dimensional generalized partial credit model
tammodel <- "
LAVAAN MODEL:
FA=~ act1__act5
FS=~ sc1__sc4
FA ~~ 1*FA
FS ~~ 1*FS
FA ~~ FS
"
# estimate model
mod1 <- TAM::tamaan( tammodel, dat, control=list(maxiter=10) )
summary(mod1)
#*********************************************************************
#*** Model 2: Two-dimensional model, some constraints
tammodel <- "
LAVAAN MODEL:
FA=~ a1__a4*act1__act4 + 0.89*act5
FS=~ 1*sc1 + sc2__sc4
FA ~~ FA
FS ~~ FS
FA ~~ FS
# some equality constraints
act1 + act3 | a13_t1 * t1
act1 + act3 | a13_t2 * t2
"
# only create design matrices with tamaanify
mod2 <- TAM::tamaanify( tammodel, dat )
mod2$lavpartable
# estimate model (only few iterations as a test)
mod2 <- TAM::tamaan( tammodel, dat, control=list(maxiter=10) )
summary(mod2)
#*********************************************************************
#*** Model 3: Two-dimensional model, some more linear constraints
tammodel <- "
LAVAAN MODEL:
FA=~ a1__a5*act1__act5
FS=~ b1__b4*sc1__sc4
FA ~~ 1*FA
FA ~~ FS
FS ~~ 1*FS
act1 + act3 | a13_t1 * t1
act1 + act3 | a13_t2 * t2
MODEL CONSTRAINT:
a1==q0
a2==q0
a3==q0 + q1
a4==q2
a5==q2 + q1
"
# estimate
mod3 <- TAM::tamaan( tammodel, dat, control=list(maxiter=300 ) )
summary(mod3)
#*********************************************************************
#*** Model 4: Latent class analysis with three latent classes
tammodel <- "
ANALYSIS:
TYPE=LCA;
NCLASSES(3); # 3 classes
NSTARTS(10,30); # 10 random starts with 30 iterations
LAVAAN MODEL:
F=~ act1__act5
"
# estimate model
mod4 <- TAM::tamaan( tammodel, resp=dat)
summary(mod4)
#*********************************************************************
#*** Model 5: Partial credit model with "PCM2" parametrization
# select data
dat1 <- dat[, paste0("act",1:5) ]
# specify tamaan model
tammodel <- "
LAVAAN MODEL:
F=~ act1__act5
F ~~ F
# use DO statement as shorthand
DO(1,5,1)
act\% | b\%_1 * t1
act\% | b\%_2 * t2
DOEND
MODEL CONSTRAINT:
DO(1,5,1)
b\%_1==delta\% + tau\%_1
b\%_2==2*delta\%
DOEND
ITEM TYPE:
ALL(PCM)
"
# estimate model
mod5 <- TAM::tamaan( tammodel, dat1 )
summary(mod5)
# compare with PCM2 parametrization in tam.mml
mod5b <- TAM::tam.mml( dat1, irtmodel="PCM2" )
summary(mod5b)
#*********************************************************************
#*** Model 6: Rating scale model
# select data
dat1 <- dat[, paste0("sc",1:4) ]
psych::describe(dat1)
# specify tamaan model
tammodel <- "
LAVAAN MODEL:
F=~ sc1__sc4
F ~~ F
# use DO statement as shorthand
DO(1,4,1)
sc\% | b\%_1 * t1
sc\% | b\%_2 * t2
sc\% | b\%_3 * t3
DOEND
MODEL CONSTRAINT:
DO(1,4,1)
b\%_1==delta\% + step1
b\%_2==2*delta\% + step1 + step2
b\%_3==3*delta\%
DOEND
ITEM TYPE:
ALL(PCM)
"
# estimate model
mod6 <- TAM::tamaan( tammodel, dat1 )
summary(mod6)
# compare with RSM in tam.mml
mod6b <- TAM::tam.mml( dat1, irtmodel="RSM" )
summary(mod6b)
#*********************************************************************
#*** Model 7: Partial credit model with Fourier basis for
# item intercepts (Thissen, Cai & Bock, 2010)
# see ?tamaanify manual
# define tamaan model
tammodel <- "
LAVAAN MODEL:
mj=~ mj1__mj4
mj ~~ 1*mj
ITEM TYPE:
mj1(PCM,2)
mj2(PCM,3)
mj3(PCM)
mj4(PCM,1)
"
# estimate model
mod7 <- TAM::tamaan( tammodel, dat )
summary(mod7)
# -> This function can also be applied for the generalized partial credit
# model (GPCM).
#############################################################################
# EXAMPLE 3: Rasch model and mixture Rasch model (Geiser & Eid, 2010)
#############################################################################
data(data.geiser, package="TAM")
dat <- data.geiser
#*********************************************************************
#*** Model 1: Rasch model
tammodel <- "
LAVAAN MODEL:
F=~ mrt1__mrt6
F ~~ F
ITEM TYPE:
ALL(Rasch);
"
mod1 <- TAM::tamaan( tammodel, resp=dat )
summary(mod1)
#*********************************************************************
#*** Model 2: Mixed Rasch model with two classes
tammodel <- "
ANALYSIS:
TYPE=MIXTURE ;
NCLASSES(2);
NSTARTS(20,25);
LAVAAN MODEL:
F=~ mrt1__mrt6
F ~~ F
ITEM TYPE:
ALL(Rasch);
"
mod2 <- TAM::tamaan( tammodel, resp=dat )
summary(mod2)
# plot item parameters
ipars <- mod2$itempartable_MIXTURE[ 1:6, ]
plot( 1:6, ipars[,3], type="o", ylim=c(-3,2), pch=16,
xlab="Item", ylab="Item difficulty")
lines( 1:6, ipars[,4], type="l", col=2, lty=2)
points( 1:6, ipars[,4], col=2, pch=2)
# extract individual posterior distribution
post2 <- IRT.posterior(mod2)
str(post2)
# num [1:519, 1:30] 0.000105 0.000105 0.000105 0.000105 0.000105 ...
# - attr(*, "theta")=num [1:30, 1:30] 1 0 0 0 0 0 0 0 0 0 ...
# - attr(*, "prob.theta")=num [1:30, 1] 1.21e-05 2.20e-04 2.29e-03 1.37e-02 4.68e-02 ...
# - attr(*, "G")=num 1
# There are 2 classes and 15 theta grid points for each class
# The loadings of the theta grid on items are as follows
mod2$E[1,2,,"mrt1_F_load_Cl1"]
mod2$E[1,2,,"mrt1_F_load_Cl2"]
# compute individual posterior probability for class 1 (first 15 columns)
round( rowSums( post2[, 1:15] ), 3 )
# columns 16 to 30 refer to class 2
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{Model specification}
%% \keyword{TAM language}% __ONLY ONE__ keyword per line
|
context("ds_group_summary")

test_that("output from ds_group_summary matches the expected result", {
  dat <- mtcars
  dat$cyl <- as.factor(dat$cyl)
  res <- ds_group_summary(dat, cyl, mpg)

  # Row labels expected in the first column of the stats table.
  stat_names <- c(
    "Obs", "Minimum", "Maximum", "Mean", "Median", "Mode",
    "Std. Deviation", "Variance", "Skewness", "Kurtosis",
    "Uncorrected SS", "Corrected SS", "Coeff Variation",
    "Std. Error Mean", "Range", "Interquartile Range"
  )

  expect_equal(res$xvar, "cyl")
  expect_equal(res$yvar, "mpg")
  expect_equivalent(as.character(res$stats[, 1]), stat_names)

  # Summary statistics for the 4-, 6- and 8-cylinder groups (columns 2-4).
  expect_equivalent(
    res$stats[, 2],
    c(11.00, 21.40, 33.90, 26.66, 26.00, 22.80, 4.51, 20.34,
      0.35, -1.43, 8023.83, 203.39, 16.91, 1.36, 12.50, 7.60)
  )
  expect_equivalent(
    res$stats[, 3],
    c(7.00, 17.80, 21.40, 19.74, 19.70, 21.00, 1.45, 2.11,
      -0.26, -1.83, 2741.14, 12.68, 7.36, 0.55, 3.60, 2.35)
  )
  expect_equivalent(
    res$stats[, 4],
    c(14.00, 10.40, 19.20, 15.10, 15.20, 10.40, 2.56, 6.55,
      -0.46, 0.33, 3277.34, 85.20, 16.95, 0.68, 8.80, 1.85)
  )
})
test_that("ds_group_summary throws the appropriate error", {
  dat <- mtcars
  dat$cyl <- as.factor(dat$cyl)
  dat$am <- as.factor(dat$am)

  # The grouping column must already be a factor.
  expect_error(
    ds_group_summary(mtcars, gear, mpg),
    "gear is not a categorical variable. The function expects an object of type `factor` but gear is of type `numeric`."
  )

  # The response column must be numeric or integer.
  expect_error(
    ds_group_summary(dat, cyl, am),
    "am is not a continuous variable. The function expects an object of type `numeric` or `integer` but am is of type `factor`."
  )
})
test_that("output from ds_group_summary plot is as expected", {
  skip_on_cran()
  grp_summary <- ds_group_summary(mtcarz, cyl, mpg)
  grp_plot <- plot(grp_summary)
  vdiffr::expect_doppelganger("group_summary", grp_plot$plot)
})
| /tests/testthat/test-group-summary.R | no_license | Efsilvaa/descriptr | R | false | false | 1,743 | r | context("ds_group_summary")
test_that("output from ds_group_summary matches the expected result", {
  dat <- mtcars
  dat$cyl <- as.factor(dat$cyl)
  res <- ds_group_summary(dat, cyl, mpg)

  # Row labels expected in the first column of the stats table.
  stat_names <- c(
    "Obs", "Minimum", "Maximum", "Mean", "Median", "Mode",
    "Std. Deviation", "Variance", "Skewness", "Kurtosis",
    "Uncorrected SS", "Corrected SS", "Coeff Variation",
    "Std. Error Mean", "Range", "Interquartile Range"
  )

  expect_equal(res$xvar, "cyl")
  expect_equal(res$yvar, "mpg")
  expect_equivalent(as.character(res$stats[, 1]), stat_names)

  # Summary statistics for the 4-, 6- and 8-cylinder groups (columns 2-4).
  expect_equivalent(
    res$stats[, 2],
    c(11.00, 21.40, 33.90, 26.66, 26.00, 22.80, 4.51, 20.34,
      0.35, -1.43, 8023.83, 203.39, 16.91, 1.36, 12.50, 7.60)
  )
  expect_equivalent(
    res$stats[, 3],
    c(7.00, 17.80, 21.40, 19.74, 19.70, 21.00, 1.45, 2.11,
      -0.26, -1.83, 2741.14, 12.68, 7.36, 0.55, 3.60, 2.35)
  )
  expect_equivalent(
    res$stats[, 4],
    c(14.00, 10.40, 19.20, 15.10, 15.20, 10.40, 2.56, 6.55,
      -0.46, 0.33, 3277.34, 85.20, 16.95, 0.68, 8.80, 1.85)
  )
})
test_that("ds_group_summary throws the appropriate error", {
  dat <- mtcars
  dat$cyl <- as.factor(dat$cyl)
  dat$am <- as.factor(dat$am)

  # The grouping column must already be a factor.
  expect_error(
    ds_group_summary(mtcars, gear, mpg),
    "gear is not a categorical variable. The function expects an object of type `factor` but gear is of type `numeric`."
  )

  # The response column must be numeric or integer.
  expect_error(
    ds_group_summary(dat, cyl, am),
    "am is not a continuous variable. The function expects an object of type `numeric` or `integer` but am is of type `factor`."
  )
})
test_that("output from ds_group_summary plot is as expected", {
  skip_on_cran()
  grp_summary <- ds_group_summary(mtcarz, cyl, mpg)
  grp_plot <- plot(grp_summary)
  vdiffr::expect_doppelganger("group_summary", grp_plot$plot)
})
|
####################
## Occupational Risk Covid19 Code Repository
#######################
## Step5: Create Datatool Datasets for health regions (table3)
#######################
## Inputs loaded below (produced by earlier pipeline steps):
##   table3_r.RData             - `table3_r`: counts per geography / health
##                                region / essential flag / NOC code / sex / age
##   onet_naics_noc.RData       - presumably supplies `onet` and `NOC_MERGE`,
##                                both used later in this script -- confirm
##   table3_median_income.RData - `table3_median_income`: median income keyed
##                                by health_region and noc_code
## Output of this section: table3_datatool.rds
# NOTE(review): library(tidyverse) below re-attaches dplyr, so the explicit
# library(dplyr) call is redundant (harmless).
library(dplyr)
library(scales)
library(tidyverse)
# NOTE(review): rm(list=ls()) wipes the caller's workspace and is discouraged
# in scripts; prefer running this file in a fresh R session.
rm(list=ls())
load("table3_r.RData")
load("onet_naics_noc.RData")
load("table3_median_income.RData")
##########################################
## Sum NOC codes within services
##########################################
# Collapse to one row per geography x health region x essential flag x
# NOC code x sex x age, summing every numeric (count) column.
# NOTE(review): summarise_if() is superseded in dplyr >= 1.0;
# summarise(across(where(is.numeric), sum)) is the modern equivalent.
table3_r_tool <- table3_r %>%
group_by(geography, health_region, essential, noc_code, sex, age) %>%
summarise_if(is.numeric, sum)
######################################################################
## create designations for industry variable for each service strategy
######################################################################
# Recode the 0/1 essential flag into a readable label, then drop the flag.
table3_r_tool$industry <- ifelse(table3_r_tool$essential==1, "Essential", "Non-essential")
table3_r_tool <- table3_r_tool %>%
ungroup() %>%
select(-essential)
###################################################
## now create an "all occupations" dataset
## which includes both essential and other services
###################################################
# Re-aggregate without the essential split to get combined totals.
table3_r_tool_all <- table3_r_tool %>%
group_by(geography, health_region, noc_code, sex, age) %>%
summarise_if(is.numeric, sum)
table3_r_tool_all$industry <- c("Total")
##### View(sum_all)
############################
## append the datasets to have essential, non-essential, and all
############################
# Stack the split and combined tables; `industry` is now one of
# "Essential", "Non-essential", "Total".
table3_r_tool_f <- rbind(table3_r_tool, table3_r_tool_all)
#############################
## socio-dem characteristics
#############################
# Derived counts: non-immigrant = total - (immigrants + non-permanent
# residents); white = total - (visible minority + Aboriginal).  The
# differences are floored at zero so census cells whose parts exceed the
# reported total cannot go negative (same behavior as the original
# ifelse() guards, expressed with pmax()).
table3_r_tool_f$sum_nonimmig1 <- pmax(
  table3_r_tool_f$sum_total1 -
    (table3_r_tool_f$sum_immig1 + table3_r_tool_f$sum_nonpermres1),
  0
)
table3_r_tool_f$sum_white1 <- pmax(
  table3_r_tool_f$sum_total1 -
    (table3_r_tool_f$sum_vismin1 + table3_r_tool_f$sum_aboriginal1),
  0
)

# Share of a subgroup within the total as a percentage: 0 when the
# denominator is zero (empty cells), capped at 100.  Replaces the repeated
# compute-then-clamp ifelse() pairs in the original.
pct_of_total <- function(num, denom) {
  pmin(ifelse(denom > 0, num / denom * 100, 0), 100)
}

table3_r_tool_f$percent_immig      <- pct_of_total(table3_r_tool_f$sum_immig1,      table3_r_tool_f$sum_total1)
table3_r_tool_f$percent_nonpermres <- pct_of_total(table3_r_tool_f$sum_nonpermres1, table3_r_tool_f$sum_total1)
table3_r_tool_f$percent_vismin     <- pct_of_total(table3_r_tool_f$sum_vismin1,     table3_r_tool_f$sum_total1)
################################################################################
## create overall female and over 65 percents (for SLIDER in population group)
################################################################################
# Percent female per geography/region/industry/NOC cell, computed on the
# all-ages rows.  spread() pivots sex into columns ("Female", "Total - Sex").
# NOTE(review): spread() is superseded by tidyr::pivot_wider().
female_slider <- table3_r_tool_f %>%
filter(age == "Total - 15 years and over") %>%
select(geography,health_region,industry,noc_code,sex,age,sum_total1) %>%
spread(sex,sum_total1)
# Treat missing cells as zero counts before computing the share.
female_slider$Female <- ifelse(is.na(female_slider$Female),0,female_slider$Female)
female_slider$`Total - Sex` <- ifelse(is.na(female_slider$`Total - Sex`),0,female_slider$`Total - Sex`)
# Percent female: 0 when the denominator is zero, then capped at 100.
female_slider$overall_percent_female <- ifelse(female_slider$`Total - Sex` >0,female_slider$Female/female_slider$`Total - Sex`*100,0)
female_slider$overall_percent_female <- ifelse(female_slider$overall_percent_female>100, 100, female_slider$overall_percent_female)
female_slider <- female_slider %>% ungroup() %>% distinct(geography,health_region,industry,noc_code,overall_percent_female)
# # View(female_slider)
# Same construction for percent aged 65+, pivoting age into columns on the
# both-sexes rows.
age65_slider <- table3_r_tool_f %>%
filter(sex == "Total - Sex") %>%
select(geography,health_region,industry,noc_code,sex,age,sum_total1) %>%
spread(age,sum_total1)
age65_slider$`65 years and over` <- ifelse(is.na(age65_slider$`65 years and over`),0,age65_slider$`65 years and over`)
age65_slider$`Total - 15 years and over` <- ifelse(is.na(age65_slider$`Total - 15 years and over`),0,age65_slider$`Total - 15 years and over`)
age65_slider$overall_percent_65 <- ifelse(age65_slider$`Total - 15 years and over` >0,age65_slider$`65 years and over`/age65_slider$`Total - 15 years and over`*100,0)
age65_slider$overall_percent_65 <- ifelse(age65_slider$overall_percent_65>100, 100, age65_slider$overall_percent_65)
age65_slider <-age65_slider %>% ungroup() %>% distinct(geography,health_region,industry,noc_code,overall_percent_65)
# # View(age65_slider)
# Full outer join of the two slider measures; default missing shares to 0.
sliders <- merge(female_slider,age65_slider,by=c('geography','health_region', 'industry','noc_code'),all=T)
sliders$overall_percent_female <- ifelse(is.na(sliders$overall_percent_female),0,sliders$overall_percent_female)
sliders$overall_percent_65 <- ifelse(is.na(sliders$overall_percent_65),0,sliders$overall_percent_65)
# # View(sliders)
# Attach the overall slider percentages back onto every row of the table.
table3_datatool <- merge(table3_r_tool_f,sliders,by=c('geography','health_region', 'industry','noc_code'),all=T)
table3_datatool$overall_percent_female <- ifelse(is.na(table3_datatool$overall_percent_female),0,table3_datatool$overall_percent_female)
table3_datatool$overall_percent_65 <- ifelse(is.na(table3_datatool$overall_percent_65),0,table3_datatool$overall_percent_65)
###########################################
## MERGE WITH OCCUPATION MEASURES from ONET
############################################
# # View(onet)
# Left join O*NET occupation measures onto every row by NOC code.
table3_datatool <- merge(table3_datatool,onet, by=c("noc_code"),all.x=T)
###########################################
## MERGE WITH INCOME
############################################
table3_median_income <- table3_median_income %>% select(-noc_code_class)
# # View(table3_median_income)
# Normalise noc_code to a zero-padded 4-character string so it matches the
# key format used in table3_datatool before joining.
table3_median_income$noc_code <- as.numeric(table3_median_income$noc_code)
table3_median_income$noc_code<-formatC(table3_median_income$noc_code, width = 4, format = "d", flag = "0")
table3_median_income$noc_code<-as.character(table3_median_income$noc_code)
table3_datatool <- merge(table3_datatool,table3_median_income , by=c("health_region","noc_code"),all.x=T)
table3_datatool <- table3_datatool %>% dplyr::rename("median_income"="median_total1")
# # View(table3_datatool)
###########################################
## MERGE WITH NOC_MERGE & SAVE
############################################
# Add broad NOC groupings, put key columns first, sort, and round all
# numeric columns to whole numbers.
# NOTE(review): mutate_if() is superseded; across(where(is.numeric), ...)
# is the modern equivalent.
table3_datatool <- merge(table3_datatool,NOC_MERGE,by="noc_code",all.x=T)
table3_datatool <- table3_datatool %>%
select(geography,health_region,industry,noc_broad,noc_broad_descript,noc_code,noc_code_class,sex,age,everything()) %>%
arrange(geography,health_region,industry,noc_code,sex,age) %>%
mutate_if(is.numeric, round, 0)
# Clean the noc_code_class labels: drop the first five characters
# (presumably a numeric code prefix -- confirm), remove any parenthesised
# text, then strip remaining digits.  health_region gets the same
# five-character prefix trim.
table3_datatool <- table3_datatool%>%mutate(noc_code_class=substring(noc_code_class,6))
table3_datatool<-table3_datatool%>%mutate(noc_code_class=gsub("\\s*\\([^\\)]+\\)","",as.character(noc_code_class)))
table3_datatool<-table3_datatool%>%mutate(noc_code_class= gsub('[0-9]+', '', noc_code_class))
table3_datatool <- table3_datatool%>%mutate(health_region=substring(health_region,6))
##########################################
## Apply PHO Operational names for PHUs in Ontario
##########################################
# Map Statistics Canada health-unit names (levels) to Public Health Ontario
# operational PHU names (labels).  Region names not listed in `levels`
# become NA in the resulting factor.
# NOTE(review): `=` is used for assignment here while `<-` is used
# everywhere else in this script.
table3_datatool$health_region_ontario = factor(
table3_datatool$health_region,
levels = c(
'The District of Algoma Health Unit',
'Brant County Health Unit',
'Durham Regional Health Unit',
'Grey Bruce Health Unit',
'Haldimand-Norfolk Health Unit',
'Haliburton, Kawartha, Pine Ridge District Health Unit',
'Halton Regional Health Unit',
'City of Hamilton Health Unit',
'Hastings and Prince Edward Counties Health Unit',
'Huron County Health Unit',
'Chatham-Kent Health Unit',
'Kingston, Frontenac and Lennox and Addington Health Unit',
'Lambton Health Unit',
'Leeds, Grenville and Lanark District Health Unit',
'Middlesex-London Health Unit',
'Niagara Regional Area Health Unit',
'North Bay Parry Sound District Health Unit',
'Northwestern Health Unit',
'City of Ottawa Health Unit',
'Peel Regional Health Unit',
'Perth District Health Unit',
'Peterborough County-City Health Unit',
'Porcupine Health Unit',
'Renfrew County and District Health Unit',
'The Eastern Ontario Health Unit',
'Simcoe Muskoka District Health Unit',
'Sudbury and District Health Unit',
'Thunder Bay District Health Unit',
'Timiskaming Health Unit',
'Waterloo Health Unit',
'Wellington-Dufferin-Guelph Health Unit',
'Windsor-Essex County Health Unit',
'York Regional Health Unit',
'Oxford Elgin St. Thomas Health Unit',
'City of Toronto Health Unit'
),
labels = c(
'Algoma Public Health',
'Brant County Health Unit',
'Durham Region Health Department',
'Grey Bruce Health Unit',
'Haldimand-Norfolk Health Unit',
'Haliburton, Kawartha, Pine Ridge District Health Unit',
'Halton Region Public Health',
'City of Hamilton Public Health Services',
'Hastings Prince Edward Public Health',
'Huron Public Health',
'Chatham-Kent Public Health',
'Kingston, Frontenac and Lennox & Addington Public Health',
'Lambton Public Health',
'Leeds, Grenville & Lanark District Health Unit',
'Middlesex-London Health Unit',
'Niagara Region Public Health',
'North Bay Parry Sound District Health Unit',
'Northwestern Health Unit',
'Ottawa Public Health',
'Peel Public Health',
'Perth Public Health',
'Peterborough Public Health',
'Porcupine Health Unit',
'Renfrew County and District Health Unit',
'Eastern Ontario Health Unit',
'Simcoe Muskoka District Health Unit',
'Public Health Sudbury & Districts',
'Thunder Bay District Health Unit',
'Timiskaming Health Unit',
'Region of Waterloo Public Health and Emergency Services',
'Wellington-Dufferin-Guelph Public Health',
'Windsor-Essex County Health Unit',
'York Region Public Health',
'Southwestern Public Health',
'Toronto Public Health'
)
)
# Backfill missing region names, then overwrite Ontario rows with the PHO
# operational name.
# NOTE(review): here the NA backfill runs BEFORE the Ontario override; in
# the NAICS section later in this file the same two steps run in the
# opposite order -- confirm which ordering is intended.
table3_datatool$health_region <- ifelse(is.na(table3_datatool$health_region),"Peterborough Public Health",table3_datatool$health_region)
table3_datatool$health_region_ontario <- as.character(table3_datatool$health_region_ontario)
table3_datatool$health_region <- ifelse(table3_datatool$geography %in% "Ontario", table3_datatool$health_region_ontario,table3_datatool$health_region)
# View(table3_datatool)
summary(table3_datatool)
# Drop the helper column and persist the health-region datatool.
table3_datatool <- table3_datatool %>% select(-health_region_ontario)
saveRDS(table3_datatool,file = "table3_datatool.rds")
######
## Specific sectors (NAICS)
######
# Second pass: rebuild the same datatool but additionally broken out by
# NAICS sector (naics_sector_name) within each health region.
library(dplyr)
library(scales)
library(tidyverse)
# NOTE(review): rm(list=ls()) wipes the caller's workspace; discouraged.
rm(list=ls())
load("table3_r.RData")
load("onet_naics_noc.RData")
load("table3_median_income.RData")
# View(table3_r)
##########################################
## Sum NOC codes within services
##########################################
# As in the first section, but keeping naics_sector_name in the grouping.
table3_r_tool <- table3_r %>%
group_by(geography, health_region, essential,naics_sector_name, noc_code, sex, age) %>%
summarise_if(is.numeric, sum)
# View(table3_r)
######################################################################
## create designations for industry variable for each service strategy
######################################################################
# Recode the 0/1 essential flag into a readable label, then drop the flag.
table3_r_tool$industry <- ifelse(table3_r_tool$essential==1, "Essential", "Non-essential")
table3_r_tool <- table3_r_tool %>%
ungroup() %>%
select(-essential)
# View(table3_r_tool)
###################################################
## now create an "all occupations" dataset
## which includes both essential and other services
###################################################
# Re-aggregate without the essential split to get combined totals.
table3_r_tool_all <- table3_r_tool %>%
group_by(geography, health_region, naics_sector_name,noc_code, sex, age) %>%
summarise_if(is.numeric, sum)
table3_r_tool_all$industry <- c("Total")
##### View(sum_all)
############################
## append the datasets to have essential, non-essential, and all
############################
table3_r_tool_f <- rbind(table3_r_tool, table3_r_tool_all)
# View(table3_r_tool_f)
#############################
## socio-dem characteristics
#############################
# Identical derivation to the health-region section: non-immigrant and
# white counts as floored differences, and subgroup percentages capped at
# 100 (0 when the denominator is zero).  pmax()/pmin() replace the
# repeated compute-then-clamp ifelse() pairs with identical results.
table3_r_tool_f$sum_nonimmig1 <- pmax(
  table3_r_tool_f$sum_total1 -
    (table3_r_tool_f$sum_immig1 + table3_r_tool_f$sum_nonpermres1),
  0
)
table3_r_tool_f$sum_white1 <- pmax(
  table3_r_tool_f$sum_total1 -
    (table3_r_tool_f$sum_vismin1 + table3_r_tool_f$sum_aboriginal1),
  0
)

# Percent of total, zero-safe and capped at 100.
pct_of_total <- function(num, denom) {
  pmin(ifelse(denom > 0, num / denom * 100, 0), 100)
}

table3_r_tool_f$percent_immig      <- pct_of_total(table3_r_tool_f$sum_immig1,      table3_r_tool_f$sum_total1)
table3_r_tool_f$percent_nonpermres <- pct_of_total(table3_r_tool_f$sum_nonpermres1, table3_r_tool_f$sum_total1)
table3_r_tool_f$percent_vismin     <- pct_of_total(table3_r_tool_f$sum_vismin1,     table3_r_tool_f$sum_total1)
# # View(table3_r_tool_f)
################################################################################
## create overall female and over 65 percents (for SLIDER in population group)
################################################################################
# Same slider construction as the first section, keyed additionally by
# naics_sector_name.
female_slider <- table3_r_tool_f %>%
filter(age == "Total - 15 years and over") %>%
select(geography,health_region,industry,naics_sector_name,noc_code,sex,age,sum_total1) %>%
spread(sex,sum_total1)
# Treat missing cells as zero counts before computing the share.
female_slider$Female <- ifelse(is.na(female_slider$Female),0,female_slider$Female)
female_slider$`Total - Sex` <- ifelse(is.na(female_slider$`Total - Sex`),0,female_slider$`Total - Sex`)
# Percent female: 0 when the denominator is zero, then capped at 100.
female_slider$overall_percent_female <- ifelse(female_slider$`Total - Sex` >0,female_slider$Female/female_slider$`Total - Sex`*100,0)
female_slider$overall_percent_female <- ifelse(female_slider$overall_percent_female>100, 100, female_slider$overall_percent_female)
female_slider <- female_slider %>% ungroup() %>% distinct(geography,health_region,industry,naics_sector_name,noc_code,overall_percent_female)
# # View(female_slider)
# Percent aged 65+, pivoting age into columns on the both-sexes rows.
age65_slider <- table3_r_tool_f %>%
filter(sex == "Total - Sex") %>%
select(geography,health_region,industry,naics_sector_name,noc_code,sex,age,sum_total1) %>%
spread(age,sum_total1)
age65_slider$`65 years and over` <- ifelse(is.na(age65_slider$`65 years and over`),0,age65_slider$`65 years and over`)
age65_slider$`Total - 15 years and over` <- ifelse(is.na(age65_slider$`Total - 15 years and over`),0,age65_slider$`Total - 15 years and over`)
age65_slider$overall_percent_65 <- ifelse(age65_slider$`Total - 15 years and over` >0,age65_slider$`65 years and over`/age65_slider$`Total - 15 years and over`*100,0)
age65_slider$overall_percent_65 <- ifelse(age65_slider$overall_percent_65>100, 100, age65_slider$overall_percent_65)
age65_slider <-age65_slider %>% ungroup() %>% distinct(geography,health_region,industry,naics_sector_name,noc_code,overall_percent_65)
# # View(age65_slider)
# Full outer join of the two slider measures; default missing shares to 0.
sliders <- merge(female_slider,age65_slider,by=c('geography','health_region', 'industry','naics_sector_name', 'noc_code'),all=T)
sliders$overall_percent_female <- ifelse(is.na(sliders$overall_percent_female),0,sliders$overall_percent_female)
sliders$overall_percent_65 <- ifelse(is.na(sliders$overall_percent_65),0,sliders$overall_percent_65)
# View(sliders)
# Attach the overall slider percentages back onto every row of the table.
table3_datatool <- merge(table3_r_tool_f,sliders,by=c('geography','health_region', 'industry','naics_sector_name', 'noc_code'),all=T)
table3_datatool$overall_percent_female <- ifelse(is.na(table3_datatool$overall_percent_female),0,table3_datatool$overall_percent_female)
table3_datatool$overall_percent_65 <- ifelse(is.na(table3_datatool$overall_percent_65),0,table3_datatool$overall_percent_65)
###########################################
## MERGE WITH OCCUPATION MEASURES from ONET
############################################
# # View(onet)
# Left join O*NET occupation measures onto every row by NOC code.
table3_datatool <- merge(table3_datatool,onet, by=c("noc_code"),all.x=T)
# View(table3_datatool)
###########################################
## MERGE WITH INCOME
############################################
table3_median_income <- table3_median_income %>% select(-noc_code_class)
# # View(table3_median_income)
# Normalise noc_code to a zero-padded 4-character string to match the join
# key format in table3_datatool.
table3_median_income$noc_code <- as.numeric(table3_median_income$noc_code)
table3_median_income$noc_code<-formatC(table3_median_income$noc_code, width = 4, format = "d", flag = "0")
table3_median_income$noc_code<-as.character(table3_median_income$noc_code)
table3_datatool <- merge(table3_datatool,table3_median_income , by=c("health_region","noc_code"),all.x=T)
table3_datatool <- table3_datatool %>% dplyr::rename("median_income"="median_total1")
# View(table3_datatool)
##########################################
## MERGE WITH NOC_MERGE & SAVE
############################################
# Add broad NOC groupings, order columns/rows (including the NAICS sector),
# and round all numeric columns.
table3_sector <- merge(table3_datatool,NOC_MERGE,by="noc_code",all.x=T)
table3_sector <- table3_sector %>%
select(geography,health_region,industry,noc_broad,noc_broad_descript,naics_sector_name,noc_code,noc_code_class,sex,age,everything()) %>%
arrange(geography,health_region,industry,naics_sector_name,noc_code,sex,age) %>%
mutate_if(is.numeric, round, 0)
# Clean noc_code_class labels (drop 5-char prefix, parenthesised text,
# digits) and trim the 5-char prefix from health_region, as in section 1.
table3_sector <- table3_sector%>%mutate(noc_code_class=substring(noc_code_class,6))
table3_sector<-table3_sector%>%mutate(noc_code_class=gsub("\\s*\\([^\\)]+\\)","",as.character(noc_code_class)))
table3_sector<-table3_sector%>%mutate(noc_code_class= gsub('[0-9]+', '', noc_code_class))
table3_sector <- table3_sector%>%mutate(health_region=substring(health_region,6))
##########################################
## Apply PHO Operational names for PHUs in Ontario
##########################################
# Same Statistics Canada -> PHO operational name mapping as in section 1;
# region names not listed in `levels` become NA in the factor.
# NOTE(review): `=` assignment here vs `<-` elsewhere in this script.
table3_sector$health_region_ontario = factor(
table3_sector$health_region,
levels = c(
'The District of Algoma Health Unit',
'Brant County Health Unit',
'Durham Regional Health Unit',
'Grey Bruce Health Unit',
'Haldimand-Norfolk Health Unit',
'Haliburton, Kawartha, Pine Ridge District Health Unit',
'Halton Regional Health Unit',
'City of Hamilton Health Unit',
'Hastings and Prince Edward Counties Health Unit',
'Huron County Health Unit',
'Chatham-Kent Health Unit',
'Kingston, Frontenac and Lennox and Addington Health Unit',
'Lambton Health Unit',
'Leeds, Grenville and Lanark District Health Unit',
'Middlesex-London Health Unit',
'Niagara Regional Area Health Unit',
'North Bay Parry Sound District Health Unit',
'Northwestern Health Unit',
'City of Ottawa Health Unit',
'Peel Regional Health Unit',
'Perth District Health Unit',
'Peterborough County-City Health Unit',
'Porcupine Health Unit',
'Renfrew County and District Health Unit',
'The Eastern Ontario Health Unit',
'Simcoe Muskoka District Health Unit',
'Sudbury and District Health Unit',
'Thunder Bay District Health Unit',
'Timiskaming Health Unit',
'Waterloo Health Unit',
'Wellington-Dufferin-Guelph Health Unit',
'Windsor-Essex County Health Unit',
'York Regional Health Unit',
'Oxford Elgin St. Thomas Health Unit',
'City of Toronto Health Unit'
),
labels = c(
'Algoma Public Health',
'Brant County Health Unit',
'Durham Region Health Department',
'Grey Bruce Health Unit',
'Haldimand-Norfolk Health Unit',
'Haliburton, Kawartha, Pine Ridge District Health Unit',
'Halton Region Public Health',
'City of Hamilton Public Health Services',
'Hastings Prince Edward Public Health',
'Huron Public Health',
'Chatham-Kent Public Health',
'Kingston, Frontenac and Lennox & Addington Public Health',
'Lambton Public Health',
'Leeds, Grenville & Lanark District Health Unit',
'Middlesex-London Health Unit',
'Niagara Region Public Health',
'North Bay Parry Sound District Health Unit',
'Northwestern Health Unit',
'Ottawa Public Health',
'Peel Public Health',
'Perth Public Health',
'Peterborough Public Health',
'Porcupine Health Unit',
'Renfrew County and District Health Unit',
'Eastern Ontario Health Unit',
'Simcoe Muskoka District Health Unit',
'Public Health Sudbury & Districts',
'Thunder Bay District Health Unit',
'Timiskaming Health Unit',
'Region of Waterloo Public Health and Emergency Services',
'Wellington-Dufferin-Guelph Public Health',
'Windsor-Essex County Health Unit',
'York Region Public Health',
'Southwestern Public Health',
'Toronto Public Health'
)
)
# Overwrite Ontario rows with the PHO name, then backfill NAs.
# NOTE(review): the NA backfill runs AFTER the Ontario override here, but
# BEFORE it in the health-region section above -- confirm which ordering is
# intended.
table3_sector$health_region_ontario <- as.character(table3_sector$health_region_ontario)
table3_sector$health_region <- ifelse(table3_sector$geography %in% "Ontario", table3_sector$health_region_ontario,table3_sector$health_region)
table3_sector$health_region <- ifelse(is.na(table3_sector$health_region),"Peterborough Public Health",table3_sector$health_region)
table3_sector <- table3_sector %>% select(-health_region_ontario)
# Drop rows with no count at all, then stack the sector-level table on top
# of the previously saved health-region table (tagged "Total Sectors").
table3_sector <- table3_sector %>% filter(!(is.na(sum_total1)))
table3_datatool <- readRDS("table3_datatool.rds")
table3_datatool$naics_sector_name <- c("Total Sectors")
table3_final <- rbind(table3_sector,table3_datatool)
table3_final$age=factor(table3_final$age,
levels=c("15 - 24 years",
"25 - 34 years",
"35 - 44 years",
"45 - 54 years",
"55 - 64 years",
"65 years and over",
"Total - 15 years and over"),
labels=c("15 - 24",
"25 - 34",
"35 - 44",
"45 - 54",
"55 - 64",
"65+",
"Total"))
# View(table3_final)
table3_final$sex=factor(table3_final$sex,
levels=c("Female",
"Male",
"Total - Sex"),
labels=c("Female",
"Male",
"Total"))
table3_final <- table3_final %>%
select(geography,health_region,industry,naics_sector_name,noc_code,noc_code_class,everything()) %>%
arrange(geography,health_region,industry,naics_sector_name,noc_code,noc_code_class) %>%
filter(sum_total1 > 10)
table3_final_ontario <- table3_final %>% filter(geography=="Ontario")
saveRDS(table3_final, file = "table3_final.rds")
#### create input dataset with regions for sidebar selections in tool
regions_input <- readRDS("table3_final.rds")
regions_input <- regions_input %>% distinct(geography,health_region) %>% filter(!is.na(health_region))
### create the total dataset for tabs that use overall sex and age
table3_final <- readRDS("table3_final.rds")
table3_final$median_income_plot <-
ifelse(table3_final$median_income > 150000,
150000,
table3_final$median_income)
table3_final$noc_broad_descript = factor(
table3_final$noc_broad_descript,
levels = c(
"Management occupations",
"Business, finance and administration occupations",
"Natural and applied sciences and related occupations",
"Health occupations",
"Occupations in education, law and social, community and government services",
"Occupations in art, culture, recreation and sport",
"Sales and service occupations",
"Trades, transport and equipment operators and related occupations",
"Natural resources, agriculture and related production occupations",
"Occupations in manufacturing and utilities"
),
labels = c(
"Management",
"Business",
"Sciences",
"Health",
"Community",
"Culture",
"Sales",
"Trades",
"Agriculture",
"Utilities"
)
)
table3_final_total <- table3_final %>% filter(sex == 'Total' & age == 'Total')
## Save datatool datasets
saveRDS(regions_input, file = "regions_input.rds")
saveRDS(table3_final_total, file = "table3_final_total.rds")
saveRDS(table3_final, file = "table3_final.rds")
| /Step5_Regions_Datatool_Create.R | no_license | BtsmithPHO/Occ_COVID_Tool | R | false | false | 25,399 | r | ####################
## Occupational Risk Covid19 Code Repository
#######################
## Step5: Create Datatool Datasets for health regions (table3)
#######################
library(dplyr)
library(scales)
library(tidyverse)
rm(list=ls())
load("table3_r.RData")
load("onet_naics_noc.RData")
load("table3_median_income.RData")
##########################################
## Sum NOC codes within services
##########################################
table3_r_tool <- table3_r %>%
group_by(geography, health_region, essential, noc_code, sex, age) %>%
summarise_if(is.numeric, sum)
######################################################################
## create designations for industry variable for each service strategy
######################################################################
table3_r_tool$industry <- ifelse(table3_r_tool$essential==1, "Essential", "Non-essential")
table3_r_tool <- table3_r_tool %>%
ungroup() %>%
select(-essential)
###################################################
## now create an "all occupations" dataset
## which includes both essential and other services
###################################################
table3_r_tool_all <- table3_r_tool %>%
group_by(geography, health_region, noc_code, sex, age) %>%
summarise_if(is.numeric, sum)
table3_r_tool_all$industry <- c("Total")
##### View(sum_all)
############################
## append the datasets to have essential, non-essential, and all
############################
table3_r_tool_f <- rbind(table3_r_tool, table3_r_tool_all)
#############################
## socio-dem characteristics
#############################
table3_r_tool_f$sum_nonimmig1 <- ifelse(table3_r_tool_f$sum_total1 >=(table3_r_tool_f$sum_immig1 + table3_r_tool_f$sum_nonpermres1),table3_r_tool_f$sum_total1 - (table3_r_tool_f$sum_immig1 + table3_r_tool_f$sum_nonpermres1),0)
table3_r_tool_f$sum_white1 <- ifelse(table3_r_tool_f$sum_total1 >=(table3_r_tool_f$sum_vismin1 + table3_r_tool_f$sum_aboriginal1),table3_r_tool_f$sum_total1 -(table3_r_tool_f$sum_vismin1 + table3_r_tool_f$sum_aboriginal1),0)
table3_r_tool_f$percent_immig <- ifelse(table3_r_tool_f$sum_total1>0, table3_r_tool_f$sum_immig1 / table3_r_tool_f$sum_total1*100, 0)
table3_r_tool_f$percent_immig <- ifelse(table3_r_tool_f$percent_immig>100, 100, table3_r_tool_f$percent_immig)
table3_r_tool_f$percent_nonpermres <- ifelse(table3_r_tool_f$sum_total1>0, table3_r_tool_f$sum_nonpermres1 / table3_r_tool_f$sum_total1*100, 0)
table3_r_tool_f$percent_nonpermres <- ifelse(table3_r_tool_f$percent_nonpermres>100, 100, table3_r_tool_f$percent_nonpermres)
table3_r_tool_f$percent_vismin <- ifelse(table3_r_tool_f$sum_total1>0, table3_r_tool_f$sum_vismin1 / table3_r_tool_f$sum_total1*100, 0)
table3_r_tool_f$percent_vismin <- ifelse(table3_r_tool_f$percent_vismin>100, 100, table3_r_tool_f$percent_vismin)
################################################################################
## create overall female and over 65 percents (for SLIDER in population group)
################################################################################
female_slider <- table3_r_tool_f %>%
filter(age == "Total - 15 years and over") %>%
select(geography,health_region,industry,noc_code,sex,age,sum_total1) %>%
spread(sex,sum_total1)
female_slider$Female <- ifelse(is.na(female_slider$Female),0,female_slider$Female)
female_slider$`Total - Sex` <- ifelse(is.na(female_slider$`Total - Sex`),0,female_slider$`Total - Sex`)
female_slider$overall_percent_female <- ifelse(female_slider$`Total - Sex` >0,female_slider$Female/female_slider$`Total - Sex`*100,0)
female_slider$overall_percent_female <- ifelse(female_slider$overall_percent_female>100, 100, female_slider$overall_percent_female)
female_slider <- female_slider %>% ungroup() %>% distinct(geography,health_region,industry,noc_code,overall_percent_female)
# # View(female_slider)
age65_slider <- table3_r_tool_f %>%
filter(sex == "Total - Sex") %>%
select(geography,health_region,industry,noc_code,sex,age,sum_total1) %>%
spread(age,sum_total1)
age65_slider$`65 years and over` <- ifelse(is.na(age65_slider$`65 years and over`),0,age65_slider$`65 years and over`)
age65_slider$`Total - 15 years and over` <- ifelse(is.na(age65_slider$`Total - 15 years and over`),0,age65_slider$`Total - 15 years and over`)
age65_slider$overall_percent_65 <- ifelse(age65_slider$`Total - 15 years and over` >0,age65_slider$`65 years and over`/age65_slider$`Total - 15 years and over`*100,0)
age65_slider$overall_percent_65 <- ifelse(age65_slider$overall_percent_65>100, 100, age65_slider$overall_percent_65)
age65_slider <-age65_slider %>% ungroup() %>% distinct(geography,health_region,industry,noc_code,overall_percent_65)
# # View(age65_slider)
sliders <- merge(female_slider,age65_slider,by=c('geography','health_region', 'industry','noc_code'),all=T)
sliders$overall_percent_female <- ifelse(is.na(sliders$overall_percent_female),0,sliders$overall_percent_female)
sliders$overall_percent_65 <- ifelse(is.na(sliders$overall_percent_65),0,sliders$overall_percent_65)
# # View(sliders)
table3_datatool <- merge(table3_r_tool_f,sliders,by=c('geography','health_region', 'industry','noc_code'),all=T)
table3_datatool$overall_percent_female <- ifelse(is.na(table3_datatool$overall_percent_female),0,table3_datatool$overall_percent_female)
table3_datatool$overall_percent_65 <- ifelse(is.na(table3_datatool$overall_percent_65),0,table3_datatool$overall_percent_65)
###########################################
## MERGE WITH OCCUPATION MEASURES from ONET
############################################
# # View(onet)
table3_datatool <- merge(table3_datatool,onet, by=c("noc_code"),all.x=T)
###########################################
## MERGE WITH INCOME
############################################
table3_median_income <- table3_median_income %>% select(-noc_code_class)
# # View(table3_median_income)
table3_median_income$noc_code <- as.numeric(table3_median_income$noc_code)
table3_median_income$noc_code<-formatC(table3_median_income$noc_code, width = 4, format = "d", flag = "0")
table3_median_income$noc_code<-as.character(table3_median_income$noc_code)
table3_datatool <- merge(table3_datatool,table3_median_income , by=c("health_region","noc_code"),all.x=T)
table3_datatool <- table3_datatool %>% dplyr::rename("median_income"="median_total1")
# # View(table3_datatool)
###########################################
## MERGE WITH NOC_MERGE & SAVE
############################################
table3_datatool <- merge(table3_datatool,NOC_MERGE,by="noc_code",all.x=T)
table3_datatool <- table3_datatool %>%
select(geography,health_region,industry,noc_broad,noc_broad_descript,noc_code,noc_code_class,sex,age,everything()) %>%
arrange(geography,health_region,industry,noc_code,sex,age) %>%
mutate_if(is.numeric, round, 0)
table3_datatool <- table3_datatool%>%mutate(noc_code_class=substring(noc_code_class,6))
table3_datatool<-table3_datatool%>%mutate(noc_code_class=gsub("\\s*\\([^\\)]+\\)","",as.character(noc_code_class)))
table3_datatool<-table3_datatool%>%mutate(noc_code_class= gsub('[0-9]+', '', noc_code_class))
table3_datatool <- table3_datatool%>%mutate(health_region=substring(health_region,6))
##########################################
## Apply PHO Operational names for PHUs in Ontario
##########################################
table3_datatool$health_region_ontario = factor(
table3_datatool$health_region,
levels = c(
'The District of Algoma Health Unit',
'Brant County Health Unit',
'Durham Regional Health Unit',
'Grey Bruce Health Unit',
'Haldimand-Norfolk Health Unit',
'Haliburton, Kawartha, Pine Ridge District Health Unit',
'Halton Regional Health Unit',
'City of Hamilton Health Unit',
'Hastings and Prince Edward Counties Health Unit',
'Huron County Health Unit',
'Chatham-Kent Health Unit',
'Kingston, Frontenac and Lennox and Addington Health Unit',
'Lambton Health Unit',
'Leeds, Grenville and Lanark District Health Unit',
'Middlesex-London Health Unit',
'Niagara Regional Area Health Unit',
'North Bay Parry Sound District Health Unit',
'Northwestern Health Unit',
'City of Ottawa Health Unit',
'Peel Regional Health Unit',
'Perth District Health Unit',
'Peterborough County-City Health Unit',
'Porcupine Health Unit',
'Renfrew County and District Health Unit',
'The Eastern Ontario Health Unit',
'Simcoe Muskoka District Health Unit',
'Sudbury and District Health Unit',
'Thunder Bay District Health Unit',
'Timiskaming Health Unit',
'Waterloo Health Unit',
'Wellington-Dufferin-Guelph Health Unit',
'Windsor-Essex County Health Unit',
'York Regional Health Unit',
'Oxford Elgin St. Thomas Health Unit',
'City of Toronto Health Unit'
),
labels = c(
'Algoma Public Health',
'Brant County Health Unit',
'Durham Region Health Department',
'Grey Bruce Health Unit',
'Haldimand-Norfolk Health Unit',
'Haliburton, Kawartha, Pine Ridge District Health Unit',
'Halton Region Public Health',
'City of Hamilton Public Health Services',
'Hastings Prince Edward Public Health',
'Huron Public Health',
'Chatham-Kent Public Health',
'Kingston, Frontenac and Lennox & Addington Public Health',
'Lambton Public Health',
'Leeds, Grenville & Lanark District Health Unit',
'Middlesex-London Health Unit',
'Niagara Region Public Health',
'North Bay Parry Sound District Health Unit',
'Northwestern Health Unit',
'Ottawa Public Health',
'Peel Public Health',
'Perth Public Health',
'Peterborough Public Health',
'Porcupine Health Unit',
'Renfrew County and District Health Unit',
'Eastern Ontario Health Unit',
'Simcoe Muskoka District Health Unit',
'Public Health Sudbury & Districts',
'Thunder Bay District Health Unit',
'Timiskaming Health Unit',
'Region of Waterloo Public Health and Emergency Services',
'Wellington-Dufferin-Guelph Public Health',
'Windsor-Essex County Health Unit',
'York Region Public Health',
'Southwestern Public Health',
'Toronto Public Health'
)
)
table3_datatool$health_region <- ifelse(is.na(table3_datatool$health_region),"Peterborough Public Health",table3_datatool$health_region)
table3_datatool$health_region_ontario <- as.character(table3_datatool$health_region_ontario)
table3_datatool$health_region <- ifelse(table3_datatool$geography %in% "Ontario", table3_datatool$health_region_ontario,table3_datatool$health_region)
# View(table3_datatool)
summary(table3_datatool)
table3_datatool <- table3_datatool %>% select(-health_region_ontario)
saveRDS(table3_datatool,file = "table3_datatool.rds")
######
## Specific sectors (NAICS)
######
library(dplyr)
library(scales)
library(tidyverse)
rm(list=ls())
load("table3_r.RData")
load("onet_naics_noc.RData")
load("table3_median_income.RData")
# View(table3_r)
##########################################
## Sum NOC codes within services
##########################################
table3_r_tool <- table3_r %>%
group_by(geography, health_region, essential,naics_sector_name, noc_code, sex, age) %>%
summarise_if(is.numeric, sum)
# View(table3_r)
######################################################################
## create designations for industry variable for each service strategy
######################################################################
table3_r_tool$industry <- ifelse(table3_r_tool$essential==1, "Essential", "Non-essential")
table3_r_tool <- table3_r_tool %>%
ungroup() %>%
select(-essential)
# View(table3_r_tool)
###################################################
## now create an "all occupations" dataset
## which includes both essential and other services
###################################################
table3_r_tool_all <- table3_r_tool %>%
group_by(geography, health_region, naics_sector_name,noc_code, sex, age) %>%
summarise_if(is.numeric, sum)
table3_r_tool_all$industry <- c("Total")
##### View(sum_all)
############################
## append the datasets to have essential, non-essential, and all
############################
table3_r_tool_f <- rbind(table3_r_tool, table3_r_tool_all)
# View(table3_r_tool_f)
#############################
## socio-dem characteristics
#############################
table3_r_tool_f$sum_nonimmig1 <- ifelse(table3_r_tool_f$sum_total1 >=(table3_r_tool_f$sum_immig1 + table3_r_tool_f$sum_nonpermres1),table3_r_tool_f$sum_total1 - (table3_r_tool_f$sum_immig1 + table3_r_tool_f$sum_nonpermres1),0)
table3_r_tool_f$sum_white1 <- ifelse(table3_r_tool_f$sum_total1 >=(table3_r_tool_f$sum_vismin1 + table3_r_tool_f$sum_aboriginal1),table3_r_tool_f$sum_total1 -(table3_r_tool_f$sum_vismin1 + table3_r_tool_f$sum_aboriginal1),0)
table3_r_tool_f$percent_immig <- ifelse(table3_r_tool_f$sum_total1>0, table3_r_tool_f$sum_immig1 / table3_r_tool_f$sum_total1*100, 0)
table3_r_tool_f$percent_immig <- ifelse(table3_r_tool_f$percent_immig>100, 100, table3_r_tool_f$percent_immig)
table3_r_tool_f$percent_nonpermres <- ifelse(table3_r_tool_f$sum_total1>0, table3_r_tool_f$sum_nonpermres1 / table3_r_tool_f$sum_total1*100, 0)
table3_r_tool_f$percent_nonpermres <- ifelse(table3_r_tool_f$percent_nonpermres>100, 100, table3_r_tool_f$percent_nonpermres)
table3_r_tool_f$percent_vismin <- ifelse(table3_r_tool_f$sum_total1>0, table3_r_tool_f$sum_vismin1 / table3_r_tool_f$sum_total1*100, 0)
table3_r_tool_f$percent_vismin <- ifelse(table3_r_tool_f$percent_vismin>100, 100, table3_r_tool_f$percent_vismin)
# # View(table3_r_tool_f)
################################################################################
## create overall female and over 65 percents (for SLIDER in population group)
################################################################################
female_slider <- table3_r_tool_f %>%
filter(age == "Total - 15 years and over") %>%
select(geography,health_region,industry,naics_sector_name,noc_code,sex,age,sum_total1) %>%
spread(sex,sum_total1)
female_slider$Female <- ifelse(is.na(female_slider$Female),0,female_slider$Female)
female_slider$`Total - Sex` <- ifelse(is.na(female_slider$`Total - Sex`),0,female_slider$`Total - Sex`)
female_slider$overall_percent_female <- ifelse(female_slider$`Total - Sex` >0,female_slider$Female/female_slider$`Total - Sex`*100,0)
female_slider$overall_percent_female <- ifelse(female_slider$overall_percent_female>100, 100, female_slider$overall_percent_female)
female_slider <- female_slider %>% ungroup() %>% distinct(geography,health_region,industry,naics_sector_name,noc_code,overall_percent_female)
# # View(female_slider)
age65_slider <- table3_r_tool_f %>%
filter(sex == "Total - Sex") %>%
select(geography,health_region,industry,naics_sector_name,noc_code,sex,age,sum_total1) %>%
spread(age,sum_total1)
age65_slider$`65 years and over` <- ifelse(is.na(age65_slider$`65 years and over`),0,age65_slider$`65 years and over`)
age65_slider$`Total - 15 years and over` <- ifelse(is.na(age65_slider$`Total - 15 years and over`),0,age65_slider$`Total - 15 years and over`)
age65_slider$overall_percent_65 <- ifelse(age65_slider$`Total - 15 years and over` >0,age65_slider$`65 years and over`/age65_slider$`Total - 15 years and over`*100,0)
age65_slider$overall_percent_65 <- ifelse(age65_slider$overall_percent_65>100, 100, age65_slider$overall_percent_65)
age65_slider <-age65_slider %>% ungroup() %>% distinct(geography,health_region,industry,naics_sector_name,noc_code,overall_percent_65)
# # View(age65_slider)
sliders <- merge(female_slider,age65_slider,by=c('geography','health_region', 'industry','naics_sector_name', 'noc_code'),all=T)
sliders$overall_percent_female <- ifelse(is.na(sliders$overall_percent_female),0,sliders$overall_percent_female)
sliders$overall_percent_65 <- ifelse(is.na(sliders$overall_percent_65),0,sliders$overall_percent_65)
# View(sliders)
table3_datatool <- merge(table3_r_tool_f,sliders,by=c('geography','health_region', 'industry','naics_sector_name', 'noc_code'),all=T)
table3_datatool$overall_percent_female <- ifelse(is.na(table3_datatool$overall_percent_female),0,table3_datatool$overall_percent_female)
table3_datatool$overall_percent_65 <- ifelse(is.na(table3_datatool$overall_percent_65),0,table3_datatool$overall_percent_65)
###########################################
## MERGE WITH OCCUPATION MEASURES from ONET
############################################
# # View(onet)
table3_datatool <- merge(table3_datatool,onet, by=c("noc_code"),all.x=T)
# View(table3_datatool)
###########################################
## MERGE WITH INCOME
############################################
table3_median_income <- table3_median_income %>% select(-noc_code_class)
# # View(table3_median_income)
table3_median_income$noc_code <- as.numeric(table3_median_income$noc_code)
table3_median_income$noc_code<-formatC(table3_median_income$noc_code, width = 4, format = "d", flag = "0")
table3_median_income$noc_code<-as.character(table3_median_income$noc_code)
table3_datatool <- merge(table3_datatool,table3_median_income , by=c("health_region","noc_code"),all.x=T)
table3_datatool <- table3_datatool %>% dplyr::rename("median_income"="median_total1")
# View(table3_datatool)
##########################################
## MERGE WITH NOC_MERGE & SAVE
############################################
table3_sector <- merge(table3_datatool,NOC_MERGE,by="noc_code",all.x=T)
table3_sector <- table3_sector %>%
select(geography,health_region,industry,noc_broad,noc_broad_descript,naics_sector_name,noc_code,noc_code_class,sex,age,everything()) %>%
arrange(geography,health_region,industry,naics_sector_name,noc_code,sex,age) %>%
mutate_if(is.numeric, round, 0)
table3_sector <- table3_sector%>%mutate(noc_code_class=substring(noc_code_class,6))
table3_sector<-table3_sector%>%mutate(noc_code_class=gsub("\\s*\\([^\\)]+\\)","",as.character(noc_code_class)))
table3_sector<-table3_sector%>%mutate(noc_code_class= gsub('[0-9]+', '', noc_code_class))
table3_sector <- table3_sector%>%mutate(health_region=substring(health_region,6))
##########################################
## Apply PHO Operational names for PHUs in Ontario
##########################################
table3_sector$health_region_ontario = factor(
table3_sector$health_region,
levels = c(
'The District of Algoma Health Unit',
'Brant County Health Unit',
'Durham Regional Health Unit',
'Grey Bruce Health Unit',
'Haldimand-Norfolk Health Unit',
'Haliburton, Kawartha, Pine Ridge District Health Unit',
'Halton Regional Health Unit',
'City of Hamilton Health Unit',
'Hastings and Prince Edward Counties Health Unit',
'Huron County Health Unit',
'Chatham-Kent Health Unit',
'Kingston, Frontenac and Lennox and Addington Health Unit',
'Lambton Health Unit',
'Leeds, Grenville and Lanark District Health Unit',
'Middlesex-London Health Unit',
'Niagara Regional Area Health Unit',
'North Bay Parry Sound District Health Unit',
'Northwestern Health Unit',
'City of Ottawa Health Unit',
'Peel Regional Health Unit',
'Perth District Health Unit',
'Peterborough County-City Health Unit',
'Porcupine Health Unit',
'Renfrew County and District Health Unit',
'The Eastern Ontario Health Unit',
'Simcoe Muskoka District Health Unit',
'Sudbury and District Health Unit',
'Thunder Bay District Health Unit',
'Timiskaming Health Unit',
'Waterloo Health Unit',
'Wellington-Dufferin-Guelph Health Unit',
'Windsor-Essex County Health Unit',
'York Regional Health Unit',
'Oxford Elgin St. Thomas Health Unit',
'City of Toronto Health Unit'
),
labels = c(
'Algoma Public Health',
'Brant County Health Unit',
'Durham Region Health Department',
'Grey Bruce Health Unit',
'Haldimand-Norfolk Health Unit',
'Haliburton, Kawartha, Pine Ridge District Health Unit',
'Halton Region Public Health',
'City of Hamilton Public Health Services',
'Hastings Prince Edward Public Health',
'Huron Public Health',
'Chatham-Kent Public Health',
'Kingston, Frontenac and Lennox & Addington Public Health',
'Lambton Public Health',
'Leeds, Grenville & Lanark District Health Unit',
'Middlesex-London Health Unit',
'Niagara Region Public Health',
'North Bay Parry Sound District Health Unit',
'Northwestern Health Unit',
'Ottawa Public Health',
'Peel Public Health',
'Perth Public Health',
'Peterborough Public Health',
'Porcupine Health Unit',
'Renfrew County and District Health Unit',
'Eastern Ontario Health Unit',
'Simcoe Muskoka District Health Unit',
'Public Health Sudbury & Districts',
'Thunder Bay District Health Unit',
'Timiskaming Health Unit',
'Region of Waterloo Public Health and Emergency Services',
'Wellington-Dufferin-Guelph Public Health',
'Windsor-Essex County Health Unit',
'York Region Public Health',
'Southwestern Public Health',
'Toronto Public Health'
)
)
table3_sector$health_region_ontario <- as.character(table3_sector$health_region_ontario)
table3_sector$health_region <- ifelse(table3_sector$geography %in% "Ontario", table3_sector$health_region_ontario,table3_sector$health_region)
table3_sector$health_region <- ifelse(is.na(table3_sector$health_region),"Peterborough Public Health",table3_sector$health_region)
table3_sector <- table3_sector %>% select(-health_region_ontario)
table3_sector <- table3_sector %>% filter(!(is.na(sum_total1)))
table3_datatool <- readRDS("table3_datatool.rds")
table3_datatool$naics_sector_name <- c("Total Sectors")
table3_final <- rbind(table3_sector,table3_datatool)
table3_final$age=factor(table3_final$age,
levels=c("15 - 24 years",
"25 - 34 years",
"35 - 44 years",
"45 - 54 years",
"55 - 64 years",
"65 years and over",
"Total - 15 years and over"),
labels=c("15 - 24",
"25 - 34",
"35 - 44",
"45 - 54",
"55 - 64",
"65+",
"Total"))
# View(table3_final)
table3_final$sex=factor(table3_final$sex,
levels=c("Female",
"Male",
"Total - Sex"),
labels=c("Female",
"Male",
"Total"))
table3_final <- table3_final %>%
select(geography,health_region,industry,naics_sector_name,noc_code,noc_code_class,everything()) %>%
arrange(geography,health_region,industry,naics_sector_name,noc_code,noc_code_class) %>%
filter(sum_total1 > 10)
table3_final_ontario <- table3_final %>% filter(geography=="Ontario")
saveRDS(table3_final, file = "table3_final.rds")
#### create input dataset with regions for sidebar selections in tool
regions_input <- readRDS("table3_final.rds")
regions_input <- regions_input %>% distinct(geography,health_region) %>% filter(!is.na(health_region))
### create the total dataset for tabs that use overall sex and age
table3_final <- readRDS("table3_final.rds")
table3_final$median_income_plot <-
ifelse(table3_final$median_income > 150000,
150000,
table3_final$median_income)
table3_final$noc_broad_descript = factor(
table3_final$noc_broad_descript,
levels = c(
"Management occupations",
"Business, finance and administration occupations",
"Natural and applied sciences and related occupations",
"Health occupations",
"Occupations in education, law and social, community and government services",
"Occupations in art, culture, recreation and sport",
"Sales and service occupations",
"Trades, transport and equipment operators and related occupations",
"Natural resources, agriculture and related production occupations",
"Occupations in manufacturing and utilities"
),
labels = c(
"Management",
"Business",
"Sciences",
"Health",
"Community",
"Culture",
"Sales",
"Trades",
"Agriculture",
"Utilities"
)
)
table3_final_total <- table3_final %>% filter(sex == 'Total' & age == 'Total')
## Save datatool datasets
saveRDS(regions_input, file = "regions_input.rds")
saveRDS(table3_final_total, file = "table3_final_total.rds")
saveRDS(table3_final, file = "table3_final.rds")
|
######ParentSchoolSatisfaction######
##############Tabela dados qualitativos(categóricos)#################
tab15<-table(data1$ParentschoolSatisfaction)
tab15
f<-tab15
F<-cumsum(f)
fr<-f/sum(f)
Fra<-cumsum(fr)
#f - Frequência Absoluta#
#fr - Frequência Relatida#
dist15<-cbind(f, F, fr, Fra)
dist15
Total<-c(sum(f), NA, sum(fr), NA)
dist15<-rbind(dist15, Total)
dist15
##########Gráfico de Barras Simples##############
barplot(tab15,main="Gráfico de Barras\n Variável: ParentSchoolSatisfaction",
ylab= "Frequência", xlab="Result", col=c("Black","Pink"))
##########Gráfico de Setor##############
pie(tab15, labels = c(names(tab15)),main="Distribuição dos elementos da amostra segundo ParentSchoolSatisfaction", col=c("Black","Pink"))
| /matvanc/R - Trab (Fischer, Marina, Davi, Siaudzionis)/Q2/ParentSchoolSatisfaction/ParentSchoolSatisfaction.R | no_license | brunovcosta/IME | R | false | false | 799 | r | ######ParentSchoolSatisfaction######
##############Tabela dados qualitativos(categóricos)#################
tab15<-table(data1$ParentschoolSatisfaction)
tab15
f<-tab15
F<-cumsum(f)
fr<-f/sum(f)
Fra<-cumsum(fr)
#f - Frequência Absoluta#
#fr - Frequência Relatida#
dist15<-cbind(f, F, fr, Fra)
dist15
Total<-c(sum(f), NA, sum(fr), NA)
dist15<-rbind(dist15, Total)
dist15
##########Gráfico de Barras Simples##############
barplot(tab15,main="Gráfico de Barras\n Variável: ParentSchoolSatisfaction",
ylab= "Frequência", xlab="Result", col=c("Black","Pink"))
##########Gráfico de Setor##############
pie(tab15, labels = c(names(tab15)),main="Distribuição dos elementos da amostra segundo ParentSchoolSatisfaction", col=c("Black","Pink"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pcldar.R
\name{get_phi}
\alias{get_phi}
\title{get_phi}
\usage{
get_phi(lda)
}
\arguments{
\item{lda}{LDA sampler object}
}
\description{
Get the word/topic distribution (phi matrix) from an LDA sampler
}
| /man/get_phi.Rd | no_license | lejon/pcldar | R | false | true | 283 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pcldar.R
\name{get_phi}
\alias{get_phi}
\title{get_phi}
\usage{
get_phi(lda)
}
\arguments{
\item{lda}{LDA sampler object}
}
\description{
Get the word/topic distribution (phi matrix) from an LDA sampler
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segmentByGFLars.R
\name{segmentByGFLars}
\alias{segmentByGFLars}
\title{Group fused Lars segmentation (low-level)}
\usage{
segmentByGFLars(Y, K, weights = defaultWeights(nrow(Y)),
epsilon = 1e-09, verbose = FALSE)
}
\arguments{
\item{Y}{A \code{n*p} matrix of signals to be segmented}
\item{K}{The number of change points to find}
\item{weights}{A \code{(n-1)*1} vector of weights for the weigthed group
fused Lasso penalty. See Details.}
\item{epsilon}{Values smaller than epsilon are considered null. Defaults to
\code{1e-9}.}
\item{verbose}{A \code{logical} value: should extra information be output ?
Defaults to \code{FALSE}.}
}
\value{
A list with elements: \describe{\item{bkp}{A vector of \code{k} candidate
change-point positions} \item{lambda}{The estimated lambda values for each
change-point} \item{mean}{A vector of length \code{p}, the mean signal per
column} \item{value}{A \code{i x p} matrix of change-point values for the
first i change-points} \item{c}{\eqn{\hat{c}}, a \code{n-1 x K} matrix }}
}
\description{
Low-level function for multivariate fused Lars segmentation (GFLars)
}
\details{
This function recrusively looks for the best candidate change point
according to group-fused LARS. This is a low-level function. It is generally
advised to use the wrapper \code{\link{doGFLars}} which also works on data
frames, has a convenient argument \code{stat}, and includes a basic
workaround for handling missing values.
See also \code{\link{jointSeg}} for combining group fused LARS segmentation
with pruning by dynamic programming (\code{\link{pruneByDP}}).
See \code{\link{PSSeg}} for segmenting genomic signals from SNP arrays.
The default weights \eqn{\sqrt{n/(i*(n-i))}} are calibrated as suggested by
Bleakley and Vert (2011). Using this calibration, the first breakpoint
maximizes the likelihood ratio test (LRT) statistic.
}
\note{
This implementation is derived from the MATLAB code by Vert and
Bleakley: \url{http://cbio.ensmp.fr/GFLseg}.
}
\examples{
p <- 2
trueK <- 10
sim <- randomProfile(1e4, trueK, 1, p)
Y <- sim$profile
K <- 2*trueK
res <- segmentByGFLars(Y, K)
print(res$bkp)
print(sim$bkp)
plotSeg(Y, res$bkp)
}
\references{
Bleakley, K., & Vert, J. P. (2011). The group fused lasso for
multiple change-point detection. arXiv preprint arXiv:1106.4199.
Vert, J. P., & Bleakley, K. (2010). Fast detection of multiple change-points
shared by many signals using group LARS. Advances in Neural Information
Processing Systems, 23, 2343-2351.
}
\seealso{
\code{\link{PSSeg}}, \code{\link{jointSeg}},
\code{\link{doGFLars}}, \code{\link{pruneByDP}}
}
\author{
Morgane Pierre-Jean and Pierre Neuvial
}
| /man/segmentByGFLars.Rd | no_license | cran/jointseg | R | false | true | 2,722 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segmentByGFLars.R
\name{segmentByGFLars}
\alias{segmentByGFLars}
\title{Group fused Lars segmentation (low-level)}
\usage{
segmentByGFLars(Y, K, weights = defaultWeights(nrow(Y)),
epsilon = 1e-09, verbose = FALSE)
}
\arguments{
\item{Y}{A \code{n*p} matrix of signals to be segmented}
\item{K}{The number of change points to find}
\item{weights}{A \code{(n-1)*1} vector of weights for the weigthed group
fused Lasso penalty. See Details.}
\item{epsilon}{Values smaller than epsilon are considered null. Defaults to
\code{1e-9}.}
\item{verbose}{A \code{logical} value: should extra information be output ?
Defaults to \code{FALSE}.}
}
\value{
A list with elements: \describe{\item{bkp}{A vector of \code{k} candidate
change-point positions} \item{lambda}{The estimated lambda values for each
change-point} \item{mean}{A vector of length \code{p}, the mean signal per
column} \item{value}{A \code{i x p} matrix of change-point values for the
first i change-points} \item{c}{\eqn{\hat{c}}, a \code{n-1 x K} matrix }}
}
\description{
Low-level function for multivariate fused Lars segmentation (GFLars)
}
\details{
This function recrusively looks for the best candidate change point
according to group-fused LARS. This is a low-level function. It is generally
advised to use the wrapper \code{\link{doGFLars}} which also works on data
frames, has a convenient argument \code{stat}, and includes a basic
workaround for handling missing values.
See also \code{\link{jointSeg}} for combining group fused LARS segmentation
with pruning by dynamic programming (\code{\link{pruneByDP}}).
See \code{\link{PSSeg}} for segmenting genomic signals from SNP arrays.
The default weights \eqn{\sqrt{n/(i*(n-i))}} are calibrated as suggested by
Bleakley and Vert (2011). Using this calibration, the first breakpoint
maximizes the likelihood ratio test (LRT) statistic.
}
\note{
This implementation is derived from the MATLAB code by Vert and
Bleakley: \url{http://cbio.ensmp.fr/GFLseg}.
}
\examples{
p <- 2
trueK <- 10
sim <- randomProfile(1e4, trueK, 1, p)
Y <- sim$profile
K <- 2*trueK
res <- segmentByGFLars(Y, K)
print(res$bkp)
print(sim$bkp)
plotSeg(Y, res$bkp)
}
\references{
Bleakley, K., & Vert, J. P. (2011). The group fused lasso for
multiple change-point detection. arXiv preprint arXiv:1106.4199.
Vert, J. P., & Bleakley, K. (2010). Fast detection of multiple change-points
shared by many signals using group LARS. Advances in Neural Information
Processing Systems, 23, 2343-2351.
}
\seealso{
\code{\link{PSSeg}}, \code{\link{jointSeg}},
\code{\link{doGFLars}}, \code{\link{pruneByDP}}
}
\author{
Morgane Pierre-Jean and Pierre Neuvial
}
|
# http://projecteuler.net/problem=34
p <- factorial(0:9)
n <- floor(log(p, 10)) + 1
b <- 10
while ((function(a) {b <- 0; while(a >= 1) {b <- b + factorial(a%%10); a <- floor(a / 10)}; b})(b-1) >= b*10) b <- b * 10
a <- 0
for (d in 2:log(b, 10)) {
s <- rowSums(do.call("expand.grid", rep(list(p[n<=d]), d)))
o <- do.call("expand.grid", rep(list(which(n<=d)-1), d))[floor(log(s, 10)) + 1 == d, ]
s <- s[floor(log(s, 10)) + 1 == d]
o <- 10^((d-1):0) %*% t(o)
a <- a + sum(o[o==s])
}
print(a)
cat(a, file = pipe('pbcopy'))
| /R/p0034.r | no_license | kohske/peuleR | R | false | false | 534 | r | # http://projecteuler.net/problem=34
p <- factorial(0:9)
n <- floor(log(p, 10)) + 1
b <- 10
while ((function(a) {b <- 0; while(a >= 1) {b <- b + factorial(a%%10); a <- floor(a / 10)}; b})(b-1) >= b*10) b <- b * 10
a <- 0
for (d in 2:log(b, 10)) {
s <- rowSums(do.call("expand.grid", rep(list(p[n<=d]), d)))
o <- do.call("expand.grid", rep(list(which(n<=d)-1), d))[floor(log(s, 10)) + 1 == d, ]
s <- s[floor(log(s, 10)) + 1 == d]
o <- 10^((d-1):0) %*% t(o)
a <- a + sum(o[o==s])
}
print(a)
cat(a, file = pipe('pbcopy'))
|
a <- switch (4,
"one","Two","Three"
)
print(a) | /WorkSpace/R Programming/R-DecisionMaking/Switch.R | no_license | chinna510/Projects | R | false | false | 49 | r |
a <- switch (4,
"one","Two","Three"
)
print(a) |
#########################################
# Farmyard Problem Set
#
#
#
#########################################
# Read in the csv file and store it in an object called "farmyard"----------
# A bit about the dataset:----------------
# animal = species
# weight = in pounds
# age = in years
# color = coat color
# spots = binary (present (1) /not present (0))
# sex = Male (M) or Female (F)
# appetite = farmer rating of how hungry each animal is (higher numbers indicate bigger appetites)
# eyecolor = eye color
# aggressiveness = rating of aggressiveness (higher numbers indicate angrier animals)
# yard = which yard the animal lives in
# This dataset is styled after data available in R.
# Call up a list of all available datasets in R.
# (Hint: A question mark may be helpful.)
# dataset inspection------------------
#a. How many of each kind of animal is present on the farm?
#b. How do you see the top of the dataset? The bottom?
#c. How would you display a count of all the animals and their coat colors?
#d. What are three ways to display the data in the column "eyecolor"?
#e. How many variables are in the dataset? How can you see a list of them?
#f. Find the typo in the column names and fix it.
# General Tasks ----------------------
#a. How many goats have red eyes and a red coat color?
#b. How many unique weights are there?
#c. How many different horse coat colors are there?
#d. Make a new dataset that only includes animals that were heavier than average and does not include pigs.
#e. Is there a difference in average weight between the sexes?
#f. Which yard has the largest number of animals?
#g. Create a data.frame of just horses. Sort it by weight.
# Basic animal round up. For the animal of your choice, determine how many: --------
#a. have spots
#b. have a red coat color
#c. are very aggressive (i.e., 1 is not aggressive at all, 5 is very aggressive)
#d. have blue eyes
#e. are female
#f. weight less than 1100lbs?
# Phenotypic Combinations ---------------
# That's great, but you also need to know a lot of attribute combinations
# For the animal of your choice, how many:
#a. do not have spots and are brown
#b. have blue eyes and are above the mean size
#c. are male and live in yard 3
#d. have black eyes and spots
#e. are white or have brown eyes
#f. are not very aggressive (2) and are under average weight
#g. have spots or blue eyes
# IT'S BROKEN-------------------------
# coding errors. For each of the following questions, if the code runs, explain what it is doing, and then fix
# the code so that it produces the desired result:
#a) Pull out all animals who are at least four years old
farmyard[, age >= 4]
#b) Create a new object with weight and coat color
mat <- matrix(farmyard$weight, farmyard$color)
#c) Remove all of the female animals from the dataset
farmyard[farmyard$sex != F, ]
#d) Create a new object with the yard number and appetite of all goats
dat <- farmyard[farmyard == "goat", 7,10]
#e) Show all the animals with the highest aggression scores
farmyard[aggression == 5]
#f) Count up the number of white chickens
length(farmyard[farmyard$animal == chicken])
#g) Determine how many animals have black eyes
dim(farmyard$eyecolor == "black")
#h) Show all male animals
farmyard[farmyard$sex == 'm',]
#i) Sum up the weight of all the cows
sum(farmyard[farmyard$weight, "cow"])
# Additional Tasks-------------------------------
#a. Find the mode for weight. Replace that value with -9.
#b. Create a vector of unique appetite values
#c. Create a new vector composed of only even numbered rows
#d. What are two ways that you could save the data object?
#e. Round appetite to one decimal place
# Bonus Round --------------######################
#
# If you finish early, you are welcome to work on the problems below. You are not expected to know how to do these problems yet,
# but all of these tasks will be doable by the end of the course.
#a. Create a new column called ID. Give each animal an ID based on its weight and species. The lightest animal will get a 1. For example, "pig_1" is the ID for the lightest pig.
#b. Plot a histogram of weight for any animal
#c. Plot two histograms of weight side-by-side
#d. Create a scatterplot of weight vs. appetite. Is there a correlation? Change the color of the dots.
#e. Test if there is a significant mean difference in weight by sex for horses.
| /scripts/farmyard-problem-set.R | no_license | tpyork/HGEN-517 | R | false | false | 4,517 | r | #########################################
# Farmyard Problem Set
#
#
#
#########################################
# Read in the csv file and store it in an object called "farmyard"----------
# A bit about the dataset:----------------
# animal = species
# weight = in pounds
# age = in years
# color = coat color
# spots = binary (present (1) /not present (0))
# sex = Male (M) or Female (F)
# appetite = farmer rating of how hungry each animal is (higher numbers indicate bigger appetites)
# eyecolor = eye color
# aggressiveness = rating of aggressiveness (higher numbers indicate angrier animals)
# yard = which yard the animal lives in
# This dataset is styled after data available in R.
# Call up a list of all available datasets in R.
# (Hint: A question mark may be helpful.)
# dataset inspection------------------
#a. How many of each kind of animal is present on the farm?
#b. How do you see the top of the dataset? The bottom?
#c. How would you display a count of all the animals and their coat colors?
#d. What are three ways to display the data in the column "eyecolor"?
#e. How many variables are in the dataset? How can you see a list of them?
#f. Find the typo in the column names and fix it.
# General Tasks ----------------------
#a. How many goats have red eyes and a red coat color?
#b. How many unique weights are there?
#c. How many different horse coat colors are there?
#d. Make a new dataset that only includes animals that were heavier than average and does not include pigs.
#e. Is there a difference in average weight between the sexes?
#f. Which yard has the largest number of animals?
#g. Create a data.frame of just horses. Sort it by weight.
# Basic animal round up. For the animal of your choice, determine how many: --------
#a. have spots
#b. have a red coat color
#c. are very aggressive (i.e., 1 is not aggressive at all, 5 is very aggressive)
#d. have blue eyes
#e. are female
#f. weight less than 1100lbs?
# Phenotypic Combinations ---------------
# That's great, but you also need to know a lot of attribute combinations
# For the animal of your choice, how many:
#a. do not have spots and are brown
#b. have blue eyes and are above the mean size
#c. are male and live in yard 3
#d. have black eyes and spots
#e. are white or have brown eyes
#f. are not very aggressive (2) and are under average weight
#g. have spots or blue eyes
# IT'S BROKEN-------------------------
# coding errors. For each of the following questions, if the code runs, explain what it is doing, and then fix
# the code so that it produces the desired result:
#a) Pull out all animals who are at least four years old
farmyard[, age >= 4]
#b) Create a new object with weight and coat color
mat <- matrix(farmyard$weight, farmyard$color)
#c) Remove all of the female animals from the dataset
farmyard[farmyard$sex != F, ]
#d) Create a new object with the yard number and appetite of all goats
dat <- farmyard[farmyard == "goat", 7,10]
#e) Show all the animals with the highest aggression scores
farmyard[aggression == 5]
#f) Count up the number of white chickens
length(farmyard[farmyard$animal == chicken])
#g) Determine how many animals have black eyes
dim(farmyard$eyecolor == "black")
#h) Show all male animals
farmyard[farmyard$sex == 'm',]
#i) Sum up the weight of all the cows
sum(farmyard[farmyard$weight, "cow"])
# Additional Tasks-------------------------------
#a. Find the mode for weight. Replace that value with -9.
#b. Create a vector of unique appetite values
#c. Create a new vector composed of only even numbered rows
#d. What are two ways that you could save the data object?
#e. Round appetite to one decimal place
# Bonus Round --------------######################
#
# If you finish early, you are welcome to work on the problems below. You are not expected to know how to do these problems yet,
# but all of these tasks will be doable by the end of the course.
#a. Create a new column called ID. Give each animal an ID based on its weight and species. The lightest animal will get a 1. For example, "pig_1" is the ID for the lightest pig.
#b. Plot a histogram of weight for any animal
#c. Plot two histograms of weight side-by-side
#d. Create a scatterplot of weight vs. appetite. Is there a correlation? Change the color of the dots.
#e. Test if there is a significant mean difference in weight by sex for horses.
|
fish_data = read.csv("Gaeta_etal_CLC_data_1.csv")
library(dplyr)
fish_data_cat = fish_data %>%
mutate(length_cat = ifelse(length > 200, "big", "small"))
fish_data_cat = fish_data %>%
mutate(length_cat = ifelse(length > 300, "big", "small"))
fish_data_cat_filter <- filter(fish_data_cat, scalelength > 1)
library(tidyverse)
ggplot(data = fish_data_cat_filter) +
geom_point(mapping = aes(x = length, y = scalelength, color = lakeid))
| /fish-analysis.R | no_license | sr320/BellaColpo | R | false | false | 437 | r | fish_data = read.csv("Gaeta_etal_CLC_data_1.csv")
library(dplyr)
fish_data_cat = fish_data %>%
mutate(length_cat = ifelse(length > 200, "big", "small"))
fish_data_cat = fish_data %>%
mutate(length_cat = ifelse(length > 300, "big", "small"))
fish_data_cat_filter <- filter(fish_data_cat, scalelength > 1)
library(tidyverse)
ggplot(data = fish_data_cat_filter) +
geom_point(mapping = aes(x = length, y = scalelength, color = lakeid))
|
function (domains)
{
e <- get("data.env", .GlobalEnv)
e[["host_extract_"]][[length(e[["host_extract_"]]) + 1]] <- list(domains = domains)
.Call("_urltools_host_extract_", domains)
}
| /valgrind_test_dir/host_extract_-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 195 | r | function (domains)
{
e <- get("data.env", .GlobalEnv)
e[["host_extract_"]][[length(e[["host_extract_"]]) + 1]] <- list(domains = domains)
.Call("_urltools_host_extract_", domains)
}
|
#' Prints a flashlight
#'
#' Print method for an object of class "flashlight".
#'
#' @param x A on object of class "flashlight".
#' @param ... Further arguments passed from other methods.
#' @returns Invisibly, the input is returned.
#' @export
#' @examples
#' fit <- lm(Sepal.Length ~ ., data = iris)
#' x <- flashlight(model = fit, label = "lm", y = "Sepal.Length", data = iris)
#' x
#' @seealso [flashlight()]
print.flashlight <- function(x, ...) {
cat("\nFlashlight", x$label, "\n")
cat("\nModel:\t\t\t", .yn(x$model, "Yes"))
cat("\ny:\t\t\t", .yn(x$y))
cat("\nw:\t\t\t", .yn(x$w))
cat("\nby:\t\t\t", .yn(x$by))
cat("\ndata dim:\t\t", .yn(dim(x$data)))
cat("\npredict_fct default:\t", isTRUE(all.equal(stats::predict, x$predict_function)))
cat("\nlinkinv default:\t", isTRUE(all.equal(function(z) z, x$linkinv)))
cat("\nmetrics:\t\t", .yn(x[["metrics"]], names(x$metrics)))
cat("\nSHAP:\t\t\t", .yn(x$shap, "Yes"))
cat("\n")
invisible(x)
}
# Helper function
.yn <- function(z, ret = z) {
if (!is.null(z)) ret else "No"
}
| /R/print_flashlight.R | no_license | cran/flashlight | R | false | false | 1,084 | r | #' Prints a flashlight
#'
#' Print method for an object of class "flashlight".
#'
#' @param x A on object of class "flashlight".
#' @param ... Further arguments passed from other methods.
#' @returns Invisibly, the input is returned.
#' @export
#' @examples
#' fit <- lm(Sepal.Length ~ ., data = iris)
#' x <- flashlight(model = fit, label = "lm", y = "Sepal.Length", data = iris)
#' x
#' @seealso [flashlight()]
print.flashlight <- function(x, ...) {
cat("\nFlashlight", x$label, "\n")
cat("\nModel:\t\t\t", .yn(x$model, "Yes"))
cat("\ny:\t\t\t", .yn(x$y))
cat("\nw:\t\t\t", .yn(x$w))
cat("\nby:\t\t\t", .yn(x$by))
cat("\ndata dim:\t\t", .yn(dim(x$data)))
cat("\npredict_fct default:\t", isTRUE(all.equal(stats::predict, x$predict_function)))
cat("\nlinkinv default:\t", isTRUE(all.equal(function(z) z, x$linkinv)))
cat("\nmetrics:\t\t", .yn(x[["metrics"]], names(x$metrics)))
cat("\nSHAP:\t\t\t", .yn(x$shap, "Yes"))
cat("\n")
invisible(x)
}
# Helper function
.yn <- function(z, ret = z) {
if (!is.null(z)) ret else "No"
}
|
# Random Forest Classification
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting Random Forest Classification to the Training set
# install.packages('randomForest')
library(randomForest)
set.seed(123)
classifier = randomForest(x = training_set[-3],y = training_set$Purchased,ntree = 500)
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3])
# Making the Confusion Matrix
cm = table(test_set[, 3], y_pred)
# Visualising the Training set results
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, grid_set)
plot(set[, -3],main = 'Random Forest Classification (Training set)',xlab = 'Age', ylab = 'Estimated Salary',xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'black', 'red3'))
# Visualising the Test set results
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, grid_set)
plot(set[, -3], main = 'Random Forest Classification (Test set)',xlab = 'Age', ylab = 'Estimated Salary',xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'Black', 'red3'))
# Choosing the number of trees
plot(classifier)
text(classifier)
| /ML with R/3. Classification/Random Forest Classification/random_forest_classification.R | no_license | Manjunath7717/Machine-Learning-Algorithms | R | false | false | 2,353 | r | # Random Forest Classification
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting Random Forest Classification to the Training set
# install.packages('randomForest')
library(randomForest)
set.seed(123)
classifier = randomForest(x = training_set[-3],y = training_set$Purchased,ntree = 500)
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3])
# Making the Confusion Matrix
cm = table(test_set[, 3], y_pred)
# Visualising the Training set results
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, grid_set)
plot(set[, -3],main = 'Random Forest Classification (Training set)',xlab = 'Age', ylab = 'Estimated Salary',xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'black', 'red3'))
# Visualising the Test set results
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, grid_set)
plot(set[, -3], main = 'Random Forest Classification (Test set)',xlab = 'Age', ylab = 'Estimated Salary',xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'Black', 'red3'))
# Choosing the number of trees
plot(classifier)
text(classifier)
|
# Title: Fluoroprobe Heatmaps
# Author: Ryan McClure & Mary Lofton
# Date last updated: 08AUG18
# Description: Makes heatmaps of fluoroprobe data
#Note: currently this script plots DOY on the x-axis and so can only plot 1 year at a time
rm(list=ls())
########WHAT RESERVOIR ARE YOU WORKING WITH?########
Reservoir = "CCR" #choose from FCR, BVR, CCR
####################################################
########WHAT YEAR WOULD YOU LIKE TO PLOT?###########
plot_year = 2018 #choose from 2014-2018
####################################################
# load packages
#install.packages('pacman')
pacman::p_load(tidyverse, lubridate, akima, reshape2,
gridExtra, grid, colorRamps,RColorBrewer, rLakeAnalyzer, cowplot)
# Load .txt files for appropriate reservoir
#NOTE: this script is not currently set up to handle upstream sites in FCR
col_names <- names(read_tsv("./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/FP_txt/20180410_FCR_50.txt", n_max = 0))
raw_fp <- dir(path = "./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/FP_txt", pattern = paste0("*_",Reservoir,"_50.txt")) %>%
map_df(~ read_tsv(file.path(path = "./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/FP_txt", .), col_types = cols(.default = "c"), col_names = col_names, skip = 2))
fp <- raw_fp %>%
mutate(DateTime = `Date/Time`, GreenAlgae_ugL = as.numeric(`Green Algae`), Bluegreens_ugL = as.numeric(`Bluegreen`),
Browns_ugL = as.numeric(`Diatoms`), Mixed_ugL = as.numeric(`Cryptophyta`), YellowSubstances_ugL = as.numeric(`Yellow substances`),
TotalConc_ugL = as.numeric(`Total conc.`), Transmission_perc = as.numeric(`Transmission`), Depth_m = `Depth`) %>%
select(DateTime, GreenAlgae_ugL, Bluegreens_ugL, Browns_ugL, Mixed_ugL, YellowSubstances_ugL,
TotalConc_ugL, Transmission_perc, Depth_m) %>%
mutate(DateTime = as.POSIXct(as_datetime(DateTime, tz = "", format = "%m/%d/%Y %I:%M:%S %p"))) %>%
filter(year(DateTime) == plot_year)%>%
mutate(Date = date(DateTime), DOY = yday(DateTime))
# filter out depths in the fp cast that are closest to specified values.
if (Reservoir == "FCR"){
depths = seq(0.1, 9.7, by = 0.3)
df.final<-data.frame()
for (i in 1:length(depths)){
fp_layer<-fp %>% group_by(Date) %>% slice(which.min(abs(as.numeric(Depth_m) - depths[i])))
# Bind each of the data layers together.
df.final = bind_rows(df.final, fp_layer)
}
} else if (Reservoir == "BVR"){
depths = seq(0.1, 10.3, by = 0.3)
df.final<-data.frame()
for (i in 1:length(depths)){
fp_layer<-fp %>% group_by(Date) %>% slice(which.min(abs(as.numeric(Depth_m) - depths[i])))
# Bind each of the data layers together.
df.final = bind_rows(df.final, fp_layer)
}
} else if(Reservoir == "CCR"){
depths = seq(0.1, 19.9, by = 0.3)
df.final<-data.frame()
for (i in 1:length(depths)){
fp_layer<-fp %>% group_by(Date) %>% slice(which.min(abs(as.numeric(Depth_m) - depths[i])))
# Bind each of the data layers together.
df.final = bind_rows(df.final, fp_layer)
}
}
# Re-arrange the data frame by date
fp_new <- arrange(df.final, Date)
# Round each extracted depth to the nearest 10th.
fp_new$Depth_m <- round(as.numeric(fp_new$Depth_m), digits = 0.5)
# Select and make each fp variable a separate dataframe
# I have done this for the heatmap plotting purposes.
green <- select(fp_new, DateTime, Depth_m, GreenAlgae_ugL, Date, DOY)
bluegreen <- select(fp_new, DateTime, Depth_m, Bluegreens_ugL, Date, DOY)
brown <- select(fp_new, DateTime, Depth_m, Browns_ugL, Date, DOY)
mixed <- select(fp_new, DateTime, Depth_m, Mixed_ugL, Date, DOY)
yellow <- select(fp_new, DateTime, Depth_m, YellowSubstances_ugL, Date, DOY)
total <- select(fp_new, DateTime, Depth_m, TotalConc_ugL, Date, DOY)
trans <- select(fp_new, DateTime, Depth_m, Transmission_perc, Date, DOY)
# Complete data interpolation for the heatmaps
# interative processes here
#green algae
##NOTE: the interp function WILL NOT WORK if your vectors are not numeric or have NAs or Infs
interp_green <- interp(x=green$DOY, y = green$Depth_m, z = green$GreenAlgae_ugL,
xo = seq(min(green$DOY), max(green$DOY), by = .1),
yo = seq(0.1, 19.9, by = 0.01),
extrap = T, linear = T, duplicate = "strip")
interp_green <- interp2xyz(interp_green, data.frame=T)
#Bluegreen algae
interp_bluegreen <- interp(x=bluegreen$DOY, y = bluegreen$Depth_m, z = bluegreen$Bluegreens_ugL,
xo = seq(min(bluegreen$DOY), max(bluegreen$DOY), by = .1),
yo = seq(0.1, 19.9, by = 0.01),
extrap = F, linear = T, duplicate = "strip")
interp_bluegreen <- interp2xyz(interp_bluegreen, data.frame=T)
#Browns
interp_brown <- interp(x=brown$DOY, y = brown$Depth_m, z = brown$Browns_ugL,
xo = seq(min(brown$DOY), max(brown$DOY), by = .1),
yo = seq(0.1, 19.9, by = 0.01),
extrap = F, linear = T, duplicate = "strip")
interp_brown <- interp2xyz(interp_brown, data.frame=T)
#Mixed
interp_mixed <- interp(x=mixed$DOY, y = mixed$Depth_m, z = mixed$Mixed_ugL,
xo = seq(min(mixed$DOY), max(mixed$DOY), by = .1),
yo = seq(0.1, 19.9, by = 0.01),
extrap = F, linear = T, duplicate = "strip")
interp_mixed <- interp2xyz(interp_mixed, data.frame=T)
#Yellow substances
interp_yellow <- interp(x=yellow$DOY, y = yellow$Depth_m, z = yellow$YellowSubstances_ugL,
xo = seq(min(yellow$DOY), max(yellow$DOY), by = .1),
yo = seq(0.1, 19.9, by = 0.01),
extrap = F, linear = T, duplicate = "strip")
interp_yellow <- interp2xyz(interp_yellow, data.frame=T)
#Total conc.
interp_total <- interp(x=total$DOY, y = total$Depth_m, z = total$TotalConc_ugL,
xo = seq(min(total$DOY), max(total$DOY), by = .1),
yo = seq(0.1, 19.9, by = 0.01),
extrap = F, linear = T, duplicate = "strip")
interp_total <- interp2xyz(interp_total, data.frame=T)
#Transmission
interp_trans <- interp(x=trans$DOY, y = trans$Depth_m, z = trans$Transmission_perc,
xo = seq(min(trans$DOY), max(trans$DOY), by = .1),
yo = seq(0.1, 19.9, by = 0.01),
extrap = F, linear = T, duplicate = "strip")
interp_trans <- interp2xyz(interp_trans, data.frame=T)
# Plotting ------------------------------------------------------------------
# All seven heatmaps share the same layout, so they are built with a single
# helper instead of seven copy-pasted ggplot blocks; styling edits now happen
# in one place.
# NOTE(review): the fill legend reads ug/L even for the transmission (%)
# panel, exactly as in the original code -- confirm that legend is intended.

#' Build one fluoroprobe heatmap from an interpolated x/y/z data frame.
#' @param interp_df data.frame with columns x (day of year), y (depth, m),
#'   z (interpolated value; NA outside the sampled region, drawn gray)
#' @param title_suffix character appended to the reservoir name for the title
#' @return a ggplot object
fp_heatmap <- function(interp_df, title_suffix) {
  ggplot(interp_df, aes(x = x, y = y)) +
    geom_raster(aes(fill = z)) +
    scale_y_reverse(expand = c(0, 0)) +
    scale_x_continuous(expand = c(0, 0)) +
    scale_fill_gradientn(colours = blue2green2red(60), na.value = "gray") +
    labs(x = "Day of year", y = "Depth (m)",
         title = paste0(Reservoir, title_suffix),
         fill = expression(paste(mu, g/L))) +
    theme_bw()
}

p1 <- fp_heatmap(interp_green, " Green Algae Heatmap")
p2 <- fp_heatmap(interp_bluegreen, " Cyanobacteria Heatmap")
p3 <- fp_heatmap(interp_brown, " Brown Algae Heatmap")
p4 <- fp_heatmap(interp_mixed, " 'MIXED' Heatmap")
p5 <- fp_heatmap(interp_yellow, " Yellow Substances Heatmap")
p6 <- fp_heatmap(interp_total, " Total Phytoplankton Heatmap")
p7 <- fp_heatmap(interp_trans, " Transmission % Heatmap")

# Stack all seven heatmaps in one column and save them to a single PDF.
# (An earlier grid.draw()/ggplotGrob() approach was superseded by
# cowplot::plot_grid and has been removed.)
final_plot <- plot_grid(p1, p2, p3, p4, p5, p6, p7, ncol = 1)
ggsave(plot = final_plot,
       file = paste0("./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/",
                     Reservoir, "_50_FP_2018.pdf"),
       h = 30, w = 10, units = "in", dpi = 300, scale = 1)
#multi-year plots
# Published FluoroProbe record from EDI, restricted to CCR at site 50.
# NOTE(review): the reservoir/site are hard-coded here rather than reusing the
# `Reservoir` variable set at the top of the script -- confirm this is intended.
fp_edi <- read_csv("./Data/DataAlreadyUploadedToEDI/EDIProductionFiles/MakeEMLFluoroProbe/FluoroProbe.csv")%>%
filter(Reservoir == "CCR" & Site == "50")
# Daily means per spectral group for the upper 10 m, reshaped to long format
# (one row per date x spectral group).
allyears <- fp_edi %>%
filter(Depth_m <= 10)%>%
mutate(Year = as.factor(year(DateTime)),
DOY = yday(DateTime),
Date = date(DateTime))%>%
group_by(Date,Year, DOY) %>%
summarize(Total = mean(TotalConc_ugL, na.rm = TRUE),
GreenAlgae = mean(GreenAlgae_ugL, na.rm = TRUE),
BluegreenAlgae = mean(Bluegreens_ugL, na.rm = TRUE),
BrownAlgae = mean(BrownAlgae_ugL, na.rm = TRUE),
MixedAlgae = mean(MixedAlgae_ugL, na.rm = TRUE)) %>%
gather(Total:MixedAlgae, key = "spectral_group", value = "ugL")
# 2018 time series of the three main spectral groups (Total and MixedAlgae
# are excluded from the plot).
plot_all <- ggplot(data = subset(allyears, Year == 2018 & spectral_group != "Total" & spectral_group != "MixedAlgae"), aes(x = DOY, y = ugL, group = spectral_group, colour = spectral_group))+
geom_line(size = 1)+
scale_colour_manual(values = c("darkcyan","chocolate1","chartreuse4"))+
xlab("Day of Year")+
ylab("micrograms per liter")+
ggtitle("2018")+
# ylim(c(0,5))+
# xlim(c(125,275))+
theme_bw()
plot_all
ggsave(plot_all, filename = "./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/CCR_epi_2018.png",
h = 3, w = 8, units = "in")
| /Scripts/Fluoroprobe_HEATMAPS_2018_R.R | no_license | katiek5/Reservoirs | R | false | false | 11,111 | r | # Title: Fluoroprobe Heatmaps
# Author: Ryan McClure & Mary Lofton
# Date last updated: 08AUG18
# Description: Makes heatmaps of fluoroprobe data
#Note: currently this script plots DOY on the x-axis and so can only plot 1 year at a time
rm(list=ls())
########WHAT RESERVOIR ARE YOU WORKING WITH?########
Reservoir = "CCR" #choose from FCR, BVR, CCR
####################################################
########WHAT YEAR WOULD YOU LIKE TO PLOT?###########
plot_year = 2018 #choose from 2014-2018
####################################################
# load packages
#install.packages('pacman')
pacman::p_load(tidyverse, lubridate, akima, reshape2,
gridExtra, grid, colorRamps,RColorBrewer, rLakeAnalyzer, cowplot)
# Load .txt files for appropriate reservoir
#NOTE: this script is not currently set up to handle upstream sites in FCR
col_names <- names(read_tsv("./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/FP_txt/20180410_FCR_50.txt", n_max = 0))
raw_fp <- dir(path = "./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/FP_txt", pattern = paste0("*_",Reservoir,"_50.txt")) %>%
map_df(~ read_tsv(file.path(path = "./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/FP_txt", .), col_types = cols(.default = "c"), col_names = col_names, skip = 2))
fp <- raw_fp %>%
mutate(DateTime = `Date/Time`, GreenAlgae_ugL = as.numeric(`Green Algae`), Bluegreens_ugL = as.numeric(`Bluegreen`),
Browns_ugL = as.numeric(`Diatoms`), Mixed_ugL = as.numeric(`Cryptophyta`), YellowSubstances_ugL = as.numeric(`Yellow substances`),
TotalConc_ugL = as.numeric(`Total conc.`), Transmission_perc = as.numeric(`Transmission`), Depth_m = `Depth`) %>%
select(DateTime, GreenAlgae_ugL, Bluegreens_ugL, Browns_ugL, Mixed_ugL, YellowSubstances_ugL,
TotalConc_ugL, Transmission_perc, Depth_m) %>%
mutate(DateTime = as.POSIXct(as_datetime(DateTime, tz = "", format = "%m/%d/%Y %I:%M:%S %p"))) %>%
filter(year(DateTime) == plot_year)%>%
mutate(Date = date(DateTime), DOY = yday(DateTime))
# Filter out the rows of each cast that are closest to a set of target depths.
# Only the depth grid differs by reservoir; the extraction loop itself was
# previously triplicated across the if/else branches and is now written once.
depths <- switch(Reservoir,
                 FCR = seq(0.1, 9.7, by = 0.3),
                 BVR = seq(0.1, 10.3, by = 0.3),
                 CCR = seq(0.1, 19.9, by = 0.3),
                 stop("Unrecognized Reservoir: ", Reservoir))
# For each target depth, keep the single observation per date whose measured
# depth is nearest to it, then stack the layers. bind_rows() over a list
# replaces the old grow-in-a-loop pattern and preserves the same depth-major
# row order.
df.final <- bind_rows(lapply(depths, function(d) {
  fp %>%
    group_by(Date) %>%
    slice(which.min(abs(as.numeric(Depth_m) - d)))
}))
# Re-arrange the data frame by date
fp_new <- arrange(df.final, Date)
# Round each extracted depth to the nearest 10th.
# NOTE(review): round()'s `digits` argument is a number of decimal places and
# should be an integer; digits = 0.5 does not round to tenths. Use digits = 1
# if the comment above reflects the intent -- confirm before changing, since
# the heatmap interpolation consumes these values downstream.
fp_new$Depth_m <- round(as.numeric(fp_new$Depth_m), digits = 0.5)
# Select and make each fp variable a separate dataframe
# I have done this for the heatmap plotting purposes.
green <- select(fp_new, DateTime, Depth_m, GreenAlgae_ugL, Date, DOY)
bluegreen <- select(fp_new, DateTime, Depth_m, Bluegreens_ugL, Date, DOY)
brown <- select(fp_new, DateTime, Depth_m, Browns_ugL, Date, DOY)
mixed <- select(fp_new, DateTime, Depth_m, Mixed_ugL, Date, DOY)
yellow <- select(fp_new, DateTime, Depth_m, YellowSubstances_ugL, Date, DOY)
total <- select(fp_new, DateTime, Depth_m, TotalConc_ugL, Date, DOY)
trans <- select(fp_new, DateTime, Depth_m, Transmission_perc, Date, DOY)
# Complete data interpolation for the heatmaps -------------------------------
# Each pigment group is interpolated onto the same regular DOY x depth grid,
# so the seven duplicated interp() calls are replaced with one helper.
##NOTE: akima::interp() WILL NOT WORK if x/y/z are non-numeric or contain
##NA/Inf values.
# NOTE(review): the output depth grid is hard-coded to 0.1-19.9 m (CCR's
# range); confirm whether it should be narrowed for FCR/BVR runs.

#' Interpolate one fluoroprobe variable onto the plotting grid.
#' @param dat data frame holding DOY and Depth_m columns
#' @param z numeric vector of the variable to interpolate (same rows as dat)
#' @param extrap extrapolate beyond the convex hull of the samples?
#' @return long-format data.frame with columns x (DOY), y (depth), z (value)
interp_fp <- function(dat, z, extrap = FALSE) {
  gridded <- interp(x = dat$DOY, y = dat$Depth_m, z = z,
                    xo = seq(min(dat$DOY), max(dat$DOY), by = 0.1),
                    yo = seq(0.1, 19.9, by = 0.01),
                    extrap = extrap, linear = TRUE, duplicate = "strip")
  interp2xyz(gridded, data.frame = TRUE)
}

# Green algae was the only group interpolated with extrapolation enabled
# (extrap = T in the original code); that behavior is preserved here, but it
# is worth confirming the asymmetry is deliberate.
interp_green     <- interp_fp(green, green$GreenAlgae_ugL, extrap = TRUE)
interp_bluegreen <- interp_fp(bluegreen, bluegreen$Bluegreens_ugL)
interp_brown     <- interp_fp(brown, brown$Browns_ugL)
interp_mixed     <- interp_fp(mixed, mixed$Mixed_ugL)
interp_yellow    <- interp_fp(yellow, yellow$YellowSubstances_ugL)
interp_total     <- interp_fp(total, total$TotalConc_ugL)
interp_trans     <- interp_fp(trans, trans$Transmission_perc)
# Plotting ------------------------------------------------------------------
# All seven heatmaps share the same layout, so they are built with a single
# helper instead of seven copy-pasted ggplot blocks; styling edits now happen
# in one place.
# NOTE(review): the fill legend reads ug/L even for the transmission (%)
# panel, exactly as in the original code -- confirm that legend is intended.

#' Build one fluoroprobe heatmap from an interpolated x/y/z data frame.
#' @param interp_df data.frame with columns x (day of year), y (depth, m),
#'   z (interpolated value; NA outside the sampled region, drawn gray)
#' @param title_suffix character appended to the reservoir name for the title
#' @return a ggplot object
fp_heatmap <- function(interp_df, title_suffix) {
  ggplot(interp_df, aes(x = x, y = y)) +
    geom_raster(aes(fill = z)) +
    scale_y_reverse(expand = c(0, 0)) +
    scale_x_continuous(expand = c(0, 0)) +
    scale_fill_gradientn(colours = blue2green2red(60), na.value = "gray") +
    labs(x = "Day of year", y = "Depth (m)",
         title = paste0(Reservoir, title_suffix),
         fill = expression(paste(mu, g/L))) +
    theme_bw()
}

p1 <- fp_heatmap(interp_green, " Green Algae Heatmap")
p2 <- fp_heatmap(interp_bluegreen, " Cyanobacteria Heatmap")
p3 <- fp_heatmap(interp_brown, " Brown Algae Heatmap")
p4 <- fp_heatmap(interp_mixed, " 'MIXED' Heatmap")
p5 <- fp_heatmap(interp_yellow, " Yellow Substances Heatmap")
p6 <- fp_heatmap(interp_total, " Total Phytoplankton Heatmap")
p7 <- fp_heatmap(interp_trans, " Transmission % Heatmap")

# Stack all seven heatmaps in one column and save them to a single PDF.
# (An earlier grid.newpage()/grid.draw()/ggplotGrob() approach was superseded
# by cowplot::plot_grid and has been removed.)
final_plot <- plot_grid(p1, p2, p3, p4, p5, p6, p7, ncol = 1)
ggsave(plot = final_plot,
       file = paste0("./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/",
                     Reservoir, "_50_FP_2018.pdf"),
       h = 30, w = 10, units = "in", dpi = 300, scale = 1)
#multi-year plots
# Published FluoroProbe record from EDI, restricted to CCR at site 50.
# NOTE(review): the reservoir/site are hard-coded here rather than reusing the
# `Reservoir` variable set at the top of the script -- confirm this is intended.
fp_edi <- read_csv("./Data/DataAlreadyUploadedToEDI/EDIProductionFiles/MakeEMLFluoroProbe/FluoroProbe.csv")%>%
filter(Reservoir == "CCR" & Site == "50")
# Daily means per spectral group for the upper 10 m, reshaped to long format
# (one row per date x spectral group).
allyears <- fp_edi %>%
filter(Depth_m <= 10)%>%
mutate(Year = as.factor(year(DateTime)),
DOY = yday(DateTime),
Date = date(DateTime))%>%
group_by(Date,Year, DOY) %>%
summarize(Total = mean(TotalConc_ugL, na.rm = TRUE),
GreenAlgae = mean(GreenAlgae_ugL, na.rm = TRUE),
BluegreenAlgae = mean(Bluegreens_ugL, na.rm = TRUE),
BrownAlgae = mean(BrownAlgae_ugL, na.rm = TRUE),
MixedAlgae = mean(MixedAlgae_ugL, na.rm = TRUE)) %>%
gather(Total:MixedAlgae, key = "spectral_group", value = "ugL")
# 2018 time series of the three main spectral groups (Total and MixedAlgae
# are excluded from the plot).
plot_all <- ggplot(data = subset(allyears, Year == 2018 & spectral_group != "Total" & spectral_group != "MixedAlgae"), aes(x = DOY, y = ugL, group = spectral_group, colour = spectral_group))+
geom_line(size = 1)+
scale_colour_manual(values = c("darkcyan","chocolate1","chartreuse4"))+
xlab("Day of Year")+
ylab("micrograms per liter")+
ggtitle("2018")+
# ylim(c(0,5))+
# xlim(c(125,275))+
theme_bw()
plot_all
ggsave(plot_all, filename = "./Data/DataNotYetUploadedToEDI/Raw_fluoroprobe/CCR_epi_2018.png",
h = 3, w = 8, units = "in")
|
#Spartan Hackers R Workshop 11-3-15
#Intro workshop script: loads a survey CSV, shortens the column names, and
#manually expands the "gender" factor into one 0/1 dummy column per level.
#display working directory
getwd()
#change path to reflect the location of the CSV data file on your machine
#NOTE(review): setwd() to a user-specific absolute path makes the script
#non-portable; acceptable for a workshop demo.
setwd("/Users/laurenbretz/Desktop")
#read in data
mydata = read.csv("R_workshop_data.csv")
#shorten variable names
names(mydata) = c("gender", "study", "class", "gpa", "act", "animal", "pizza", "sport", "worldend", "pres")
#display data
mydata
#transform "gender" variable into matrix of three dummy variables
#(~gender - 1 drops the intercept so every level gets its own 0/1 column)
tempmatrix = model.matrix(~gender -1, data=mydata)
#display matrix of gender dummy variables
tempmatrix
#iterates through each column in tempmatrix
for (i in 1:ncol(tempmatrix)){
#adds column i from tempmatrix to mydata
mydata[ncol(mydata)+1] = tempmatrix[,i]
#adds column names to gender dummy variables in mydata
colnames(mydata)[ncol(mydata)] = colnames(tempmatrix)[i]
}
#function that automates lines 15-26
#accepts two arguments: the variable that you want to transform and the data
#frame that contains it; returns that data frame with one 0/1 indicator
#column appended per level of `var`
#FIX: the original body appended the columns to `mydata` (captured from the
#defining environment) and silently ignored the `dataframe` argument. It now
#operates on whichever data frame is passed in -- identical behavior for the
#existing dummies(mydata$..., mydata) calls, and correct for any other input.
dummies = function(var, dataframe){
  #one indicator column per level of `var`; the -1 drops the intercept
  tempmatrix <- model.matrix(~var -1, data = dataframe)
  for (i in seq_len(ncol(tempmatrix))){
    dataframe[ncol(dataframe) + 1] <- tempmatrix[, i]
    #model.matrix names the columns "var<level>", e.g. "vara", "varb"
    colnames(dataframe)[ncol(dataframe)] <- colnames(tempmatrix)[i]
  }
  #returns the input data frame augmented with the desired dummy variables
  return(dataframe)
}
#calls the function to transform each non-numerical variable
#the $ symbol allows you to refer to a specific variable from a specific dataset
mydata = dummies(mydata$study,mydata)
mydata = dummies(mydata$class,mydata)
mydata = dummies(mydata$animal,mydata)
mydata = dummies(mydata$pizza,mydata)
mydata = dummies(mydata$sport,mydata)
mydata = dummies(mydata$worldend,mydata)
mydata = dummies(mydata$pres,mydata)
#shorten new variable names
names(mydata) = c("gender", "study", "class", "gpa", "act", "animal", "pizza", "sport", "worldend","pres","genderF","genderM","genderNB","studyArts","studyBiz","studyComm","studyEdu","studyMed","studyNatsci","studySocsci","class1","class2","class3","class4","class5","classGrad","animalCats","animalDogs","pizzaCheese","pizzaHI","pizzaMeat","pizzaPepp","pizzaVeggie","sportBaseball","sportBasketball","sportBoard","sportEsports","sportFootball","sportSoccer","worldendWarm","worldendNukes","worldendSun","worldendRapture","worldendZombie","presSanders","presTrump","presClinton","presBush")
#display data
mydata
#summarize numerical data
summary(mydata$gpa)
summary(mydata$act)
#summarize non-numerical data
summary(mydata$pres)
#summarize with dummy variables (the mean of a 0/1 column is the share of
#respondents in that category)
summary(mydata$presSanders)
summary(mydata$presTrump)
summary(mydata$presClinton)
summary(mydata$presBush)
#plain histogram of ACT scores
hist(mydata$act)
#pretty histogram of ACT scores
hist(mydata$act, #tell R which variable to plot
col=c("green","blue"), #alternates as many colors as you want
main="ACT Scores of Spartan Hackers", #title of histogram
xlab="ACT") #label on x-axis
#average GPA of people who would vote for a Republican
mean(subset(mydata,pres=="Donald Trump" | pres=="Jeb Bush")$gpa)
#average GPA of people who would vote for a Democrat
mean(subset(mydata,pres=="Hillary Clinton" | pres=="Bernie Sanders")$gpa)
#create variables for the two values we want to compare
mean1 = mean(subset(mydata,pres=="Donald Trump" | pres=="Jeb Bush")$gpa)
mean2 = mean(subset(mydata,pres=="Hillary Clinton" | pres=="Bernie Sanders")$gpa)
#n is the sample size, or number of observations (rows) in mydata
n = nrow(mydata)
#s is the sample standard deviation, or measure of the data’s spread
s = sd(mydata$gpa)
#a score for how different mean1 and mean2 are
#NOTE(review): this is a teaching approximation, not a standard two-sample
#t statistic -- it uses the sd of ALL GPAs and the full n instead of the
#pooled standard error of the two group means; confirm before reusing.
teststat = (mean1 - mean2)/(s/sqrt(n))
#display the value of our test statistic
teststat
#The amount of error we are willing to accept (in this case, 5%)
alpha = 0.05
#critical value for our indicated alpha and n
t_half_alpha = qt(1-alpha/2, df=n-1)
#if teststat is outside of this range, mean1 & mean2 are different
c(-t_half_alpha,t_half_alpha)
#declares regression equation based on dependent variable (act), independent variable (pizzaCheese), and data (mydata)
regAct = lm(formula = act ~ pizzaCheese, data = mydata)
#outputs regression coefficients and statistics
summary(regAct)
#declares regression equation with multiple independent variables (pizzaCheese and gpa)
regAct2 = lm(formula = act ~ pizzaCheese + gpa, data = mydata)
summary(regAct2)
#declares regression equation with many independent variables
regAct3 = lm(formula = act ~ pizzaCheese + pizzaHI + pizzaMeat + pizzaPepp + sportFootball + animalCats + worldendZombie, data = mydata)
summary(regAct3) | /R_workshop_code.R | no_license | laurenbretz/R-workshop-110315 | R | false | false | 4,518 | r | #Spartan Hackers R Workshop 11-3-15
#display working directory
#Intro workshop script: loads a survey CSV, shortens the column names, and
#manually expands the "gender" factor into one 0/1 dummy column per level.
getwd()
#change path to reflect the location of the CSV data file on your machine
#NOTE(review): setwd() to a user-specific absolute path makes the script
#non-portable; acceptable for a workshop demo.
setwd("/Users/laurenbretz/Desktop")
#read in data
mydata = read.csv("R_workshop_data.csv")
#shorten variable names
names(mydata) = c("gender", "study", "class", "gpa", "act", "animal", "pizza", "sport", "worldend", "pres")
#display data
mydata
#transform "gender" variable into matrix of three dummy variables
#(~gender - 1 drops the intercept so every level gets its own 0/1 column)
tempmatrix = model.matrix(~gender -1, data=mydata)
#display matrix of gender dummy variables
tempmatrix
#iterates through each column in tempmatrix
for (i in 1:ncol(tempmatrix)){
#adds column i from tempmatrix to mydata
mydata[ncol(mydata)+1] = tempmatrix[,i]
#adds column names to gender dummy variables in mydata
colnames(mydata)[ncol(mydata)] = colnames(tempmatrix)[i]
}
#function that automates lines 15-26
#accepts two arguments: the variable that you want to transform and the data
#frame that contains it; returns that data frame with one 0/1 indicator
#column appended per level of `var`
#FIX: the original body appended the columns to `mydata` (captured from the
#defining environment) and silently ignored the `dataframe` argument. It now
#operates on whichever data frame is passed in -- identical behavior for the
#existing dummies(mydata$..., mydata) calls, and correct for any other input.
dummies = function(var, dataframe){
  #one indicator column per level of `var`; the -1 drops the intercept
  tempmatrix <- model.matrix(~var -1, data = dataframe)
  for (i in seq_len(ncol(tempmatrix))){
    dataframe[ncol(dataframe) + 1] <- tempmatrix[, i]
    #model.matrix names the columns "var<level>", e.g. "vara", "varb"
    colnames(dataframe)[ncol(dataframe)] <- colnames(tempmatrix)[i]
  }
  #returns the input data frame augmented with the desired dummy variables
  return(dataframe)
}
#calls the function to transform each non-numerical variable
#the $ symbol allows you to refer to a specific variable from a specific dataset
mydata = dummies(mydata$study,mydata)
mydata = dummies(mydata$class,mydata)
mydata = dummies(mydata$animal,mydata)
mydata = dummies(mydata$pizza,mydata)
mydata = dummies(mydata$sport,mydata)
mydata = dummies(mydata$worldend,mydata)
mydata = dummies(mydata$pres,mydata)
#shorten new variable names
names(mydata) = c("gender", "study", "class", "gpa", "act", "animal", "pizza", "sport", "worldend","pres","genderF","genderM","genderNB","studyArts","studyBiz","studyComm","studyEdu","studyMed","studyNatsci","studySocsci","class1","class2","class3","class4","class5","classGrad","animalCats","animalDogs","pizzaCheese","pizzaHI","pizzaMeat","pizzaPepp","pizzaVeggie","sportBaseball","sportBasketball","sportBoard","sportEsports","sportFootball","sportSoccer","worldendWarm","worldendNukes","worldendSun","worldendRapture","worldendZombie","presSanders","presTrump","presClinton","presBush")
#display data
mydata
#summarize numerical data
summary(mydata$gpa)
summary(mydata$act)
#summarize non-numerical data
summary(mydata$pres)
#summarize with dummy variables (the mean of a 0/1 column is the share of
#respondents in that category)
summary(mydata$presSanders)
summary(mydata$presTrump)
summary(mydata$presClinton)
summary(mydata$presBush)
#plain histogram of ACT scores
hist(mydata$act)
#pretty histogram of ACT scores
hist(mydata$act, #tell R which variable to plot
col=c("green","blue"), #alternates as many colors as you want
main="ACT Scores of Spartan Hackers", #title of histogram
xlab="ACT") #label on x-axis
#average GPA of people who would vote for a Republican
mean(subset(mydata,pres=="Donald Trump" | pres=="Jeb Bush")$gpa)
#average GPA of people who would vote for a Democrat
mean(subset(mydata,pres=="Hillary Clinton" | pres=="Bernie Sanders")$gpa)
#create variables for the two values we want to compare
mean1 = mean(subset(mydata,pres=="Donald Trump" | pres=="Jeb Bush")$gpa)
mean2 = mean(subset(mydata,pres=="Hillary Clinton" | pres=="Bernie Sanders")$gpa)
#n is the sample size, or number of observations (rows) in mydata
n = nrow(mydata)
#s is the sample standard deviation, or measure of the data’s spread
s = sd(mydata$gpa)
#a score for how different mean1 and mean2 are
#NOTE(review): this is a teaching approximation, not a standard two-sample
#t statistic -- it uses the sd of ALL GPAs and the full n instead of the
#pooled standard error of the two group means; confirm before reusing.
teststat = (mean1 - mean2)/(s/sqrt(n))
#display the value of our test statistic
teststat
#The amount of error we are willing to accept (in this case, 5%)
alpha = 0.05
#critical value for our indicated alpha and n
t_half_alpha = qt(1-alpha/2, df=n-1)
#if teststat is outside of this range, mean1 & mean2 are different
c(-t_half_alpha,t_half_alpha)
#declares regression equation based on dependent variable (act), independent variable (pizzaCheese), and data (mydata)
regAct = lm(formula = act ~ pizzaCheese, data = mydata)
#outputs regression coefficients and statistics
summary(regAct)
#declares regression equation with multiple independent variables (pizzaCheese and gpa)
regAct2 = lm(formula = act ~ pizzaCheese + gpa, data = mydata)
summary(regAct2)
#declares regression equation with many independent variables
regAct3 = lm(formula = act ~ pizzaCheese + pizzaHI + pizzaMeat + pizzaPepp + sportFootball + animalCats + worldendZombie, data = mydata)
summary(regAct3) |
# Plot global active power over 2007-02-01/02 and save it as a 480x480 PNG.
df <- read.table("household_power_consumption.txt", header = TRUE, sep =";", colClasses = c("character","character", rep("numeric",7)), na="?")
# Combine the date and time columns into a single timestamp for the x-axis.
df$Time <- strptime(paste(df$Date, df$Time),"%d/%m/%Y %H:%M:%S")
df$Date <- as.Date(df$Date, "%d/%m/%Y")
# Keep only the two target days.
dates <- as.Date(c("2007-02-01","2007-02-02"),"%Y-%m-%d")
df <- subset(df, Date %in% dates)
# Draw the line chart on screen first for interactive inspection.
plot(df$Time, df$Global_active_power, type = "l", xlab ="", ylab="GAP(kw)")
# FIX: the original called dev.copy(png, ...) and then opened a SECOND, blank
# png() device, so the trailing dev.off() closed the blank device and
# "plot2.png" came out empty while the copy device leaked. Open the sized PNG
# device and redraw instead; the dev.off() that follows closes it.
png("plot2.png", width = 480, height = 480)
plot(df$Time, df$Global_active_power, type = "l", xlab ="", ylab="GAP(kw)")
dev.off() | /plot2.R | no_license | cwang0129/ExData_Plotting1 | R | false | false | 504 | r | df <- read.table("household_power_consumption.txt", header = TRUE, sep =";", colClasses = c("character","character", rep("numeric",7)), na="?")
# Combine the date and time columns into a single timestamp for the x-axis.
df$Time <- strptime(paste(df$Date, df$Time),"%d/%m/%Y %H:%M:%S")
df$Date <- as.Date(df$Date, "%d/%m/%Y")
# Keep only the two target days: 2007-02-01 and 2007-02-02.
dates <- as.Date(c("2007-02-01","2007-02-02"),"%Y-%m-%d")
df <- subset(df, Date %in% dates)
# Draw the line chart on screen first for interactive inspection.
plot(df$Time, df$Global_active_power, type = "l", xlab ="", ylab="GAP(kw)")
# FIX: dev.copy(png, ...) followed by a fresh png() opened a second, blank
# device, so dev.off() wrote an empty "plot2.png" and leaked the copy device.
# Open the sized PNG device and redraw; the dev.off() on the next line
# closes it.
png("plot2.png", width = 480, height = 480)
plot(df$Time, df$Global_active_power, type = "l", xlab ="", ylab="GAP(kw)")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GUIfunctions.R
\docType{methods}
\name{groupVars}
\alias{groupVars}
\alias{groupVars,sdcMicroObj-method}
\alias{groupVars-methods}
\title{Join levels of a keyVariable in an object of class \code{\link{sdcMicroObj-class}}}
\usage{
groupVars(obj, var, before, after)
}
\arguments{
\item{obj}{object of class \code{\link{sdcMicroObj-class}}}
\item{var}{name of the keyVariable to change}
\item{before}{vector of levels before recoding}
\item{after}{vector of levels after recoding}
}
\value{
the modified \code{\link{sdcMicroObj-class}}
}
\description{
Transforms the factor variable into a factor with fewer levels and
recomputes the risk.
}
\section{Methods}{
\describe{
\item{list("signature(obj = \"sdcMicroObj\")")}{
This method transforms a factor variable with several levels into a new factor
variable with fewer levels. The user must make sure that all levels of the
original variable are listed in argument 'before' and that the number of
elements in argument 'after' (the new levels) have the same length. This
means that there should be a one to one mapping from any level of the
original factor to a level in the recoded variable. } }
}
\examples{
## for objects of class sdcMicro:
data(testdata2)
testdata2$urbrur <- as.factor(testdata2$urbrur)
sdc <- createSdcObj(testdata2,
keyVars=c('urbrur','roof','walls','water','electcon','relat','sex'),
numVars=c('expend','income','savings'), w='sampling_weight')
sdc <- groupVars(sdc, var="urbrur", before=c("1","2"), after=c("1","1"))
}
\keyword{methods}
| /man/groupVars.Rd | no_license | sinanshi/sdcMicro | R | false | true | 1,590 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GUIfunctions.R
\docType{methods}
\name{groupVars}
\alias{groupVars}
\alias{groupVars,sdcMicroObj-method}
\alias{groupVars-methods}
\title{Join levels of a keyVariable in an object of class \code{\link{sdcMicroObj-class}}}
\usage{
groupVars(obj, var, before, after)
}
\arguments{
\item{obj}{object of class \code{\link{sdcMicroObj-class}}}
\item{var}{name of the keyVariable to change}
\item{before}{vector of levels before recoding}
\item{after}{vector of levels after recoding}
}
\value{
the modified \code{\link{sdcMicroObj-class}}
}
\description{
Transforms the factor variable into a factor with fewer levels and
recomputes the risk.
}
\section{Methods}{
\describe{
\item{list("signature(obj = \"sdcMicroObj\")")}{
This method transforms a factor variable with several levels into a new factor
variable with fewer levels. The user must make sure that all levels of the
original variable are listed in argument 'before' and that the number of
elements in argument 'after' (the new levels) have the same length. This
means that there should be a one to one mapping from any level of the
original factor to a level in the recoded variable. } }
}
\examples{
## for objects of class sdcMicro:
data(testdata2)
testdata2$urbrur <- as.factor(testdata2$urbrur)
sdc <- createSdcObj(testdata2,
keyVars=c('urbrur','roof','walls','water','electcon','relat','sex'),
numVars=c('expend','income','savings'), w='sampling_weight')
sdc <- groupVars(sdc, var="urbrur", before=c("1","2"), after=c("1","1"))
}
\keyword{methods}
|
# Course project: build a tidy data set of the mean/std measurements from the
# UCI HAR smartphone data, averaged per subject and activity.
library(reshape2)
filename <- "getdata_projectfiles_UCI HAR Dataset.zip"
## Download and unzip the dataset:
if (!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# NOTE(review): method = "curl" requires a curl binary on the PATH and may
# fail on Windows; the default method is usually sufficient -- confirm.
download.file(fileURL, filename, method="curl")
}
if (!file.exists("UCI HAR Dataset")) {
unzip(filename)
}
# Load activity labels + features (coerce the factor columns to character)
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activityLabels[,2] <- as.character(activityLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# Extract only the data on mean and standard deviation, and clean the names
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
featuresWanted.names = gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names = gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
# Load the datasets, keeping only the wanted feature columns and prepending
# the subject id and activity code to each set
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge datasets and add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWanted.names)
# turn activities & subjects into factors (activities get their text labels)
allData$activity <- factor(allData$activity, levels = activityLabels[,1], labels = activityLabels[,2])
allData$subject <- as.factor(allData$subject)
# Melt to long form, then average every variable per subject/activity pair
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
| /run_analysis.r | no_license | jscott061/Getting-and-Cleaning-Data---Course-Project | R | false | false | 2,131 | r | library(reshape2)
filename <- "getdata_projectfiles_UCI HAR Dataset.zip"
## Download and unzip the dataset:
if (!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, filename, method="curl")
}
if (!file.exists("UCI HAR Dataset")) {
unzip(filename)
}
# Load activity labels + features
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activityLabels[,2] <- as.character(activityLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# Extract only the data on mean and standard deviation
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
featuresWanted.names = gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names = gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
# Load the datasets
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge datasets and add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWanted.names)
# turn activities & subjects into factors
allData$activity <- factor(allData$activity, levels = activityLabels[,1], labels = activityLabels[,2])
allData$subject <- as.factor(allData$subject)
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
context("Test occurrence_details function")
is_empty_list <- function(z) is.list(z) && length(z) < 1
# Checks that occurrence_details() returns a list with one element per
# requested record id, where any unresolvable id maps to an empty list.
# NOTE(review): expect_is() is deprecated in testthat 3e; expect_type() /
# expect_s3_class() are the modern equivalents -- confirm the testthat
# version this package pins before migrating.
thischeck <- function() {
test_that("empty list returned for null inputs", {
skip_on_cran()
## null (empty string) input
empty_result <- occurrence_details("")
expect_is(empty_result, "list")
expect_equal(length(empty_result), 1)
expect_true(is_empty_list(empty_result[[1]]))
# uuid queries do not build correctly for tests to work
## one null and one invalid input
# empty_result <- occurrence_details(c("", "invalid-id"))
# expect_is(empty_result, "list")
# expect_equal(length(empty_result), 2)
# expect_true(is_empty_list(empty_result[[1]]))
# expect_true(is_empty_list(empty_result[[2]]))
## one valid, one null, one invalid input
# mixed_result <-
# occurrence_details(c("ba9dfe7f-77f8-4486-b77e-3ae366d3c2ae", "",
# "invalid-id"))
# expect_is(mixed_result, "list")
# expect_equal(length(mixed_result), 3)
# expect_false(is_empty_list(mixed_result[[1]]))
# expect_true(is_empty_list(mixed_result[[2]]))
# expect_true(is_empty_list(mixed_result[[3]]))
## one valid id and one null id: valid entry populated, null entry empty
mixed_result <-
occurrence_details(c("ba9dfe7f-77f8-4486-b77e-3ae366d3c2ae", ""))
expect_is(mixed_result, "list")
expect_equal(length(mixed_result), 2)
expect_false(is_empty_list(mixed_result[[1]]))
expect_true(is_empty_list(mixed_result[[2]]))
})
}
# check_caching presumably re-runs the test block under different caching
# settings -- confirm against the package's test helpers.
check_caching(thischeck)
# Second cached test block: checks the field names of a returned record.
thischeck <- function() {
  test_that("occurrence_details result has the expected fields", {
    skip_on_cran()
    ## names are a bit changeable, but expect to see at least "processed",
    ## "raw", "userAssertions", "systemAssertions"
    core_names <- c("processed", "raw", "userAssertions",
                    "systemAssertions")
    ## this one has images, so also images in the names
    expect_true(all(c("images", core_names) %in%
                      names(occurrence_details(
                        "ba9dfe7f-77f8-4486-b77e-3ae366d3c2ae")[[1]])))
    ## no images: same core fields, but "images" must be absent
    result <- occurrence_details("d765212d-5583-4ad4-9db4-1086b6d5cad9")
    expect_true(all(core_names %in% names(result[[1]])))
    expect_false("images" %in% names(result[[1]]))
  })
}
check_caching(thischeck)
| /tests/testthat/test-occurrence-details.R | no_license | AtlasOfLivingAustralia/ALA4R | R | false | false | 2,493 | r | context("Test occurrence_details function")
is_empty_list <- function(z) is.list(z) && length(z) < 1
thischeck <- function() {
test_that("empty list returned for null inputs", {
skip_on_cran()
## null (empty string) input
empty_result <- occurrence_details("")
expect_is(empty_result, "list")
expect_equal(length(empty_result), 1)
expect_true(is_empty_list(empty_result[[1]]))
# uuid queries do not build correctly for tests to work
## one null and one invalid input
# empty_result <- occurrence_details(c("", "invalid-id"))
# expect_is(empty_result, "list")
# expect_equal(length(empty_result), 2)
# expect_true(is_empty_list(empty_result[[1]]))
# expect_true(is_empty_list(empty_result[[2]]))
## one valid, one null, one invalid input
# mixed_result <-
# occurrence_details(c("ba9dfe7f-77f8-4486-b77e-3ae366d3c2ae", "",
# "invalid-id"))
# expect_is(mixed_result, "list")
# expect_equal(length(mixed_result), 3)
# expect_false(is_empty_list(mixed_result[[1]]))
# expect_true(is_empty_list(mixed_result[[2]]))
# expect_true(is_empty_list(mixed_result[[3]]))
mixed_result <-
occurrence_details(c("ba9dfe7f-77f8-4486-b77e-3ae366d3c2ae", ""))
expect_is(mixed_result, "list")
expect_equal(length(mixed_result), 2)
expect_false(is_empty_list(mixed_result[[1]]))
expect_true(is_empty_list(mixed_result[[2]]))
})
}
check_caching(thischeck)
thischeck <- function() {
test_that("occurrence_details result has the expected fields", {
skip_on_cran()
## names are a bit changeable, but expect to see at least "processed",
## "raw", "userAssertions", "systemAssertions"
core_names <- c("processed", "raw", "userAssertions",
"systemAssertions")
## this one has images, so also images in the names
expect_true(all(c("images", core_names) %in%
names(occurrence_details(
"ba9dfe7f-77f8-4486-b77e-3ae366d3c2ae")[[1]])))
## no images
result <- occurrence_details("d765212d-5583-4ad4-9db4-1086b6d5cad9")
expect_true(all(core_names %in% names(result[[1]])))
expect_false("images" %in% names(result[[1]]))
})
}
check_caching(thischeck)
|
context("Mixed: structural tests")
# note: all calls with type 2 are wrapped in suppressWarnings()!
# Compare afex::mixed() type 2 fixed effects against direct lmer() fits of
# the full model and of the main-effects-only model. For type 2,
# mixed()$full.model is indexed with [[...]], i.e. presumably a list of
# fitted models rather than a single fit -- see the type 3 test below.
test_that("mixed: Maxell & Delaney (2004), Table 16.4, p. 842: Type 2", {
  data(md_16.4)
  md_16.4b <- md_16.4
  # Center the covariate and use effects coding, as in the textbook example.
  md_16.4b$cog <- scale(md_16.4b$cog, scale=FALSE)
  contrasts(md_16.4b$cond) <- "contr.sum"
  suppressWarnings(mixed4_2 <- mixed(induct ~ cond*cog + (cog|room:cond), md_16.4b, type = 2, progress=FALSE))
  lmer4_full <- lmer(induct ~ cond*cog + (cog|room:cond), md_16.4b)
  lmer4_small <- lmer(induct ~ cond+cog + (cog|room:cond), md_16.4b)
  expect_that(fixef(mixed4_2$full.model[[2]]), equals(fixef(lmer4_full)))
  expect_that(fixef(mixed4_2$full.model[[1]]), is_equivalent_to(fixef(lmer4_small)))
})
# Type 3: full.model is a single fit; the restricted model for the
# interaction drops only the interaction term.
test_that("mixed: Maxell & Delaney (2004), Table 16.4, p. 842: Type 3", {
  data(md_16.4)
  md_16.4b <- md_16.4
  # Center the covariate and use effects coding, as in the textbook example.
  md_16.4b$cog <- scale(md_16.4b$cog, scale=FALSE)
  contrasts(md_16.4b$cond) <- "contr.sum"
  suppressWarnings(mixed4_2 <- mixed(induct ~ cond*cog + (cog|room:cond), md_16.4b, type = 3, progress=FALSE))
  lmer4_full <- lmer(induct ~ cond*cog + (cog|room:cond), md_16.4b)
  lmer4_small <- lmer(induct ~ cond+cog + (cog|room:cond), md_16.4b)
  expect_that(fixef(mixed4_2$full.model), equals(fixef(lmer4_full)))
  expect_that(mixed4_2$full.model, is_equivalent_to(lmer4_full))
  expect_that(fixef(mixed4_2$restricted.models$`cond:cog`), is_equivalent_to(fixef(lmer4_small)))
})
test_that("mixed, obk.long: type 2 and LRTs", {
data(obk.long, package = "afex")
contrasts(obk.long$treatment) <- "contr.sum"
contrasts(obk.long$phase) <- "contr.sum"
suppressWarnings(t2 <- mixed(value ~ treatment*phase +(1|id), data = obk.long, method = "LRT", type = 2, progress=FALSE))
a2.f <- lmer(value ~ treatment*phase +(1|id), data = obk.long, REML=FALSE)
a2.h <- lmer(value ~ treatment+phase +(1|id), data = obk.long, REML=FALSE)
a2.t <- lmer(value ~ treatment +(1|id), data = obk.long, REML=FALSE)
a2.p <- lmer(value ~ phase +(1|id), data = obk.long, REML=FALSE)
extract_anova <- function(anova) unlist(anova)[c("Df1", "Chisq2", "Chi Df2", "Pr(>Chisq)2" )]
expect_that(
unlist(t2$anova_table[3,])
, is_equivalent_to(
extract_anova(anova(a2.h, a2.f))
))
expect_that(
unlist(t2$anova_table[2,])
, is_equivalent_to(
extract_anova(anova(a2.t, a2.h))
))
expect_that(
unlist(t2$anova_table[1,])
, is_equivalent_to(
extract_anova(anova(a2.p, a2.h))
))
})
# Fits a type 3 (default) and a type 2 GLMM via likelihood-ratio tests and
# checks that each returns a "mixed" object.
test_that("mixed, mlmRev: type 3 and 2 LRTs for GLMMs", {
  if (require("mlmRev")) {  # Contraception data only available with mlmRev
    suppressWarnings(gm1 <- mixed(use ~ age*urban + (1 | district), family = binomial, data = Contraception, method = "LRT", progress=FALSE))
    suppressWarnings(gm2 <- mixed(use ~ age*urban + (1 | district), family = binomial, data = Contraception, method = "LRT", type = 2, progress=FALSE))
    expect_that(gm1, is_a("mixed"))
    # Bug fix: the second expectation previously re-checked gm1, so the
    # type 2 fit was never actually tested.
    expect_that(gm2, is_a("mixed"))
  }
})
# Checks that a parametric-bootstrap (PB) LMM fit returns a "mixed" object.
# nsim is tiny because only the object class, not the p-values, is tested.
test_that("mixed, obk.long: LMM with method = PB", {
  # Bug fix: load the data explicitly instead of relying on an earlier
  # test's data() call -- tests should be runnable independently.
  data(obk.long, package = "afex")
  expect_that(mixed(value ~ treatment+phase*hour +(1|id), data = obk.long, method = "PB", args.test = list(nsim = 10), progress=FALSE), is_a("mixed"))
})
test_that("mixed, obk.long: multicore loads lme4 and produces the same results", {
#if (packageVersion("testthat") >= "0.9") {
if (FALSE) { # that never seems to run...
testthat::skip_on_cran()
testthat::skip_on_travis()
data(obk.long, package = "afex")
require(parallel)
cl <- makeCluster(rep("localhost", 2)) # make cluster
# 1. Obtain fits with multicore:
m_mc1 <- mixed(value ~ treatment +(phase|id), data = obk.long, method = "LRT", cl = cl, control = lmerControl(optCtrl=list(maxfun = 100000)), progress=FALSE)
cl_search <- clusterEvalQ(cl, search())
stopCluster(cl)
m_mc2 <- mixed(value ~ treatment +(phase|id), data = obk.long, method = "LRT", control = lmerControl(optCtrl=list(maxfun = 100000)), progress=FALSE)
expect_that(all(vapply(cl_search, function(x) any(grepl("^package:lme4$", x)), NA)), is_true())
expect_that(m_mc1, equals(m_mc2, check.attributes = FALSE))
}
})
test_that("print(mixed) works: only 1 or 2 fixed effects with all methods", {
data(obk.long, package = "afex")
expect_that(print(mixed(value ~ treatment+(1|id), data = obk.long)), is_a("data.frame"))
expect_that(print(mixed(value ~ treatment+phase+(1|id), data = obk.long)), is_a("data.frame"))
expect_that(print(mixed(value ~ treatment+(1|id), data = obk.long, method = "LRT")), is_a("data.frame"))
expect_that(print(mixed(value ~ treatment+phase+(1|id), data = obk.long, method = "LRT")), is_a("data.frame"))
require("mlmRev") # for the data, see ?Contraception
expect_that(print(mixed(use ~ urban + (1 | district), method = "PB", family = binomial, data = Contraception, args.test=list(nsim=2))), is_a("data.frame"))
expect_that(print(mixed(use ~ urban + livch + (1 | district), method = "PB", family = binomial, data = Contraception, args.test=list(nsim=2))), is_a("data.frame"))
})
# test_that("mixed, Maxell & Delaney (2004), Table 16.4, p. 842: bobyqa not fitting well", {
# data(md_16.4)
# # F-values and p-values are relatively off:
# expect_that(mixed(induct ~ cond*cog + (cog|room:cond), md_16.4, control=lmerControl(optimizer="bobyqa")), gives_warning("better fit"))
# expect_that(mixed(induct ~ cond*cog + (cog|room:cond), md_16.4, type=2, control=lmerControl(optimizer="bobyqa")), gives_warning("better fit"))
# })
# set.data.arg controls whether the data symbol stored in the fitted
# model's call is the caller's variable name ("obk.long") or the generic
# name "data".
test_that("mixed: set.data.arg", {
  data(obk.long, package = "afex")
  suppressWarnings(m1 <- mixed(value ~ treatment*phase +(1|id), obk.long, method = "LRT", progress=FALSE))
  suppressWarnings(m2 <- mixed(value ~ treatment*phase +(1|id), obk.long, method = "LRT", progress=FALSE, set.data.arg = FALSE))
  expect_that(m1$full.model@call[["data"]], is_identical_to(as.name("obk.long")))
  expect_that(m2$full.model@call[["data"]], is_identical_to(as.name("data")))
})
test_that("mixed: anova with multiple mixed objexts", {
data("sk2011.2")
data("ks2013.3")
sk2_aff <- droplevels(sk2011.2[sk2011.2$what == "affirmation",])
sk_m1 <- mixed(response ~ instruction+(1|id), sk2_aff, method = "LRT", progress = FALSE)
sk_m2 <- mixed(response ~ instruction+(1|id)+(1|content), sk2_aff, method = "LRT", progress = FALSE)
sk_m3 <- lmer(response ~ instruction+(1|id)+(validity|content), sk2_aff, REML = FALSE)
sk_m4 <- lmer(response ~ instruction+(1|id)+(validity|content), sk2_aff, REML = TRUE)
t <- anova(sk_m1, sk_m2, sk_m3)
expect_is(t, c("anova", "data.frame"))
expect_is(anova(sk_m1, object = sk_m2, sk_m3), c("anova", "data.frame"))
expect_is(anova(sk_m1, object = sk_m2, sk_m3, ks2013.3), c("anova", "data.frame"))
expect_warning(anova(sk_m1, object = sk_m2, sk_m3, sk_m4), "some models fit with REML = TRUE, some not")
})
context("Mixed: Expand random effects")
test_that("mixed: expand_re argument, return = 'merMod'", {
data("ks2013.3")
m2 <- mixed(response ~ validity + (believability||id), ks2013.3, expand_re = TRUE, method = "LRT", progress=FALSE)
m3 <- mixed(response ~ validity + (believability|id), ks2013.3, method = "LRT", progress=FALSE)
expect_identical(length(unlist(summary(m2)$varcor)), nrow(summary(m3)$varcor$id))
expect_true(all.equal(unlist(summary(m2)$varcor), diag(summary(m3)$varcor$id), tolerance = 0.03, check.attributes = FALSE))
l2 <- mixed(response ~ validity + (believability||id), ks2013.3, expand_re = TRUE, return = "merMod")
expect_is(l2, "merMod")
expect_equivalent(m2$full.model, l2)
l3 <- lmer_alt(response ~ validity + (believability||id), ks2013.3)
l4 <- lmer_alt(response ~ validity + (believability||id), ks2013.3, control = lmerControl(optimizer = "Nelder_Mead"))
expect_equivalent(l2, l3)
expect_equal(l3, l4, check.attributes = FALSE)
l5 <- lmer_alt(response ~ validity + (believability||id), ks2013.3, control = lmerControl(optimizer = "Nelder_Mead"), check.contrasts = TRUE)
expect_equal(l2, l5, check.attributes = FALSE )
expect_identical(names(coef(l2)$id), names(coef(l5)$id)) # parameter names need to be identical (same contrasts)
expect_false(all(names(coef(l2)$id) == names(coef(l3)$id))) # parameter names need to be different (different contrasts)
l7 <- lmer_alt(response ~ validity + (1|id) + (0+validity*condition||content), ks2013.3, control = lmerControl(optCtrl = list(maxfun=1e6)))
expect_is(l7, "merMod")
expect_error(lmer_alt(response ~ validity + (0|id) + (0+validity*condition||content), ks2013.3), "Invalid random effects term")
expect_is(lmer_alt(response ~ validity + (validity||id) + (validity|content), ks2013.3), "merMod")
})
test_that("mixed: expand_re argument (longer)", {
if (packageVersion("testthat") >= "0.9") {
testthat::skip_on_cran()
testthat::skip_on_travis()
data("ks2013.3")
m4 <- mixed(response ~ validity + (believability*validity||id) + (validity*condition|content), ks2013.3, expand_re = TRUE, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), progress=FALSE)
m5 <- suppressWarnings(mixed(response ~ validity + (believability*validity|id) + (validity*condition||content), ks2013.3, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), expand_re = TRUE, progress=FALSE))
expect_identical(length(unlist(summary(m4)$varcor[-7])), nrow(summary(m5)$varcor$id))
expect_identical(length(unlist(summary(m5)$varcor[-1])), nrow(summary(m4)$varcor$content))
expect_equal(attr(summary(m5)$varcor, "sc"), attr(summary(m4)$varcor, "sc"), tolerance = 0.02)
}
})
test_that("mixed: return=data, expand_re argument, and allFit", {
#if (packageVersion("testthat") >= "0.9") {
if (FALSE) {
testthat::skip_on_cran()
testthat::skip_on_travis()
data("ks2013.3")
ks2013.3_tmp <- ks2013.3
m6 <- mixed(response ~ validity + (believability*validity||id), ks2013.3_tmp, expand_re = TRUE, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), progress=FALSE, return = "merMod")
m6_all_1 <- allFit(m6, verbose = FALSE, data = ks2013.3_tmp)
expect_output(print(m6_all_1$`bobyqa.`), "object 're1.believability1' not found")
ks2013.3_tmp <- mixed(response ~ validity + (believability*validity||id), ks2013.3_tmp, expand_re = TRUE, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), progress=FALSE, return = "data")
m6_all_2 <- suppressWarnings(allFit(m6, verbose = FALSE, data = ks2013.3_tmp))
expect_is(m6_all_2$`bobyqa.`, "merMod")
expect_is(m6_all_2$`Nelder_Mead.`, "merMod")
}
})
test_that("mixed: return=data works", {
data("ks2013.3")
ks2013.3_tmp <- ks2013.3
ks2013.3_tmp <- mixed(response ~ validity + (believability*validity||id), ks2013.3_tmp, expand_re = TRUE, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), progress=FALSE, return = "data")
expect_is(ks2013.3_tmp, "data.frame")
if (packageVersion("testthat") >= "0.11.0.9000") expect_gt(ncol(ks2013.3_tmp), ncol(ks2013.3))
expect_output(print(colnames(ks2013.3_tmp)), "re1.believability1_by_validity1")
})
| /tests/testthat/test-mixed-structure.R | no_license | raviselker/afex | R | false | false | 10,996 | r |
context("Mixed: structural tests")
# note: all calls with type 2 are wrapped in suppressWarnings()!
test_that("mixed: Maxell & Delaney (2004), Table 16.4, p. 842: Type 2", {
data(md_16.4)
md_16.4b <- md_16.4
md_16.4b$cog <- scale(md_16.4b$cog, scale=FALSE)
contrasts(md_16.4b$cond) <- "contr.sum"
suppressWarnings(mixed4_2 <- mixed(induct ~ cond*cog + (cog|room:cond), md_16.4b, type = 2, progress=FALSE))
lmer4_full <- lmer(induct ~ cond*cog + (cog|room:cond), md_16.4b)
lmer4_small <- lmer(induct ~ cond+cog + (cog|room:cond), md_16.4b)
expect_that(fixef(mixed4_2$full.model[[2]]), equals(fixef(lmer4_full)))
expect_that(fixef(mixed4_2$full.model[[1]]), is_equivalent_to(fixef(lmer4_small)))
})
test_that("mixed: Maxell & Delaney (2004), Table 16.4, p. 842: Type 3", {
data(md_16.4)
md_16.4b <- md_16.4
md_16.4b$cog <- scale(md_16.4b$cog, scale=FALSE)
contrasts(md_16.4b$cond) <- "contr.sum"
suppressWarnings(mixed4_2 <- mixed(induct ~ cond*cog + (cog|room:cond), md_16.4b, type = 3, progress=FALSE))
lmer4_full <- lmer(induct ~ cond*cog + (cog|room:cond), md_16.4b)
lmer4_small <- lmer(induct ~ cond+cog + (cog|room:cond), md_16.4b)
expect_that(fixef(mixed4_2$full.model), equals(fixef(lmer4_full)))
expect_that(mixed4_2$full.model, is_equivalent_to(lmer4_full))
expect_that(fixef(mixed4_2$restricted.models$`cond:cog`), is_equivalent_to(fixef(lmer4_small)))
})
test_that("mixed, obk.long: type 2 and LRTs", {
data(obk.long, package = "afex")
contrasts(obk.long$treatment) <- "contr.sum"
contrasts(obk.long$phase) <- "contr.sum"
suppressWarnings(t2 <- mixed(value ~ treatment*phase +(1|id), data = obk.long, method = "LRT", type = 2, progress=FALSE))
a2.f <- lmer(value ~ treatment*phase +(1|id), data = obk.long, REML=FALSE)
a2.h <- lmer(value ~ treatment+phase +(1|id), data = obk.long, REML=FALSE)
a2.t <- lmer(value ~ treatment +(1|id), data = obk.long, REML=FALSE)
a2.p <- lmer(value ~ phase +(1|id), data = obk.long, REML=FALSE)
extract_anova <- function(anova) unlist(anova)[c("Df1", "Chisq2", "Chi Df2", "Pr(>Chisq)2" )]
expect_that(
unlist(t2$anova_table[3,])
, is_equivalent_to(
extract_anova(anova(a2.h, a2.f))
))
expect_that(
unlist(t2$anova_table[2,])
, is_equivalent_to(
extract_anova(anova(a2.t, a2.h))
))
expect_that(
unlist(t2$anova_table[1,])
, is_equivalent_to(
extract_anova(anova(a2.p, a2.h))
))
})
test_that("mixed, mlmRev: type 3 and 2 LRTs for GLMMs", {
if (require("mlmRev")) {
suppressWarnings(gm1 <- mixed(use ~ age*urban + (1 | district), family = binomial, data = Contraception, method = "LRT", progress=FALSE))
suppressWarnings(gm2 <- mixed(use ~ age*urban + (1 | district), family = binomial, data = Contraception, method = "LRT", type = 2, progress=FALSE))
expect_that(gm1, is_a("mixed"))
expect_that(gm1, is_a("mixed"))
}
})
test_that("mixed, obk.long: LMM with method = PB", {
expect_that(mixed(value ~ treatment+phase*hour +(1|id), data = obk.long, method = "PB", args.test = list(nsim = 10), progress=FALSE), is_a("mixed"))
})
test_that("mixed, obk.long: multicore loads lme4 and produces the same results", {
#if (packageVersion("testthat") >= "0.9") {
if (FALSE) { # that never seems to run...
testthat::skip_on_cran()
testthat::skip_on_travis()
data(obk.long, package = "afex")
require(parallel)
cl <- makeCluster(rep("localhost", 2)) # make cluster
# 1. Obtain fits with multicore:
m_mc1 <- mixed(value ~ treatment +(phase|id), data = obk.long, method = "LRT", cl = cl, control = lmerControl(optCtrl=list(maxfun = 100000)), progress=FALSE)
cl_search <- clusterEvalQ(cl, search())
stopCluster(cl)
m_mc2 <- mixed(value ~ treatment +(phase|id), data = obk.long, method = "LRT", control = lmerControl(optCtrl=list(maxfun = 100000)), progress=FALSE)
expect_that(all(vapply(cl_search, function(x) any(grepl("^package:lme4$", x)), NA)), is_true())
expect_that(m_mc1, equals(m_mc2, check.attributes = FALSE))
}
})
test_that("print(mixed) works: only 1 or 2 fixed effects with all methods", {
data(obk.long, package = "afex")
expect_that(print(mixed(value ~ treatment+(1|id), data = obk.long)), is_a("data.frame"))
expect_that(print(mixed(value ~ treatment+phase+(1|id), data = obk.long)), is_a("data.frame"))
expect_that(print(mixed(value ~ treatment+(1|id), data = obk.long, method = "LRT")), is_a("data.frame"))
expect_that(print(mixed(value ~ treatment+phase+(1|id), data = obk.long, method = "LRT")), is_a("data.frame"))
require("mlmRev") # for the data, see ?Contraception
expect_that(print(mixed(use ~ urban + (1 | district), method = "PB", family = binomial, data = Contraception, args.test=list(nsim=2))), is_a("data.frame"))
expect_that(print(mixed(use ~ urban + livch + (1 | district), method = "PB", family = binomial, data = Contraception, args.test=list(nsim=2))), is_a("data.frame"))
})
# test_that("mixed, Maxell & Delaney (2004), Table 16.4, p. 842: bobyqa not fitting well", {
# data(md_16.4)
# # F-values and p-values are relatively off:
# expect_that(mixed(induct ~ cond*cog + (cog|room:cond), md_16.4, control=lmerControl(optimizer="bobyqa")), gives_warning("better fit"))
# expect_that(mixed(induct ~ cond*cog + (cog|room:cond), md_16.4, type=2, control=lmerControl(optimizer="bobyqa")), gives_warning("better fit"))
# })
test_that("mixed: set.data.arg", {
data(obk.long, package = "afex")
suppressWarnings(m1 <- mixed(value ~ treatment*phase +(1|id), obk.long, method = "LRT", progress=FALSE))
suppressWarnings(m2 <- mixed(value ~ treatment*phase +(1|id), obk.long, method = "LRT", progress=FALSE, set.data.arg = FALSE))
expect_that(m1$full.model@call[["data"]], is_identical_to(as.name("obk.long")))
expect_that(m2$full.model@call[["data"]], is_identical_to(as.name("data")))
})
test_that("mixed: anova with multiple mixed objexts", {
data("sk2011.2")
data("ks2013.3")
sk2_aff <- droplevels(sk2011.2[sk2011.2$what == "affirmation",])
sk_m1 <- mixed(response ~ instruction+(1|id), sk2_aff, method = "LRT", progress = FALSE)
sk_m2 <- mixed(response ~ instruction+(1|id)+(1|content), sk2_aff, method = "LRT", progress = FALSE)
sk_m3 <- lmer(response ~ instruction+(1|id)+(validity|content), sk2_aff, REML = FALSE)
sk_m4 <- lmer(response ~ instruction+(1|id)+(validity|content), sk2_aff, REML = TRUE)
t <- anova(sk_m1, sk_m2, sk_m3)
expect_is(t, c("anova", "data.frame"))
expect_is(anova(sk_m1, object = sk_m2, sk_m3), c("anova", "data.frame"))
expect_is(anova(sk_m1, object = sk_m2, sk_m3, ks2013.3), c("anova", "data.frame"))
expect_warning(anova(sk_m1, object = sk_m2, sk_m3, sk_m4), "some models fit with REML = TRUE, some not")
})
context("Mixed: Expand random effects")
test_that("mixed: expand_re argument, return = 'merMod'", {
data("ks2013.3")
m2 <- mixed(response ~ validity + (believability||id), ks2013.3, expand_re = TRUE, method = "LRT", progress=FALSE)
m3 <- mixed(response ~ validity + (believability|id), ks2013.3, method = "LRT", progress=FALSE)
expect_identical(length(unlist(summary(m2)$varcor)), nrow(summary(m3)$varcor$id))
expect_true(all.equal(unlist(summary(m2)$varcor), diag(summary(m3)$varcor$id), tolerance = 0.03, check.attributes = FALSE))
l2 <- mixed(response ~ validity + (believability||id), ks2013.3, expand_re = TRUE, return = "merMod")
expect_is(l2, "merMod")
expect_equivalent(m2$full.model, l2)
l3 <- lmer_alt(response ~ validity + (believability||id), ks2013.3)
l4 <- lmer_alt(response ~ validity + (believability||id), ks2013.3, control = lmerControl(optimizer = "Nelder_Mead"))
expect_equivalent(l2, l3)
expect_equal(l3, l4, check.attributes = FALSE)
l5 <- lmer_alt(response ~ validity + (believability||id), ks2013.3, control = lmerControl(optimizer = "Nelder_Mead"), check.contrasts = TRUE)
expect_equal(l2, l5, check.attributes = FALSE )
expect_identical(names(coef(l2)$id), names(coef(l5)$id)) # parameter names need to be identical (same contrasts)
expect_false(all(names(coef(l2)$id) == names(coef(l3)$id))) # parameter names need to be different (different contrasts)
l7 <- lmer_alt(response ~ validity + (1|id) + (0+validity*condition||content), ks2013.3, control = lmerControl(optCtrl = list(maxfun=1e6)))
expect_is(l7, "merMod")
expect_error(lmer_alt(response ~ validity + (0|id) + (0+validity*condition||content), ks2013.3), "Invalid random effects term")
expect_is(lmer_alt(response ~ validity + (validity||id) + (validity|content), ks2013.3), "merMod")
})
test_that("mixed: expand_re argument (longer)", {
if (packageVersion("testthat") >= "0.9") {
testthat::skip_on_cran()
testthat::skip_on_travis()
data("ks2013.3")
m4 <- mixed(response ~ validity + (believability*validity||id) + (validity*condition|content), ks2013.3, expand_re = TRUE, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), progress=FALSE)
m5 <- suppressWarnings(mixed(response ~ validity + (believability*validity|id) + (validity*condition||content), ks2013.3, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), expand_re = TRUE, progress=FALSE))
expect_identical(length(unlist(summary(m4)$varcor[-7])), nrow(summary(m5)$varcor$id))
expect_identical(length(unlist(summary(m5)$varcor[-1])), nrow(summary(m4)$varcor$content))
expect_equal(attr(summary(m5)$varcor, "sc"), attr(summary(m4)$varcor, "sc"), tolerance = 0.02)
}
})
test_that("mixed: return=data, expand_re argument, and allFit", {
#if (packageVersion("testthat") >= "0.9") {
if (FALSE) {
testthat::skip_on_cran()
testthat::skip_on_travis()
data("ks2013.3")
ks2013.3_tmp <- ks2013.3
m6 <- mixed(response ~ validity + (believability*validity||id), ks2013.3_tmp, expand_re = TRUE, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), progress=FALSE, return = "merMod")
m6_all_1 <- allFit(m6, verbose = FALSE, data = ks2013.3_tmp)
expect_output(print(m6_all_1$`bobyqa.`), "object 're1.believability1' not found")
ks2013.3_tmp <- mixed(response ~ validity + (believability*validity||id), ks2013.3_tmp, expand_re = TRUE, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), progress=FALSE, return = "data")
m6_all_2 <- suppressWarnings(allFit(m6, verbose = FALSE, data = ks2013.3_tmp))
expect_is(m6_all_2$`bobyqa.`, "merMod")
expect_is(m6_all_2$`Nelder_Mead.`, "merMod")
}
})
test_that("mixed: return=data works", {
data("ks2013.3")
ks2013.3_tmp <- ks2013.3
ks2013.3_tmp <- mixed(response ~ validity + (believability*validity||id), ks2013.3_tmp, expand_re = TRUE, method = "LRT", control = lmerControl(optCtrl = list(maxfun=1e6)), progress=FALSE, return = "data")
expect_is(ks2013.3_tmp, "data.frame")
if (packageVersion("testthat") >= "0.11.0.9000") expect_gt(ncol(ks2013.3_tmp), ncol(ks2013.3))
expect_output(print(colnames(ks2013.3_tmp)), "re1.believability1_by_validity1")
})
|
## Purpose: Long COVID risk factors and prediction models
## Author:  Yinghui Wei
## Content: Count and pie chart of snomed code for long COVID diagnosis
## Output:  suppl_table_1.csv, suppl_table_1.html, suppl_figure_pie.svg
## function for small number suppression
# redactor2() presumably replaces counts below a disclosure threshold with
# NA -- TODO confirm against analysis/functions/redactor2.R.
source("analysis/functions/redactor2.R")
# Ensure the output directories exist before any file is written.
fs::dir_create(here::here("output", "review", "descriptives"))
fs::dir_create(here::here("output", "not_for_review", "descriptives"))
library(readr); library(dplyr); library(ggplot2)
## Read in data and identify factor variables and numerical variables------------
input <- read_rds("output/input_stage1_all.rds")
## keep only observations where long covid indicator is 1
input <- input %>% filter(lcovid_cens == 1)
## computational efficiency: only keep the needed variable
input <- input %>% dplyr::select("out_first_long_covid_code")
# Tabulate how often each SNOMED code appears as the first long COVID code.
snomed_code <- input$out_first_long_covid_code
count_data <-table(snomed_code)
count_data <- data.frame(count_data)
names(count_data) <- c("snomed_code", "count")
count_data
# Proportion of records per code, rounded to 3 decimal places.
count_data$percent = round(count_data$count / sum(count_data$count),3)
count_data
# Format a proportion as a percentage string, e.g. 0.123 -> "12.3%".
# `digits`, `format` and any extra arguments are forwarded to formatC().
percent_function <- function(x, digits = 1, format = "f", ...) {
  as_percent <- 100 * x
  formatted <- formatC(as_percent, format = format, digits = digits, ...)
  paste0(formatted, "%")
}
# Human-readable percentage labels for the pie chart segments.
count_data$labels = percent_function(count_data$percent)
# Small-number suppression: redactor2() turns disclosive counts into NA
# (presumably counts below a fixed threshold -- TODO confirm).
count_data$count <- redactor2(count_data$count)
# Blank out percent/labels wherever the count itself was redacted, so no
# suppressed value can be back-calculated.
index = which(is.na(count_data$count))
col_names <- c("count","percent","labels")
count_data[index,col_names]= NA
## use redactor for small number suppression
## index <- which(count_data$count < 6)
## count_data$count[index] = count_data$percent[index] = count_data$labels[index] = NA
# Keep only non-redacted codes (counts > 5) for plotting.
count_data_active = count_data%>%filter(count>5)
## Pie Chart
# Stacked bar in polar coordinates = pie; one slice per SNOMED code,
# labelled with its percentage.
suppl_figure_pie <- ggplot(count_data_active, aes(x = "", y = count, fill = snomed_code)) +
  geom_bar(width = 1, stat = "identity") +
  coord_polar(theta = "y") +
  labs(x = "", y = "", fill = "SNOMED Code") +
  geom_text(aes(label = labels),
            position = position_stack(vjust = 0.5)) +
  theme(plot.title = element_text(hjust = 0.5),
        legend.title = element_text(hjust = 0.5, face="bold", size = 10),
        axis.ticks = element_blank(),
        axis.text.y = element_blank(),
        axis.text.x = element_blank())
suppl_figure_pie
## supplementary figure - pie chart
ggsave(file="output/not_for_review/descriptives/snomed_code_pie.svg", plot=suppl_figure_pie, width=16, height=8)
## output underlying count data for supplementary figure - pie chart
## small number suppression - indicate NA as redacted
count_data[which(is.na(count_data$count)),col_names]="[redacted]"
write.csv(count_data, file="output/not_for_review/descriptives/snomed_code_table.csv")
| /analysis/table_snomed_code.R | permissive | opensafely/long-covid-risk-factors-and-prediction | R | false | false | 2,697 | r | ## Purpose: Long COVID risk factors and prediction models
## Author: Yinghui Wei
## Content: Count and pie chart of snomed code for long COVID diagnosis
## Output: suppl_table_1.csv, suppl_table_1.html, suppl_figure_pie.svg
## function for small number suppression
source("analysis/functions/redactor2.R")
fs::dir_create(here::here("output", "review", "descriptives"))
fs::dir_create(here::here("output", "not_for_review", "descriptives"))
library(readr); library(dplyr); library(ggplot2)
## Read in data and identify factor variables and numerical variables------------
input <- read_rds("output/input_stage1_all.rds")
## keep only observations where long covid indicator is 1
input <- input %>% filter(lcovid_cens == 1)
## computational efficiency: only keep the needed variable
input <- input %>% dplyr::select("out_first_long_covid_code")
snomed_code <- input$out_first_long_covid_code
count_data <-table(snomed_code)
count_data <- data.frame(count_data)
names(count_data) <- c("snomed_code", "count")
count_data
count_data$percent = round(count_data$count / sum(count_data$count),3)
count_data
percent_function <- function(x, digits = 1, format = "f", ...) {
paste0(formatC(100 * x, format = format, digits = digits, ...), "%")
}
count_data$labels = percent_function(count_data$percent)
count_data$count <- redactor2(count_data$count)
index = which(is.na(count_data$count))
col_names <- c("count","percent","labels")
count_data[index,col_names]= NA
## use redactor for small number suppression
## index <- which(count_data$count < 6)
## count_data$count[index] = count_data$percent[index] = count_data$labels[index] = NA
count_data_active = count_data%>%filter(count>5)
## Pie Chart
suppl_figure_pie <- ggplot(count_data_active, aes(x = "", y = count, fill = snomed_code)) +
geom_bar(width = 1, stat = "identity") +
coord_polar(theta = "y") +
labs(x = "", y = "", fill = "SNOMED Code") +
geom_text(aes(label = labels),
position = position_stack(vjust = 0.5)) +
theme(plot.title = element_text(hjust = 0.5),
legend.title = element_text(hjust = 0.5, face="bold", size = 10),
axis.ticks = element_blank(),
axis.text.y = element_blank(),
axis.text.x = element_blank())
suppl_figure_pie
## supplementary figure - pie chart
ggsave(file="output/not_for_review/descriptives/snomed_code_pie.svg", plot=suppl_figure_pie, width=16, height=8)
## output underlying count data for supplementary figure - pie chart
## small number suppression - indicate NA as redacted
count_data[which(is.na(count_data$count)),col_names]="[redacted]"
write.csv(count_data, file="output/not_for_review/descriptives/snomed_code_table.csv")
|
# Bakeman & McArthur correction (for long data): id = column with subject id,
# dv = column with dependent variable.
# Removes between-subject variability by re-centring every subject's scores
# on the grand mean of the subject means; returns a corrected copy of `data`.
BakemanL <- function (data, id=1, dv=2) {
  idvar <- data[,id]
  # One row per subject with that subject's mean dv. aggregate() returns its
  # groups in sorted order, which need not match first-appearance order.
  subjMeans <- aggregate(x=data[,dv], by=list(data[,id]), FUN=mean)
  grandMean <- mean(subjMeans[,2])  # hoisted: loop-invariant
  corrdata <- data
  # Bug fix: the original indexed subjects via unique(idvar)[ii] but took
  # the mean from subjMeans[ii,2]; when subjects do not appear in sorted
  # order these disagree and the wrong subject mean is subtracted. Match
  # rows against the id stored in subjMeans itself instead.
  for (ii in seq_len(nrow(subjMeans))) {
    rows <- idvar == subjMeans[ii, 1]
    corrdata[rows, dv] <- corrdata[rows, dv] - subjMeans[ii, 2] + grandMean
  }
  corrdata
}
# Bakeman & McArthur correction (for long data): id = column with subject id,
# dv = column with dependent variable.
# Removes between-subject variability by re-centring every subject's scores
# on the grand mean of the subject means; returns a corrected copy of `data`.
BakemanL <- function (data, id=1, dv=2) {
  idvar <- data[,id]
  # One row per subject with that subject's mean dv. aggregate() returns its
  # groups in sorted order, which need not match first-appearance order.
  subjMeans <- aggregate(x=data[,dv], by=list(data[,id]), FUN=mean)
  grandMean <- mean(subjMeans[,2])  # hoisted: loop-invariant
  corrdata <- data
  # Bug fix: the original indexed subjects via unique(idvar)[ii] but took
  # the mean from subjMeans[ii,2]; when subjects do not appear in sorted
  # order these disagree and the wrong subject mean is subtracted. Match
  # rows against the id stored in subjMeans itself instead.
  for (ii in seq_len(nrow(subjMeans))) {
    rows <- idvar == subjMeans[ii, 1]
    corrdata[rows, dv] <- corrdata[rows, dv] - subjMeans[ii, 2] + grandMean
  }
  corrdata
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scate_functions.R
\name{init_binary_matrix}
\alias{init_binary_matrix}
\title{Initialize binary matrices given graph}
\usage{
init_binary_matrix(graph)
}
\arguments{
\item{graph}{igraph object}
}
\value{
List of matrices
}
\description{
Call matrices are populated with different parameters
}
\examples{
g <- igraph::make_graph(c("A", "B", "B", "C", "C", "D"), directed = TRUE)
init_binary_matrix(g)
}
| /man/init_binary_matrix.Rd | permissive | uyedaj/rphenoscate | R | false | true | 480 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scate_functions.R
\name{init_binary_matrix}
\alias{init_binary_matrix}
\title{Initialize binary matrices given graph}
\usage{
init_binary_matrix(graph)
}
\arguments{
\item{graph}{igraph object}
}
\value{
List of matrices
}
\description{
Call matrices are populated with different parameters
}
\examples{
g <- igraph::make_graph(c("A", "B", "B", "C", "C", "D"), directed = TRUE)
init_binary_matrix(g)
}
|
# rankall: for every state, find the hospital holding ranking `num` on the
# given `outcome`, using the Hospital Compare outcome-of-care-measures data.
#
# Args:
#   outcome:   outcome name understood by rankhospital().
#   num:       ranking to report -- "best" (default), "worst", or a rank.
#   data_file: path to outcome-of-care-measures.csv. Defaults to the
#              original hard-coded location so existing calls are unchanged;
#              generalized so the function is usable on other machines.
#
# Returns a data frame with one row per state (alphabetical order):
# column `hospital` (name chosen by rankhospital()) and column `state`
# (2-character abbreviation). Hospitals without data on the outcome are
# excluded from the rankings by rankhospital().
rankall <- function(outcome, num = "best",
                    data_file = paste0("/home/bridget/Coursera/RProgramming/rprog_",
                                       "data_ProgAssignment3-data/outcome-of-care-",
                                       "measures.csv")) {
  data <- read.csv(data_file, colClasses = "character")
  # List of state abbreviations in alphabetical order.
  states <- sort(unique(data$State))
  # rankhospital() is expected to live in the current working directory.
  source("rankhospital.R")
  # NOTE(review): sapply() kept because rankhospital()'s return type
  # (character name vs NA) is not visible here; vapply() could reject NA.
  hospital_name <- sapply(states, rankhospital, outcome = outcome, num = num)
  # Construct the two-column output data frame.
  data.frame(hospital = hospital_name, state = states)
}
# a hospital ranking (num). This function reads the
# outcome-of-care-measures.csv file and returns a 2-column data frame
# containing the hospital in each state that has the ranking specified by
# num.
# The first column in the data frame is named hospital, which contains the
# hospital name, and the second column is named state, which contains the
# 2-character abbreviation for the state.
# Hospitals that do not have data on a particular outcome are excluded
# from the set of hospitals when deciding the rankings.
rankall <- function(outcome,num="best") {
# list of state abbreviations in alphabetical order
data <- read.csv(paste("/home/bridget/Coursera/RProgramming/rprog_",
"data_ProgAssignment3-data/outcome-of-care-",
"measures.csv",sep=""),colClasses="character")
states <- unique(data$State)
states <- states[order(states)]
# apply rankhospital.R to all of these states
source("rankhospital.R")
hospital_name <- sapply(states,rankhospital,outcome=outcome,num=num)
# construct the output data frame
output <- data.frame("hospital"=hospital_name,"state"=states)
# colnames(output) <- c("hospital","state")
output
} |
# Home Work - 3
# Viveksinh Solanki
# NOTE(review): clearing the workspace and hard-coding an absolute working
# directory inside a script destroys the caller's session state and makes
# the script non-portable; prefer project-relative paths.
rm(list=ls())
setwd('E:/STEVENS/study/FE-582/assignments/asst3/')
getwd()
### Extract top 10 and bottom 10 pairs by values ###
# Return the ten largest (top = TRUE) or ten smallest (top = FALSE) entries
# of matrix `m`, as a one-element list: list(values = <length-10 vector>).
# Keeping element [1] returns the values; [2] would instead return the
# (row, col) positions computed by arrayInd().
getTopOrBottom10 <- function(m, top = TRUE) {
  # Rank all matrix cells by value, descending for top, ascending for bottom.
  ranked <- if (top) order(m, decreasing = TRUE) else order(m)
  o <- ranked[1:10]
  pos <- arrayInd(o, dim(m), useNames = TRUE)
  list(values = m[o], position = pos)[1]
}
### Extract top 10 and bottom 10 pairs by values (removing similarities with itself)
# As getTopOrBottom10(), but return ranks 101-110 instead of 1-10: with a
# 100x100 similarity matrix this presumably skips each item's (trivially
# maximal) similarity with itself before taking the top/bottom ten.
getTopOrBottom10_removing100 <- function(m, top = TRUE) {
  ranked <- if (top) order(m, decreasing = TRUE) else order(m)
  o <- ranked[101:110]
  pos <- arrayInd(o, dim(m), useNames = TRUE)
  # [1] keeps only the values; [2] would keep the (row, col) positions.
  list(values = m[o], position = pos)[1]
}
## Read data into dataframes
sec_df <- read.csv('securities.csv')
fund_df <- read.csv('fundamentals.csv')
# To view files as table
#View(sec_df)
#View(fund_df)
# Subset for year 2013
fund_df_year_2013 <- subset(fund_df, fund_df$For.Year == '2013')
#View(fund_df_year_2013)
# Remove missing values (drops any row with at least one NA)
fund_df_year_2013_processed <- na.omit(fund_df_year_2013)
#View(fund_df_year_2013_processed)
# Subset of 100 tickers
# NOTE(review): sample() is not seeded, so this subset -- and every result
# downstream -- differs between runs; add set.seed() for reproducibility.
fund_df_100_tickers <- fund_df_year_2013_processed[sample(nrow(fund_df_year_2013_processed),
                                                          100), ]
#View((fund_df_100_tickers))
# Subset 10 quantitative columns
col_names <- c('After.Tax.ROE', 'Cash.Ratio', 'Current.Ratio', 'Operating.Margin',
               'Pre.Tax.Margin', 'Pre.Tax.ROE', 'Profit.Margin', 'Quick.Ratio',
               'Total.Assets', 'Total.Liabilities')
fund_df_final_subset <- fund_df_100_tickers[col_names]
#View(fund_df_final_subset)
# Normalized subset (scale() centres and standardises each column)
fund_df_final_subset_scaled <- scale(fund_df_final_subset)
#View(fund_df_final_subset_scaled)
### Lp-norm calculation ###
# Generalized Lp-norm between two numeric vectors:
#   (sum_i |x_i - y_i|^p)^(1/p)
lp_norm <- function(x, y, p) {
  abs_diffs <- abs(x - y)
  sum(abs_diffs^p)^(1 / p)
}
## a) lp-norm: p=1
# Pairwise L1 (Manhattan) distances between the 100 scaled records.
# Only the strict upper triangle (j > i) is filled; the diagonal and
# lower triangle stay NA, so each pair is stored exactly once.
lp_norm_1_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
lp_norm_1_matrix[i,j] <- lp_norm(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
1)
}
}
}
# Top 10 values for lp-norm where p=1
getTopOrBottom10(lp_norm_1_matrix)
# Bottom 10 values for lp-norm where p=1
getTopOrBottom10(lp_norm_1_matrix, top = FALSE)
## b) lp-norm: p=2
# Same pairwise computation with the Euclidean (L2) norm.
lp_norm_2_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
lp_norm_2_matrix[i,j] <- lp_norm(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
2)
}
}
}
# Top 10 values for lp-norm where p=2
getTopOrBottom10(lp_norm_2_matrix)
# Bottom 10 values for lp-norm where p=2
getTopOrBottom10(lp_norm_2_matrix, top = FALSE)
## c) lp-norm: p=3
# Same pairwise computation with the L3 norm.
lp_norm_3_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
lp_norm_3_matrix[i,j] <- lp_norm(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
3)
}
}
}
# Top 10 values for lp-norm where p=3
getTopOrBottom10(lp_norm_3_matrix)
# Bottom 10 values for lp-norm where p=3
getTopOrBottom10(lp_norm_3_matrix, top = FALSE)
## d) lp-norm: p=10
# Same pairwise computation with the L10 norm (approaches the max norm).
lp_norm_10_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
lp_norm_10_matrix[i,j] <- lp_norm(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
10)
}
}
}
# Top 10 values for lp-norm where p=10
getTopOrBottom10(lp_norm_10_matrix)
# Bottom 10 values for lp-norm where p=10
getTopOrBottom10(lp_norm_10_matrix, top = FALSE)
## e) Minkovski function - taking p=2 (square root)
# Variable importance based on random forest
# NOTE(review): install.packages() in a script body re-installs on every
# run; normally done once interactively.
install.packages('party')
library(party)
# Taking "profit margin" as target variable
cf1 <- cforest(Profit.Margin ~ . , data= data.frame(fund_df_final_subset_scaled),
control=cforest_unbiased(mtry=2,ntree=50))
weights <- varimp(cf1)
# initialize default weight vec to all values 1
weights_vec <- c(1,1,1,1,1,1,1,1,1,1)
# add random forest weights to weight vec
# varimp() returns importances for the 9 predictors only; col_names[7]
# ("Profit.Margin") is the target, so its slot keeps the default weight 1
# while predictor importances fill positions 1-6 and 8-10.
for(i in 1:9){
if(i>=7){
weights_vec[i+1] <- weights[[i]]
}else{
weights_vec[i] <- weights[[i]]
}
}
# Generalized minkovski function
# Weighted Minkowski distance: (sum_i w_i * |x_i - y_i|^p)^(1/p).
# Depends on the global `weights_vec` built above (random-forest
# importances), multiplied elementwise with the per-feature differences.
minkovski_dist = function(x, y, p){
return(sum(weights_vec * (abs(x-y))^p)^(1/p))
}
# Pairwise weighted distances; upper triangle only, as in the lp-norm
# matrices above.
minkovski_dist_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
minkovski_dist_matrix[i,j] <- minkovski_dist(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
2)
}
}
}
#View(minkovski_dist_matrix)
# Top 10 values for minkovski where p=2
getTopOrBottom10(minkovski_dist_matrix)
# Bottom 10 values for minkovski where p=2
getTopOrBottom10(minkovski_dist_matrix, top = FALSE)
## f) Match based similarity
# Match-based similarity between two feature vectors:
#   ( sum_i (1 - |x_i - y_i| / 2)^p )^(1/p)
# Generalized from the original hard-coded 10 features to vectors of any
# common length (the old `for (i in 1:10)` indexed past the end of shorter
# vectors, yielding NA). For the 10-column data used in this script the
# result is unchanged.
match_based_sim <- function(x, y, p) {
  final_sum <- 0
  for (i in seq_along(x)) {
    final_sum <- final_sum + ((1 - (abs(x[i] - y[i])) / 2))^p
  }
  return((final_sum)^(1 / p))
}
# taking p=2
# Pairwise match-based similarities on the scaled numeric data; upper
# triangle only, as before.
match_based_sim_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
match_based_sim_matrix[i,j] <- match_based_sim(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
2)
}
}
}
#View(match_based_sim_matrix)
# Top 10 values for match based similarity where p=2
getTopOrBottom10(match_based_sim_matrix)
# Bottom 10 values for match based similarity where p=2
getTopOrBottom10(match_based_sim_matrix, top = FALSE)
## g) Mahalanobis distance
# Accounts for correlations between features via the covariance matrix.
install.packages('StatMatch')
library(StatMatch)
mahalanobisDist <- mahalanobis.dist(fund_df_final_subset_scaled)
#View(mahalanobisDist)
# Top 10 values for mahalanobis
getTopOrBottom10(mahalanobisDist)
# Bottom 10 values for mahalanobis
# removing bottom 100 values, because they are comparision
# of each record with itself
getTopOrBottom10_removing100(mahalanobisDist, top=FALSE)
# create subset with categorical data as well
combined_df_subset <- merge(x=fund_df_100_tickers, y=sec_df, by='Ticker.Symbol')
#View(combined_df_subset)
# subset only categorical columns
cat_col_names <- c('GICS.Sector', 'GICS.Sub.Industry')
combined_df_final_subset <- combined_df_subset[cat_col_names]
#View(combined_df_final_subset)
## h) Overlap measure
# Overlap similarity: the count of categorical attributes on which the two
# records take the same value (0, 1 or 2 here).
overlap_sims <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
overlap_sims[i,j] <- sum(match(combined_df_final_subset[i, ],
combined_df_final_subset[j, ], nomatch=0)>0)
}
}
}
#View(overlap_sims)
# Top 10 values for overlap measure
getTopOrBottom10(overlap_sims)
# Bottom 10 values for overlap measure
getTopOrBottom10(overlap_sims, top = FALSE)
## i) Inverse frequency
install.packages('nomclust')
library('nomclust')
inverse_freq_measure <- iof(combined_df_final_subset)
#View(inverse_freq_measure)
# Top 10 values for Inverse frequency
getTopOrBottom10(inverse_freq_measure)
# Bottom 10 values for Inverse frequency
# removing bottom 100 values, because they are comparision
# of each record with itself
getTopOrBottom10_removing100(inverse_freq_measure, top=FALSE)
## j) Goodall measure
goodall_measure <- good1(combined_df_final_subset)
#View(goodall_measure)
# Top 10 values for Goodall measure
getTopOrBottom10(goodall_measure)
# Bottom 10 values for Goodall measure
# removing bottom 100 values, because they are comparision
# of each record with itself
getTopOrBottom10_removing100(goodall_measure, top=FALSE)
# Overall similarity on mixed type data
## k) Unnormalized
# Convex combination of the numeric and categorical measures with weight
# lambda on the numeric part.
# NOTE(review): num_sim is a distance while the iof() value is a
# dissimilarity measure -- "similarity" naming here is loose; confirm the
# intended direction before interpreting top/bottom ranks.
overall_sims_unnorm <- matrix(, nrow = 100, ncol = 100)
lambda <- 0.7
for(i in 1:100){
for(j in i:100){
if(i!=j){
num_sim <- minkovski_dist_matrix[i,j]
cat_sim <- inverse_freq_measure[i,j]
overall_sims_unnorm[i,j] <- lambda * num_sim + (1-lambda) * cat_sim
}
}
}
#View(overall_sims_unnorm)
# Top 10 values for Overall similarity unnormalized
getTopOrBottom10(overall_sims_unnorm)
# Bottom 10 values for Overall similarity unnormalized
getTopOrBottom10(overall_sims_unnorm, top = FALSE)
## l) Normalized
# Overall similarity with each component rescaled by its feature count
# before the lambda-weighted combination.
overall_sims_norm <- matrix(, nrow = 100, ncol = 100)
lambda <- 0.7
sigma_num <- 10 # number of numeric features
sigma_cat <- 2  # number of categorical features
for (i in 1:100) {
  for (j in i:100) {
    if (i != j) {
      num_sim <- minkovski_dist_matrix[i, j]
      cat_sim <- inverse_freq_measure[i, j]
      # BUG FIX: the "+" must end the first line. In the original, the
      # continuation line began with "+", which R parses as a new,
      # discarded expression -- so the categorical term was silently
      # dropped and overall_sims_norm held only the numeric component.
      overall_sims_norm[i, j] <- lambda * (num_sim / sigma_num) +
        (1 - lambda) * (cat_sim / sigma_cat)
    }
  }
}
#View(overall_sims_norm)
# Top 10 values for Overall similarity normalized
getTopOrBottom10(overall_sims_norm)
# Bottom 10 values for Overall similarity normalized
getTopOrBottom10(overall_sims_norm, top = FALSE)
| /FE_582_Foundation_of_Financial_Datascience/Assignments/Assignment3/HW3.R | no_license | TheHexa1/Stevens2018-2020 | R | false | false | 9,935 | r | # Home Work - 3
# Viveksinh Solanki
rm(list=ls())
setwd('E:/STEVENS/study/FE-582/assignments/asst3/')
getwd()
### Extract top 10 and bottom 10 pairs by values ###
getTopOrBottom10 = function(m, top=TRUE){
# Ranking pairs by distance/similarity values
if(top==TRUE){
o <- order(m, decreasing = TRUE)[1:10]
}else{
o <- order(m)[1:10]
}
pos <- arrayInd(o, dim(m), useNames = TRUE)
# returns top/bottom values, if you want to return top/bottom indices use [2]
# instead of [1]
output_values <- list(values = m[o], position = pos)[1]
return(output_values)
}
### Extract top 10 and bottom 10 pairs by values (removing similarities with itself)
getTopOrBottom10_removing100 = function(m, top=TRUE){
# Ranking pairs by distance/similarity values
if(top==TRUE){
o <- order(m, decreasing = TRUE)[101:110]
}else{
o <- order(m)[101:110]
}
pos <- arrayInd(o, dim(m), useNames = TRUE)
# returns top/bottom values, if you want to return top/bottom indices use [2]
# instead of [1]
output_values <- list(values = m[o], position = pos)[1]
return(output_values)
}
## Read data into dataframes
sec_df <- read.csv('securities.csv')
fund_df <- read.csv('fundamentals.csv')
# To view files as table
#View(sec_df)
#View(fund_df)
# Subset for year 2013
fund_df_year_2013 <- subset(fund_df, fund_df$For.Year == '2013')
#View(fund_df_year_2013)
# Remove missing values
fund_df_year_2013_processed <- na.omit(fund_df_year_2013)
#View(fund_df_year_2013_processed)
# Subset of 100 tickers
fund_df_100_tickers <- fund_df_year_2013_processed[sample(nrow(fund_df_year_2013_processed),
100), ]
#View((fund_df_100_tickers))
# Subset 10 quantitative columns
col_names <- c('After.Tax.ROE', 'Cash.Ratio', 'Current.Ratio', 'Operating.Margin',
'Pre.Tax.Margin', 'Pre.Tax.ROE', 'Profit.Margin', 'Quick.Ratio',
'Total.Assets', 'Total.Liabilities')
fund_df_final_subset <- fund_df_100_tickers[col_names]
#View(fund_df_final_subset)
# Normalized subset
fund_df_final_subset_scaled <- scale(fund_df_final_subset)
#View(fund_df_final_subset_scaled)
### Lp-norm calculation ###
# Generalized Lp-norm function
lp_norm = function(x, y, p){
return(sum((abs(x-y))^p)^(1/p))
}
## a) lp-norm: p=1
lp_norm_1_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
lp_norm_1_matrix[i,j] <- lp_norm(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
1)
}
}
}
# Top 10 values for lp-norm where p=1
getTopOrBottom10(lp_norm_1_matrix)
# Bottom 10 values for lp-norm where p=1
getTopOrBottom10(lp_norm_1_matrix, top = FALSE)
## b) lp-norm: p=2
lp_norm_2_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
lp_norm_2_matrix[i,j] <- lp_norm(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
2)
}
}
}
# Top 10 values for lp-norm where p=2
getTopOrBottom10(lp_norm_2_matrix)
# Bottom 10 values for lp-norm where p=2
getTopOrBottom10(lp_norm_2_matrix, top = FALSE)
## c) lp-norm: p=3
lp_norm_3_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
lp_norm_3_matrix[i,j] <- lp_norm(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
3)
}
}
}
# Top 10 values for lp-norm where p=3
getTopOrBottom10(lp_norm_3_matrix)
# Bottom 10 values for lp-norm where p=3
getTopOrBottom10(lp_norm_3_matrix, top = FALSE)
## d) lp-norm: p=10
lp_norm_10_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
lp_norm_10_matrix[i,j] <- lp_norm(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
10)
}
}
}
# Top 10 values for lp-norm where p=10
getTopOrBottom10(lp_norm_10_matrix)
# Bottom 10 values for lp-norm where p=10
getTopOrBottom10(lp_norm_10_matrix, top = FALSE)
## e) Minkovski function - taking p=2 (square root)
# Variable importance based on random forest
install.packages('party')
library(party)
# Taking "profit margin" as target variable
cf1 <- cforest(Profit.Margin ~ . , data= data.frame(fund_df_final_subset_scaled),
control=cforest_unbiased(mtry=2,ntree=50))
weights <- varimp(cf1)
# initialize default weight vec to all values 1
weights_vec <- c(1,1,1,1,1,1,1,1,1,1)
# add random forest weights to weight vec
for(i in 1:9){
if(i>=7){
weights_vec[i+1] <- weights[[i]]
}else{
weights_vec[i] <- weights[[i]]
}
}
# Generalized minkovski function
minkovski_dist = function(x, y, p){
return(sum(weights_vec * (abs(x-y))^p)^(1/p))
}
minkovski_dist_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
minkovski_dist_matrix[i,j] <- minkovski_dist(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
2)
}
}
}
#View(minkovski_dist_matrix)
# Top 10 values for minkovski where p=2
getTopOrBottom10(minkovski_dist_matrix)
# Bottom 10 values for minkovki where p=2
getTopOrBottom10(minkovski_dist_matrix, top = FALSE)
## f) Match based similarity
match_based_sim = function(x, y, p){
final_sum = 0
for(i in 1:10){
final_sum = final_sum + ((1 - (abs(x[i]-y[i]))/2))^p
}
return((final_sum)^(1/p))
}
# taking p=2
match_based_sim_matrix <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
match_based_sim_matrix[i,j] <- match_based_sim(fund_df_final_subset_scaled[i, ],
fund_df_final_subset_scaled[j, ],
2)
}
}
}
#View(match_based_sim_matrix)
# Top 10 values for match based similarity where p=2
getTopOrBottom10(match_based_sim_matrix)
# Bottom 10 values for match based similarity where p=2
getTopOrBottom10(match_based_sim_matrix, top = FALSE)
## g) Mahalanobis distance
install.packages('StatMatch')
library(StatMatch)
mahalanobisDist <- mahalanobis.dist(fund_df_final_subset_scaled)
#View(mahalanobisDist)
# Top 10 values for mahalanobis
getTopOrBottom10(mahalanobisDist)
# Bottom 10 values for mahalanobis
# removing bottom 100 values, because they are comparision
# of each record with itself
getTopOrBottom10_removing100(mahalanobisDist, top=FALSE)
# create subset with categorical data as well
combined_df_subset <- merge(x=fund_df_100_tickers, y=sec_df, by='Ticker.Symbol')
#View(combined_df_subset)
# subset only categorical columns
cat_col_names <- c('GICS.Sector', 'GICS.Sub.Industry')
combined_df_final_subset <- combined_df_subset[cat_col_names]
#View(combined_df_final_subset)
## h) Overlap measure
overlap_sims <- matrix(, nrow = 100, ncol = 100)
for(i in 1:100){
for(j in i:100){
if(i!=j){
overlap_sims[i,j] <- sum(match(combined_df_final_subset[i, ],
combined_df_final_subset[j, ], nomatch=0)>0)
}
}
}
#View(overlap_sims)
# Top 10 values for overlap measure
getTopOrBottom10(overlap_sims)
# Bottom 10 values for overlap measure
getTopOrBottom10(overlap_sims, top = FALSE)
## i) Inverse frequency
install.packages('nomclust')
library('nomclust')
inverse_freq_measure <- iof(combined_df_final_subset)
#View(inverse_freq_measure)
# Top 10 values for Inverse frequency
getTopOrBottom10(inverse_freq_measure)
# Bottom 10 values for Inverse frequency
# removing bottom 100 values, because they are comparision
# of each record with itself
getTopOrBottom10_removing100(inverse_freq_measure, top=FALSE)
## j) Goodall measure
goodall_measure <- good1(combined_df_final_subset)
#View(goodall_measure)
# Top 10 values for Goodall measure
getTopOrBottom10(goodall_measure)
# Bottom 10 values for Goodall measure
# removing bottom 100 values, because they are comparision
# of each record with itself
getTopOrBottom10_removing100(goodall_measure, top=FALSE)
# Overall similarity on mixed type data
## k) Unnormalized
overall_sims_unnorm <- matrix(, nrow = 100, ncol = 100)
lambda <- 0.7
for(i in 1:100){
for(j in i:100){
if(i!=j){
num_sim <- minkovski_dist_matrix[i,j]
cat_sim <- inverse_freq_measure[i,j]
overall_sims_unnorm[i,j] <- lambda * num_sim + (1-lambda) * cat_sim
}
}
}
#View(overall_sims_unnorm)
# Top 10 values for Overall similarity unnormalized
getTopOrBottom10(overall_sims_unnorm)
# Bottom 10 values for Overall similarity unnormalized
getTopOrBottom10(overall_sims_unnorm, top = FALSE)
## l) Normalized
overall_sims_norm <- matrix(, nrow = 100, ncol = 100)
lambda <- 0.7
sigma_num <- 10 #number of numeric features
sigma_cat <- 2 #number of categrical features
for(i in 1:100){
for(j in i:100){
if(i!=j){
num_sim <- minkovski_dist_matrix[i,j]
cat_sim <- inverse_freq_measure[i,j]
overall_sims_norm[i,j] <- lambda * (num_sim/sigma_num)
+ (1-lambda) * (cat_sim/sigma_cat)
}
}
}
#View(overall_sims_norm)
# Top 10 values for Overall similarity normalized
getTopOrBottom10(overall_sims_norm)
# Bottom 10 values for Overall similarity normalized
getTopOrBottom10(overall_sims_norm, top = FALSE)
|
# Plot 4 of the household power consumption project: a 2x2 panel of
# Global Active Power, Voltage, Energy Sub Metering and Global Reactive
# Power for 2007-02-01 and 2007-02-02, copied to plot4.png.
projDates <- c("1/2/2007", "2/2/2007")
# Semicolon-separated source file; stray empty argument in the original
# call removed.
a <- read.table(file = "../household_power_consumption.txt", header = TRUE,
                sep = ";", stringsAsFactors = FALSE)
myDF <- as.data.frame(a)
projData <- myDF[which(myDF$Date %in% projDates), ]
# Paste Date and Time (columns 1 and 2) together and parse as a timestamp;
# the format string has no separator to match the separator-less paste0().
projData$Time <- strptime(do.call(paste0, projData[c(1, 2)]), "%d/%m/%Y%H:%M:%S")
projData$Date <- as.Date(projData$Date, "%d/%m/%Y")
par(mfrow = c(2, 2))
with(projData, {
  # NOTE(review): the /500 rescales Global Active Power -- presumably to
  # match the course reference figure; confirm against the assignment spec.
  plot(x = Time, y = as.numeric(Global_active_power)/500, type = "l",
       ylab = "Global Active Power", xlab = "")
  plot(x = Time, y = as.numeric(Voltage), type = "l",
       ylab = "Voltage", xlab = "datetime")
  plot(x = Time, y = as.numeric(Sub_metering_1), type = "l",
       ylab = "Energy Sub Metering", xlab = "")
  points(x = Time, y = as.numeric(Sub_metering_2), type = "l", col = "red")
  points(x = Time, y = as.numeric(Sub_metering_3), type = "l", col = "blue")
  # BUG FIX: legend colours now match the lines drawn above --
  # Sub_metering_2 is red and Sub_metering_3 is blue (the original legend
  # swapped them).
  legend("topright", pch = NA, lwd = 3, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(x = Time, y = as.numeric(Global_reactive_power), type = "l",
       ylab = "Global_reactive_power", xlab = "datetime")
})
# Copy the on-screen device to a PNG file, then close it.
dev.copy(png, "plot4.png")
dev.off()
dev.off()
| /Plot4.R | no_license | leaflucas/Coursera_Exploratory_Data_Analysis_Project_1 | R | false | false | 1,124 | r | projDates <- c ("1/2/2007", "2/2/2007" )
a <- read.table ( file = "../household_power_consumption.txt", header = TRUE, sep =";", , stringsAsFactors=FALSE )
myDF <- as.data.frame(a)
projData <- myDF[which(myDF$Date %in% projDates),]
projData$Time <- strptime(do.call(paste0,projData[c(1,2)]), "%d/%m/%Y%H:%M:%S")
projData$Date <- as.Date(projData$Date, "%d/%m/%Y")
par(mfrow = c(2, 2))
with (projData ,{
plot (x=Time, y=as.numeric(Global_active_power)/500, type = "l" ,ylab = "Global Active Power" , xlab ="")
plot (x=Time, y=as.numeric(Voltage), type = "l" ,ylab = "Voltage" , xlab ="datetime")
plot (x=Time, y=as.numeric(Sub_metering_1), type = "l", ylab = "Energy Sub Metering" , xlab ="")
points(x=Time, y=as.numeric(Sub_metering_2), type="l", col="red")
points(x=Time, y=as.numeric(Sub_metering_3), type="l", col="blue")
legend("topright", pch = NA, lwd = 3, col = c("black", "blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot (x=Time, y=as.numeric(Global_reactive_power), type = "l" ,ylab = "Global_reactive_power" , xlab ="datetime")
})
dev.copy(png,"plot4.png")
dev.off()
|
#' Highlight HTML Text
#'
#' Wraps text in a \code{<font>} tag with a background-color style.
#'
#' @param text A character vector or text copied to the clipboard. Default is to
#' read from the clipboard.
#' @param color A character string taken from R's built-in color names or a
#' hexidecimal color.
#' @param copy2clip logical. If \code{TRUE} attempts to copy the output to the
#' clipboard.
#' @param print logical. If TRUE \code{\link[base]{cat}} prints the output to the
#' console. If \code{FALSE} returns to the console.
#' @export
#' @examples
#' cat(HL("Do not trust robots!"), "They are bent on destruction.")
#' cat(HL("Jake is a cookie scientist,", color="pink"), "an honrable profession.")
HL <-
function(text = "clipboard", color = "yellow", copy2clip = interactive(),
    print = FALSE) {
    # BUG FIX: guard the clipboard sentinel with identical() so a
    # character vector of length > 1 (a documented, valid `text`) no
    # longer errors in the length-1 `if` comparison.
    if (identical(text, "clipboard")) {
        text <- read_clip()
    }
    a <- "<font style=\"background-color: "
    # Named R colours (e.g. "yellow") are converted to hex; strings already
    # containing "#" are assumed to be hex codes and passed through.
    if (!grepl("#", color, fixed = TRUE)) {
        color <- col2hex(color)
    }
    b <- ";\">"
    d <- "</font>"
    x <- paste0(a, color, b, text, d)
    if (copy2clip) {
        write_clip(x)
    }
    # prin() handles cat-vs-return depending on `print`.
    prin(x = x, print = print)
}
| /R/HL.R | no_license | 2ndFloorStuff/reports | R | false | false | 1,141 | r | #' Highlight HTML Text
#'
#' Wraps text with a background color specific font tags.
#'
#' @param text A character vector or text copied to the clipboard. Default is to
#' read from the clipboard.
#' @param color A character string taken from R's built-in color names or a
#' hexidecimal color.
#' @param copy2clip logical. If \code{TRUE} attempts to copy the output to the
#' clipboard.
#' @param print logical. If TRUE \code{\link[base]{cat}} prints the output to the
#' console. If \code{FALSE} returns to the console.
#' @export
#' @examples
#' cat(HL("Do not trust robots!"), "They are bent on destruction.")
#' cat(HL("Jake is a cookie scientist,", color="pink"), "an honrable profession.")
HL <-
function(text = "clipboard", color = "yellow", copy2clip = interactive(),
print = FALSE) {
if (text == "clipboard") {
text <- read_clip()
}
a <- "<font style=\"background-color: "
if (!grepl("#", color)) {
color <- col2hex(color)
}
b <- ";\">"
d <- "</font>"
x <- paste0(a, color, b, text, d)
if(copy2clip){
write_clip(x)
}
prin(x = x, print = print)
}
|
#Data
# Monthly revenue and expenses for one year (12 values each, Jan-Dec);
# element i of each vector refers to the same month i.
revenue <- c(14574.49, 7606.46, 8611.41, 9175.41, 8058.65, 8105.44, 11496.28, 9766.09, 10305.32, 14379.96, 10713.97, 15433.50)
expenses <- c(12051.82, 5695.07, 12319.20, 12089.72, 8658.57, 840.20, 3285.73, 5821.12, 6976.93, 16618.61, 10054.37, 3803.96)
| /3_R_for_datascience/exercises/section_3/problem_3_homework_dataset.R | no_license | Shamsur-Rahaman/Data_science | R | false | false | 260 | r | #Data
revenue <- c(14574.49, 7606.46, 8611.41, 9175.41, 8058.65, 8105.44, 11496.28, 9766.09, 10305.32, 14379.96, 10713.97, 15433.50)
expenses <- c(12051.82, 5695.07, 12319.20, 12089.72, 8658.57, 840.20, 3285.73, 5821.12, 6976.93, 16618.61, 10054.37, 3803.96)
|
#' @title Get marginal effects from model terms
#' @name ggeffect
#'
#' @description
#' \code{ggeffect()} computes marginal effects of model terms. It internally
#' calls \code{\link[effects]{Effect}} and puts the result into tidy data
#' frames. \code{eff()} is an alias for \code{ggeffect()}.
#'
#' @param model A fitted model object, or a list of model objects. Any model
#' that is supported by the \CRANpkg{effects}-package should work.
#' @param ... Further arguments passed down to \code{\link[effects]{Effect}}.
#' @inheritParams ggpredict
#'
#' @return
#' A tibble (with \code{ggeffects} class attribute) with consistent data columns:
#' \describe{
#' \item{\code{x}}{the values of the model predictor to which the effect pertains, used as x-position in plots.}
#' \item{\code{predicted}}{the predicted values, used as y-position in plots.}
#' \item{\code{conf.low}}{the lower bound of the confidence interval for the predicted values.}
#' \item{\code{conf.high}}{the upper bound of the confidence interval for the predicted values.}
#' \item{\code{group}}{the grouping level from the second term in \code{terms}, used as grouping-aesthetics in plots.}
#' \item{\code{facet}}{the grouping level from the third term in \code{terms}, used to indicate facets in plots.}
#' }
#'
#' @note
#' The results of \code{ggeffect()} and \code{ggpredict()} are usually (almost)
#' identical. It's just that \code{ggpredict()} calls \code{predict()}, while
#' \code{ggeffect()} calls \code{\link[effects]{Effect}} to compute marginal
#' effects at the mean. However, results may differ when using factors inside
#' the formula: in such cases, \code{Effect()} takes the "mean" value of factors
#' (i.e. computes a kind of "average" value, which represents the proportions
#' of each factor's category), while \code{ggpredict()} uses the base
#' (reference) level when holding these predictors at a constant value.
#'
#' @examples
#' data(efc)
#' fit <- lm(barthtot ~ c12hour + neg_c_7 + c161sex + c172code, data = efc)
#' ggeffect(fit, terms = "c12hour")
#'
#' mydf <- ggeffect(fit, terms = c("c12hour", "c161sex"))
#' plot(mydf)
#'
#' @importFrom purrr map
#' @importFrom sjstats pred_vars resp_var
#' @importFrom dplyr if_else case_when bind_rows filter mutate
#' @importFrom tibble as_tibble
#' @importFrom sjmisc is_empty str_contains
#' @importFrom stats na.omit
#' @importFrom effects Effect
#' @importFrom sjlabelled as_numeric
#' @importFrom rlang .data
#' @export
ggeffect <- function(model, terms, ci.lvl = .95, ...) {
  # A single model goes straight to the workhorse; a list of models is
  # handled element-wise. Both branches forward all arguments unchanged.
  if (!inherits(model, "list")) {
    return(ggeffect_helper(model, terms, ci.lvl, ...))
  }
  purrr::map(model, function(m) ggeffect_helper(m, terms, ci.lvl, ...))
}
#' @importFrom sjstats model_frame
# Workhorse for ggeffect(): computes marginal effects for one fitted model
# via effects::Effect() and shapes the result into a tidy tibble with
# columns x / predicted / conf.low / conf.high / group (plus facet when a
# third term is given). Relies on package-internal helpers (check_vars,
# get_model_function, get_glm_family, get_xlevels_vector, get_clear_vars,
# get_all_labels, add_groupvar_labels, groupvar_to_label, get_raw_data,
# set_attributes_and_class) defined elsewhere in this package.
ggeffect_helper <- function(model, terms, ci.lvl, ...) {
# check terms argument
terms <- check_vars(terms)
# get link-function
fun <- get_model_function(model)
# get model frame
fitfram <- sjstats::model_frame(model)
# get model family
faminfo <- get_glm_family(model)
# create logical for family
poisson_fam <- faminfo$is_pois
binom_fam <- faminfo$is_bin
# check whether we have an argument "transformation" for effects()-function
# in this case, we need another default title, since we have
# non-transformed effects
add.args <- lapply(match.call(expand.dots = F)$`...`, function(x) x)
# check whether we have a "transformation" argument
t.add <- which(names(add.args) == "transformation")
# if we have a "transformation" argument, and it's NULL,
# no transformation of scale
no.transform <- !sjmisc::is_empty(t.add) && is.null(eval(add.args[[t.add]]))
# check if we have specific levels in square brackets
x.levels <- get_xlevels_vector(terms)
# clear argument from brackets
terms <- get_clear_vars(terms)
# prepare getting unique values of predictors,
# which are passed to the allEffects-function
xl <- list()
# create levels for all terms of interest
for (t in terms) {
# get unique values
dummy <- list(x = sort(unique(stats::na.omit(fitfram[[t]]))))
# name list, needed for effect-function
names(dummy) <- t
# create list for "xlevels" argument of allEffects function
xl <- c(xl, dummy)
}
# compute marginal effects for each model term
eff <- effects::Effect(focal.predictors = terms, mod = model, xlevels = xl, confidence.level = ci.lvl, ...)
# get term, for which effects were calculated
t <- eff$term
# build data frame, with raw values
# predicted response and lower/upper ci
tmp <-
data.frame(
x = eff$x[[terms[1]]],
y = eff$fit,
lower = eff$lower,
upper = eff$upper
)
# back-transform predictions to the response scale for glms, unless the
# caller explicitly suppressed transformation
if (fun == "glm" && !no.transform) {
tmp <- dplyr::mutate(
tmp,
y = eff$transformation$inverse(eta = .data$y),
lower = eff$transformation$inverse(eta = .data$lower),
upper = eff$transformation$inverse(eta = .data$upper)
)
}
# define column names
cnames <- c("x", "predicted", "conf.low", "conf.high", "group")
# init legend labels
legend.labels <- NULL
# get axis titles and labels
all.labels <- get_all_labels(fitfram, terms, get_model_function(model), binom_fam, poisson_fam, no.transform)
# with or w/o grouping factor?
if (length(terms) == 1) {
# convert to factor for proper legend
tmp$group <- sjmisc::to_factor(1)
} else if (length(terms) == 2) {
tmp <- dplyr::mutate(tmp, group = sjmisc::to_factor(eff$x[[terms[2]]]))
} else {
# three terms: second is the grouping variable, third the facet variable
tmp <- dplyr::mutate(
tmp,
group = sjmisc::to_factor(eff$x[[terms[2]]]),
facet = sjmisc::to_factor(eff$x[[terms[3]]])
)
cnames <- c(cnames, "facet")
}
# if we have any x-levels, go on and filter
if (!sjmisc::is_empty(x.levels) && !is.null(x.levels)) {
# slice data, only select observations that have specified
# levels for the grouping variables
filter.remove <- tmp$group %in% x.levels[[1]]
tmp <- dplyr::filter(tmp, !! filter.remove)
# slice data, only select observations that have specified
# levels for the facet variables
if (length(x.levels) > 1) {
filter.remove <- tmp$facet %in% x.levels[[2]]
tmp <- dplyr::filter(tmp, !! filter.remove)
}
}
# label grouping variables, for axis and legend labels in plot
if (length(terms) > 1) {
# grouping variable may not be labelled
# do this here, so we convert to labelled factor later
tmp <- add_groupvar_labels(tmp, fitfram, terms)
# convert to factor for proper legend
tmp <- groupvar_to_label(tmp)
# check if we have legend labels
legend.labels <- sjlabelled::get_labels(tmp$group, attr.only = FALSE, drop.unused = TRUE)
}
# convert to tibble
mydf <- tibble::as_tibble(tmp)
# add raw data as well
attr(mydf, "rawdata") <- get_raw_data(model, fitfram, terms)
# set attributes with necessary information (titles, labels, family,
# whether x is a factor) consumed by the plot() method
mydf <-
set_attributes_and_class(
data = mydf,
model = model,
t.title = all.labels$t.title,
x.title = all.labels$x.title,
y.title = all.labels$y.title,
l.title = all.labels$l.title,
legend.labels = legend.labels,
x.axis.labels = all.labels$axis.labels,
faminfo = faminfo,
x.is.factor = ifelse(is.factor(fitfram[[t]]), "1", "0"),
full.data = "0"
)
# set consistent column names
colnames(mydf) <- cnames
# make x numeric
mydf$x <- sjlabelled::as_numeric(mydf$x, keep.labels = FALSE)
mydf
}
#' @rdname ggeffect
#' @export
eff <- function(model, terms, ci.lvl = .95, ...) {
  # Thin alias: delegates directly to ggeffect() with identical arguments.
  ggeffect(model = model, terms = terms, ci.lvl = ci.lvl, ...)
}
| /R/ggeffect.R | no_license | guhjy/ggeffects | R | false | false | 7,825 | r | #' @title Get marginal effects from model terms
#' @name ggeffect
#'
#' @description
#' \code{ggeffect()} computes marginal effects of model terms. It internally
#' calls \code{\link[effects]{Effect}} and puts the result into tidy data
#' frames. \code{eff()} is an alias for \code{ggeffect()}.
#'
#' @param model A fitted model object, or a list of model objects. Any model
#' that is supported by the \CRANpkg{effects}-package should work.
#' @param ... Further arguments passed down to \code{\link[effects]{Effect}}.
#' @inheritParams ggpredict
#'
#' @return
#' A tibble (with \code{ggeffects} class attribute) with consistent data columns:
#' \describe{
#' \item{\code{x}}{the values of the model predictor to which the effect pertains, used as x-position in plots.}
#' \item{\code{predicted}}{the predicted values, used as y-position in plots.}
#' \item{\code{conf.low}}{the lower bound of the confidence interval for the predicted values.}
#' \item{\code{conf.high}}{the upper bound of the confidence interval for the predicted values.}
#' \item{\code{group}}{the grouping level from the second term in \code{terms}, used as grouping-aesthetics in plots.}
#' \item{\code{facet}}{the grouping level from the third term in \code{terms}, used to indicate facets in plots.}
#' }
#'
#' @note
#' The results of \code{ggeffect()} and \code{ggpredict()} are usually (almost)
#' identical. It's just that \code{ggpredict()} calls \code{predict()}, while
#' \code{ggeffect()} calls \code{\link[effects]{Effect}} to compute marginal
#' effects at the mean. However, results may differ when using factors inside
#' the formula: in such cases, \code{Effect()} takes the "mean" value of factors
#' (i.e. computes a kind of "average" value, which represents the proportions
#' of each factor's category), while \code{ggpredict()} uses the base
#' (reference) level when holding these predictors at a constant value.
#'
#' @examples
#' data(efc)
#' fit <- lm(barthtot ~ c12hour + neg_c_7 + c161sex + c172code, data = efc)
#' ggeffect(fit, terms = "c12hour")
#'
#' mydf <- ggeffect(fit, terms = c("c12hour", "c161sex"))
#' plot(mydf)
#'
#' @importFrom purrr map
#' @importFrom sjstats pred_vars resp_var
#' @importFrom dplyr if_else case_when bind_rows filter mutate
#' @importFrom tibble as_tibble
#' @importFrom sjmisc is_empty str_contains
#' @importFrom stats na.omit
#' @importFrom effects Effect
#' @importFrom sjlabelled as_numeric
#' @importFrom rlang .data
#' @export
ggeffect <- function(model, terms, ci.lvl = .95, ...) {
if (inherits(model, "list"))
purrr::map(model, ~ggeffect_helper(.x, terms, ci.lvl, ...))
else
ggeffect_helper(model, terms, ci.lvl, ...)
}
#' @importFrom sjstats model_frame
# Workhorse for ggeffect(): computes marginal effects for a single model via
# effects::Effect() and shapes the result into a tidy tibble with columns
# x / predicted / conf.low / conf.high / group (and optionally facet).
ggeffect_helper <- function(model, terms, ci.lvl, ...) {
  # check terms argument
  terms <- check_vars(terms)
  # get link-function
  fun <- get_model_function(model)
  # get model frame
  fitfram <- sjstats::model_frame(model)
  # get model family
  faminfo <- get_glm_family(model)
  # create logicals for family (used below for axis titles/labels)
  poisson_fam <- faminfo$is_pois
  binom_fam <- faminfo$is_bin
  # check whether we have an argument "transformation" for effects()-function
  # in this case, we need another default title, since we have
  # non-transformed effects
  add.args <- lapply(match.call(expand.dots = F)$`...`, function(x) x)
  # check whether we have a "transformation" argument
  t.add <- which(names(add.args) == "transformation")
  # if we have a "transformation" argument, and it's NULL,
  # no transformation of scale
  no.transform <- !sjmisc::is_empty(t.add) && is.null(eval(add.args[[t.add]]))
  # check if we have specific levels in square brackets
  x.levels <- get_xlevels_vector(terms)
  # clear argument from brackets
  terms <- get_clear_vars(terms)
  # prepare getting unique values of predictors,
  # which are passed to the allEffects-function
  xl <- list()
  # create levels for all terms of interest
  for (t in terms) {
    # get unique values
    dummy <- list(x = sort(unique(stats::na.omit(fitfram[[t]]))))
    # name list, needed for effect-function
    names(dummy) <- t
    # create list for "xlevels" argument of allEffects function
    xl <- c(xl, dummy)
  }
  # compute marginal effects for each model term
  eff <- effects::Effect(focal.predictors = terms, mod = model, xlevels = xl, confidence.level = ci.lvl, ...)
  # get term, for which effects were calculated
  t <- eff$term
  # build data frame, with raw values
  # predicted response and lower/upper ci
  tmp <-
    data.frame(
      x = eff$x[[terms[1]]],
      y = eff$fit,
      lower = eff$lower,
      upper = eff$upper
    )
  # for GLMs, back-transform predictions and CI boundaries to the response
  # scale, unless the caller explicitly disabled transformation (see above)
  if (fun == "glm" && !no.transform) {
    tmp <- dplyr::mutate(
      tmp,
      y = eff$transformation$inverse(eta = .data$y),
      lower = eff$transformation$inverse(eta = .data$lower),
      upper = eff$transformation$inverse(eta = .data$upper)
    )
  }
  # define column names
  cnames <- c("x", "predicted", "conf.low", "conf.high", "group")
  # init legend labels
  legend.labels <- NULL
  # get axis titles and labels
  all.labels <- get_all_labels(fitfram, terms, get_model_function(model), binom_fam, poisson_fam, no.transform)
  # with or w/o grouping factor?
  if (length(terms) == 1) {
    # no grouping term: constant dummy group, so the factor is well-defined
    # for a proper legend
    tmp$group <- sjmisc::to_factor(1)
  } else if (length(terms) == 2) {
    # second term is the grouping variable
    tmp <- dplyr::mutate(tmp, group = sjmisc::to_factor(eff$x[[terms[2]]]))
  } else {
    # three terms: second term is the group, third term is the facet
    tmp <- dplyr::mutate(
      tmp,
      group = sjmisc::to_factor(eff$x[[terms[2]]]),
      facet = sjmisc::to_factor(eff$x[[terms[3]]])
    )
    cnames <- c(cnames, "facet")
  }
  # if we have any x-levels, go on and filter
  if (!sjmisc::is_empty(x.levels) && !is.null(x.levels)) {
    # slice data, only select observations that have specified
    # levels for the grouping variables
    filter.remove <- tmp$group %in% x.levels[[1]]
    tmp <- dplyr::filter(tmp, !! filter.remove)
    # slice data, only select observations that have specified
    # levels for the facet variables
    if (length(x.levels) > 1) {
      filter.remove <- tmp$facet %in% x.levels[[2]]
      tmp <- dplyr::filter(tmp, !! filter.remove)
    }
  }
  # label grouping variables, for axis and legend labels in plot
  if (length(terms) > 1) {
    # grouping variable may not be labelled
    # do this here, so we convert to labelled factor later
    tmp <- add_groupvar_labels(tmp, fitfram, terms)
    # convert to factor for proper legend
    tmp <- groupvar_to_label(tmp)
    # check if we have legend labels
    legend.labels <- sjlabelled::get_labels(tmp$group, attr.only = FALSE, drop.unused = TRUE)
  }
  # convert to tibble
  mydf <- tibble::as_tibble(tmp)
  # add raw data as well
  attr(mydf, "rawdata") <- get_raw_data(model, fitfram, terms)
  # set attributes with necessary information (titles, labels, family flags)
  # consumed later by the plot() method
  mydf <-
    set_attributes_and_class(
      data = mydf,
      model = model,
      t.title = all.labels$t.title,
      x.title = all.labels$x.title,
      y.title = all.labels$y.title,
      l.title = all.labels$l.title,
      legend.labels = legend.labels,
      x.axis.labels = all.labels$axis.labels,
      faminfo = faminfo,
      x.is.factor = ifelse(is.factor(fitfram[[t]]), "1", "0"),
      full.data = "0"
    )
  # set consistent column names
  colnames(mydf) <- cnames
  # make x numeric
  mydf$x <- sjlabelled::as_numeric(mydf$x, keep.labels = FALSE)
  mydf
}
#' @rdname ggeffect
#' @export
eff <- function(model, terms, ci.lvl = .95, ...) {
  # Short alias for ggeffect(); forwards all arguments unchanged.
  ggeffect(model = model, terms = terms, ci.lvl = ci.lvl, ...)
}
|
##
# Testing glm modeling performance with wide Arcene dataset with and without strong rules.
# Test for JIRA PUB-853
# 'Early termination in glm resulting in underfitting'
##

# Runit test body: fits a binomial GLM (alpha = 1, lambda_search = FALSE,
# i.e. "strong rules off" per the message below) on the first 3250 Arcene
# columns and checks that validation AUC beats random guessing (0.5).
test <- function() {
  print("Reading in Arcene training data for binomial modeling.")
  arcene.train = h2o.uploadFile(locate("smalldata/arcene/arcene_train.data"), destination_frame="arcene.train")
  arcene.label = h2o.uploadFile(locate("smalldata/arcene/arcene_train_labels.labels"), destination_frame="arcene.label")
  # Recode labels to 0/1: 1 stays 1, everything else becomes 0
  arcene.train.label = h2o.assign(data=ifelse(arcene.label==1,1,0), key="arcene.train.label")
  colnames(arcene.train.label) <- 'arcene.train.label'
  arcene.train.full = h2o.assign(data=h2o.cbind(arcene.train,arcene.train.label),key="arcene.train.full")

  print("Reading in Arcene validation data.")
  arcene.valid = h2o.uploadFile(locate("smalldata/arcene/arcene_valid.data"), destination_frame="arcene.valid", header=FALSE)
  # NOTE: reuses the variable and the "arcene.label" frame key from above
  arcene.label = h2o.uploadFile(locate("smalldata/arcene/arcene_valid_labels.labels"), destination_frame="arcene.label", header=FALSE)
  arcene.valid.label = h2o.assign(data=ifelse(arcene.label==1,1,0), key="arcene.valid.label")
  colnames(arcene.valid.label) <- 'arcene.train.label' # have to have the same name as response in training!
  arcene.valid.full = h2o.assign(data=h2o.cbind(arcene.valid,arcene.valid.label),key="arcene.valid.full")

  print("Run model on 3250 columns of Arcene with strong rules off.")
  # NOTE(review): the timing below (and the predictions further down) are
  # captured but never inspected — presumably kept to exercise the API; confirm.
  time.noSR.3250 <- system.time(model.noSR.3250 <- h2o.glm(x=c(1:3250), y="arcene.train.label", training_frame=arcene.train.full, family="binomial", lambda_search=FALSE, alpha=1, nfolds=0))

  print("Test model on validation set.")
  predict.noSR.3250 <- predict(model.noSR.3250, arcene.valid.full)

  print("Check performance of predictions.")
  perf.noSR.3250 <- h2o.performance(model.noSR.3250, arcene.valid.full)

  print("Check that prediction AUC better than guessing (0.5).")
  stopifnot(h2o.auc(perf.noSR.3250) > 0.5)
}
# Register and run the test via the h2o runit harness
doTest("Testing glm modeling performance with wide Arcene dataset with and without strong rules", test)
| /h2o-r/tests/testdir_algos/glm/runit_GLM_wide_dataset_large.R | permissive | StephRoark/h2o-3 | R | false | false | 2,208 | r | ##
# Testing glm modeling performance with wide Arcene dataset with and without strong rules.
# Test for JIRA PUB-853
# 'Early termination in glm resulting in underfitting'
##

# Fit a binomial GLM on the first 3250 Arcene columns with strong rules off
# and require validation AUC above chance.
test <- function() {
  print("Reading in Arcene training data for binomial modeling.")
  train.feats <- h2o.uploadFile(locate("smalldata/arcene/arcene_train.data"), destination_frame = "arcene.train")
  train.raw.labels <- h2o.uploadFile(locate("smalldata/arcene/arcene_train_labels.labels"), destination_frame = "arcene.label")
  train.labels <- h2o.assign(data = ifelse(train.raw.labels == 1, 1, 0), key = "arcene.train.label")
  colnames(train.labels) <- 'arcene.train.label'
  train.frame <- h2o.assign(data = h2o.cbind(train.feats, train.labels), key = "arcene.train.full")

  print("Reading in Arcene validation data.")
  valid.feats <- h2o.uploadFile(locate("smalldata/arcene/arcene_valid.data"), destination_frame = "arcene.valid", header = FALSE)
  valid.raw.labels <- h2o.uploadFile(locate("smalldata/arcene/arcene_valid_labels.labels"), destination_frame = "arcene.label", header = FALSE)
  valid.labels <- h2o.assign(data = ifelse(valid.raw.labels == 1, 1, 0), key = "arcene.valid.label")
  colnames(valid.labels) <- 'arcene.train.label' # have to have the same name as reponse in training!
  valid.frame <- h2o.assign(data = h2o.cbind(valid.feats, valid.labels), key = "arcene.valid.full")

  print("Run model on 3250 columns of Arcene with strong rules off.")
  fit.time <- system.time(
    glm.fit <- h2o.glm(x = c(1:3250), y = "arcene.train.label", training_frame = train.frame,
                       family = "binomial", lambda_search = FALSE, alpha = 1, nfolds = 0)
  )

  print("Test model on validation set.")
  preds <- predict(glm.fit, valid.frame)

  print("Check performance of predictions.")
  perf <- h2o.performance(glm.fit, valid.frame)

  print("Check that prediction AUC better than guessing (0.5).")
  stopifnot(h2o.auc(perf) > 0.5)
}
doTest("Testing glm modeling performance with wide Arcene dataset with and without strong rules", test)
|
# reactive utility functions referencing global experiment data
# returns a tibble containing the currently configured display form of the region/experiment
# [ exp.label, region.disp, region.abbrev ]
region.names <- reactive({
  # Choose the display column once, then build the tibble a single time.
  disp <- if (input$opt.region.disp == 'region') {
    experiments$exp.title
  } else {
    experiments$exp.label
  }
  tibble(exp.label = experiments$exp.label,
         region.disp = disp,
         region.abbrev = experiments$exp.abbrev)
})
# returns a tibble containing the currently configured display form of the cluster
# [ exp.label, cluster, cluster.disp, class ] plus a row id c.id
cluster.names <- reactive({
  (
    if (input$opt.cluster.disp=='numbers') {
      # display the raw cluster number
      tibble(exp.label=cluster.names_$exp.label, cluster=cluster.names_$cluster, cluster.disp=cluster.names_$cluster, class=cluster.names_$class)
    } else { # annotated or all
      df <- tibble(exp.label=cluster.names_$exp.label, cluster=cluster.names_$cluster, cluster.disp=cluster.names_$cluster_name, class=cluster.names_$class)
      if (input$opt.cluster.disp=='all') {
        # "all" appends the numeric id to the annotated name
        mutate(df, cluster.disp=sprintf("%s [#%s]", cluster.disp, cluster))
      } else
        df
    }
  ) %>% mutate(c.id=seq_along(exp.label))  # seq_along() is zero-row safe, unlike 1:length()
})
# returns a tibble for labeling clusters in plots. This is similar, but slightly different than cluster.names().
# The disp result is whatever cluster.names() returns as cluster.disp (which could be a number).
# The number result is the cluster.
# And none returns NA to inhibit plotting
cluster.labels <- reactive({
  # Dispatch on the display-mode string; the trailing unnamed argument is
  # switch()'s default and preserves the original error on unknown values.
  switch(input$opt.plot.label,
         disp   = cluster.names(),
         number = mutate(cluster.names(), cluster.disp = cluster),
         none   = mutate(cluster.names(), cluster.disp = NA),
         stop("Unknown opt.plot.label"))
})
# returns a tibble containing the currently configured display form of the subcluster
# [ exp.label, subcluster, subcluster.disp ] plus a row id sc.id
subcluster.names <- reactive({
  (
    if (input$opt.cluster.disp=='numbers') {
      tibble(exp.label=subcluster.names_$exp.label, subcluster=subcluster.names_$subcluster, subcluster.disp=subcluster.names_$subcluster)
    } else { # annotated or all
      # choose between the common and the full annotated name
      if (input$use.common.name)
        df <- tibble(exp.label=subcluster.names_$exp.label, subcluster=subcluster.names_$subcluster, subcluster.disp=subcluster.names_$subcluster_name)
      else
        df <- tibble(exp.label=subcluster.names_$exp.label, subcluster=subcluster.names_$subcluster, subcluster.disp=subcluster.names_$full_name)
      if (input$opt.cluster.disp=='all') {
        # "all" appends the numeric id to the annotated name
        mutate(df, subcluster.disp=sprintf("%s [#%s]", subcluster.disp, subcluster))
      } else
        df
    }
  ) %>% mutate(sc.id=seq_along(exp.label))  # seq_along() is zero-row safe, unlike 1:length()
})
subcluster.labels <- reactive({
  # Same display-mode dispatch as cluster.labels(), but for subclusters.
  switch(input$opt.plot.label,
         disp   = subcluster.names(),
         number = mutate(subcluster.names(), subcluster.disp = subcluster),
         none   = mutate(subcluster.names(), subcluster.disp = NA),
         stop("Unknown opt.plot.label"))
})
# to allow lookups of cluster from subcluster or vice-versa
all.subclusters <- reactive({
  # Same three columns as before; explicit $ access instead of with().
  tibble(exp.label = cell.types$exp.label,
         cluster = cell.types$cluster,
         subcluster = cell.types$subcluster)
})
# a tibble with cluster and subcluster combined
# [ exp.label, cx, cx.disp ]
cx.names <- reactive({
  # Stack the cluster-level and subcluster-level name tables into one
  # lookup, normalising the id column to character.
  clusters <- dplyr::select(cluster.names(), exp.label, cx = cluster, cx.disp = cluster.disp)
  subclusters <- dplyr::select(subcluster.names(), exp.label, cx = subcluster, cx.disp = subcluster.disp)
  rbind(clusters, subclusters) %>% mutate(cx = as.character(cx))
})
| /display_labels.R | no_license | onionpork/dropviz | R | false | false | 3,624 | r | # reactive utility functions referencing global experiment data
# returns a tibble containing the currently configured display form of the region/experiment
# [ exp.label, region.disp, region.abbrev ]
region.names <- reactive({
  # Choose the display column once, then build the tibble a single time.
  disp <- if (input$opt.region.disp == 'region') {
    experiments$exp.title
  } else {
    experiments$exp.label
  }
  tibble(exp.label = experiments$exp.label,
         region.disp = disp,
         region.abbrev = experiments$exp.abbrev)
})
# returns a tibble containing the currently configured display form of the cluster
# [ exp.label, cluster, cluster.disp, class ] plus a row id c.id
cluster.names <- reactive({
  (
    if (input$opt.cluster.disp=='numbers') {
      # display the raw cluster number
      tibble(exp.label=cluster.names_$exp.label, cluster=cluster.names_$cluster, cluster.disp=cluster.names_$cluster, class=cluster.names_$class)
    } else { # annotated or all
      df <- tibble(exp.label=cluster.names_$exp.label, cluster=cluster.names_$cluster, cluster.disp=cluster.names_$cluster_name, class=cluster.names_$class)
      if (input$opt.cluster.disp=='all') {
        # "all" appends the numeric id to the annotated name
        mutate(df, cluster.disp=sprintf("%s [#%s]", cluster.disp, cluster))
      } else
        df
    }
  ) %>% mutate(c.id=seq_along(exp.label))  # seq_along() is zero-row safe, unlike 1:length()
})
# returns a tibble for labeling clusters in plots. This is similar, but slightly different than cluster.names().
# The disp result is whatever cluster.names() returns as cluster.disp (which could be a number).
# The number result is the cluster.
# And none returns NA to inhibit plotting
cluster.labels <- reactive({
  # Dispatch on the display-mode string; the trailing unnamed argument is
  # switch()'s default and preserves the original error on unknown values.
  switch(input$opt.plot.label,
         disp   = cluster.names(),
         number = mutate(cluster.names(), cluster.disp = cluster),
         none   = mutate(cluster.names(), cluster.disp = NA),
         stop("Unknown opt.plot.label"))
})
# returns a tibble containing the currently configured display form of the subcluster
# [ exp.label, subcluster, subcluster.disp ] plus a row id sc.id
subcluster.names <- reactive({
  (
    if (input$opt.cluster.disp=='numbers') {
      tibble(exp.label=subcluster.names_$exp.label, subcluster=subcluster.names_$subcluster, subcluster.disp=subcluster.names_$subcluster)
    } else { # annotated or all
      # choose between the common and the full annotated name
      if (input$use.common.name)
        df <- tibble(exp.label=subcluster.names_$exp.label, subcluster=subcluster.names_$subcluster, subcluster.disp=subcluster.names_$subcluster_name)
      else
        df <- tibble(exp.label=subcluster.names_$exp.label, subcluster=subcluster.names_$subcluster, subcluster.disp=subcluster.names_$full_name)
      if (input$opt.cluster.disp=='all') {
        # "all" appends the numeric id to the annotated name
        mutate(df, subcluster.disp=sprintf("%s [#%s]", subcluster.disp, subcluster))
      } else
        df
    }
  ) %>% mutate(sc.id=seq_along(exp.label))  # seq_along() is zero-row safe, unlike 1:length()
})
subcluster.labels <- reactive({
  # Same display-mode dispatch as cluster.labels(), but for subclusters.
  switch(input$opt.plot.label,
         disp   = subcluster.names(),
         number = mutate(subcluster.names(), subcluster.disp = subcluster),
         none   = mutate(subcluster.names(), subcluster.disp = NA),
         stop("Unknown opt.plot.label"))
})
# to allow lookups of cluster from subcluster or vice-versa
all.subclusters <- reactive({
  # Same three columns as before; explicit $ access instead of with().
  tibble(exp.label = cell.types$exp.label,
         cluster = cell.types$cluster,
         subcluster = cell.types$subcluster)
})
# a tibble with cluster and subcluster combined
# [ exp.label, cx, cx.disp ]
cx.names <- reactive({
  # Stack the cluster-level and subcluster-level name tables into one
  # lookup, normalising the id column to character.
  clusters <- dplyr::select(cluster.names(), exp.label, cx = cluster, cx.disp = cluster.disp)
  subclusters <- dplyr::select(subcluster.names(), exp.label, cx = subcluster, cx.disp = subcluster.disp)
  rbind(clusters, subclusters) %>% mutate(cx = as.character(cx))
})
|
# Demo script: load the mock data set from the project directory.
# NOTE(review): a hard-coded absolute setwd() only works on the author's
# machine — consider relative paths or a project-root helper; confirm intent.
setwd("C:/Users/Andrea Fox/OneDrive/Documents/R/DSC520 Statistics Using R")
setwd("/Users/kpeng/Sites/Developing/nyco-github-demo")
# header=TRUE treats the first CSV row as column names
data=read.csv("mock-data.csv", header=TRUE)
| /edit-data.R | no_license | CityOfNewYork/nyco-git-demo | R | false | false | 102 | r | setwd("/Users/kpeng/Sites/Developing/nyco-github-demo")
# Load the mock data set; header=TRUE treats the first CSV row as column names
data=read.csv("mock-data.csv", header=TRUE)
|
#' @title Tuning Functional Neural Networks
#'
#' @description
#' A convenience function for the user that implements a simple grid search for the purpose of tuning. For each combination
#' in the grid, a cross-validated error is calculated. The best combination is returned along with additional information.
#' This function only works for scalar responses.
#'
#' @return The following are returned:
#'
#' `Parameters` -- The final list of hyperparameters chosen by the tuning process.
#'
#' `All_Information` -- A list object containing the errors for every combination in the grid. Each element of the list
#' corresponds to a different choice of number of hidden layers.
#'
#' `Best_Per_Layer` -- An object that returns the best parameter combination for each choice of hidden layers.
#'
#' `Grid_List` -- An object containing information about all combinations tried by the tuning process.
#'
#' @details No additional details for now.
#'
#' @param tune_list This is a list object containing the values from which to develop the grid. For each of the hyperparameters
#' that can be tuned for (`num_hidden_layers`, `neurons`, `epochs`, `val_split`, `patience`, `learn_rate`, `num_basis`,
#' `activation_choice`), the user inputs a set of values to try. Note that the combinations are found based on the number of
#' hidden layers. For example, if `num_hidden_layers` = 3 and `neurons` = c(8, 16), then the combinations will begin as
#' c(8, 8, 8), c(8, 8, 16), ..., c(16, 16, 16). Example provided below.
#'
#' @param resp For scalar responses, this is a vector of the observed dependent variable. For functional responses,
#' this is a matrix where each row contains the basis coefficients defining the functional response (for each observation).
#'
#' @param func_cov The form of this depends on whether the `raw_data` argument is true or not. If true, then this is
#' a list of k matrices. The dimensionality of the matrices should be the same (n x p) where n is the number of
#' observations and p is the number of longitudinal observations. If `raw_data` is false, then the input should be a tensor
#' with dimensionality b x n x k where b is the number of basis functions used to define the functional covariates, n is
#' the number of observations, and k is the number of functional covariates.
#'
#' @param scalar_cov A matrix contained the multivariate information associated with the data set. This is all of your
#' non-longitudinal data.
#'
#' @param basis_choice A vector of size k (the number of functional covariates) with either "fourier" or "bspline" as the inputs.
#' This is the choice for the basis functions used for the functional weight expansion. If you only specify one, with k > 1,
#' then the argument will repeat that choice for all k functional covariates.
#'
#' @param domain_range List of size k. Each element of the list is a 2-dimensional vector containing the upper and lower
#' bounds of the k-th functional weight.
#'
#' @param batch_size Size of the batch for stochastic gradient descent.
#'
#' @param decay_rate A modification to the learning rate that decreases the learning rate as more and more learning
#' iterations are completed.
#'
#' @param nfolds The number of folds to be used in the cross-validation process.
#'
#' @param cores For the purpose of parallelization.
#'
#' @param raw_data If True, then user does not need to create functional observations beforehand. The function will
#' internally take care of that pre-processing.
#'
#' @examples
#' # libraries
#' library(fda)
#'
#' # Loading data
#' data("daily")
#'
#' # Obtaining response
#' total_prec = apply(daily$precav, 2, mean)
#'
#' # Creating functional data
#' temp_data = array(dim = c(65, 35, 1))
#' tempbasis65 = create.fourier.basis(c(0,365), 65)
#' timepts = seq(1, 365, 1)
#' temp_fd = Data2fd(timepts, daily$tempav, tempbasis65)
#'
#' # Data set up
#' temp_data[,,1] = temp_fd$coefs
#'
#' # Creating grid
#' tune_list_weather = list(num_hidden_layers = c(2),
#' neurons = c(8, 16),
#' epochs = c(250),
#' val_split = c(0.2),
#' patience = c(15),
#' learn_rate = c(0.01, 0.1),
#' num_basis = c(7),
#' activation_choice = c("relu", "sigmoid"))
#'
#' # Running Tuning
#' weather_tuned = fnn.tune(tune_list_weather,
#' total_prec,
#' temp_data,
#' basis_choice = c("fourier"),
#' domain_range = list(c(1, 24)),
#' nfolds = 2)
#'
#' # Looking at results
#' weather_tuned
#'
#' @export
# @import keras tensorflow fda.usc fda ggplot2 ggpubr caret pbapply reshape2 flux Matrix doParallel
# Grid-search tuner for functional neural networks (scalar responses only).
# Grid-search tuner for functional neural networks (scalar responses only).
# For every hyperparameter combination, a cross-validated MSPE is computed
# via fnn.cv(); the best combination per hidden-layer count and overall is
# returned. See the roxygen block above for the argument contract.
fnn.tune = function(tune_list,
                    resp,
                    func_cov,
                    scalar_cov = NULL,
                    basis_choice,
                    domain_range,
                    batch_size = 32,
                    decay_rate = 0,
                    nfolds = 5,
                    cores = 4,
                    raw_data = FALSE){

  # Parallel apply set up (currently disabled; `cores` is unused below)
  #plan(multiprocess, workers = cores)

  #### Output size: vector response => scalar (1); matrix response => functional
  if(is.vector(resp) == TRUE){
    output_size = 1
  } else {
    output_size = ncol(resp)
  }

  # Number of functional covariates (list length for raw data, third tensor
  # dimension otherwise). NOTE(review): dim_check is never used afterwards —
  # presumably intended for input validation; confirm.
  if(raw_data == TRUE){
    dim_check = length(func_cov)
  } else {
    dim_check = dim(func_cov)[3]
  }

  #### Creating functional observations in the case of raw data
  if(raw_data == TRUE){

    # Taking in data
    dat = func_cov

    # Coefficient array: 31 basis functions x observations x functional covariates
    temp_tensor = array(dim = c(31, nrow(dat[[1]]), length(dat)))

    for (t in 1:length(dat)) {

      # Getting appropriate obs
      curr_func = dat[[t]]

      # Getting current domain
      curr_domain = domain_range[[1]] # BE CAREFUL HERE - ALL DOMAINS NEED TO BE THE SAME IN THIS CASE

      # Creating basis (using bspline)
      basis_setup = create.bspline.basis(rangeval = c(curr_domain[1], curr_domain[2]),
                                         nbasis = 31,
                                         norder = 4)

      # Time points, evenly spaced over the covariate's domain
      time_points = seq(curr_domain[1], curr_domain[2], length.out = ncol(curr_func))

      # Making functional observation
      temp_fd = Data2fd(time_points, t(curr_func), basis_setup)

      # Storing data
      temp_tensor[,,t] = temp_fd$coefs
    }

    # Saving as appropriate names
    func_cov = temp_tensor
  }

  if(output_size == 1){

    # Helper run once per grid row `x`; relies on `current_layer` from the
    # enclosing for-loop via lexical scoping (pbapply is called inside the
    # loop, so the value is current at call time).
    tune_func = function(x,
                         nfolds,
                         resp,
                         func_cov,
                         scalar_cov,
                         basis_choice,
                         domain_range,
                         batch_size,
                         decay_rate,
                         raw_data){

      # Setting seed so each keras fit is reproducible
      use_session_with_seed(
        1,
        disable_gpu = FALSE,
        disable_parallel_cpu = FALSE,
        quiet = TRUE
      )

      # Clearing irrelevant information left over from the grid row
      colnames(x) <- NULL
      rownames(x) <- NULL

      # Running model. Positions within the row `x` are, in order:
      # [activations | num_basis | neurons | epochs | val_split | patience | learn_rate]
      model_results = fnn.cv(nfolds,
                             resp,
                             func_cov = func_cov,
                             scalar_cov = scalar_cov,
                             basis_choice = basis_choice,
                             num_basis = as.numeric(as.character((x[(current_layer + 1):(length(basis_choice) + current_layer)]))),
                             hidden_layers = current_layer,
                             neurons_per_layer = as.numeric(as.character(x[(length(basis_choice) + current_layer + 1):((length(basis_choice) + current_layer) + current_layer)])),
                             activations_in_layers = as.character(x[1:current_layer]),
                             domain_range = domain_range,
                             epochs = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 1])),
                             loss_choice = "mse",
                             metric_choice = list("mean_squared_error"),
                             val_split = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 2])),
                             learn_rate = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 4])),
                             patience_param = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 3])),
                             early_stopping = TRUE,
                             print_info = FALSE,
                             batch_size = batch_size,
                             decay_rate = decay_rate,
                             raw_data = FALSE)

      # Putting together the CV error and the hyperparameters that produced it
      list_returned <- list(MSPE = model_results$MSPE$Overall_MSPE,
                            num_basis = as.numeric(as.character((x[(current_layer + 1):(length(basis_choice) + current_layer)]))),
                            hidden_layers = current_layer,
                            neurons_per_layer = as.numeric(as.character(x[(length(basis_choice) + current_layer + 1):((length(basis_choice) + current_layer) + current_layer)])),
                            activations_in_layers = as.character(x[1:current_layer]),
                            epochs = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 1])),
                            val_split = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 2])),
                            patience_param = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 3])),
                            learn_rate = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 4])))

      # Clearing keras backend so successive fits don't accumulate state
      K <- backend()
      K$clear_session()

      # Returning
      return(list_returned)
    }

    # Accumulators: best result per layer count, all results, and the grids
    Errors = list()
    All_Errors = list()
    Grid_List = list()

    # Setting up tuning parameters: one grid per hidden-layer count
    for (i in 1:length(tune_list$num_hidden_layers)) {

      # Current layer number
      current_layer = tune_list$num_hidden_layers[i]

      # Creating data frame of list: all neuron / basis / activation combos
      df = expand.grid(rep(list(tune_list$neurons), tune_list$num_hidden_layers[i]), stringsAsFactors = FALSE)
      df2 = expand.grid(rep(list(tune_list$num_basis), length(basis_choice)), stringsAsFactors = FALSE)
      df3 = expand.grid(rep(list(tune_list$activation_choice), tune_list$num_hidden_layers[i]), stringsAsFactors = FALSE)
      colnames(df2)[length(basis_choice)] <- "Var2.y"
      # NOTE(review): indexing by the loop counter `i` renames the i-th
      # column, whereas df2 above renames its *last* column — this looks
      # like it should be colnames(df3)[current_layer]; confirm.
      colnames(df3)[i] <- "Var2.z"

      # Getting grid of the scalar hyperparameters crossed with the keys
      pre_grid = expand.grid(df$Var1,
                             Var2.y = df2$Var2.y,
                             Var2.z = df3$Var2.z,
                             tune_list$epochs,
                             tune_list$val_split,
                             tune_list$patience,
                             tune_list$learn_rate)

      # Merging the per-layer frames back in via their key columns
      combined <- unique(merge(df, pre_grid, by = "Var1"))
      combined2 <- unique(merge(df2, combined, by = "Var2.y"))
      final_grid <- suppressWarnings(unique(merge(df3, combined2, by = "Var2.z")))

      # Saving grid
      Grid_List[[i]] = final_grid

      # Now, we can pass on the combinations to the model (row-wise, with a
      # progress bar via pbapply)
      results = pbapply(final_grid, 1, tune_func,
                        nfolds = nfolds,
                        resp = resp,
                        func_cov = func_cov,
                        scalar_cov = scalar_cov,
                        basis_choice = basis_choice,
                        domain_range = domain_range,
                        batch_size = batch_size,
                        decay_rate = decay_rate,
                        raw_data = FALSE)

      # Initializing
      MSPE_vals = c()

      # Collecting results
      for (u in 1:length(results)) {
        MSPE_vals[u] <- as.vector(results[[u]][1])
      }

      # All Errors
      All_Errors[[i]] = results

      # Getting best combination for this hidden-layer count
      Errors[[i]] = results[[which.min(do.call(c, MSPE_vals))]]

      # Printing where we are at
      cat("\n")
      print(paste0("Done tuning for: ", current_layer, " hidden layers."))
    }

    # Initializing
    MSPE_after = c()

    # Getting best set of parameters across all layer counts
    for (i in 1:length(tune_list$num_hidden_layers)) {
      MSPE_after[i] = Errors[[i]]$MSPE
    }

    # Selecting minimum
    best = which.min(MSPE_after)

    # Returning best set of parameters
    return(list(Parameters = Errors[[best]],
                All_Information = All_Errors,
                Best_Per_Layer = Errors,
                Grid_List = Grid_List))

  } else {
    print("Tuning isn't available yet for functional responses")
    return()
  }
}
| /R/fnn.tune.R | no_license | b-thi/FNN | R | false | false | 12,953 | r | #' @title Tuning Functional Neural Networks
#'
#' @description
#' A convenience function for the user that implements a simple grid search for the purpose of tuning. For each combination
#' in the grid, a cross-validated error is calculated. The best combination is returned along with additional information.
#' This function only works for scalar responses.
#'
#' @return The following are returned:
#'
#' `Parameters` -- The final list of hyperparameter chosen by the tuning process.
#'
#' `All_Information` -- A list object containing the errors for every combination in the grid. Each element of the list
#' corresponds to a different choice of number of hidden layers.
#'
#' `Best_Per_Layer` -- An object that returns the best parameter combination for each choice of hidden layers.
#'
#' `Grid_List` -- An object containing information about all combinations tried by the tuning process.
#'
#' @details No additional details for now.
#'
#' @param tune_list This is a list object containing the values from which to develop the grid. For each of the hyperparameters
#' that can be tuned for (`num_hidden_layers`, `neurons`, `epochs`, `val_split`, `patience`, `learn_rate`, `num_basis`,
#' `activation_choice`), the user inputs a set of values to try. Note that the combinations are found based on the number of
#' hidden layers. For example, if `num_hidden_layers` = 3 and `neurons` = c(8, 16), then the combinations will begin as
#' c(8, 8, 8), c(8, 8, 16), ..., c(16, 16, 16). Example provided below.
#'
#' @param resp For scalar responses, this is a vector of the observed dependent variable. For functional responses,
#' this is a matrix where each row contains the basis coefficients defining the functional response (for each observation).
#'
#' @param func_cov The form of this depends on whether the `raw_data` argument is true or not. If true, then this is
#' a list of k matrices. The dimensionality of the matrices should be the same (n x p) where n is the number of
#' observations and p is the number of longitudinal observations. If `raw_data` is false, then the input should be a tensor
#' with dimensionality b x n x k where b is the number of basis functions used to define the functional covariates, n is
#' the number of observations, and k is the number of functional covariates.
#'
#' @param scalar_cov A matrix contained the multivariate information associated with the data set. This is all of your
#' non-longitudinal data.
#'
#' @param basis_choice A vector of size k (the number of functional covariates) with either "fourier" or "bspline" as the inputs.
#' This is the choice for the basis functions used for the functional weight expansion. If you only specify one, with k > 1,
#' then the argument will repeat that choice for all k functional covariates.
#'
#' @param domain_range List of size k. Each element of the list is a 2-dimensional vector containing the upper and lower
#' bounds of the k-th functional weight.
#'
#' @param batch_size Size of the batch for stochastic gradient descent.
#'
#' @param decay_rate A modification to the learning rate that decreases the learning rate as more and more learning
#' iterations are completed.
#'
#' @param nfolds The number of folds to be used in the cross-validation process.
#'
#' @param cores For the purpose of parallelization.
#'
#' @param raw_data If True, then user does not need to create functional observations beforehand. The function will
#' internally take care of that pre-processing.
#'
#' @examples
#' # libraries
#' library(fda)
#'
#' # Loading data
#' data("daily")
#'
#' # Obtaining response
#' total_prec = apply(daily$precav, 2, mean)
#'
#' # Creating functional data
#' temp_data = array(dim = c(65, 35, 1))
#' tempbasis65 = create.fourier.basis(c(0,365), 65)
#' timepts = seq(1, 365, 1)
#' temp_fd = Data2fd(timepts, daily$tempav, tempbasis65)
#'
#' # Data set up
#' temp_data[,,1] = temp_fd$coefs
#'
#' # Creating grid
#' tune_list_weather = list(num_hidden_layers = c(2),
#' neurons = c(8, 16),
#' epochs = c(250),
#' val_split = c(0.2),
#' patience = c(15),
#' learn_rate = c(0.01, 0.1),
#' num_basis = c(7),
#' activation_choice = c("relu", "sigmoid"))
#'
#' # Running Tuning
#' weather_tuned = fnn.tune(tune_list_weather,
#' total_prec,
#' temp_data,
#' basis_choice = c("fourier"),
#' domain_range = list(c(1, 24)),
#' nfolds = 2)
#'
#' # Looking at results
#' weather_tuned
#'
#' @export
# @import keras tensorflow fda.usc fda ggplot2 ggpubr caret pbapply reshape2 flux Matrix doParallel
#returns product of two numbers, as a trivial example
# Grid-search tuner: evaluates every hyperparameter combination in `tune_list`
# with fnn.cv() and returns the combination with the lowest cross-validated MSPE.
fnn.tune = function(tune_list,
resp,
func_cov,
scalar_cov = NULL,
basis_choice,
domain_range,
batch_size = 32,
decay_rate = 0,
nfolds = 5,
cores = 4,
raw_data = FALSE){
# Parallel apply set up
#plan(multiprocess, workers = cores)
# NOTE(review): `cores` is currently unused -- the plan() call above is
# commented out, so the grid is evaluated sequentially via pbapply.
#### Output size
# Scalar (vector) response -> one output; matrix response -> one per column.
if(is.vector(resp) == TRUE){
output_size = 1
} else {
output_size = ncol(resp)
}
# Number of functional covariates: list length for raw data, otherwise the
# third array dimension. NOTE(review): dim_check is never used afterwards.
if(raw_data == TRUE){
dim_check = length(func_cov)
} else {
dim_check = dim(func_cov)[3]
}
#### Creating functional observations in the case of raw data
# Converts each raw curve matrix into basis coefficients (31 B-splines of
# order 4) so the rest of the function can assume coefficient-array input.
if(raw_data == TRUE){
# Taking in data
dat = func_cov
# Setting up array
temp_tensor = array(dim = c(31, nrow(dat[[1]]), length(dat)))
for (t in 1:length(dat)) {
# Getting appropriate obs
curr_func = dat[[t]]
# Getting current domain
curr_domain = domain_range[[1]] # BE CAREFUL HERE - ALL DOMAINS NEED TO BE THE SAME IN THIS CASE
# Creating basis (using bspline)
basis_setup = create.bspline.basis(rangeval = c(curr_domain[1], curr_domain[2]),
nbasis = 31,
norder = 4)
# Time points
time_points = seq(curr_domain[1], curr_domain[2], length.out = ncol(curr_func))
# Making functional observation
temp_fd = Data2fd(time_points, t(curr_func), basis_setup)
# Storing data
temp_tensor[,,t] = temp_fd$coefs
}
# Saving as appropriate names
func_cov = temp_tensor
}
# Tuning is only implemented for scalar responses (see else branch below).
if(output_size == 1){
# Setting up function
# Evaluates one grid row. `x` arrives as a character vector (apply() coerces
# mixed-type rows), hence the repeated as.numeric(as.character(...)) decoding.
# `current_layer` is found by lexical scoping from the enclosing for-loop
# below, not passed as an argument -- fragile but intentional here.
tune_func = function(x,
nfolds,
resp,
func_cov,
scalar_cov,
basis_choice,
domain_range,
batch_size,
decay_rate,
raw_data){
# Setting seed
# NOTE(review): use_session_with_seed() is deprecated in recent keras
# releases -- confirm the installed version still exports it.
use_session_with_seed(
1,
disable_gpu = FALSE,
disable_parallel_cpu = FALSE,
quiet = TRUE
)
# Clearing irrelevant information
colnames(x) <- NULL
rownames(x) <- NULL
# Running model
# Grid-row layout: [activations | num_basis | neurons | epochs, val_split,
# patience, learn_rate]; the index arithmetic below slices those segments.
# raw_data is hard-coded FALSE because any raw input was already converted
# to basis coefficients before tuning began.
model_results = fnn.cv(nfolds,
resp,
func_cov = func_cov,
scalar_cov = scalar_cov,
basis_choice = basis_choice,
num_basis = as.numeric(as.character((x[(current_layer + 1):(length(basis_choice) + current_layer)]))),
hidden_layers = current_layer,
neurons_per_layer = as.numeric(as.character(x[(length(basis_choice) + current_layer + 1):((length(basis_choice) + current_layer) + current_layer)])),
activations_in_layers = as.character(x[1:current_layer]),
domain_range = domain_range,
epochs = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 1])),
loss_choice = "mse",
metric_choice = list("mean_squared_error"),
val_split = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 2])),
learn_rate = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 4])),
patience_param = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 3])),
early_stopping = TRUE,
print_info = FALSE,
batch_size = batch_size,
decay_rate = decay_rate,
raw_data = FALSE)
# Putting together
# Bundle the CV error with the decoded hyperparameters that produced it.
list_returned <- list(MSPE = model_results$MSPE$Overall_MSPE,
num_basis = as.numeric(as.character((x[(current_layer + 1):(length(basis_choice) + current_layer)]))),
hidden_layers = current_layer,
neurons_per_layer = as.numeric(as.character(x[(length(basis_choice) + current_layer + 1):((length(basis_choice) + current_layer) + current_layer)])),
activations_in_layers = as.character(x[1:current_layer]),
epochs = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 1])),
val_split = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 2])),
patience_param = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 3])),
learn_rate = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 4])))
# Clearing backend
# Frees the TensorFlow graph between grid evaluations to cap memory use.
K <- backend()
K$clear_session()
# Returning
return(list_returned)
}
# Saving MSPEs
Errors = list()
All_Errors = list()
Grid_List = list()
# Setting up tuning parameters
# One grid per hidden-layer count: neuron/activation columns repeat per layer.
for (i in 1:length(tune_list$num_hidden_layers)) {
# Current layer number
current_layer = tune_list$num_hidden_layers[i]
# Creating data frame of list
df = expand.grid(rep(list(tune_list$neurons), tune_list$num_hidden_layers[i]), stringsAsFactors = FALSE)
df2 = expand.grid(rep(list(tune_list$num_basis), length(basis_choice)), stringsAsFactors = FALSE)
df3 = expand.grid(rep(list(tune_list$activation_choice), tune_list$num_hidden_layers[i]), stringsAsFactors = FALSE)
colnames(df2)[length(basis_choice)] <- "Var2.y"
# NOTE(review): the index here is the loop counter `i`, whereas the df2 line
# above uses the column count -- presumably this should be
# `colnames(df3)[current_layer]`; verify behavior when
# num_hidden_layers[i] != i.
colnames(df3)[i] <- "Var2.z"
# Getting grid
pre_grid = expand.grid(df$Var1,
Var2.y = df2$Var2.y,
Var2.z = df3$Var2.z,
tune_list$epochs,
tune_list$val_split,
tune_list$patience,
tune_list$learn_rate)
# Merging
combined <- unique(merge(df, pre_grid, by = "Var1"))
combined2 <- unique(merge(df2, combined, by = "Var2.y"))
final_grid <- suppressWarnings(unique(merge(df3, combined2, by = "Var2.z")))
# Saving grid
Grid_List[[i]] = final_grid
# Now, we can pass on the combinations to the model
# pbapply shows a progress bar; each grid row is one cross-validated fit.
results = pbapply(final_grid, 1, tune_func,
nfolds = nfolds,
resp = resp,
func_cov = func_cov,
scalar_cov = scalar_cov,
basis_choice = basis_choice,
domain_range = domain_range,
batch_size = batch_size,
decay_rate = decay_rate,
raw_data = FALSE)
# Initializing
MSPE_vals = c()
# Collecting results
for (u in 1:length(results)) {
MSPE_vals[u] <- as.vector(results[[u]][1])
}
# All Errors
All_Errors[[i]] = results
# Getting best
# do.call(c, ...) flattens the collected MSPE entries before which.min().
Errors[[i]] = results[[which.min(do.call(c, MSPE_vals))]]
# Printing where we are at
cat("\n")
print(paste0("Done tuning for: ", current_layer, " hidden layers."))
}
# Initializing
MSPE_after = c()
# Getting best set of parameters
for (i in 1:length(tune_list$num_hidden_layers)) {
MSPE_after[i] = Errors[[i]]$MSPE
}
# Selecting minimum
best = which.min(MSPE_after)
# Returning best set of parameters
# Parameters: overall winner; All_Information: every fit per layer count;
# Best_Per_Layer: winner within each layer count; Grid_List: searched grids.
return(list(Parameters = Errors[[best]],
All_Information = All_Errors,
Best_Per_Layer = Errors,
Grid_List = Grid_List))
} else {
print("Tuning isn't available yet for functional responses")
return()
}
}
|
## Constructs a special "matrix": a list of accessor closures that hold a
## matrix and a lazily-filled cache of its inverse in the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until a caller stores one via setInverseMatrix().
  inv <- NULL

  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  # Return the stored matrix.
  get <- function() {
    x
  }

  # Record a computed inverse in the cache.
  setInverseMatrix <- function(invMatrix) {
    inv <<- invMatrix
  }

  # Return the cached inverse, or NULL if none has been stored yet.
  getInverseMatrix <- function() {
    inv
  }

  list(
    set = set,
    get = get,
    setInverseMatrix = setInverseMatrix,
    getInverseMatrix = getInverseMatrix
  )
}
# This function computes the inverse of the special "matrix" returned by
# makeCacheMatrix, computing it only on the first call and serving the cached
# value on subsequent calls.
#
#   x   : list of accessors produced by makeCacheMatrix()
#   ... : additional arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverseMatrix()
  if (!is.null(inv)) {
    # If the inverse has already been calculated (and the matrix has not
    # changed), retrieve the inverse from the cache instead of recomputing.
    message("getting cached inverse matrix")
    return(inv)
  }
  data <- x$get()
  # Fix: forward '...' to solve(); the original accepted '...' in the
  # signature but silently dropped it.
  inv <- solve(data, ...)
  x$setInverseMatrix(inv)
  inv
}
## ^-^
| /cachematrix.R | no_license | badbot/ProgrammingAssignment2 | R | false | false | 1,139 | r |
## This function creates a special "matrix": a list of getter/setter closures
## that cache a matrix and (lazily) its inverse in the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL until cacheSolve() stores one via setInverseMatrix().
inv <- NULL
# Replace the stored matrix and invalidate the cached inverse.
set <- function(y) {
x <<- y
inv <<- NULL
}
# Return the stored matrix.
get <- function() x
# Record a computed inverse in the cache.
setInverseMatrix <- function(invMatrix) inv <<- invMatrix
# Return the cached inverse (NULL if not yet computed).
getInverseMatrix <- function() inv
list(set = set, get = get,
setInverseMatrix = setInverseMatrix,
getInverseMatrix = getInverseMatrix)
}
# This function computes the inverse of the special "matrix" returned by
# makeCacheMatrix, computing it only on the first call and serving the cached
# value on subsequent calls.
#
#   x   : list of accessors produced by makeCacheMatrix()
#   ... : additional arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverseMatrix()
  if (!is.null(inv)) {
    # If the inverse has already been calculated (and the matrix has not
    # changed), retrieve the inverse from the cache instead of recomputing.
    message("getting cached inverse matrix")
    return(inv)
  }
  data <- x$get()
  # Fix: forward '...' to solve(); the original accepted '...' in the
  # signature but silently dropped it.
  inv <- solve(data, ...)
  x$setInverseMatrix(inv)
  inv
}
## ^-^
|
# Data analysis for BMT303 trial
# Fiona Tamburini
# required packages
library(ggplot2)
library(genefilter)
library(RColorBrewer)
library(plyr)
library(dplyr)
library(tibble)
library(reshape2)
library(scales)
library(MASS)
library(gtools)
library(vegan)
library(q2)
library(ggpubr)
library(cowplot)
library(DESeq2)  # required by the differential-abundance section (DESeqDataSetFromMatrix, DESeq, results)
######################################################################
### Setup ############################################################
######################################################################
### set this to /your/path/to/prebio2
# NOTE(review): setwd() in a shared script hurts portability -- consider
# relative paths run from the project root instead.
setwd("/Users/Fiona/scg4_fiona/prebio2/prebio")
# color palette
# FOS, Control
# Two-color palette for the treatment arms, used by all group-colored plots.
my_pal <- c("#D55E00", "#0072B2")
names(my_pal) <- c("FOS", "Control")
# Output directory for all figures (silently reused if it already exists).
dir.create("plots", showWarnings = F)
######################################################################
### Read in data and metadate files for prebiotic project analysis ###
######################################################################
# TO DO: change filepaths/organize for portability
# TO DO: remove P83 and re-save
### Read sample metadata -- which stools were collected/sequenced
prebio_meta_all <- read.table("metadata/prebio_meta.tsv", sep = '\t', header = T, quote="\"")
# set FOS/Control grouping
# Treatment arm is encoded in the patient id prefix: "303..." = FOS arm.
prebio_meta_all$group <- ifelse(startsWith(as.character(prebio_meta_all$patient_id), '303'), "FOS", "Control")
prebio_meta_all$group <- factor(prebio_meta_all$group, levels = c("Control", "FOS"))
# format columns as date
prebio_meta_all$date <- as.Date(prebio_meta_all$date)
prebio_meta_all$trx <- as.Date(prebio_meta_all$trx)
# set factor levels for downstream plots (natural/mixed sort of patient ids)
prebio_meta_all$patient_id <- factor(prebio_meta_all$patient_id, levels = mixedsort(unique(prebio_meta_all$patient_id)))
# metadata for sequenced samples only
prebio_meta <- filter(prebio_meta_all, sequenced_status == T)
prebio_meta <- prebio_meta[mixedorder(unique(prebio_meta$sequencing_id)), ]
### Read taxonomic classification data
## bracken species read counts including unclassifed
brack_sp_reads <- read.table("input_data/bracken_species_reads.txt", sep = '\t', header = T, quote = "")
brack_g_reads <- read.table("input_data/bracken_genus_reads.txt", sep = '\t', header = T, quote = "")
# add pseudocount
# Zeros -> 1 read so downstream log fold changes are defined; relative
# abundance is then recomputed over classified reads only (the
# "Unclassified" row is dropped from numerator and denominator).
brack_sp_pseudo <- brack_sp_reads
brack_sp_pseudo[brack_sp_pseudo == 0] <- 1
brack_sp_pseudo_rel <- sweep(brack_sp_pseudo[-which(rownames(brack_sp_pseudo) == "Unclassified"), ], 2, colSums(brack_sp_reads[-which(rownames(brack_sp_pseudo) == "Unclassified"), ]), FUN = "/")
## bracken species percentage -- classified only
brack_sp_perc <- read.table("input_data/bracken_species_perc.txt", sep = '\t', header = T, quote = "")
brack_g_perc <- read.table("input_data/bracken_genus_perc.txt", sep = '\t', header = T, quote = "")
## Read short chain fatty acid measurements
# repeated measurements may 2019
scfa2_f <- "input_data/prebio_scfa_may19.txt"
scfa2 <- read.table(scfa2_f, sep = '\t', header = T)
# Missing concentrations treated as zero (below detection limit -- TODO confirm).
scfa2[is.na(scfa2)] <- 0
######################################################################
### Summary statistics ###############################################
######################################################################
# NOTE(review): the bare length()/median()/mean()/range() calls below rely on
# top-level auto-printing -- they show output interactively but are silent
# under plain source(); wrap in print() if this script is sourced.
# n patients, controls
print("FOS")
length(unique(filter(prebio_meta_all, group == "FOS")$patient_id))
print("Controls")
length(unique(filter(prebio_meta_all, group == "Control")$patient_id))
# n samples collected
length(prebio_meta_all$sequencing_id[!is.na(prebio_meta_all$sequencing_id)])
# n samples sequenced
length(prebio_meta_all$sequencing_id[prebio_meta_all$sequenced_status])
# samples collected but not sequenced
not_seqd <- filter(prebio_meta_all, !sequenced_status)
# samples collected per patient (plyr::count = frequency table by patient)
all_freq <- plyr::count(prebio_meta_all[!is.na(prebio_meta_all$sequencing_id),], "patient_id")
fos_freq <- plyr::count(filter(prebio_meta_all[!is.na(prebio_meta_all$sequencing_id),], group == "FOS"), "patient_id")
ctrl_freq <- plyr::count(filter(prebio_meta_all[!is.na(prebio_meta_all$sequencing_id),], group == "Control"), "patient_id")
# median samples collected per patient
median(all_freq$freq)
median(fos_freq$freq)
median(ctrl_freq$freq)
# mean samples collected per patient
mean(all_freq$freq)
mean(fos_freq$freq)
mean(ctrl_freq$freq)
# range
range(all_freq$freq)
range(fos_freq$freq)
range(ctrl_freq$freq)
# samples not sequenced (collected: date present)
filter(prebio_meta_all, sequenced_status == F & !is.na(date))
# samples not collected (no collection date)
filter(prebio_meta_all, sequenced_status == F & is.na(date))
######################################################################
### Readcount plots ##################################################
######################################################################
# readcounts file from preprocessing pipeline
readcounts_f <- "input_data/readcounts.tsv"
readcounts <- read.table(readcounts_f, sep = '\t', header = T)
# Keep the sample column plus the four pipeline-stage count columns.
counts <- readcounts[, c(1:3, 5, 7)]
colnames(counts) <- c("Sample", "Raw reads", "Trimmed reads", "Deduplicated reads", "Non-human reads")
counts_long <- melt(counts, id.vars = "Sample", variable.name = "step", value.name = "reads")
# Express read counts in millions for plotting.
counts_long$reads_m <- (counts_long$reads / 1e6)
# plot readcounts: one histogram facet per preprocessing stage
readcount_plot <- ggplot(counts_long, aes(x=reads_m, fill=step)) +
geom_histogram(binwidth = 1) +
scale_x_continuous(labels = comma, breaks = seq(0, 100, 10)) +
facet_grid(step ~ ., scales = "free_y") +
theme_cowplot(12) +
labs(
x = "\nReads (M)",
y = "Count\n",
fill = ""
) +
background_grid()
# NOTE(review): "preproccessing" in the output filename is a typo, but other
# tooling may depend on this exact path -- rename with care.
ggsave("plots/readcounts_preproccessing.png", readcount_plot, device = "png", height = 6, width = 7)
######################################################################
### Sample collection plot ###########################################
######################################################################
# plot relative to date of transplant
samples <- prebio_meta_all
# Days between stool collection and transplant (difftime; coerced to numeric
# by the ifelse() further below).
samples$sample_day <- (samples$date - samples$trx)
# create patient labels (F1..Fn / C1..Cn), set order
fos <- filter(samples, group == "FOS")
control <- filter(samples, group == "Control")
labels <- data.frame(patient_id = sort(unique(fos$patient_id)), label = paste0("F", seq(unique(fos$patient_id))))
labels <- rbind(labels, data.frame(patient_id = mixedsort(as.character(unique(control$patient_id))), label = paste0("C", seq(unique(control$patient_id)))))
samples <- merge(samples, labels, by = "patient_id", all = T)
samples$label <- factor(samples$label, levels = rev(labels$label))
# set sequenced vs no (three-way status drives the point shapes below)
samples$sequenced_status <- ifelse(samples$sequenced_status, "Sequenced", "Not sequenced")
samples$sequenced_status <- ifelse(is.na(samples$sequencing_id), "Not collected", samples$sequenced_status)
samples$sequenced_status <- factor(samples$sequenced_status, levels = c("Sequenced", "Not sequenced", "Not collected"))
# if the sample wasn't collected and the day is NA, change sample day to actual day
samples$sample_day <- ifelse(is.na(samples$sample_day), samples$day, samples$sample_day)
# remove samples > day 100
# maybe change this so that samples >100 are included and axis is >100 ?
samples <- filter(samples, sample_day <= 100)
# plot collected samples (shape encodes collection/sequencing status)
sample_plot <- ggplot(samples, aes(x=sample_day, y=label, shape=sequenced_status)) +
geom_point(size = 2, color = "black") +
scale_shape_manual(values = c(16, 1, 4)) +
facet_wrap(~ group, ncol = 1, strip.position = "top", scales = "free_y") +
theme_cowplot() +
labs(
x = "\nDay relative to transplant",
y = "Patient\n",
shape = "Status"
) +
scale_x_continuous(labels = comma, breaks = c(-5, 0, 7, 14, 28, 60, 100))
ggsave("plots/stool_sampling.png", sample_plot, device = "png", height = 6, width = 6)
# color by timepoint (same layout, points colored by protocol timepoint)
sample_plot2 <- ggplot(samples, aes(x=sample_day, y=label, shape=sequenced_status)) +
geom_point(size = 2, aes(color = factor(day, levels = c(-5, 0, 7, 14, 28, 60, 100)))) +
scale_shape_manual(values = c(16, 1, 4)) +
facet_wrap(~ group, ncol = 1, strip.position = "top", scales = "free_y") +
theme_cowplot(12) +
labs(
x = "\nDay relative to transplant",
y = "Patient\n",
shape = "Status",
color = "Timepoint"
) +
scale_x_continuous(labels = comma, breaks = c(-5, 0, 7, 14, 28, 60, 100))
ggsave("plots/stool_sampling_colored.png", sample_plot2, device = "png", height = 6, width = 6)
######################################################################
### SCFA measurements ################################################
######################################################################
## repeated measurements may 2019
scfa_long2 <- melt(scfa2, id.vars = c("sample", "patient_id", "sequencing_id", "group"), variable.name = "scfa")
# Melted column names arrive like "Acetic.Acid"; restore a readable
# "Acetic acid" for facet labels.
scfa_long2$scfa <- gsub("\\.A", " a", scfa_long2$scfa)
# set factor level for group
scfa_long2$group <- factor(scfa_long2$group, levels = c("FOS", "Control"))
## plot without log transformation, free y axis
# Per-SCFA Wilcoxon tests between arms, FDR-corrected; stars encode
# adjusted-p thresholds 0.05 / 0.01 / 0.001.
pvals <- compare_means(value ~ group, data = scfa_long2, group.by = "scfa", method = "wilcox.test", p.adjust.method = "fdr")
pvals$p.signif <- ifelse(pvals$p.adj < 0.05, "*", "ns")
pvals$p.signif <- ifelse(pvals$p.adj < 0.01 & pvals$p.adj >= 0.001, "**", pvals$p.signif)
pvals$p.signif <- ifelse(pvals$p.adj < 0.001, "***", pvals$p.signif)
# set y position of signif for each plot (10% above the facet's max value)
maxs <- aggregate(value ~ scfa,scfa_long2, FUN = max)
pvals$y.position <- maxs[match(pvals$scfa, maxs$scfa), "value"] * 1.10
scfa_plot <- ggplot(scfa_long2, aes(x = group, y = value)) +
geom_violin(aes(fill = group)) +
geom_point() +
facet_wrap(. ~ scfa, scales = "free_y") +
# pseudo_log_trans() +
labs(
x = "Short-chain fatty acid",
y = "Concentration (umol/g stool)",
fill="") +
scale_fill_manual(values = my_pal) +
stat_pvalue_manual(pvals, label = "p.signif") +
theme_cowplot(12)
ggsave("plots/scfa_may19_facet.png", scfa_plot, device = "png", height = 9, width = 8)
######################################################################
### Classified reads #################################################
######################################################################
## Plot histogram of classified reads
# Percent classified per sample = 100 * (1 - unclassified fraction).
classified <- (1 - sweep(brack_sp_reads, 2, colSums(brack_sp_reads), "/")["Unclassified",]) * 100
read_plot <- ggplot(melt(classified), aes(x=value)) +
geom_histogram(binwidth = 1, fill = "cornflowerblue", color = "white") +
scale_x_continuous(breaks = seq(0, 100, 10)) +
theme_cowplot(12) +
scale_fill_manual(values = my_pal) +
labs(
x = "Percentage of reads classified",
y = "Count"
)
ggsave("plots/readcounts_classified_histo.png", read_plot, device = "png", height = 4, width = 5)
######################################################################
### Diversity plots ##################################################
######################################################################
# find shannon diversity with vegan::diversity on per-sample species profiles
shannon_div <- diversity(t(brack_sp_perc), index = "shannon")
div <- data.frame("shannon_div" = shannon_div, "sequencing_id" = names(shannon_div))
div_meta <- merge(div, prebio_meta, by = "sequencing_id")
## stat smooth shannon diversity over time (loess trend per treatment arm)
shannon_plot_smooth <- ggplot(div_meta, aes(day, shannon_div, color = group)) +
geom_point() +
stat_smooth() +
labs(
x = "Day",
y = "Shannon Diversity",
color="") +
theme_cowplot(12) +
scale_color_manual(values = my_pal) +
scale_x_continuous(labels = comma, breaks = c(-5, 0, 7, 14, 28, 60, 100))
ggsave("plots/shannon_line_smooth.png", shannon_plot_smooth, device = "png", height = 4, width = 6)
## violin plot -- alpha diversity at each timepoint
## compare means
# Per-timepoint Wilcoxon tests between arms, FDR-corrected; stars encode
# adjusted-p thresholds 0.05 / 0.01 / 0.001.
pvals <- compare_means(shannon_div ~ group, data = div_meta, group.by = "day", method = "wilcox.test", p.adjust.method = "fdr")
pvals$p.signif <- ifelse(pvals$p.adj < 0.05, "*", "ns")
pvals$p.signif <- ifelse(pvals$p.adj < 0.01 & pvals$p.adj >= 0.001, "**", pvals$p.signif)
pvals$p.signif <- ifelse(pvals$p.adj < 0.001, "***", pvals$p.signif)
# Fixed y position for all significance labels.
pvals$y.position <- 8
# plot
shannon_plot <- ggplot(div_meta, aes(x=group, y=shannon_div)) +
geom_violin(aes(fill = group), position=position_dodge(.9), trim = F) +
stat_summary(fun.data=mean_sdl, aes(group=group), position=position_dodge(.9), geom="pointrange", color="black") +
facet_grid(. ~ day, scales = "free") +
labs(
x = "\nTreatment",
y = "Shannon Diversity\n",
fill="") +
theme_cowplot(12) +
scale_fill_manual(values = my_pal) +
stat_pvalue_manual(pvals, label = "p.signif")
ggsave("plots/shannon_div.png", shannon_plot, device = "png", height = 4, width = 10)
######################################################################
### NMDS ordination ##################################################
######################################################################
### ordinate species-level classifications
### find pairwise bray-curtis distances with vegdist
vare_dis <- vegdist(t(brack_sp_perc), method = "bray")
### nmds ordinate (MASS::isoMDS, non-metric MDS in 2 dimensions)
vare_mds0 <- isoMDS(vare_dis)
mds <- data.frame(vare_mds0$points)
mds$sequencing_id <- row.names(mds)
### merge pheno data
mds_meta <- merge(mds, prebio_meta, by = "sequencing_id")
### scatterplot of the two NMDS axes, colored by treatment arm
nmds_plot <- ggplot(mds_meta, aes(x = X1, y = X2, color = group)) +
geom_point(size = 2) +
theme_cowplot(12) +
scale_color_manual(values = my_pal) +
labs(
x = "NMDS1",
y = "NMDS2",
color = ""
)
# add 95% confidence ellipse
nmds_plot_ci <- nmds_plot + stat_ellipse(type = 't', size = 1)
ggsave("plots/nmds_by_treatment_ci.png", nmds_plot_ci, device = "png", height = 5, width = 6)
# test group differences
# beta dispersions -- are assumptions for PERMANOVA met?
dispersion <- betadisper(vare_dis, group = prebio_meta$group)
permutest(dispersion)
# PERMANOVA of community composition by treatment arm.
# NOTE(review): vegan::adonis is superseded by adonis2 in recent vegan
# releases -- verify against the installed version.
adonis(vare_dis ~ group, data = filter(prebio_meta, sequenced_status == T))
######################################################################
### Differential features with DESeq2 ################################
######################################################################
# Requires the DESeq2 package (DESeqDataSetFromMatrix, DESeq, results).
# NOTE(review): output paths below write to a hard-coded Desktop directory,
# unlike the rest of the script which writes under plots/ -- consider unifying.
# FOS vs control at day 14
count_data <- brack_sp_reads
col_data <- prebio_meta %>% filter(sequencing_id %in% names(count_data) & day == 14) %>% column_to_rownames(var = "sequencing_id")
count_data <- count_data[, rownames(col_data)]
# col_data <- col_data[order(row.names(col_data)), ]
# Keep taxa with >= 1000 reads in at least 20% of samples.
count_data_filt <- count_data[genefilter(count_data, pOverA(p = 0.20, A = 1000)), ]
dds <- DESeqDataSetFromMatrix(countData = count_data_filt,
colData = col_data,
design= ~ group)
dds <- DESeq(dds)
resultsNames(dds) # lists the coefficients
res <- results(dds, name = "group_FOS_vs_Control", alpha = 0.05)
resOrdered <- data.frame(res[order(res$pvalue),])
# Significant taxa ordered by effect size; factor levels fix plot ordering.
res_filt <- resOrdered %>% rownames_to_column(var = "taxon") %>% filter(padj < 0.05) %>% arrange(log2FoldChange)
res_filt$taxon <- factor(res_filt$taxon, levels = as.character(res_filt$taxon))
res_filt$direction <- ifelse(res_filt$log2FoldChange < 0, "Control", "FOS")
# Mean relative abundance (day-14 samples) and prevalence per significant taxon.
res_filt$rel_abundance <- reshape2::melt(as.matrix(brack_sp_perc[, which(names(brack_sp_perc) %in% rownames(col_data))])) %>%
filter(Var1 %in% res_filt$taxon) %>% group_by(Var1) %>% summarise(rel_abundance = mean(value)) %>% arrange(factor(Var1, levels = res_filt$taxon)) %>% pull(rel_abundance)
res_filt$prevalence <- reshape2::melt(as.matrix(count_data)) %>% filter(Var1 %in% res_filt$taxon) %>%
group_by(Var1) %>% summarise(prevalence = (sum(value > 0)/length(value))*100) %>% arrange(factor(Var1, levels = res_filt$taxon)) %>% pull(prevalence)
# Dot plot: fold change per taxon, sized by abundance, shaded by prevalence.
ggplot(res_filt, aes(log2FoldChange, taxon, color = direction, size = rel_abundance * 100, alpha = prevalence)) +
geom_point() +
theme_cowplot(12) +
scale_color_manual(values = rev(my_pal)) +
labs(
x = "Log2 Fold Change",
y = "Taxon",
color = "Remission",
size = "Mean relative abundance (%)",
alpha = "Prevalence (%)"
)
ggsave("/Users/tamburif/Desktop/prebio_deseq2.png", width = 10, height = 6)
# Per-taxon day-14 abundances (pseudocount-based relative abundance).
sp_long <- reshape2::melt(as.matrix(brack_sp_pseudo_rel[, which(names(brack_sp_pseudo_rel) %in% rownames(col_data))]))
sp_long <- merge(sp_long, prebio_meta, by.x = "Var2", by.y = "sequencing_id")
sp_long <- filter(sp_long, sp_long$Var1 %in% res_filt$taxon)
# sp_long$value_pseudocount <- sp_long$value
# sp_long$value_pseudocount[sp_long$value_pseudocount == 0] <- 1e-6
sp_long$Var1 = factor(sp_long$Var1, levels = sort(unique(as.character(sp_long$Var1))))
# Linear-scale boxplots of significant taxa, one facet per taxon.
ggplot(sp_long, aes(group, value * 100, fill = group)) +
geom_boxplot(color = "black", outlier.shape = NA) +
geom_jitter(width = 0.3, color = "black", shape = 21) +
scale_fill_manual(values = rev(my_pal)) +
theme_cowplot(12) +
facet_wrap(~ Var1, scales = "free", ncol = 4) +
# scale_y_log10(label = comma) +
labs(
x = "",
y = "Relative abundance (%)",
fill = ""
)
ggsave("/Users/tamburif/Desktop/prebio_deseq2_boxplot.png", width = 12, height = 14)
# Same boxplots on a log10 y axis.
ggplot(sp_long, aes(group, value * 100, fill = group)) +
geom_boxplot(color = "black", outlier.shape = NA) +
geom_jitter(width = 0.3, color = "black", shape = 21) +
scale_fill_manual(values = rev(my_pal)) +
theme_cowplot(12) +
facet_wrap(~ Var1, scales = "free", ncol = 4) +
scale_y_log10(label = comma) +
labs(
x = "",
y = "Relative abundance at day 14 (%)",
fill = ""
)
ggsave("/Users/tamburif/Desktop/prebio_deseq2_boxplot_log10.png", width = 12, height = 14)
## log2fc of these over time relative to baseline
counts_long <- melt(brack_sp_pseudo_rel %>% rownames_to_column(var = "species"), id.vars = "species", variable.name = "sequencing_id", value.name = "rel_abundance")
counts_meta <- merge(counts_long, prebio_meta, by = "sequencing_id")
# Taxa of interest hand-picked from the day-14 results above.
species_list <- c("Bacteroides cellulosilyticus", "Sellimonas intestinalis", "Faecalibacterium prausnitzii", "Akkermansia muciniphila")
counts_meta <- counts_meta %>% filter(species %in% species_list & day <= 28)
# only consider patients with a screening (day -5) sample, so each series has
# a baseline for the fold change.
# NOTE(review): this uses a `patient_code` column not referenced elsewhere in
# this script -- confirm it exists in the metadata and matches `patient_id`.
patients_at_screening <- counts_meta %>% filter(day == -5) %>% pull(patient_code) %>% unique()
counts_fc <- counts_meta %>% filter(patient_code %in% patients_at_screening) %>% group_by(patient_code, species) %>% mutate(log2fc = log2(rel_abundance/rel_abundance[day == -5]))
# line plot (one line per patient)
line <- ggplot(counts_fc, aes(x = day, y = log2fc, group = patient_id, color = group)) +
geom_line() +
geom_point() +
scale_color_manual(values = rev(my_pal)) +
facet_wrap(~ species, scales = "free") +
scale_x_continuous(breaks = c(-5, 0, 7, 14, 28)) +
labs(
x = "Day",
y = "Log2FC",
color = ""
) +
theme_cowplot()
# geom smooth (per-arm trend over the same data)
smooth <- ggplot(counts_fc, aes(x = day, y = log2fc, group = group, color = group)) +
geom_point() +
geom_smooth() +
scale_color_manual(values = rev(my_pal)) +
facet_wrap(~ species, scales = "free") +
scale_x_continuous(breaks = c(-5, 0, 7, 14, 28)) +
labs(
x = "Day",
y = "Log2FC",
color = ""
) +
theme_cowplot()
plot_grid(line, smooth, labels = c("A", "B"), ncol = 1)
ggsave("/Users/tamburif/Desktop/prebio_log2fc.png", width = 8.5, height = 11)
### DESeq2 time * group: LRT of the interaction term (full vs. reduced model)
### for baseline (-5) vs day 14.
days <- c(-5, 14)
count_data <- brack_sp_reads
col_data <- prebio_meta %>% filter(sequencing_id %in% names(count_data) & day %in% days) %>%
column_to_rownames(var = "sequencing_id")
col_data$day <- factor(col_data$day, levels = days)
count_data <- count_data[, rownames(col_data)]
count_data_filt <- count_data[genefilter(count_data, pOverA(p = 0.20, A = 1000)), ]
dds <- DESeqDataSetFromMatrix(countData = count_data_filt,
colData = col_data,
design = ~ group + day + group:day)
dds <- DESeq(dds, test="LRT", reduced = ~ group + day, parallel = T)
res <- results(dds, alpha = 0.05)
# res$symbol <- mcols(dds)$symbol
# head(res[order(res$padj),], 4)
# resultsNames(dds) # lists the coefficients
resOrdered <- data.frame(res[order(res$pvalue),])
res_filt <- resOrdered[which(resOrdered$padj < 0.05), ]
write.table(res_filt, "/Users/tamburif/Desktop/deseq2_res_day14.txt", sep = "\t", quote = F)
### DESeq2 time * group: same interaction LRT for baseline (-5) vs day 7.
count_data <- brack_sp_reads
col_data <- prebio_meta %>% filter(sequencing_id %in% names(count_data) & day %in% c(-5, 7)) %>%
column_to_rownames(var = "sequencing_id")
col_data$day <- factor(col_data$day, levels = c(-5, 7))
count_data <- count_data[, rownames(col_data)]
count_data_filt <- count_data[genefilter(count_data, pOverA(p = 0.20, A = 1000)), ]
dds <- DESeqDataSetFromMatrix(countData = count_data_filt,
colData = col_data,
design = ~ group + day + group:day)
dds <- DESeq(dds, test="LRT", reduced = ~ group + day, parallel = T)
res <- results(dds, alpha = 0.05)
# res$symbol <- mcols(dds)$symbol
# head(res[order(res$padj),], 4)
# resultsNames(dds) # lists the coefficients
resOrdered <- data.frame(res[order(res$pvalue),])
res_filt_day7 <- resOrdered[which(resOrdered$padj < 0.05), ]
write.table(res_filt_day7, "/Users/tamburif/Desktop/deseq2_res_day7.txt", sep = "\t", quote = F)
######################################################################
### Input tables for lefse ###########################################
######################################################################
dir.create("lefse")
# function to keep features that are at least A relative abundance and p prevalence
# Writes a LEfSe input table: a class (group) header row followed by the
# filtered taxon abundance matrix, for samples at one timepoint.
#   bracken_data : taxa x samples abundance table (caller pre-scales by 1e6)
#   filt_day     : protocol day to subset samples to
#   relab        : relative-abundance threshold (fraction of 1; scaled by 1e6
#                  to match the caller's pre-scaled input)
#   prop         : minimum fraction of samples that must pass the threshold
#   rank         : tag for the output filename ("sp" or "g")
# NOTE(review): reads the global `prebio_meta` rather than taking it as an
# argument, so it must run after the setup section.
subset_lefse <- function(bracken_data, filt_day, relab, prop, rank){
# filter metadata to sequenced samples from the requested day
lefse_meta <- filter(prebio_meta, sequenced_status == T, day == filt_day)[, c("sequencing_id", "group")]
lefse_meta <- lefse_meta[order(as.character(lefse_meta$sequencing_id)), ]
# Transpose so sample ids / groups become header rows of the output file.
lefse_meta_t <- t(lefse_meta)
# filter taxa: same sample order as the metadata header
tax <- bracken_data[, sort(as.character(lefse_meta$sequencing_id))]
# LEfSe feature names must not contain spaces.
rownames(tax) <- gsub(' ', '_', rownames(tax))
# remove rows that sum to zero
tax <- tax[rowSums(tax) > 0, ]
keep <- data.frame(genefilter(tax, pOverA(p=prop, A=relab * 1e6)))
colnames(keep) <- "taxon"
keep$tax <- row.names(keep)
keep <- filter(keep, taxon == T)$tax
tax_filt <- tax[keep, ]
fname <- paste0("lefse/lefse_input_relab", relab, "_p", prop, "_", rank, ".txt")
# First write replaces any existing file; the taxa table is then appended.
write.table(lefse_meta_t, fname, sep = '\t', row.names = T, col.names = F, quote = F)
write.table(tax_filt, fname, sep = '\t', row.names = T, col.names = F, quote = F, append = T)
# print(F %in% (colnames(tax) == lefse_meta_t[1,]))
}
# 0.01% relative abundance, 10% prevalence, day-14 samples
subset_lefse(brack_sp_perc * 1e6, 14, 0.01, 0.10, "sp")
subset_lefse(brack_g_perc * 1e6, 14, 0.01, 0.10, "g")
# no filtering
subset_lefse(brack_sp_perc * 1e6, 14, 0, 0, "sp")
subset_lefse(brack_g_perc * 1e6, 14, 0, 0, "g")
# next, run lefse on the Huttenhower lab galaxy server (https://huttenhower.sph.harvard.edu/galaxy/)
# or on the command line
######################################################################
### Taxonomy area plots ##############################################
######################################################################
dir.create("plots/area_plots", showWarnings = F)
## species
# Per-patient stacked area plots of the 20 most abundant species over time.
sp_data <- brack_sp_perc
sp_data$taxon <- row.names(sp_data)
sp_long <- melt(sp_data, id.vars = "taxon", variable.name = "sequencing_id", value.name = "rel_abundance")
sp_long_meta <- merge(sp_long, prebio_meta, by = "sequencing_id")
patient_list <- unique(sp_long_meta$patient_id)
for (patient in patient_list) {
  plot_data <- filter(sp_long_meta, patient_id == patient)
  # plot only n top taxa
  n_taxa <- 20
  # color palette for n taxa
  # Fix: use a loop-local name so we do not clobber the global treatment
  # palette `my_pal` defined in the setup section (the original overwrote it).
  myCols <- colorRampPalette(brewer.pal(12, "Paired"))
  area_pal <- myCols(n_taxa)
  # NOTE(review): sample() without a set seed makes the color assignment
  # non-reproducible between runs.
  area_pal <- sample(area_pal)
  # Rank taxa by total abundance across this patient's samples.
  tax <- aggregate(rel_abundance ~ taxon, data = plot_data, sum)
  tax <- tax[rev(order(tax$rel_abundance)), ]
  top_taxa <- tax[1:n_taxa, "taxon"]
  plot_filt <- filter(plot_data, taxon %in% top_taxa)
  area_plot <- ggplot(plot_filt, aes(day, rel_abundance * 100, group = taxon)) +
    geom_area(aes(fill = taxon)) +
    labs(
      title=paste("Patient", patient),
      x = "Day",
      y = "Species Relative Abundance",
      fill="Species") +
    scale_fill_manual(values=area_pal, guide = guide_legend(ncol = 1)) +
    scale_x_continuous(breaks = c(-5, 0, 7, 14, 28, 60, 100)) +
    scale_y_continuous(breaks = seq(0, 100, 10), limits = c(0, 100)) +
    theme_cowplot(12)
  ggsave(paste0("plots/area_plots/", patient, "_species.png"), area_plot, device = "png", height = 6, width = 10)
}
## genus
# Per-patient stacked area plots of the 20 most abundant genera over time.
g_data <- brack_g_perc
g_data$taxon <- row.names(g_data)
g_long <- melt(g_data, id.vars = "taxon", variable.name = "sequencing_id", value.name = "rel_abundance")
g_long_meta <- merge(g_long, prebio_meta, by = "sequencing_id")
patient_list <- unique(g_long_meta$patient_id)
for (patient in patient_list) {
  plot_data <- filter(g_long_meta, patient_id == patient)
  # plot only n top taxa
  n_taxa <- 20
  # color palette for n taxa
  # Fix: use a loop-local name so we do not clobber the global treatment
  # palette `my_pal` defined in the setup section (the original overwrote it).
  myCols <- colorRampPalette(brewer.pal(12, "Paired"))
  area_pal <- myCols(n_taxa)
  # NOTE(review): sample() without a set seed makes the color assignment
  # non-reproducible between runs.
  area_pal <- sample(area_pal)
  # Rank taxa by total abundance across this patient's samples.
  tax <- aggregate(rel_abundance ~ taxon, data = plot_data, sum)
  tax <- tax[rev(order(tax$rel_abundance)), ]
  top_taxa <- tax[1:n_taxa, "taxon"]
  plot_filt <- filter(plot_data, taxon %in% top_taxa)
  area_plot <- ggplot(plot_filt, aes(day, rel_abundance * 100, group = taxon)) +
    geom_area(aes(fill = taxon)) +
    labs(
      title=paste("Patient", patient),
      x = "Day",
      # Fix: this is the genus-level plot; the original labels said "Species"
      # (copy-paste from the species loop above).
      y = "Genus Relative Abundance",
      fill="Genus") +
    scale_fill_manual(values=area_pal, guide = guide_legend(ncol = 1)) +
    scale_x_continuous(breaks = c(-5, 0, 7, 14, 28, 60, 100)) +
    scale_y_continuous(breaks = seq(0, 100, 10), limits = c(0, 100)) +
    theme_cowplot(12)
  ggsave(paste0("plots/area_plots/", patient, "_genus.png"), area_plot, device = "png", height = 6, width = 10)
}
######################################################################
### Boxplots of specific features ####################################
######################################################################
# plot_data <- brack_g_perc
# plot_data$taxon <- row.names(plot_data)
# data_long <- melt(plot_data, id.vars = "taxon", variable.name = "sequencing_id", value.name = "rel_abundance")
# data_long_meta <- merge(data_long, prebio_meta, by = "sequencing_id")
#
# taxa <- c("Lactobacillus", "Blautia")
# data_filt <- filter(data_long_meta, taxon %in% taxa, day == 14)
#
# tax_boxplot <- ggplot(data_filt, aes(x=taxon, y=rel_abundance)) +
# geom_boxplot(aes(fill = group), position=position_dodge(.9)) +
# # geom_dotplot(binaxis='y', stackdir='center', dotsize=0.2, aes(fill = Treatment), position=position_dodge(.9)) +
# # stat_summary(fun.data=mean_sdl, mult=1, aes(group=group), position=position_dodge(.9), geom="pointrange", color="black") +
# facet_wrap(. ~ taxon, scales = "free") +
# # scale_y_log10() +
# labs(title='',
# x = "\nGenus",
# y = "Relative abundance (%)\n",
# fill="") +
# theme_cowplot(12)
#
# ggsave("plots/lefse_g_boxplot.png", tax_boxplot, device = "png", height = 4, width = 2.5 * length(taxa))
| /prebio.R | no_license | tamburinif/prebio | R | false | false | 27,061 | r | # Data analysis for BMT303 trial
# Fiona Tamburini
# required packages
library(ggplot2)
library(genefilter)
library(RColorBrewer)
library(plyr)
library(dplyr)
library(tibble)
library(reshape2)
library(scales)
library(MASS)
library(gtools)
library(vegan)
library(q2)
library(ggpubr)
library(cowplot)
######################################################################
### Setup ############################################################
######################################################################
### set this to /your/path/to/prebio2
setwd("/Users/Fiona/scg4_fiona/prebio2/prebio")
# color palette
# FOS, Control
my_pal <- c("#D55E00", "#0072B2")
names(my_pal) <- c("FOS", "Control")
dir.create("plots", showWarnings = F)
######################################################################
### Read in data and metadate files for prebiotic project analysis ###
######################################################################
# TO DO: change filepaths/organize for portability
# TO DO: remove P83 and re-save
### Read sample metadata -- which stools were collected/sequenced
prebio_meta_all <- read.table("metadata/prebio_meta.tsv", sep = '\t', header = T, quote="\"")
# set FOS/Control grouping
prebio_meta_all$group <- ifelse(startsWith(as.character(prebio_meta_all$patient_id), '303'), "FOS", "Control")
prebio_meta_all$group <- factor(prebio_meta_all$group, levels = c("Control", "FOS"))
# format columns as date
prebio_meta_all$date <- as.Date(prebio_meta_all$date)
prebio_meta_all$trx <- as.Date(prebio_meta_all$trx)
# set factor levels for downstream plots
prebio_meta_all$patient_id <- factor(prebio_meta_all$patient_id, levels = mixedsort(unique(prebio_meta_all$patient_id)))
# metadata for sequenced samples only
prebio_meta <- filter(prebio_meta_all, sequenced_status == T)
prebio_meta <- prebio_meta[mixedorder(unique(prebio_meta$sequencing_id)), ]
### Read taxonomic classification data
## bracken species read counts including unclassifed
# Rows = taxa (plus an "Unclassified" row), columns = sequencing samples.
brack_sp_reads <- read.table("input_data/bracken_species_reads.txt", sep = '\t', header = T, quote = "")
brack_g_reads <- read.table("input_data/bracken_genus_reads.txt", sep = '\t', header = T, quote = "")
# add pseudocount
# Replace zero counts with 1 so downstream log2 fold changes are finite.
brack_sp_pseudo <- brack_sp_reads
brack_sp_pseudo[brack_sp_pseudo == 0] <- 1
# Per-sample relative abundance of the pseudocounted classified reads.
# NOTE(review): the numerator uses pseudocounts but the denominator uses the
# ORIGINAL read sums (both with the Unclassified row removed), so columns need
# not sum exactly to 1 -- presumably intentional; confirm if reused elsewhere.
brack_sp_pseudo_rel <- sweep(brack_sp_pseudo[-which(rownames(brack_sp_pseudo) == "Unclassified"), ], 2, colSums(brack_sp_reads[-which(rownames(brack_sp_pseudo) == "Unclassified"), ]), FUN = "/")
## bracken species percentage -- classified only
brack_sp_perc <- read.table("input_data/bracken_species_perc.txt", sep = '\t', header = T, quote = "")
brack_g_perc <- read.table("input_data/bracken_genus_perc.txt", sep = '\t', header = T, quote = "")
## Read short chain fatty acid measurements
# repeated measurements may 2019
scfa2_f <- "input_data/prebio_scfa_may19.txt"
scfa2 <- read.table(scfa2_f, sep = '\t', header = T)
scfa2[is.na(scfa2)] <- 0
######################################################################
### Summary statistics ###############################################
######################################################################
# n patients, controls
print("FOS")
length(unique(filter(prebio_meta_all, group == "FOS")$patient_id))
print("Controls")
length(unique(filter(prebio_meta_all, group == "Control")$patient_id))
# n samples collected
length(prebio_meta_all$sequencing_id[!is.na(prebio_meta_all$sequencing_id)])
# n samples sequenced
length(prebio_meta_all$sequencing_id[prebio_meta_all$sequenced_status])
# samples collected but not sequenced
not_seqd <- filter(prebio_meta_all, !sequenced_status)
# samples collected per patient
all_freq <- plyr::count(prebio_meta_all[!is.na(prebio_meta_all$sequencing_id),], "patient_id")
fos_freq <- plyr::count(filter(prebio_meta_all[!is.na(prebio_meta_all$sequencing_id),], group == "FOS"), "patient_id")
ctrl_freq <- plyr::count(filter(prebio_meta_all[!is.na(prebio_meta_all$sequencing_id),], group == "Control"), "patient_id")
# median samples collected per patient
median(all_freq$freq)
median(fos_freq$freq)
median(ctrl_freq$freq)
# mean samples collected per patient
mean(all_freq$freq)
mean(fos_freq$freq)
mean(ctrl_freq$freq)
# range
range(all_freq$freq)
range(fos_freq$freq)
range(ctrl_freq$freq)
# samples not sequenced
filter(prebio_meta_all, sequenced_status == F & !is.na(date))
# samples not collected
filter(prebio_meta_all, sequenced_status == F & is.na(date))
######################################################################
### Readcount plots ##################################################
######################################################################
# readcounts file from preprocessing pipeline
readcounts_f <- "input_data/readcounts.tsv"
readcounts <- read.table(readcounts_f, sep = '\t', header = T)
counts <- readcounts[, c(1:3, 5, 7)]
colnames(counts) <- c("Sample", "Raw reads", "Trimmed reads", "Deduplicated reads", "Non-human reads")
counts_long <- melt(counts, id.vars = "Sample", variable.name = "step", value.name = "reads")
counts_long$reads_m <- (counts_long$reads / 1e6)
# plot readcounts
readcount_plot <- ggplot(counts_long, aes(x=reads_m, fill=step)) +
geom_histogram(binwidth = 1) +
scale_x_continuous(labels = comma, breaks = seq(0, 100, 10)) +
facet_grid(step ~ ., scales = "free_y") +
theme_cowplot(12) +
labs(
x = "\nReads (M)",
y = "Count\n",
fill = ""
) +
background_grid()
ggsave("plots/readcounts_preproccessing.png", readcount_plot, device = "png", height = 6, width = 7)
######################################################################
### Sample collection plot ###########################################
######################################################################
# plot relative to date of transplant
samples <- prebio_meta_all
samples$sample_day <- (samples$date - samples$trx)
# create patient labels, set order
fos <- filter(samples, group == "FOS")
control <- filter(samples, group == "Control")
labels <- data.frame(patient_id = sort(unique(fos$patient_id)), label = paste0("F", seq(unique(fos$patient_id))))
labels <- rbind(labels, data.frame(patient_id = mixedsort(as.character(unique(control$patient_id))), label = paste0("C", seq(unique(control$patient_id)))))
samples <- merge(samples, labels, by = "patient_id", all = T)
samples$label <- factor(samples$label, levels = rev(labels$label))
# set sequenced vs no
samples$sequenced_status <- ifelse(samples$sequenced_status, "Sequenced", "Not sequenced")
samples$sequenced_status <- ifelse(is.na(samples$sequencing_id), "Not collected", samples$sequenced_status)
samples$sequenced_status <- factor(samples$sequenced_status, levels = c("Sequenced", "Not sequenced", "Not collected"))
# if the sample wasn't collected and the day is NA, change sample day to actual day
samples$sample_day <- ifelse(is.na(samples$sample_day), samples$day, samples$sample_day)
# remove samples > day 100
# maybe change this so that samples >100 are included and axis is >100 ?
samples <- filter(samples, sample_day <= 100)
# plot collected samples
sample_plot <- ggplot(samples, aes(x=sample_day, y=label, shape=sequenced_status)) +
geom_point(size = 2, color = "black") +
scale_shape_manual(values = c(16, 1, 4)) +
facet_wrap(~ group, ncol = 1, strip.position = "top", scales = "free_y") +
theme_cowplot() +
labs(
x = "\nDay relative to transplant",
y = "Patient\n",
shape = "Status"
) +
scale_x_continuous(labels = comma, breaks = c(-5, 0, 7, 14, 28, 60, 100))
ggsave("plots/stool_sampling.png", sample_plot, device = "png", height = 6, width = 6)
# color by timepoint
sample_plot2 <- ggplot(samples, aes(x=sample_day, y=label, shape=sequenced_status)) +
geom_point(size = 2, aes(color = factor(day, levels = c(-5, 0, 7, 14, 28, 60, 100)))) +
scale_shape_manual(values = c(16, 1, 4)) +
facet_wrap(~ group, ncol = 1, strip.position = "top", scales = "free_y") +
theme_cowplot(12) +
labs(
x = "\nDay relative to transplant",
y = "Patient\n",
shape = "Status",
color = "Timepoint"
) +
scale_x_continuous(labels = comma, breaks = c(-5, 0, 7, 14, 28, 60, 100))
ggsave("plots/stool_sampling_colored.png", sample_plot2, device = "png", height = 6, width = 6)
######################################################################
### SCFA measurements ################################################
######################################################################
## repeated measurements may 2019
scfa_long2 <- melt(scfa2, id.vars = c("sample", "patient_id", "sequencing_id", "group"), variable.name = "scfa")
scfa_long2$scfa <- gsub("\\.A", " a", scfa_long2$scfa)
# set factor level for group
scfa_long2$group <- factor(scfa_long2$group, levels = c("FOS", "Control"))
## plot without log transformation, free y axis
pvals <- compare_means(value ~ group, data = scfa_long2, group.by = "scfa", method = "wilcox.test", p.adjust.method = "fdr")
pvals$p.signif <- ifelse(pvals$p.adj < 0.05, "*", "ns")
pvals$p.signif <- ifelse(pvals$p.adj < 0.01 & pvals$p.adj >= 0.001, "**", pvals$p.signif)
pvals$p.signif <- ifelse(pvals$p.adj < 0.001, "***", pvals$p.signif)
# set y position of signif for each plot
maxs <- aggregate(value ~ scfa,scfa_long2, FUN = max)
pvals$y.position <- maxs[match(pvals$scfa, maxs$scfa), "value"] * 1.10
scfa_plot <- ggplot(scfa_long2, aes(x = group, y = value)) +
geom_violin(aes(fill = group)) +
geom_point() +
facet_wrap(. ~ scfa, scales = "free_y") +
# pseudo_log_trans() +
labs(
x = "Short-chain fatty acid",
y = "Concentration (umol/g stool)",
fill="") +
scale_fill_manual(values = my_pal) +
stat_pvalue_manual(pvals, label = "p.signif") +
theme_cowplot(12)
ggsave("plots/scfa_may19_facet.png", scfa_plot, device = "png", height = 9, width = 8)
######################################################################
### Classified reads #################################################
######################################################################
## Plot histogram of classified reads
classified <- (1 - sweep(brack_sp_reads, 2, colSums(brack_sp_reads), "/")["Unclassified",]) * 100
read_plot <- ggplot(melt(classified), aes(x=value)) +
geom_histogram(binwidth = 1, fill = "cornflowerblue", color = "white") +
scale_x_continuous(breaks = seq(0, 100, 10)) +
theme_cowplot(12) +
scale_fill_manual(values = my_pal) +
labs(
x = "Percentage of reads classified",
y = "Count"
)
ggsave("plots/readcounts_classified_histo.png", read_plot, device = "png", height = 4, width = 5)
######################################################################
### Diversity plots ##################################################
######################################################################
# find shannon diversity with vegdist
shannon_div <- diversity(t(brack_sp_perc), index = "shannon")
div <- data.frame("shannon_div" = shannon_div, "sequencing_id" = names(shannon_div))
div_meta <- merge(div, prebio_meta, by = "sequencing_id")
## stat smooth shannon diversity over time
shannon_plot_smooth <- ggplot(div_meta, aes(day, shannon_div, color = group)) +
geom_point() +
stat_smooth() +
labs(
x = "Day",
y = "Shannon Diversity",
color="") +
theme_cowplot(12) +
scale_color_manual(values = my_pal) +
scale_x_continuous(labels = comma, breaks = c(-5, 0, 7, 14, 28, 60, 100))
ggsave("plots/shannon_line_smooth.png", shannon_plot_smooth, device = "png", height = 4, width = 6)
## violin plot -- alpha diversity at each timepoint
## compare means
pvals <- compare_means(shannon_div ~ group, data = div_meta, group.by = "day", method = "wilcox.test", p.adjust.method = "fdr")
pvals$p.signif <- ifelse(pvals$p.adj < 0.05, "*", "ns")
pvals$p.signif <- ifelse(pvals$p.adj < 0.01 & pvals$p.adj >= 0.001, "**", pvals$p.signif)
pvals$p.signif <- ifelse(pvals$p.adj < 0.001, "***", pvals$p.signif)
pvals$y.position <- 8
# plot
shannon_plot <- ggplot(div_meta, aes(x=group, y=shannon_div)) +
geom_violin(aes(fill = group), position=position_dodge(.9), trim = F) +
stat_summary(fun.data=mean_sdl, aes(group=group), position=position_dodge(.9), geom="pointrange", color="black") +
facet_grid(. ~ day, scales = "free") +
labs(
x = "\nTreatment",
y = "Shannon Diversity\n",
fill="") +
theme_cowplot(12) +
scale_fill_manual(values = my_pal) +
stat_pvalue_manual(pvals, label = "p.signif")
ggsave("plots/shannon_div.png", shannon_plot, device = "png", height = 4, width = 10)
######################################################################
### NMDS ordination ##################################################
######################################################################
### ordinate species-level classifications
### find pairwise bray-curtis distances with vegdist
vare_dis <- vegdist(t(brack_sp_perc), method = "bray")
### nmds ordinate
vare_mds0 <- isoMDS(vare_dis)
mds <- data.frame(vare_mds0$points)
mds$sequencing_id <- row.names(mds)
### merge pheno data
mds_meta <- merge(mds, prebio_meta, by = "sequencing_id")
### function to create scatterplot
nmds_plot <- ggplot(mds_meta, aes(x = X1, y = X2, color = group)) +
geom_point(size = 2) +
theme_cowplot(12) +
scale_color_manual(values = my_pal) +
labs(
x = "NMDS1",
y = "NMDS2",
color = ""
)
# add 95% confidence ellipse
nmds_plot_ci <- nmds_plot + stat_ellipse(type = 't', size = 1)
ggsave("plots/nmds_by_treatment_ci.png", nmds_plot_ci, device = "png", height = 5, width = 6)
# test group differences
# beta dispersions -- are assumptions for PERMANOVA met?
dispersion <- betadisper(vare_dis, group = prebio_meta$group)
permutest(dispersion)
adonis(vare_dis ~ group, data = filter(prebio_meta, sequenced_status == T))
######################################################################
### Differential features with DESeq2 ################################
######################################################################
# FOS vs control at day 14
count_data <- brack_sp_reads
col_data <- prebio_meta %>% filter(sequencing_id %in% names(count_data) & day == 14) %>% column_to_rownames(var = "sequencing_id")
count_data <- count_data[, rownames(col_data)]
# col_data <- col_data[order(row.names(col_data)), ]
count_data_filt <- count_data[genefilter(count_data, pOverA(p = 0.20, A = 1000)), ]
dds <- DESeqDataSetFromMatrix(countData = count_data_filt,
colData = col_data,
design= ~ group)
dds <- DESeq(dds)
resultsNames(dds) # lists the coefficients
res <- results(dds, name = "group_FOS_vs_Control", alpha = 0.05)
resOrdered <- data.frame(res[order(res$pvalue),])
res_filt <- resOrdered %>% rownames_to_column(var = "taxon") %>% filter(padj < 0.05) %>% arrange(log2FoldChange)
res_filt$taxon <- factor(res_filt$taxon, levels = as.character(res_filt$taxon))
res_filt$direction <- ifelse(res_filt$log2FoldChange < 0, "Control", "FOS")
res_filt$rel_abundance <- reshape2::melt(as.matrix(brack_sp_perc[, which(names(brack_sp_perc) %in% rownames(col_data))])) %>%
filter(Var1 %in% res_filt$taxon) %>% group_by(Var1) %>% summarise(rel_abundance = mean(value)) %>% arrange(factor(Var1, levels = res_filt$taxon)) %>% pull(rel_abundance)
res_filt$prevalence <- reshape2::melt(as.matrix(count_data)) %>% filter(Var1 %in% res_filt$taxon) %>%
group_by(Var1) %>% summarise(prevalence = (sum(value > 0)/length(value))*100) %>% arrange(factor(Var1, levels = res_filt$taxon)) %>% pull(prevalence)
ggplot(res_filt, aes(log2FoldChange, taxon, color = direction, size = rel_abundance * 100, alpha = prevalence)) +
geom_point() +
theme_cowplot(12) +
scale_color_manual(values = rev(my_pal)) +
labs(
x = "Log2 Fold Change",
y = "Taxon",
color = "Remission",
size = "Mean relative abundance (%)",
alpha = "Prevalence (%)"
)
ggsave("/Users/tamburif/Desktop/prebio_deseq2.png", width = 10, height = 6)
sp_long <- reshape2::melt(as.matrix(brack_sp_pseudo_rel[, which(names(brack_sp_pseudo_rel) %in% rownames(col_data))]))
sp_long <- merge(sp_long, prebio_meta, by.x = "Var2", by.y = "sequencing_id")
sp_long <- filter(sp_long, sp_long$Var1 %in% res_filt$taxon)
# sp_long$value_pseudocount <- sp_long$value
# sp_long$value_pseudocount[sp_long$value_pseudocount == 0] <- 1e-6
sp_long$Var1 = factor(sp_long$Var1, levels = sort(unique(as.character(sp_long$Var1))))
ggplot(sp_long, aes(group, value * 100, fill = group)) +
geom_boxplot(color = "black", outlier.shape = NA) +
geom_jitter(width = 0.3, color = "black", shape = 21) +
scale_fill_manual(values = rev(my_pal)) +
theme_cowplot(12) +
facet_wrap(~ Var1, scales = "free", ncol = 4) +
# scale_y_log10(label = comma) +
labs(
x = "",
y = "Relative abundance (%)",
fill = ""
)
ggsave("/Users/tamburif/Desktop/prebio_deseq2_boxplot.png", width = 12, height = 14)
ggplot(sp_long, aes(group, value * 100, fill = group)) +
geom_boxplot(color = "black", outlier.shape = NA) +
geom_jitter(width = 0.3, color = "black", shape = 21) +
scale_fill_manual(values = rev(my_pal)) +
theme_cowplot(12) +
facet_wrap(~ Var1, scales = "free", ncol = 4) +
scale_y_log10(label = comma) +
labs(
x = "",
y = "Relative abundance at day 14 (%)",
fill = ""
)
ggsave("/Users/tamburif/Desktop/prebio_deseq2_boxplot_log10.png", width = 12, height = 14)
## log2fc of these over time relative to baseline
counts_long <- melt(brack_sp_pseudo_rel %>% rownames_to_column(var = "species"), id.vars = "species", variable.name = "sequencing_id", value.name = "rel_abundance")
counts_meta <- merge(counts_long, prebio_meta, by = "sequencing_id")
species_list <- c("Bacteroides cellulosilyticus", "Sellimonas intestinalis", "Faecalibacterium prausnitzii", "Akkermansia muciniphila")
counts_meta <- counts_meta %>% filter(species %in% species_list & day <= 28)
# only consider patients with sample at day 0
patients_at_screening <- counts_meta %>% filter(day == -5) %>% pull(patient_code) %>% unique()
counts_fc <- counts_meta %>% filter(patient_code %in% patients_at_screening) %>% group_by(patient_code, species) %>% mutate(log2fc = log2(rel_abundance/rel_abundance[day == -5]))
# line plot
line <- ggplot(counts_fc, aes(x = day, y = log2fc, group = patient_id, color = group)) +
geom_line() +
geom_point() +
scale_color_manual(values = rev(my_pal)) +
facet_wrap(~ species, scales = "free") +
scale_x_continuous(breaks = c(-5, 0, 7, 14, 28)) +
labs(
x = "Day",
y = "Log2FC",
color = ""
) +
theme_cowplot()
# geom smooth
smooth <- ggplot(counts_fc, aes(x = day, y = log2fc, group = group, color = group)) +
geom_point() +
geom_smooth() +
scale_color_manual(values = rev(my_pal)) +
facet_wrap(~ species, scales = "free") +
scale_x_continuous(breaks = c(-5, 0, 7, 14, 28)) +
labs(
x = "Day",
y = "Log2FC",
color = ""
) +
theme_cowplot()
plot_grid(line, smooth, labels = c("A", "B"), ncol = 1)
ggsave("/Users/tamburif/Desktop/prebio_log2fc.png", width = 8.5, height = 11)
### DESeq2 time * group
days <- c(-5, 14)
count_data <- brack_sp_reads
col_data <- prebio_meta %>% filter(sequencing_id %in% names(count_data) & day %in% days) %>%
column_to_rownames(var = "sequencing_id")
col_data$day <- factor(col_data$day, levels = days)
count_data <- count_data[, rownames(col_data)]
count_data_filt <- count_data[genefilter(count_data, pOverA(p = 0.20, A = 1000)), ]
dds <- DESeqDataSetFromMatrix(countData = count_data_filt,
colData = col_data,
design = ~ group + day + group:day)
dds <- DESeq(dds, test="LRT", reduced = ~ group + day, parallel = T)
res <- results(dds, alpha = 0.05)
# res$symbol <- mcols(dds)$symbol
# head(res[order(res$padj),], 4)
# resultsNames(dds) # lists the coefficients
resOrdered <- data.frame(res[order(res$pvalue),])
res_filt <- resOrdered[which(resOrdered$padj < 0.05), ]
write.table(res_filt, "/Users/tamburif/Desktop/deseq2_res_day14.txt", sep = "\t", quote = F)
### DESeq2 time * group
count_data <- brack_sp_reads
col_data <- prebio_meta %>% filter(sequencing_id %in% names(count_data) & day %in% c(-5, 7)) %>%
column_to_rownames(var = "sequencing_id")
col_data$day <- factor(col_data$day, levels = c(-5, 7))
count_data <- count_data[, rownames(col_data)]
count_data_filt <- count_data[genefilter(count_data, pOverA(p = 0.20, A = 1000)), ]
dds <- DESeqDataSetFromMatrix(countData = count_data_filt,
colData = col_data,
design = ~ group + day + group:day)
dds <- DESeq(dds, test="LRT", reduced = ~ group + day, parallel = T)
res <- results(dds, alpha = 0.05)
# res$symbol <- mcols(dds)$symbol
# head(res[order(res$padj),], 4)
# resultsNames(dds) # lists the coefficients
resOrdered <- data.frame(res[order(res$pvalue),])
res_filt_day7 <- resOrdered[which(resOrdered$padj < 0.05), ]
write.table(res_filt_day7, "/Users/tamburif/Desktop/deseq2_res_day7.txt", sep = "\t", quote = F)
######################################################################
### Input tables for lefse ###########################################
######################################################################
dir.create("lefse")
# function to keep features that are at least A relative abundance and p prevalence
# Write a lefse-formatted input table for one timepoint: a transposed class
# header (sequencing_id + FOS/Control group) followed by taxa rows, keeping
# taxa with abundance > `relab` (x1e6-scaled input) in at least `prop` of samples.
subset_lefse <- function(bracken_data, filt_day, relab, prop, rank){
  # samples sequenced at the requested day, with their treatment class
  meta_sub <- filter(prebio_meta, sequenced_status == TRUE, day == filt_day)[, c("sequencing_id", "group")]
  meta_sub <- meta_sub[order(as.character(meta_sub$sequencing_id)), ]
  meta_header <- t(meta_sub)
  # abundance columns in the same sample order as the header
  abund <- bracken_data[, sort(as.character(meta_sub$sequencing_id))]
  rownames(abund) <- gsub(' ', '_', rownames(abund))
  # drop taxa absent from every sample
  abund <- abund[rowSums(abund) > 0, ]
  # prevalence/abundance filter (genefilter returns one logical per row)
  passes <- genefilter(abund, pOverA(p = prop, A = relab * 1e6))
  abund_filt <- abund[passes, ]
  out_file <- paste0("lefse/lefse_input_relab", relab, "_p", prop, "_", rank, ".txt")
  # class header first, then the filtered taxon rows appended below it
  write.table(meta_header, out_file, sep = '\t', row.names = TRUE, col.names = FALSE, quote = FALSE)
  write.table(abund_filt, out_file, sep = '\t', row.names = TRUE, col.names = FALSE, quote = FALSE, append = TRUE)
}
# 0.01% relative abundance, 10%
subset_lefse(brack_sp_perc * 1e6, 14, 0.01, 0.10, "sp")
subset_lefse(brack_g_perc * 1e6, 14, 0.01, 0.10, "g")
# no filtering
subset_lefse(brack_sp_perc * 1e6, 14, 0, 0, "sp")
subset_lefse(brack_g_perc * 1e6, 14, 0, 0, "g")
# next, run lefse on the Huttenhower lab galaxy server (https://huttenhower.sph.harvard.edu/galaxy/)
# or on the command line
######################################################################
### Taxonomy area plots ##############################################
######################################################################
# Per-patient stacked area plots of species-level relative abundance over time.
dir.create("plots/area_plots", showWarnings = FALSE)
## species
sp_data <- brack_sp_perc
sp_data$taxon <- row.names(sp_data)
sp_long <- melt(sp_data, id.vars = "taxon", variable.name = "sequencing_id", value.name = "rel_abundance")
sp_long_meta <- merge(sp_long, prebio_meta, by = "sequencing_id")
patient_list <- unique(sp_long_meta$patient_id)
for (patient in patient_list) {
  plot_data <- filter(sp_long_meta, patient_id == patient)
  # plot only the n most abundant taxa for this patient
  n_taxa <- 20
  # color palette for n taxa; use a dedicated name so the global
  # treatment-group palette `my_pal` (FOS/Control) is not clobbered
  myCols <- colorRampPalette(brewer.pal(12, "Paired"))
  area_pal <- sample(myCols(n_taxa))  # NOTE: not seeded, so colors differ between runs
  # rank taxa by total abundance across this patient's samples
  tax <- aggregate(rel_abundance ~ taxon, data = plot_data, sum)
  tax <- tax[order(tax$rel_abundance, decreasing = TRUE), ]
  # head() avoids NA "taxa" when a patient has fewer than n_taxa species
  top_taxa <- head(tax$taxon, n_taxa)
  plot_filt <- filter(plot_data, taxon %in% top_taxa)
  area_plot <- ggplot(plot_filt, aes(day, rel_abundance * 100, group = taxon)) +
    geom_area(aes(fill = taxon)) +
    labs(
      title = paste("Patient", patient),
      x = "Day",
      y = "Species Relative Abundance",
      fill = "Species") +
    scale_fill_manual(values = area_pal, guide = guide_legend(ncol = 1)) +
    scale_x_continuous(breaks = c(-5, 0, 7, 14, 28, 60, 100)) +
    scale_y_continuous(breaks = seq(0, 100, 10), limits = c(0, 100)) +
    theme_cowplot(12)
  ggsave(paste0("plots/area_plots/", patient, "_species.png"), area_plot, device = "png", height = 6, width = 10)
}
## genus
# Per-patient stacked area plots of genus-level relative abundance over time.
g_data <- brack_g_perc
g_data$taxon <- row.names(g_data)
g_long <- melt(g_data, id.vars = "taxon", variable.name = "sequencing_id", value.name = "rel_abundance")
g_long_meta <- merge(g_long, prebio_meta, by = "sequencing_id")
patient_list <- unique(g_long_meta$patient_id)
for (patient in patient_list) {
  plot_data <- filter(g_long_meta, patient_id == patient)
  # plot only the n most abundant taxa for this patient
  n_taxa <- 20
  # color palette for n taxa; use a dedicated name so the global
  # treatment-group palette `my_pal` (FOS/Control) is not clobbered
  myCols <- colorRampPalette(brewer.pal(12, "Paired"))
  area_pal <- sample(myCols(n_taxa))  # NOTE: not seeded, so colors differ between runs
  # rank taxa by total abundance across this patient's samples
  tax <- aggregate(rel_abundance ~ taxon, data = plot_data, sum)
  tax <- tax[order(tax$rel_abundance, decreasing = TRUE), ]
  # head() avoids NA "taxa" when a patient has fewer than n_taxa genera
  top_taxa <- head(tax$taxon, n_taxa)
  plot_filt <- filter(plot_data, taxon %in% top_taxa)
  area_plot <- ggplot(plot_filt, aes(day, rel_abundance * 100, group = taxon)) +
    geom_area(aes(fill = taxon)) +
    labs(
      title = paste("Patient", patient),
      x = "Day",
      # fixed copy-paste from the species block: these are genus-level plots
      y = "Genus Relative Abundance",
      fill = "Genus") +
    scale_fill_manual(values = area_pal, guide = guide_legend(ncol = 1)) +
    scale_x_continuous(breaks = c(-5, 0, 7, 14, 28, 60, 100)) +
    scale_y_continuous(breaks = seq(0, 100, 10), limits = c(0, 100)) +
    theme_cowplot(12)
  ggsave(paste0("plots/area_plots/", patient, "_genus.png"), area_plot, device = "png", height = 6, width = 10)
}
######################################################################
### Boxplots of specific features ####################################
######################################################################
# plot_data <- brack_g_perc
# plot_data$taxon <- row.names(plot_data)
# data_long <- melt(plot_data, id.vars = "taxon", variable.name = "sequencing_id", value.name = "rel_abundance")
# data_long_meta <- merge(data_long, prebio_meta, by = "sequencing_id")
#
# taxa <- c("Lactobacillus", "Blautia")
# data_filt <- filter(data_long_meta, taxon %in% taxa, day == 14)
#
# tax_boxplot <- ggplot(data_filt, aes(x=taxon, y=rel_abundance)) +
# geom_boxplot(aes(fill = group), position=position_dodge(.9)) +
# # geom_dotplot(binaxis='y', stackdir='center', dotsize=0.2, aes(fill = Treatment), position=position_dodge(.9)) +
# # stat_summary(fun.data=mean_sdl, mult=1, aes(group=group), position=position_dodge(.9), geom="pointrange", color="black") +
# facet_wrap(. ~ taxon, scales = "free") +
# # scale_y_log10() +
# labs(title='',
# x = "\nGenus",
# y = "Relative abundance (%)\n",
# fill="") +
# theme_cowplot(12)
#
# ggsave("plots/lefse_g_boxplot.png", tax_boxplot, device = "png", height = 4, width = 2.5 * length(taxa))
|
# Load shared plotting/QC helpers from the UK Biobank project tree.
source('/well/donnelly/ukbiobank_project_8874/clare/commonScripts/myManhattan.R')
h = c("/well/ukbiobank/expt/V2_QCed.SNP-QC/src/V2_QCed.snpqc-tests.R","/well/ukbiobank/expt/V2_QCed.SNP-QC/src/V2_QCed.bin2clusterplots.R","/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/R/scripts/readPSperformance.R","/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/R/scripts/auxFunctions.R")
for(s in h) source(s)
library(dplyr)
library(qqman)
library(stringr)
# Numeric codes used for the non-autosomal chromosomes (plink convention):
# X=23, Y=24, XY (pseudo-autosomal)=25, MT=26. Looked up by name below.
sexChroms = c(23,24,25,26)
names(sexChroms) = c("X","Y","XY","MT")
# Produce Manhattan, QQ, and effect-size plots for one set of GWAS results.
#
# GWASdata   path to the results file -- used ONLY to build output file names.
#            NOTE(review): the data itself comes from the global `DFraw`, read
#            in the script body below; also reads the global `args` ("-qc").
# chrom      chromosome to plot ("genome", a number, or "X"/"Y"/"XY"/"MT")
# minmaf     minimum minor allele frequency filter
# mininfo    minimum imputation INFO filter (imputed data only)
# maxmiss    maximum missingness filter (genotyped data only)
# plotOutDir directory for the output PNGs
# plotQQ     also draw a QQ plot of the p-values?
# extraTitle suffix appended to output file names
# Ymax       fixed -log10(p) axis limit; FALSE = choose from the data (cap 50)
# catFile    optional GWAS-catalogue hits to overlay (needs CHR/BP/Pvalue cols)
# QCexclude  SNP IDs to drop when "-qc" is in the global `args`
# ...        forwarded to myManhattan()
plot.BOLT.pvalues <- function(GWASdata,chrom,minmaf,mininfo,maxmiss,plotOutDir,plotQQ=TRUE,extraTitle="",Ymax=FALSE,catFile = NULL,QCexclude=c(),...) {
DF = dplyr::tbl_df(DFraw)
print(head(DF))
# Imputed results carry an INFO column; genotyped results are filtered on
# missingness (F_MISS) instead. Pvalset becomes the output file name stem.
if("INFO"%in%colnames(DFraw)){
Pvalset = paste(basename(GWASdata),".chr",chrom,".maf",minmaf,".info",mininfo,".pruned",extraTitle,sep="")
DF = dplyr::filter(DF, MAF > minmaf & INFO > mininfo)
} else {
Pvalset = paste(basename(GWASdata),".chr",chrom,".maf",minmaf,".miss",maxmiss,".pruned",extraTitle,sep="")
DF = dplyr::filter(DF, MAF > minmaf & F_MISS < maxmiss)
}
if(chrom!="genome") DF = dplyr::filter(DF, CHR %in% chrom) # subset by chromosome
if( "-qc" %in%args ) DF = dplyr::filter(DF, !SNP %in% QCexclude) # exclude SNPs in array,imageArtefact, or concordance lists. Only relevant with clare's plink gwas data.
print(Pvalset)
# Pick the p-value column: BOLT-LMM infinitesimal model if present,
# otherwise plain linear regression.
if("P_BOLT_LMM_INF" %in% colnames(DF)) {
print("using BOLT_LMM_INF")
DF = dplyr::rename(DF, P = P_BOLT_LMM_INF) } else {
DF = dplyr::rename(DF, P = P_LINREG)
}
# Map sex-chromosome names to their numeric codes so CHR is always numeric.
if(chrom%in%c("X","XY","Y","MT")) DF$CHR = sexChroms[chrom] else DF$CHR = as.numeric(DF$CHR)
maxP = round(max(-log10(DF$P),na.rm=T))
print(paste('max -log(pval) = ',maxP))
# nFail/percentFailed are computed but only kept locally (not printed/returned).
nFail = length(which(-log10(DF$P) > 8))
percentFailed = nFail/nrow(DF) * 100
# Auto-scale the y-axis when Ymax is the FALSE sentinel; P==0 rows are
# excluded from the max so the limit stays finite, then capped at 50.
if(!Ymax){
Ymax = ceiling(max(-log10(DF$P[DF$P!=0]),na.rm=T)) + 10
Ymax = min(Ymax,50)
}
########### Manhattan plot ("%02d" lets png() emit numbered pages)
png(paste(plotOutDir,"/",Pvalset,"-manhattan%02d.png",sep=""),width=41,height=12,units="in",res=150)
par(las=1,font.main=1,cex.axis=2,cex.lab=2,mar=c(7 ,7, 5 ,2))
myManhattan(DF,ymax=Ymax,suggestiveline = FALSE,xpd=NA,cex=1,...)
# myManhattan(DF,ymax=Ymax,suggestiveline = FALSE,xpd=NA,cex=1,col="transparent")
# add extra catalogue hits?
# Overlay known GWAS-catalogue hits (single-chromosome plots only); hits above
# the axis limit are clamped to Ymax so they remain visible at the top edge.
if(!is.null(catFile) & (chrom!="genome")) {
print( "Printing catalogue hits..." )
catFileSub = catFile[catFile$CHR %in% chrom,]
print( paste0( sum(-log10(catFileSub$Pvalue)>Ymax)," catalogue hits above ",Ymax) )
catFileSub$Pvalue[-log10(catFileSub$Pvalue)>Ymax] = 10^(-Ymax)
# plot non-European hits differently
colors = rep("red",dim(catFileSub)[1])
# colors[!grepl("European",catFileSub$Ancestry)] = "blue"
print( table(catFileSub$Ancestry[!grepl("European",catFileSub$Ancestry)]) )
# do we have these SNPs in UKBiobank? match on chrom and position
#inHere = (catFileSub$BP %in% DF$BP)&(catFileSub$CHR == DF$CHR)
#catFileSub$Pvalue[inHere] = DF$Pvalue[]
# star outline + solid dot marks each catalogue hit
points(catFileSub$BP,-log10(catFileSub$Pvalue),pch=8,col=colors,cex=4,lwd=1)
points(catFileSub$BP,-log10(catFileSub$Pvalue),pch=16,col=colors,cex=2)
}
dev.off()
########### qq plot p-values
if(plotQQ){
png(paste(plotOutDir,"/",Pvalset,"-qqplot%02d.png",sep=""),height=1000,width=1000,res=150)
# clamp extreme p-values so the QQ plot shares the Manhattan y-limit
DF$P2=DF$P
DF$P2[DF$P<(10^-Ymax)] = 10^-Ymax
qqman::qq(DF$P2)
dev.off()
}
########## plot effect sizes
# Build a genome-wide x coordinate by offsetting each chromosome's BP by the
# previous chromosome's maximum position.
# NOTE(review): the offset uses chromosome i-1 literally; if a chromosome is
# absent from DF this takes max() of an empty vector (-Inf) -- confirm the
# input always contains consecutive chromosomes when plotting multiple.
DF$index = DF$BP
if( length(unique(DF$CHR)) > 1 ){
for(i in unique(DF$CHR)){
if(i>1) DF$index[DF$CHR==i] = DF$index[DF$CHR==i] + max(DF$BP[DF$CHR==(i - 1)])
}
}
# Effect-size plot for genome-wide-significant SNPs only (p < 5e-8), with
# betas flipped so they are always expressed w.r.t. the minor allele.
snps = which((DF$P < 5e-8)&(!is.na(DF$P)))
if("BETA"%in%colnames(DF) & ( length(snps) > 0 )){
beta = DF
beta$BETA[DF$A1FREQ > 0.5] = -beta$BETA[DF$A1FREQ > 0.5]
beta = beta[snps,]
png(paste(plotOutDir,"/",Pvalset,"-EffectSizes.png",sep=""),height=1000,width=1000,res=150)
myManhattan(beta,p="BETA",logtransform=FALSE,genomewideline=0,suggestiveline=FALSE)
dev.off()
}
}
######## Which chromosome and what data are we plotting?
# Command-line interface: <dataFile> <chrom> <plotOutDir> [-title <suffix>]
# [-hi] [-qc] [-ymax <n>] [-hits <RData>]; flags are scanned from `args` below.
args = commandArgs(TRUE)
#args = c("test.out","1","plots", "-ymax","50","-hits", "/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/GWAS/otherGWAS/GWAScatalogue/hg19/gwasCatalog-subset-Standing.height.RData", "-title", "-Euro-hits")
#args = c("/well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/otherGWAS/Standing.height/BOLTLMM.v3/Standing.height-BOLT-LMM-v3.out","5","plots", "-ymax","50","-qc","-hits", "/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/GWAS/otherGWAS/GWAScatalogue/hg19/gwasCatalog-subset-Standing.height.RData", "-title", "-Euro-hits-for-talk")
print(args)
dataFile = args[1]
chroms = args[2]
plotOutDir = args[3]
if("-title"%in%args) extraTitle = args[which(args=="-title")+1] else extraTitle=""
# Should we highlight some SNPs?
highlightSNPs=NULL
highlightCols=NULL
# "-hi" highlights QC-flagged SNPs by category; "-qc" excludes some of them.
# read.SNPQC.files() comes from the sourced QC helper scripts.
if(( "-hi"%in%args )|("-qc" %in%args )){
print("Reading QC snps lists...")
QCSNPList = read.SNPQC.files(justSNPs=TRUE)
# QCexclude = unique(c(QCSNPList$arraySNPs,QCSNPList$imageSNPs,QCSNPList$concordanceSNPs))
QCexclude = unique(c(QCSNPList$arraySNPs,QCSNPList$concordanceSNPs)) # this is only required if using -hi or clare's versions of plink genotype files.
if( "-hi"%in%args ){
highlightSNPs = unique(unlist(QCSNPList))
# Assign one color per QC category; later assignments override earlier ones,
# so the order below defines the precedence (concordance wins).
colors = rep("black",length(highlightSNPs))
colors[highlightSNPs%in%QCSNPList$batchHweSNPs] = "green" # HWE (apply first)
colors[highlightSNPs%in%c(QCSNPList$plateSNPs,QCSNPList$batchSNPs)] = "purple" # BATCH/PLATE
colors[highlightSNPs%in%c(QCSNPList$imageSNPs)] = "orange" # IMAGE ARTEFACT
colors[highlightSNPs%in%c(QCSNPList$arraySNPs)] = "red" # ARRAY
colors[highlightSNPs%in%c(QCSNPList$concordanceSNPs)] = "blue" # CONCORDANCE
highlightCols = colors
print(table(highlightCols))
}
}
# get data
print("printing the following chromosomes")
print( chroms )
#DFraw = read.table(dataFile,sep="",header=TRUE,stringsAsFactors=FALSE)
# A "%%" in the file name is presumably a per-chromosome placeholder handled
# elsewhere; otherwise read the whole GWAS results table here (into the global
# DFraw consumed by plot.BOLT.pvalues above).
if(!grepl("%%",dataFile)){
print("reading in GWAS output file")
DFraw = read.table(dataFile,sep="",header=TRUE,stringsAsFactors=FALSE)
# derive minor allele frequency from the A1 allele frequency
DFraw$MAF = DFraw$A1FREQ
DFraw$MAF[DFraw$A1FREQ > 0.5] = 1-DFraw$A1FREQ[DFraw$A1FREQ > 0.5]
}
######## Get GWAS catalogue information
# NOTE: field descriptons are here: http://genome.ucsc.edu/cgi-bin/hgTables
catFile = NULL
if("-hits"%in%args) {
# NOTE: this overrides the -hi for QC colours
catInfo = args[which(args=="-hits")+1]
load(catInfo,verbose=TRUE)
catFile = catPheno # just europeans
colnames(catFile)[ncol(catFile)] = "Ancestry"
catFile$Pvalue = catFile$V18
catFile$SNP = catFile$V5
catFile$BP = catFile$V4 # this is the chromEnd field
catFile$CHR = gsub("chr","",catFile$V2)
catFile$CHR[catFile$CHR%in%names(sexChroms)] = sexChroms[catFile$CHR[catFile$CHR%in%names(sexChroms)]]
catFile$CHR = as.numeric(catFile$CHR)
print( head(catFile) )
}
# do we fix the y-axis?
Ymax = FALSE
if("-ymax"%in%args) Ymax = as.numeric(args[which(args=="-ymax")+1])
# which chroms?
if(chroms!="genome") {
if(chroms=="all") chroms = 1:22 else chroms = parse.range.string(chroms)
}
for(chrom in chroms){
if(grepl("%%",dataFile)) {
DFraw = read.table(gsub("%%",chrom,dataFile),sep="",header=TRUE,stringsAsFactors=FALSE)
DFraw$MAF = DFraw$A1FREQ
DFraw$MAF[DFraw$A1FREQ > 0.5] = 1-DFraw$A1FREQ[DFraw$A1FREQ > 0.5]
}
print(chrom)
if("-qc" %in% args){
# QC THRESHOLDS
minmaf = 0.001
mininfo = 0.3
maxmiss = 0.05 # maximum 5% missing data
} else {
minmaf=0
mininfo=0
maxmiss=1
}
plot.BOLT.pvalues(GWASdata=gsub("%%",chrom,dataFile),chrom=chrom,minmaf=minmaf,mininfo=mininfo,maxmiss=maxmiss,plotOutDir=plotOutDir,extraTitle=extraTitle,highlight=highlightSNPs,highlightCols=highlightCols,Ymax=Ymax,catFile=catFile)
}
############# EXTRAS #############
# snps
#hiEF = catFile[(catFile$V20>5)&(!is.na(catFile$V20)),c("V5","CHR","BP","Pvalue","V20","V21")]
#DFraw$SNPID = paste0(DFraw$CHR,".",DFraw$BP)
#hiEF$SNPID = paste0(hiEF$CHR,".",hiEF$BP)
#snps = DFraw[match(hiEF$SNPID,DFraw$SNPID),]
#write.table(snps$SNP,file="Height.hi.OR.snps.txt",quote=FALSE,row.names=FALSE,col.names=FALSE)
#system("plink --bfile /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/Combined/b1__b11-b001__b095-autosome-oxfordqc --keep-allele-order --extract Height.hi.OR.snps.txt --recode AD --out Height.hi.OR.snps")
#geno = read.table("Height.hi.OR.snps.raw",header=TRUE)
#map = read.table("Height.hi.OR.snps.map",header=FALSE)
#pheno = read.table("/well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/otherGWAS/PhenotypesForBOLT-v3.txt",header=TRUE)
### get height phenotype
#geno$pheno = pheno$Standing.height[match(geno$IID,pheno$IID)]
#snp = "Affx-20256845"
#snp1 = 5 + 2*which(map$V2==snp)
#png(paste0("Standing.height-effect-",snp,".png"),height=1000,width=1000,res=150)
#boxplot(log(geno$pheno)~geno[,snp1])
#dev.off()
#lm(geno$pheno~geno[,snp1])
############# TESTING & USAGE #############
#Rscript plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LMM-quant-Age-v1.out all plots > Logs/plot-Ychrom-BOLT-LMM-quant-Age-v1.log &
#Rscript plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LREG-quant-Age-v1.out all plots > Logs/plot-Ychrom-BOLT-LREG-quant-Age-v1.log &
#Rscript ../otherGWAS/plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LMM-quant-Age-v1.out all plots -hi -title -QCcolors > Logs/plot-Ychrom-BOLT-LMM-quant-Age-v1.log &
#Rscript ../otherGWAS/plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LMM-all-snps-quant-Age-v1.out all plots -hi -title -QCcolors > Logs/plot-Ychrom-BOLT-LMM-quant-all-snps-Age-all-v1.log &
#Rscript ../otherGWAS/plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LMM-all-snps-quant-Age-v1.out genome plots -hi -title -QCcolors > Logs/plot-Ychrom-BOLT-LMM-quant-all-snps-Age-all-v1.log &
# dataFile="/well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/otherGWAS/Place.of.birth.in.UK...north.co.ordinate/BOLTLMM.v1/Place.of.birth.in.UK...north.co.ordinate-BOLT-LMM-all-snps-v1.out"
| /QC-Scripts/GWAS/otherGWAS/plot-BOLT-results-known-hits-old.R | no_license | cgbycroft/UK_biobank | R | false | false | 10,888 | r | source('/well/donnelly/ukbiobank_project_8874/clare/commonScripts/myManhattan.R')
# Source helper scripts (absolute cluster paths); these are expected to define
# read.SNPQC.files(), parse.range.string() and other utilities used below.
h = c("/well/ukbiobank/expt/V2_QCed.SNP-QC/src/V2_QCed.snpqc-tests.R","/well/ukbiobank/expt/V2_QCed.SNP-QC/src/V2_QCed.bin2clusterplots.R","/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/R/scripts/readPSperformance.R","/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/R/scripts/auxFunctions.R")
for(s in h) source(s)
library(dplyr)
library(qqman)
library(stringr)
# Numeric codes for the non-autosomal chromosomes, keyed by their labels.
sexChroms = c(23,24,25,26)
names(sexChroms) = c("X","Y","XY","MT")
# Draw Manhattan, QQ and effect-size plots for one chromosome (or the whole
# genome) from a BOLT-LMM / linear-regression GWAS results table.
#
# NOTE(review): this function reads several globals rather than arguments:
# `DFraw` (the results table read at the top level), `args` (command-line
# flags; "-qc" toggles the QCexclude filter) and `sexChroms`. It must be
# called after the top-level setup below.
#
# Arguments:
#   GWASdata   path of the results file; only basename() is used, to build
#              output file names
#   chrom      chromosome label ("genome", a number, or "X"/"Y"/"XY"/"MT")
#   minmaf     minimum minor-allele frequency kept
#   mininfo    minimum imputation INFO kept (imputed data only)
#   maxmiss    maximum missingness kept (genotyped data only)
#   plotOutDir directory the .png files are written to
#   plotQQ     also draw a QQ plot of the p-values?
#   extraTitle suffix appended to the output file names
#   Ymax       fixed y-axis maximum on the -log10 scale, or FALSE to
#              auto-scale (capped at 50)
#   catFile    optional GWAS-catalogue hits to overlay on the Manhattan plot
#   QCexclude  SNP ids dropped when "-qc" is among `args`.
#              NOTE(review): the call sites in this file never pass this
#              argument, so the default c() means nothing is excluded here.
#   ...        forwarded to myManhattan()
plot.BOLT.pvalues <- function(GWASdata,chrom,minmaf,mininfo,maxmiss,plotOutDir,plotQQ=TRUE,extraTitle="",Ymax=FALSE,catFile = NULL,QCexclude=c(),...) {
DF = dplyr::tbl_df(DFraw)
print(head(DF))
# Imputed BOLT output carries an INFO column; genotyped output carries F_MISS.
# `Pvalset` doubles as the basename stem for all plots produced below.
if("INFO"%in%colnames(DFraw)){
Pvalset = paste(basename(GWASdata),".chr",chrom,".maf",minmaf,".info",mininfo,".pruned",extraTitle,sep="")
DF = dplyr::filter(DF, MAF > minmaf & INFO > mininfo)
} else {
Pvalset = paste(basename(GWASdata),".chr",chrom,".maf",minmaf,".miss",maxmiss,".pruned",extraTitle,sep="")
DF = dplyr::filter(DF, MAF > minmaf & F_MISS < maxmiss)
}
if(chrom!="genome") DF = dplyr::filter(DF, CHR %in% chrom) # subset by chromosome
if( "-qc" %in%args ) DF = dplyr::filter(DF, !SNP %in% QCexclude) # exclude SNPs in array,imageArtefact, or concordance lists. Only relevant with clare's plink gwas data.
print(Pvalset)
# Standardise the p-value column name to P: mixed-model column if present,
# otherwise plain linear regression.
if("P_BOLT_LMM_INF" %in% colnames(DF)) {
print("using BOLT_LMM_INF")
DF = dplyr::rename(DF, P = P_BOLT_LMM_INF) } else {
DF = dplyr::rename(DF, P = P_LINREG)
}
# Map sex-chromosome labels to their numeric codes; autosomes to numeric.
if(chrom%in%c("X","XY","Y","MT")) DF$CHR = sexChroms[chrom] else DF$CHR = as.numeric(DF$CHR)
maxP = round(max(-log10(DF$P),na.rm=T))
print(paste('max -log(pval) = ',maxP))
# NOTE(review): nFail/percentFailed are computed but never used or printed.
nFail = length(which(-log10(DF$P) > 8))
percentFailed = nFail/nrow(DF) * 100
# Auto-scale the y-axis if no fixed Ymax was given (10 units of headroom,
# capped at 50; zero p-values are excluded to keep the max finite).
if(!Ymax){
Ymax = ceiling(max(-log10(DF$P[DF$P!=0]),na.rm=T)) + 10
Ymax = min(Ymax,50)
}
########### Manhattan plot
png(paste(plotOutDir,"/",Pvalset,"-manhattan%02d.png",sep=""),width=41,height=12,units="in",res=150)
par(las=1,font.main=1,cex.axis=2,cex.lab=2,mar=c(7 ,7, 5 ,2))
myManhattan(DF,ymax=Ymax,suggestiveline = FALSE,xpd=NA,cex=1,...)
# myManhattan(DF,ymax=Ymax,suggestiveline = FALSE,xpd=NA,cex=1,col="transparent")
# add extra catalogue hits?
if(!is.null(catFile) & (chrom!="genome")) {
print( "Printing catalogue hits..." )
catFileSub = catFile[catFile$CHR %in% chrom,]
print( paste0( sum(-log10(catFileSub$Pvalue)>Ymax)," catalogue hits above ",Ymax) )
# Clamp off-scale catalogue p-values to the top of the plot.
catFileSub$Pvalue[-log10(catFileSub$Pvalue)>Ymax] = 10^(-Ymax)
# plot non-European hits differently
colors = rep("red",dim(catFileSub)[1])
# colors[!grepl("European",catFileSub$Ancestry)] = "blue"
print( table(catFileSub$Ancestry[!grepl("European",catFileSub$Ancestry)]) )
# do we have these SNPs in UKBiobank? match on chrom and position
#inHere = (catFileSub$BP %in% DF$BP)&(catFileSub$CHR == DF$CHR)
#catFileSub$Pvalue[inHere] = DF$Pvalue[]
# Each catalogue hit is marked with a large star plus a filled dot.
points(catFileSub$BP,-log10(catFileSub$Pvalue),pch=8,col=colors,cex=4,lwd=1)
points(catFileSub$BP,-log10(catFileSub$Pvalue),pch=16,col=colors,cex=2)
}
dev.off()
########### qq plot p-values
if(plotQQ){
png(paste(plotOutDir,"/",Pvalset,"-qqplot%02d.png",sep=""),height=1000,width=1000,res=150)
# Clamp extreme p-values so the QQ plot shares the Manhattan y-limit.
DF$P2=DF$P
DF$P2[DF$P<(10^-Ymax)] = 10^-Ymax
qqman::qq(DF$P2)
dev.off()
}
########## plot effect sizes
# Build a cumulative genome-wide x-coordinate when several chromosomes are shown.
DF$index = DF$BP
if( length(unique(DF$CHR)) > 1 ){
for(i in unique(DF$CHR)){
if(i>1) DF$index[DF$CHR==i] = DF$index[DF$CHR==i] + max(DF$BP[DF$CHR==(i - 1)])
}
}
# Effect sizes only for genome-wide significant SNPs (P < 5e-8); BETA is
# sign-flipped where A1FREQ > 0.5 so it refers to the minor allele.
snps = which((DF$P < 5e-8)&(!is.na(DF$P)))
if("BETA"%in%colnames(DF) & ( length(snps) > 0 )){
beta = DF
beta$BETA[DF$A1FREQ > 0.5] = -beta$BETA[DF$A1FREQ > 0.5]
beta = beta[snps,]
png(paste(plotOutDir,"/",Pvalset,"-EffectSizes.png",sep=""),height=1000,width=1000,res=150)
myManhattan(beta,p="BETA",logtransform=FALSE,genomewideline=0,suggestiveline=FALSE)
dev.off()
}
}
######## Which chromosome and what data are we plotting?
# Command-line driver. Positional arguments:
#   1: GWAS results file (may contain "%%" as a per-chromosome placeholder)
#   2: chromosome spec ("genome", "all", or a range string for parse.range.string)
#   3: output directory for the plots
# Optional flags: -title <suffix>, -hi, -qc, -hits <catalogue .RData>, -ymax <n>
args = commandArgs(TRUE)
#args = c("test.out","1","plots", "-ymax","50","-hits", "/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/GWAS/otherGWAS/GWAScatalogue/hg19/gwasCatalog-subset-Standing.height.RData", "-title", "-Euro-hits")
#args = c("/well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/otherGWAS/Standing.height/BOLTLMM.v3/Standing.height-BOLT-LMM-v3.out","5","plots", "-ymax","50","-qc","-hits", "/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/GWAS/otherGWAS/GWAScatalogue/hg19/gwasCatalog-subset-Standing.height.RData", "-title", "-Euro-hits-for-talk")
print(args)
dataFile = args[1]
chroms = args[2]
plotOutDir = args[3]
if("-title"%in%args) extraTitle = args[which(args=="-title")+1] else extraTitle=""
# Should we highlight some SNPs?
highlightSNPs=NULL
highlightCols=NULL
# "-hi": colour QC-flagged SNPs on the Manhattan plot; "-qc" also builds the
# exclusion list referenced (via the QCexclude argument) by plot.BOLT.pvalues.
if(( "-hi"%in%args )|("-qc" %in%args )){
print("Reading QC snps lists...")
QCSNPList = read.SNPQC.files(justSNPs=TRUE)
# QCexclude = unique(c(QCSNPList$arraySNPs,QCSNPList$imageSNPs,QCSNPList$concordanceSNPs))
QCexclude = unique(c(QCSNPList$arraySNPs,QCSNPList$concordanceSNPs)) # this is only required if using -hi or clare's versions of plink genotype files.
# NOTE(review): QCexclude is never passed to plot.BOLT.pvalues below, so the
# function's default (empty) exclusion list is what actually takes effect.
if( "-hi"%in%args ){
highlightSNPs = unique(unlist(QCSNPList))
# One colour per QC category; later assignments overwrite earlier ones.
colors = rep("black",length(highlightSNPs))
colors[highlightSNPs%in%QCSNPList$batchHweSNPs] = "green" # HWE (apply first)
colors[highlightSNPs%in%c(QCSNPList$plateSNPs,QCSNPList$batchSNPs)] = "purple" # BATCH/PLATE
colors[highlightSNPs%in%c(QCSNPList$imageSNPs)] = "orange" # IMAGE ARTEFACT
colors[highlightSNPs%in%c(QCSNPList$arraySNPs)] = "red" # ARRAY
colors[highlightSNPs%in%c(QCSNPList$concordanceSNPs)] = "blue" # CONCORDANCE
highlightCols = colors
print(table(highlightCols))
}
}
# get data
print("printing the following chromosomes")
print( chroms )
#DFraw = read.table(dataFile,sep="",header=TRUE,stringsAsFactors=FALSE)
# No "%%" placeholder means a single genome-wide results file: read it once
# here (per-chromosome files are read inside the loop below instead).
if(!grepl("%%",dataFile)){
print("reading in GWAS output file")
DFraw = read.table(dataFile,sep="",header=TRUE,stringsAsFactors=FALSE)
# Minor-allele frequency: fold A1FREQ about 0.5.
DFraw$MAF = DFraw$A1FREQ
DFraw$MAF[DFraw$A1FREQ > 0.5] = 1-DFraw$A1FREQ[DFraw$A1FREQ > 0.5]
}
######## Get GWAS catalogue information
# NOTE: field descriptions are here: http://genome.ucsc.edu/cgi-bin/hgTables
catFile = NULL
if("-hits"%in%args) {
# NOTE: this overrides the -hi for QC colours
catInfo = args[which(args=="-hits")+1]
load(catInfo,verbose=TRUE) # expected to define `catPheno` (used next line)
catFile = catPheno # just europeans
colnames(catFile)[ncol(catFile)] = "Ancestry"
# Map the UCSC-style V* columns onto the names plot.BOLT.pvalues expects.
catFile$Pvalue = catFile$V18
catFile$SNP = catFile$V5
catFile$BP = catFile$V4 # this is the chromEnd field
catFile$CHR = gsub("chr","",catFile$V2)
catFile$CHR[catFile$CHR%in%names(sexChroms)] = sexChroms[catFile$CHR[catFile$CHR%in%names(sexChroms)]]
catFile$CHR = as.numeric(catFile$CHR)
print( head(catFile) )
}
# do we fix the y-axis?
Ymax = FALSE
if("-ymax"%in%args) Ymax = as.numeric(args[which(args=="-ymax")+1])
# which chroms?
if(chroms!="genome") {
if(chroms=="all") chroms = 1:22 else chroms = parse.range.string(chroms)
}
# Main loop: one set of plots per requested chromosome (single pass for "genome").
for(chrom in chroms){
if(grepl("%%",dataFile)) {
DFraw = read.table(gsub("%%",chrom,dataFile),sep="",header=TRUE,stringsAsFactors=FALSE)
DFraw$MAF = DFraw$A1FREQ
DFraw$MAF[DFraw$A1FREQ > 0.5] = 1-DFraw$A1FREQ[DFraw$A1FREQ > 0.5]
}
print(chrom)
if("-qc" %in% args){
# QC THRESHOLDS
minmaf = 0.001
mininfo = 0.3
maxmiss = 0.05 # maximum 5% missing data
} else {
# No "-qc": keep everything (thresholds disabled).
minmaf=0
mininfo=0
maxmiss=1
}
plot.BOLT.pvalues(GWASdata=gsub("%%",chrom,dataFile),chrom=chrom,minmaf=minmaf,mininfo=mininfo,maxmiss=maxmiss,plotOutDir=plotOutDir,extraTitle=extraTitle,highlight=highlightSNPs,highlightCols=highlightCols,Ymax=Ymax,catFile=catFile)
}
############# EXTRAS #############
# snps
#hiEF = catFile[(catFile$V20>5)&(!is.na(catFile$V20)),c("V5","CHR","BP","Pvalue","V20","V21")]
#DFraw$SNPID = paste0(DFraw$CHR,".",DFraw$BP)
#hiEF$SNPID = paste0(hiEF$CHR,".",hiEF$BP)
#snps = DFraw[match(hiEF$SNPID,DFraw$SNPID),]
#write.table(snps$SNP,file="Height.hi.OR.snps.txt",quote=FALSE,row.names=FALSE,col.names=FALSE)
#system("plink --bfile /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/Combined/b1__b11-b001__b095-autosome-oxfordqc --keep-allele-order --extract Height.hi.OR.snps.txt --recode AD --out Height.hi.OR.snps")
#geno = read.table("Height.hi.OR.snps.raw",header=TRUE)
#map = read.table("Height.hi.OR.snps.map",header=FALSE)
#pheno = read.table("/well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/otherGWAS/PhenotypesForBOLT-v3.txt",header=TRUE)
### get height phenotype
#geno$pheno = pheno$Standing.height[match(geno$IID,pheno$IID)]
#snp = "Affx-20256845"
#snp1 = 5 + 2*which(map$V2==snp)
#png(paste0("Standing.height-effect-",snp,".png"),height=1000,width=1000,res=150)
#boxplot(log(geno$pheno)~geno[,snp1])
#dev.off()
#lm(geno$pheno~geno[,snp1])
############# TESTING & USAGE #############
#Rscript plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LMM-quant-Age-v1.out all plots > Logs/plot-Ychrom-BOLT-LMM-quant-Age-v1.log &
#Rscript plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LREG-quant-Age-v1.out all plots > Logs/plot-Ychrom-BOLT-LREG-quant-Age-v1.log &
#Rscript ../otherGWAS/plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LMM-quant-Age-v1.out all plots -hi -title -QCcolors > Logs/plot-Ychrom-BOLT-LMM-quant-Age-v1.log &
#Rscript ../otherGWAS/plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LMM-all-snps-quant-Age-v1.out all plots -hi -title -QCcolors > Logs/plot-Ychrom-BOLT-LMM-quant-all-snps-Age-all-v1.log &
#Rscript ../otherGWAS/plot-BOLT-results.R /well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/YXintensity/BOLTLMM.v1/Ychrom-BOLT-LMM-all-snps-quant-Age-v1.out genome plots -hi -title -QCcolors > Logs/plot-Ychrom-BOLT-LMM-quant-all-snps-Age-all-v1.log &
# dataFile="/well/ukbiobank/qcoutput.V2_QCed.sample-QC/data/GWAS/otherGWAS/Place.of.birth.in.UK...north.co.ordinate/BOLTLMM.v1/Place.of.birth.in.UK...north.co.ordinate-BOLT-LMM-all-snps-v1.out"
|
# Return the item names of a data set: every column name except the first
# (key) column. Yields character(0) for a single-column input and NULL when
# the input has no column names at all.
items.from <- function(data) {
  all_columns <- colnames(data)
  all_columns[-1]
}
| /R/items_from.R | no_license | gbeine/RKanban | R | false | false | 60 | r | items.from <- function(data) {
tail(colnames(data), -1)
}
|
# Build a minimal mock Rook request environment for exercising a plumber
# router without a real HTTP server.
#
# verb: HTTP method; normalised to upper case (e.g. "get" -> "GET").
# path: request path, stored in PATH_INFO.
# Returns an environment carrying REQUEST_METHOD, PATH_INFO and a stub
# rook.input whose read_lines() yields an empty request body.
make_req <- function(verb, path){
  mock <- new.env()
  assign("REQUEST_METHOD", toupper(verb), envir = mock)
  assign("PATH_INFO", path, envir = mock)
  assign("rook.input", list(read_lines = function() ""), envir = mock)
  mock
}
# End-to-end check that the /png and /jpeg endpoints in files/image.R render
# images through the router. The expected values are byte counts of the
# rendered image bodies and are sensitive to the installed graphics stack.
test_that("Images are properly rendered", {
r <- plumber$new("files/image.R")
resp <- r$serve(make_req("GET", "/png"), PlumberResponse$new())
expect_equal(length(resp$body), 13044) # This may change with changes to base graphics that slightly alter the plot format. But we'll start here.
resp <- r$serve(make_req("GET", "/jpeg"), PlumberResponse$new())
expect_equal(length(resp$body), 13958) # This may change with changes to base graphics that slightly alter the plot format. But we'll start here.
})
| /tests/testthat/test-image.R | permissive | mpmenne/plumber | R | false | false | 694 | r | make_req <- function(verb, path){
req <- new.env()
req$REQUEST_METHOD <- toupper(verb)
req$PATH_INFO <- path
req$rook.input <- list(read_lines = function(){ "" })
req
}
# End-to-end check that the /png and /jpeg endpoints in files/image.R render
# images through the router. The expected values are byte counts of the
# rendered image bodies and are sensitive to the installed graphics stack.
test_that("Images are properly rendered", {
r <- plumber$new("files/image.R")
resp <- r$serve(make_req("GET", "/png"), PlumberResponse$new())
expect_equal(length(resp$body), 13044) # This may change with changes to base graphics that slightly alter the plot format. But we'll start here.
resp <- r$serve(make_req("GET", "/jpeg"), PlumberResponse$new())
expect_equal(length(resp$body), 13958) # This may change with changes to base graphics that slightly alter the plot format. But we'll start here.
})
|
## Plot 5: total PM2.5 emissions from motor-vehicle (ON-ROAD) sources in
## Baltimore City (fips 24510), 1999-2008.
## Read data in
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds") # not needed for this plot, kept for parity with the assignment
# Subset for Baltimore on-road (motor vehicle) data
BmDataOnRoad <- subset(NEI, fips == "24510" & type == "ON-ROAD")
# Aggregate Baltimore on-road emissions by year
Totalemyearbmonroad <- aggregate(Emissions ~ year, BmDataOnRoad, sum)
library(ggplot2)
# Plot png
png("plot5.png", width = 840, height = 480)
p <- ggplot(Totalemyearbmonroad, aes(factor(year), Emissions))
# FIX: the previous ylab quoted the whole plotmath expression as one string
# ("Total PM'[2.5]*Emissions"), so the subscript never rendered (and it had a
# stray apostrophe); split the string so [2.5] is a real plotmath subscript.
# Also fixed "vechicle" -> "vehicle" in the title.
p <- p + geom_bar(stat = "identity") + xlab("Year") +
  ylab(expression("Total PM"[2.5] * " Emissions")) +
  ggtitle("Total Emission from motor vehicle in Baltimore City from 1999 to 2008")
print(p)
dev.off()
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Subset for Baltimore on Road data
BmDataOnRoad <- subset(NEI, fips =="24510" & type=="ON-ROAD")
#Aggregate Baltimore OnRoad Data
Totalemyearbmonroad <- aggregate(Emissions ~ year, BmDataOnRoad, sum)
library(ggplot2)
#Plot Png
png("plot5.png", width=840, height = 480)
p <- ggplot(Totalemyearbmonroad, aes(factor(year), Emissions))
p <- p+geom_bar(stat="identity") + xlab("Year") + ylab(expression("Total PM'[2.5]*Emissions")) + ggtitle("Total Emission from motor vechicle in Baltimore City from 1999 to 2008")
print(p)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mermaid.R
\name{mermaid}
\alias{mermaid}
\title{R + mermaid.js}
\usage{
mermaid(diagram = "", ..., width = NULL, height = NULL)
}
\arguments{
\item{diagram}{diagram in mermaid markdown-like
language or file (as a connection or file name)
containing a diagram specification. If no diagram
is provided \code{diagram = ""} then the function
will assume that a diagram will be provided by
\code{\link[htmltools]{tags}} and
\code{DiagrammeR} is just being used for dependency
injection.}
\item{...}{other arguments and parameters you would
like to send to Javascript.}
\item{width}{the width of the resulting graphic in
pixels.}
\item{height}{the height of the resulting graphic in
pixels.}
}
\value{
An object of class \code{htmlwidget} that
will intelligently print itself into HTML in a
variety of contexts including the R console, within
R Markdown documents, and within Shiny output
bindings.
}
\description{
Make diagrams in R using
\href{https://github.com/knsv/mermaid/wiki}{mermaid.js} with infrastructure
provided by \href{http://www.htmlwidgets.org/}{htmlwidgets}.
}
\examples{
\dontrun{
# Create a simple graph running left to right (note
# that the whitespace is not important)
DiagrammeR("
graph LR
A-->B
A-->C
C-->E
B-->D
C-->D
D-->F
E-->F
")
# Create the equivalent graph but have it running
# from top to bottom
DiagrammeR("
graph TB
A-->B
A-->C
C-->E
B-->D
C-->D
D-->F
E-->F
")
# Create a graph with different node shapes and
# provide fill styles for each node
DiagrammeR("graph LR;A(Rounded)-->B[Squared];B-->C{A Decision};
C-->D[Square One];C-->E[Square Two];
style A fill:#E5E25F; style B fill:#87AB51; style C fill:#3C8937;
style D fill:#23772C; style E fill:#B6E6E6;"
)
# Load in the 'mtcars' dataset
data(mtcars)
connections <- sapply(
1:ncol(mtcars)
,function(i) {
paste0(
i
,"(",colnames(mtcars)[i],")---"
,i,"-stats("
,paste0(
names(summary(mtcars[,i]))
,": "
,unname(summary(mtcars[,i]))
,collapse="<br/>"
)
,")"
)
}
)
# Create a diagram using the 'connections' object
DiagrammeR(
paste0(
"graph TD;", "\\n",
paste(connections, collapse = "\\n"),"\\n",
"classDef column fill:#0001CC, stroke:#0D3FF3, stroke-width:1px;" ,"\\n",
"class ", paste0(1:length(connections), collapse = ","), " column;"
)
)
# Also with \\code{DiagrammeR()}, you can use tags
# from \\code{htmltools} (just make sure to use
# \\code{class = "mermaid"})
library(htmltools)
diagramSpec = "
graph LR;
id1(Start)-->id2(Stop);
style id1 fill:#f9f,stroke:#333,stroke-width:4px;
style id2 fill:#ccf,stroke:#f66,stroke-width:2px,stroke-dasharray: 5, 5;
"
html_print(tagList(
tags$h1("R + mermaid.js = Something Special")
,tags$pre(diagramSpec)
,tags$div(class="mermaid",diagramSpec)
,DiagrammeR()
))
# Create a sequence diagram
DiagrammeR("
sequenceDiagram;
customer->>ticket seller: ask for a ticket;
ticket seller->>database: seats;
alt tickets available
database->>ticket seller: ok;
ticket seller->>customer: confirm;
customer->>ticket seller: ok;
ticket seller->>database: book a seat;
ticket seller->>printer: print a ticket;
else sold out
database->>ticket seller: none left;
ticket seller->>customer: sorry;
end
")
}
}
| /man/mermaid.Rd | no_license | timelyportfolio/DiagrammeR | R | false | true | 3,436 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mermaid.R
\name{mermaid}
\alias{mermaid}
\title{R + mermaid.js}
\usage{
mermaid(diagram = "", ..., width = NULL, height = NULL)
}
\arguments{
\item{diagram}{diagram in mermaid markdown-like
language or file (as a connection or file name)
containing a diagram specification. If no diagram
is provided \code{diagram = ""} then the function
will assume that a diagram will be provided by
\code{\link[htmltools]{tags}} and
\code{DiagrammeR} is just being used for dependency
injection.}
\item{...}{other arguments and parameters you would
like to send to Javascript.}
\item{width}{the width of the resulting graphic in
pixels.}
\item{height}{the height of the resulting graphic in
pixels.}
}
\value{
An object of class \code{htmlwidget} that
will intelligently print itself into HTML in a
variety of contexts including the R console, within
R Markdown documents, and within Shiny output
bindings.
}
\description{
Make diagrams in R using
\href{https://github.com/knsv/mermaid/wiki}{mermaid.js} with infrastructure
provided by \href{http://www.htmlwidgets.org/}{htmlwidgets}.
}
\examples{
\dontrun{
# Create a simple graph running left to right (note
# that the whitespace is not important)
DiagrammeR("
graph LR
A-->B
A-->C
C-->E
B-->D
C-->D
D-->F
E-->F
")
# Create the equivalent graph but have it running
# from top to bottom
DiagrammeR("
graph TB
A-->B
A-->C
C-->E
B-->D
C-->D
D-->F
E-->F
")
# Create a graph with different node shapes and
# provide fill styles for each node
DiagrammeR("graph LR;A(Rounded)-->B[Squared];B-->C{A Decision};
C-->D[Square One];C-->E[Square Two];
style A fill:#E5E25F; style B fill:#87AB51; style C fill:#3C8937;
style D fill:#23772C; style E fill:#B6E6E6;"
)
# Load in the 'mtcars' dataset
data(mtcars)
connections <- sapply(
1:ncol(mtcars)
,function(i) {
paste0(
i
,"(",colnames(mtcars)[i],")---"
,i,"-stats("
,paste0(
names(summary(mtcars[,i]))
,": "
,unname(summary(mtcars[,i]))
,collapse="<br/>"
)
,")"
)
}
)
# Create a diagram using the 'connections' object
DiagrammeR(
paste0(
"graph TD;", "\\n",
paste(connections, collapse = "\\n"),"\\n",
"classDef column fill:#0001CC, stroke:#0D3FF3, stroke-width:1px;" ,"\\n",
"class ", paste0(1:length(connections), collapse = ","), " column;"
)
)
# Also with \\code{DiagrammeR()}, you can use tags
# from \\code{htmltools} (just make sure to use
# \\code{class = "mermaid"})
library(htmltools)
diagramSpec = "
graph LR;
id1(Start)-->id2(Stop);
style id1 fill:#f9f,stroke:#333,stroke-width:4px;
style id2 fill:#ccf,stroke:#f66,stroke-width:2px,stroke-dasharray: 5, 5;
"
html_print(tagList(
tags$h1("R + mermaid.js = Something Special")
,tags$pre(diagramSpec)
,tags$div(class="mermaid",diagramSpec)
,DiagrammeR()
))
# Create a sequence diagram
DiagrammeR("
sequenceDiagram;
customer->>ticket seller: ask for a ticket;
ticket seller->>database: seats;
alt tickets available
database->>ticket seller: ok;
ticket seller->>customer: confirm;
customer->>ticket seller: ok;
ticket seller->>database: book a seat;
ticket seller->>printer: print a ticket;
else sold out
database->>ticket seller: none left;
ticket seller->>customer: sorry;
end
")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ADI.R
\name{ADI}
\alias{ADI}
\title{Function ADI}
\usage{
ADI(data_sheet, bytes, ...)
}
\arguments{
\item{data_sheet}{\bold{either} a data.frame f.e imported from a data sheet containing\cr
"Name","item.number"\cr
"action.from.","action.to","kind.of.action"\cr
"name.of.action","action.number","classification","weighting"\cr
\cr
\bold{or} only "action.from.","action.to","kind.of.action" if actions and items are supplied separately\cr
\cr
actions: with "name.of.action","action.number","classification","weighting\cr
weighting the factor which should be used to calculate the behavior (1 for "action.from"" wins -1 for "action.to" wins")\cr
Setting a behaviour to 2 means it is count double\cr
items: with "Name","item.number"\cr}
\item{bytes}{a string where each enabled action is set to 1 and each disabled action is set to 0\cr
Setting a behaviour to 2 means it is count double\cr}
\item{\dots}{Additional parameters:
\describe{
\item{\bold{actions}}{(data.frame) with "name.of.action","action.number","classification","weighting"; Classification 1 if "action.from"" wins; Classification 2 if "action.to" wins}
\item{\bold{weighting}}{the factor which should be used to calculate the behavior (1 for "action.from"" wins -1 for "action.to" wins")\cr
Setting a behaviour to 2 means it is count double}
\item{\bold{vcolors}}{as much colors as items, colors will returned as sorted ADI colors means color 1 = item rank 1, color 2 = item rank 2, and so on}
}}
}
\value{
returns a list with\cr
ADI - the Average Dominance index\cr
Colors - the colors supported by vcolors sorted by ADI of the items\cr
ADI_count_matrix - the counts from which the ADI was calculated\cr
}
\description{
Calculates Average Dominance Index.
}
\details{
Calculates Average Dominance Index.
}
\examples{
{ #you can eihter use:
data_sheet=data.frame ("action.from"=c(1,4,2,3,4,3,4,3,4,3,4,3,4,3,4),
"action.to"=c(4,1,1,4,3,4,3,4,3,4,3,4,3,4,3),
"kind.of.action"= c(4,1,1,4,3,4,3,4,3,4,3,4,3,4,3),stringsAsFactors=FALSE)
items= data.frame ("Name"=c("item1","item2","item3","item4","item5","item6") ,
"item.number"=c(1:6),stringsAsFactors=FALSE)
actions=data.frame("name.of.action"= c("leading","following","approach","bite","threat to bite",
"kick","threat to kick", "chase","retreat"),
"action.number"=c(1:9),
"classification"=c(1,2,1,1,1,1,1,1,2) ,
"weighting"=c(1,-1,1,1,1,1,1,1,-1),stringsAsFactors=FALSE)
#all encounters without leading and following
bytes= "001111111"
ADI(data_sheet,items=items,actions=actions,bytes)
# or you can use a complete f.e Excel sheet
# you can save this data as basic excel sheet to work with
data(data_ADI)
bytes= "001111111"
ADI(data_ADI,bytes)
}
}
\references{
{
The Construction of Dominance Order: Comparing Performance of Five Methods Using an Individual-Based Model C. K. Hemelrijk, J. Wantia and L. Gygax, Behaviour Vol. 142, No. 8 (Aug., 2005), pp. 1037-1058
\doi{10.1163/156853905774405290}\cr
On using the DomWorld model to evaluate dominance ranking methods , de Vries, Han, Behaviour, Volume 146, Number 6, 2009 , pp. 843-869(27)
\doi{10.1163/156853909X412241}
}
}
\author{
Knut Krueger, \email{Knut.Krueger@equine-science.de}
}
| /man/ADI.Rd | no_license | cran/Dominance | R | false | true | 3,372 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ADI.R
\name{ADI}
\alias{ADI}
\title{Function ADI}
\usage{
ADI(data_sheet, bytes, ...)
}
\arguments{
\item{data_sheet}{\bold{either} a data.frame f.e imported from a data sheet containing\cr
"Name","item.number"\cr
"action.from.","action.to","kind.of.action"\cr
"name.of.action","action.number","classification","weighting"\cr
\cr
\bold{or} only "action.from.","action.to","kind.of.action" if actions and items are supplied separately\cr
\cr
actions: with "name.of.action","action.number","classification","weighting\cr
weighting the factor which should be used to calculate the behavior (1 for "action.from"" wins -1 for "action.to" wins")\cr
Setting a behaviour to 2 means it is count double\cr
items: with "Name","item.number"\cr}
\item{bytes}{a string where each enabled action is set to 1 and each disabled action is set to 0\cr
Setting a behaviour to 2 means it is count double\cr}
\item{\dots}{Additional parameters:
\describe{
\item{\bold{actions}}{(data.frame) with "name.of.action","action.number","classification","weighting"; Classification 1 if "action.from"" wins; Classification 2 if "action.to" wins}
\item{\bold{weighting}}{the factor which should be used to calculate the behavior (1 for "action.from"" wins -1 for "action.to" wins")\cr
Setting a behaviour to 2 means it is count double}
\item{\bold{vcolors}}{as much colors as items, colors will returned as sorted ADI colors means color 1 = item rank 1, color 2 = item rank 2, and so on}
}}
}
\value{
returns a list with\cr
ADI - the Average Dominance index\cr
Colors - the colors supported by vcolors sorted by ADI of the items\cr
ADI_count_matrix - the counts from which the ADI was calculated\cr
}
\description{
Calculates Average Dominance Index.
}
\details{
Calculates Average Dominance Index.
}
\examples{
{ #you can eihter use:
data_sheet=data.frame ("action.from"=c(1,4,2,3,4,3,4,3,4,3,4,3,4,3,4),
"action.to"=c(4,1,1,4,3,4,3,4,3,4,3,4,3,4,3),
"kind.of.action"= c(4,1,1,4,3,4,3,4,3,4,3,4,3,4,3),stringsAsFactors=FALSE)
items= data.frame ("Name"=c("item1","item2","item3","item4","item5","item6") ,
"item.number"=c(1:6),stringsAsFactors=FALSE)
actions=data.frame("name.of.action"= c("leading","following","approach","bite","threat to bite",
"kick","threat to kick", "chase","retreat"),
"action.number"=c(1:9),
"classification"=c(1,2,1,1,1,1,1,1,2) ,
"weighting"=c(1,-1,1,1,1,1,1,1,-1),stringsAsFactors=FALSE)
#all encounters without leading and following
bytes= "001111111"
ADI(data_sheet,items=items,actions=actions,bytes)
# or you can use a complete f.e Excel sheet
# you can save this data as basic excel sheet to work with
data(data_ADI)
bytes= "001111111"
ADI(data_ADI,bytes)
}
}
\references{
{
The Construction of Dominance Order: Comparing Performance of Five Methods Using an Individual-Based Model C. K. Hemelrijk, J. Wantia and L. Gygax, Behaviour Vol. 142, No. 8 (Aug., 2005), pp. 1037-1058
\doi{10.1163/156853905774405290}\cr
On using the DomWorld model to evaluate dominance ranking methods , de Vries, Han, Behaviour, Volume 146, Number 6, 2009 , pp. 843-869(27)
\doi{10.1163/156853909X412241}
}
}
\author{
Knut Krueger, \email{Knut.Krueger@equine-science.de}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/canada.R
\docType{data}
\name{canada}
\alias{canada}
\title{Time series from 35 weather stations of Canada.}
\format{
A list with four matrices:
\describe{
\item{m_data}{A matrix with 34 columns where each column is a weather
station}
\item{m_coord}{A matrix with 34 rows where each row is a weather station}
\item{ThePas_coord}{Coordinate of the The Pas station}
\item{ThePas_ts}{Observed time series of the station The Pas}
}
}
\source{
\url{https://weather.gc.ca}
}
\usage{
data(canada)
}
\description{
A dataset containing time series from 35 weather stations (The Pas station
and more 34 stations to estimate the temperature curve at the Pas station).
This dataset is present in the \code{fda} package.
}
\references{
J. O. Ramsay, Spencer Graves and Giles Hooker (2020). \code{fda}:
Functional Data Analysis. R package version 5.1.9.
\url{https://CRAN.R-project.org/package=fda}
}
\keyword{datasets}
| /man/canada.Rd | permissive | gilberto-sassi/geoFourierFDA | R | false | true | 985 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/canada.R
\docType{data}
\name{canada}
\alias{canada}
\title{Time series from 35 weather stations of Canada.}
\format{
A list with four matrices:
\describe{
\item{m_data}{A matrix with 34 columns where each column is a weather
station}
\item{m_coord}{A matrix with 34 rows where each row is a weather station}
\item{ThePas_coord}{Coordinate of the The Pas station}
\item{ThePas_ts}{Observed time series of the station The Pas}
}
}
\source{
\url{https://weather.gc.ca}
}
\usage{
data(canada)
}
\description{
A dataset containing time series from 35 weather stations (The Pas station
and more 34 stations to estimate the temperature curve at the Pas station).
This dataset is present in the \code{fda} package.
}
\references{
J. O. Ramsay, Spencer Graves and Giles Hooker (2020). \code{fda}:
Functional Data Analysis. R package version 5.1.9.
\url{https://CRAN.R-project.org/package=fda}
}
\keyword{datasets}
|
tformshapes <- function(singletext=FALSE,transform=NA,jacobian=FALSE,driftdiag=FALSE, parname='param',stan=FALSE){
# Builds the library of parameter-transform expressions (as text) used when
# generating R or Stan code. Each element of 'out' maps an unconstrained
# 'param' to a constrained value; 'tfvec' holds the integer code of each
# transform (0:5 are the base transforms, 50:55 their offset-skipping
# variants -- see the 'transform < 49' checks below).
# Returns either a single expression string (singletext=TRUE) or a block of
# 'if(transform==k) param = ...;' dispatch statements wrapped with the
# meanscale / inneroffset / multiplier / offset adjustments.
out = c('param',
'(log1p_exp(param))',
'(exp(param))',
'(1/(1+exp(-param)))',
'((param)^3)',
'log1p(param)', #why is this here? results in NA's / warnings.
'meanscale',
'1/(1+exp(-param))',
'exp(param)',
'1/(1+exp(-param))-(exp(param)^2)/(1+exp(param))^2',
'3*param^2',
'1/(1+param)')
tfvec=c(0:5,50:55)
# substitute the caller-chosen parameter name into every expression
out=gsub('param',parname,out,fixed=TRUE)
# if(driftdiag && jacobian) out = paste0(out,' * param')
# out = sapply(out,Simplify)
# names(out)=paste0('fn',1:length(out))
# if(jacobian) out = jacobianSymb(out,variables='param')
# keep only the requested transform (transform 0 keeps the full table)
if(!is.na(transform)&&transform!=0) out = out[tfvec == transform] #ifelse(jacobian,0,1):(length(out)-ifelse(jacobian,1,0))
if(!singletext) {
# emit one dispatch line per transform code; the multi-line string
# literals below intentionally embed newlines in the generated code
out = paste0('if(transform==', tfvec,') param = ',out,';\n',collapse='')
if(!stan) out <- paste0('param = parin * meanscale + inneroffset; \n ',out,'
param=param*multiplier;
if(transform < 49) param = param+offset;')
if(stan) out <- paste0('if(meanscale!=1.0) param *= meanscale;
if(inneroffset != 0.0) param += inneroffset; \n',out,'
if(multiplier != 1.0) param *=multiplier;
if(transform < 49 && offset != 0.0) param+=offset;')
}
# single-expression form: inline the scaling directly around 'param'.
# NOTE(review): this gsub targets the literal 'param', so it only rewrites
# correctly when parname is left at its default -- confirm intended.
if(singletext) out <- paste0('offset + multiplier*',gsub('param','(param*meanscale+inneroffset)',out))
# strip all spaces from the generated code text
out=gsub(' ','',out,fixed=TRUE)
return(out)
}
tform <- function(parin, transform, multiplier, meanscale, offset, inneroffset, extratforms='',singletext=FALSE,jacobian=FALSE,driftdiag=FALSE){
# Apply a ctsem parameter transform to 'parin'.
#
# transform: either an integer code understood by tformshapes(), or a
#   character expression in terms of 'param' that is evaluated directly.
# multiplier / meanscale / offset / inneroffset: scaling constants that the
#   generated or user-supplied expression may reference.
# extratforms: extra transform code appended to the generated code before
#   evaluation (integer-code path with singletext=FALSE only).
# singletext: if TRUE, return the transform as a text expression instead of
#   evaluating it.
# jacobian / driftdiag: retained for interface compatibility; currently
#   unused (see commented-out line below).
#
# Returns the transformed numeric value, or the expression text when
# singletext=TRUE.
param=parin
if(!is.na(suppressWarnings(as.integer(transform)))) {
out <- tformshapes(singletext=singletext,transform=as.integer(transform))#,jacobian=jacobian)
# BUGFIX: the paste0() result was previously discarded, so 'extratforms'
# was never actually appended to the generated code.
if(!singletext) out <- paste0(out,extratforms)
if(singletext) {
# substitute the actual numeric constants into the expression text
for(i in c('param','multiplier', 'meanscale', 'inneroffset','offset')){
irep = get(i)
out <- gsub(pattern = i,replacement = irep,out)
}
}
}
# if(jacobian) transform <- transform + ifelse(driftdiag,60,50)
# a non-integer 'transform' is itself the expression to evaluate
if(is.na(suppressWarnings(as.integer(transform)))) out <- transform
# evaluate in this frame, where param and the scaling constants are in scope
if(!singletext) out <- eval(parse(text=out))
return(out)
}
# Jtformshapes <- function(){
# fn=sapply(tformshapes(singletext = TRUE),function(x) Simplify(x))
# names(fn)=paste0('fn',1:length(fn))
# jacobianSymb(fn,variables = c('param'))
# }
| /R/tformshapes.R | no_license | cdriveraus/ctsem | R | false | false | 2,415 | r | tformshapes <- function(singletext=FALSE,transform=NA,jacobian=FALSE,driftdiag=FALSE, parname='param',stan=FALSE){
out = c('param',
'(log1p_exp(param))',
'(exp(param))',
'(1/(1+exp(-param)))',
'((param)^3)',
'log1p(param)', #why is this here? results in NA's / warnings.
'meanscale',
'1/(1+exp(-param))',
'exp(param)',
'1/(1+exp(-param))-(exp(param)^2)/(1+exp(param))^2',
'3*param^2',
'1/(1+param)')
tfvec=c(0:5,50:55)
out=gsub('param',parname,out,fixed=TRUE)
# if(driftdiag && jacobian) out = paste0(out,' * param')
# out = sapply(out,Simplify)
# names(out)=paste0('fn',1:length(out))
# if(jacobian) out = jacobianSymb(out,variables='param')
if(!is.na(transform)&&transform!=0) out = out[tfvec == transform] #ifelse(jacobian,0,1):(length(out)-ifelse(jacobian,1,0))
if(!singletext) {
out = paste0('if(transform==', tfvec,') param = ',out,';\n',collapse='')
if(!stan) out <- paste0('param = parin * meanscale + inneroffset; \n ',out,'
param=param*multiplier;
if(transform < 49) param = param+offset;')
if(stan) out <- paste0('if(meanscale!=1.0) param *= meanscale;
if(inneroffset != 0.0) param += inneroffset; \n',out,'
if(multiplier != 1.0) param *=multiplier;
if(transform < 49 && offset != 0.0) param+=offset;')
}
if(singletext) out <- paste0('offset + multiplier*',gsub('param','(param*meanscale+inneroffset)',out))
out=gsub(' ','',out,fixed=TRUE)
return(out)
}
tform <- function(parin, transform, multiplier, meanscale, offset, inneroffset, extratforms='',singletext=FALSE,jacobian=FALSE,driftdiag=FALSE){
# Apply a ctsem parameter transform to 'parin'.
#
# transform: either an integer code understood by tformshapes(), or a
#   character expression in terms of 'param' that is evaluated directly.
# multiplier / meanscale / offset / inneroffset: scaling constants that the
#   generated or user-supplied expression may reference.
# extratforms: extra transform code appended to the generated code before
#   evaluation (integer-code path with singletext=FALSE only).
# singletext: if TRUE, return the transform as a text expression instead of
#   evaluating it.
# jacobian / driftdiag: retained for interface compatibility; currently
#   unused (see commented-out line below).
#
# Returns the transformed numeric value, or the expression text when
# singletext=TRUE.
param=parin
if(!is.na(suppressWarnings(as.integer(transform)))) {
out <- tformshapes(singletext=singletext,transform=as.integer(transform))#,jacobian=jacobian)
# BUGFIX: the paste0() result was previously discarded, so 'extratforms'
# was never actually appended to the generated code.
if(!singletext) out <- paste0(out,extratforms)
if(singletext) {
# substitute the actual numeric constants into the expression text
for(i in c('param','multiplier', 'meanscale', 'inneroffset','offset')){
irep = get(i)
out <- gsub(pattern = i,replacement = irep,out)
}
}
}
# if(jacobian) transform <- transform + ifelse(driftdiag,60,50)
# a non-integer 'transform' is itself the expression to evaluate
if(is.na(suppressWarnings(as.integer(transform)))) out <- transform
# evaluate in this frame, where param and the scaling constants are in scope
if(!singletext) out <- eval(parse(text=out))
return(out)
}
# Jtformshapes <- function(){
# fn=sapply(tformshapes(singletext = TRUE),function(x) Simplify(x))
# names(fn)=paste0('fn',1:length(fn))
# jacobianSymb(fn,variables = c('param'))
# }
|
# AFL/valgrind fuzzing regression case: a 5x7 double matrix mixing extreme
# magnitudes and zeros, replayed against multivariance::fastdist.
testlist <- list(x = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.1240659337578e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# call the target with the fuzzed argument list
result <- do.call(multivariance::fastdist,testlist)
str(result) | /multivariance/inst/testfiles/fastdist/AFL_fastdist/fastdist_valgrind_files/1613097404-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 302 | r | testlist <- list(x = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.1240659337578e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance::fastdist,testlist)
str(result) |
# AFL/valgrind fuzzing regression case for the unexported
# multivariance:::match_rows helper: a 5x7 double matrix A with extreme
# magnitudes and zeros, and a 1x1 zero matrix B.
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 1.22810536108277e+146, 2.25092825522432e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# call the target with the fuzzed argument list
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613108378-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 344 | r | testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 1.22810536108277e+146, 2.25092825522432e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
## ----------------------------------------------------------------- ##
## nondom_ts.R ----------------------------------------------------- ##
## Author: Peter Norwood, NC State University ---------------------- ##
## Purpose: run an experiment with nondom thompson sampling -------- ##
## ----------------------------------------------------------------- ##
## load functions
# NOTE(review): absolute, user-specific working directory -- breaks on any
# other machine; consider relative paths or here::here(). TODO confirm.
setwd("~/Research/NonDomSeqExp/NonDomSeqExp/contextual_bandit")
source("funcs.R")  # expected to provide mean_outcome(), comb(), thompson_probs() used below -- confirm
library(MASS)
## nondom_ts
## Purpose: run an experiment with thompson sampling
## param train_set: dataset with context for N individuals
## param burn_in: sample size of simple randomization
## param A: vector of possible treatments
## param theta: true mean outcome parameter vector
## param sd_Y: standard deviation for response
## return dat: dataframe with X,A,mu,Y,regret,norm
nondom_ts <- function(train_set,burn_in,A,theta,sd_Y){
## Runs a sequential experiment: the first 'burn_in' rows reuse the simple
## randomization stored in train_set; every later subject is assigned by
## Thompson sampling restricted to the estimated non-dominated actions.
## Working matrix 'dat' layout: cols 1..p = context, p+1 = assigned action,
## p+2 = true mean outcome, p+3 = observed outcome, p+4 = regret,
## p+5 = ||theta - theta_hat||, p+6 = 1 if the action was truly non-dominated.
## Helpers mean_outcome(), comb() and thompson_probs() are defined outside
## this file (presumably sourced from funcs.R by the surrounding script).
## number of subjects
N <- nrow(train_set)
## dimension of context (train_set also carries A, mu and Y columns)
p <- ncol(train_set)-3
## number of arms
K <- length(A)
## trial dataset
dat <- matrix(NA,nrow=N,ncol=p+6)
## context
dat[1:N,1:p] <- as.matrix(train_set)[1:N,1:p]
## first burn_in interventions
dat[1:burn_in,p+1] <- train_set$A[1:burn_in]
## first burn_in means
dat[1:burn_in,p+2] <- train_set$mu[1:burn_in]
## first burn_in outcomes
dat[1:burn_in,p+3] <- train_set$Y[1:burn_in]
## name the same colnames
colnames(dat) <- c(colnames(train_set),"regret","norm","non_dom")
## loop through the new patients
for(i in (burn_in+1):N){
## fit the outcome model on all previously observed subjects;
## arm-specific intercepts plus arm-by-context interactions, no global intercept
X_temp <- dat[1:(i-1),1:p]
A_temp <- dat[1:(i-1),p+1]
Y <- dat[1:(i-1),p+3]
temp <- data.frame(X_temp,A=A_temp,Y)
fit <- lm(Y~-1+as.factor(A)+as.factor(A):.-
as.factor(A):A,
data=temp)
## gather parameter convergence information
coef_fit <- coef(fit)
#Sigma <- vcov(fit)
theta_hat <- c()
## put them in the same format as the theta vector
tik <- 1
for(ii in 1:K){
for(jj in 0:p){
## coefficient for arm ii / covariate jj in lm's interleaved ordering
theta_hat[tik] <- coef_fit[ii+(K)*jj]
tik=tik+1
}
}
## measure the euclidean norm between theta and theta_hat
dat[i,p+5] <- norm(matrix(theta-theta_hat),type="F")
## loop through interventions to find greedy intervention;
## info columns: action, estimated mean, true mean, information gain
info <- matrix(NA,nrow=length(A),ncol=4)
tick=1
for(a in A){
## gather ests if a is assigned
temp_dat <- data.frame(t(dat[i,1:p]),A=a,Y=0)
## estimated mean outcome given a
mu_hat <- predict(fit,temp_dat)
## true mean outcome given a
mu <- mean_outcome(X=dat[i,1:p],A=A,a=a,theta=theta)
## new design: information gain = inverse trace of (X'X)^-1 if a were added
temp_df <- rbind(temp,temp_dat)
temp_X <- model.matrix(Y~-1+as.factor(A)+as.factor(A):.-
as.factor(A):A,temp_df)
XtX <- t(temp_X) %*% temp_X
XtXi <- solve(XtX)
info_gain <- 1/sum(diag(XtXi))
## save info
info[tick,] <- c(a,mu_hat,mu,info_gain)
tick=tick+1
}
## save info as dataframe
info <- data.frame(info)
colnames(info) <- c("A","mu_hat","mu","info_gain")
## true non-dominated set (by true mean) and estimated non-dominated set
true_nondom <- comb(info$mu,info$info_gain)
est_nondom <- comb(info$mu_hat,info$info_gain)
## randomize via thompson sampling over the estimated non-dominated actions
ts_probs <- thompson_probs(fit=fit,txt=est_nondom,
new_sub=data.frame(t(dat[i,1:p]),A=1,Y=0))
ts_probs$A <- as.numeric(as.character(ts_probs$A))
## assign intervention sampled from the Thompson probabilities
## (sample() misbehaves on a length-1 numeric vector, hence the branch)
if(nrow(ts_probs)==1){
dat[i,p+1] <- ts_probs$A[1]
}else{
dat[i,p+1] <- sample(ts_probs$A,1,prob=ts_probs$probs)
}
## find mean outcome
## NOTE(review): indexes 'info' rows by the treatment's value, which is
## only correct when A == 1:K -- confirm for other treatment codings.
dat[i,p+2] <- info$mu[dat[i,p+1]]
## find outcome
dat[i,p+3] <- rnorm(1,dat[i,p+2],sd_Y)
## find regret
dat[i,p+4] <- max(info$mu) - dat[i,p+2]
## determine if it was non-dominated
dat[i,p+6] <- ifelse(dat[i,p+1] %in% true_nondom,1,0)
}
dat <- data.frame(dat)
dat$sub <- 1:nrow(dat)
return(dat)
}
| /contextual_bandit/nondom_ts.R | no_license | peterpnorwood/NonDomSeqExp | R | false | false | 4,079 | r | ## ----------------------------------------------------------------- ##
## nondom_ts.R ----------------------------------------------------- ##
## Author: Peter Norwood, NC State University ---------------------- ##
## Purpose: run an experiment with nondom thompson sampling -------- ##
## ----------------------------------------------------------------- ##
## load functions
setwd("~/Research/NonDomSeqExp/NonDomSeqExp/contextual_bandit")
source("funcs.R")
library(MASS)
## nondom_ts
## Purpose: run an experiment with thompson sampling
## param train_set: dataset with context for N individuals
## param burn_in: sample size of simple randomization
## param A: vector of possible treatments
## param theta: true mean outcome parameter vector
## param sd_Y: standard deviation for response
## return dat: dataframe with X,A,mu,Y,regret,norm
nondom_ts <- function(train_set,burn_in,A,theta,sd_Y){
## Runs a sequential experiment: the first 'burn_in' rows reuse the simple
## randomization stored in train_set; every later subject is assigned by
## Thompson sampling restricted to the estimated non-dominated actions.
## Working matrix 'dat' layout: cols 1..p = context, p+1 = assigned action,
## p+2 = true mean outcome, p+3 = observed outcome, p+4 = regret,
## p+5 = ||theta - theta_hat||, p+6 = 1 if the action was truly non-dominated.
## Helpers mean_outcome(), comb() and thompson_probs() are defined outside
## this file (presumably sourced from funcs.R by the surrounding script).
## number of subjects
N <- nrow(train_set)
## dimension of context (train_set also carries A, mu and Y columns)
p <- ncol(train_set)-3
## number of arms
K <- length(A)
## trial dataset
dat <- matrix(NA,nrow=N,ncol=p+6)
## context
dat[1:N,1:p] <- as.matrix(train_set)[1:N,1:p]
## first burn_in interventions
dat[1:burn_in,p+1] <- train_set$A[1:burn_in]
## first burn_in means
dat[1:burn_in,p+2] <- train_set$mu[1:burn_in]
## first burn_in outcomes
dat[1:burn_in,p+3] <- train_set$Y[1:burn_in]
## name the same colnames
colnames(dat) <- c(colnames(train_set),"regret","norm","non_dom")
## loop through the new patients
for(i in (burn_in+1):N){
## fit the outcome model on all previously observed subjects;
## arm-specific intercepts plus arm-by-context interactions, no global intercept
X_temp <- dat[1:(i-1),1:p]
A_temp <- dat[1:(i-1),p+1]
Y <- dat[1:(i-1),p+3]
temp <- data.frame(X_temp,A=A_temp,Y)
fit <- lm(Y~-1+as.factor(A)+as.factor(A):.-
as.factor(A):A,
data=temp)
## gather parameter convergence information
coef_fit <- coef(fit)
#Sigma <- vcov(fit)
theta_hat <- c()
## put them in the same format as the theta vector
tik <- 1
for(ii in 1:K){
for(jj in 0:p){
## coefficient for arm ii / covariate jj in lm's interleaved ordering
theta_hat[tik] <- coef_fit[ii+(K)*jj]
tik=tik+1
}
}
## measure the euclidean norm between theta and theta_hat
dat[i,p+5] <- norm(matrix(theta-theta_hat),type="F")
## loop through interventions to find greedy intervention;
## info columns: action, estimated mean, true mean, information gain
info <- matrix(NA,nrow=length(A),ncol=4)
tick=1
for(a in A){
## gather ests if a is assigned
temp_dat <- data.frame(t(dat[i,1:p]),A=a,Y=0)
## estimated mean outcome given a
mu_hat <- predict(fit,temp_dat)
## true mean outcome given a
mu <- mean_outcome(X=dat[i,1:p],A=A,a=a,theta=theta)
## new design: information gain = inverse trace of (X'X)^-1 if a were added
temp_df <- rbind(temp,temp_dat)
temp_X <- model.matrix(Y~-1+as.factor(A)+as.factor(A):.-
as.factor(A):A,temp_df)
XtX <- t(temp_X) %*% temp_X
XtXi <- solve(XtX)
info_gain <- 1/sum(diag(XtXi))
## save info
info[tick,] <- c(a,mu_hat,mu,info_gain)
tick=tick+1
}
## save info as dataframe
info <- data.frame(info)
colnames(info) <- c("A","mu_hat","mu","info_gain")
## true non-dominated set (by true mean) and estimated non-dominated set
true_nondom <- comb(info$mu,info$info_gain)
est_nondom <- comb(info$mu_hat,info$info_gain)
## randomize via thompson sampling over the estimated non-dominated actions
ts_probs <- thompson_probs(fit=fit,txt=est_nondom,
new_sub=data.frame(t(dat[i,1:p]),A=1,Y=0))
ts_probs$A <- as.numeric(as.character(ts_probs$A))
## assign intervention sampled from the Thompson probabilities
## (sample() misbehaves on a length-1 numeric vector, hence the branch)
if(nrow(ts_probs)==1){
dat[i,p+1] <- ts_probs$A[1]
}else{
dat[i,p+1] <- sample(ts_probs$A,1,prob=ts_probs$probs)
}
## find mean outcome
## NOTE(review): indexes 'info' rows by the treatment's value, which is
## only correct when A == 1:K -- confirm for other treatment codings.
dat[i,p+2] <- info$mu[dat[i,p+1]]
## find outcome
dat[i,p+3] <- rnorm(1,dat[i,p+2],sd_Y)
## find regret
dat[i,p+4] <- max(info$mu) - dat[i,p+2]
## determine if it was non-dominated
dat[i,p+6] <- ifelse(dat[i,p+1] %in% true_nondom,1,0)
}
dat <- data.frame(dat)
dat$sub <- 1:nrow(dat)
return(dat)
}
|
# > file written: Sun, 09 Dec 2018 07:36:20 +0100
# in this file, settings that are specific for a run on a dataset
# gives path to output folder
pipOutFold <- "OUTPUT_FOLDER/TCGAbrca_lum_bas"
# full path (starting with /mnt/...)
# following format expected for the input
# colnames = samplesID
# rownames = geneID
# !!! geneID are expected not duplicated (presumably; original comment read "not difficulted" -- see removeDupGeneID below)
# *************************************************************************************************************************
# ************************************ SETTINGS FOR 0_prepGeneData
# *************************************************************************************************************************
# UPDATE 07.12.2018: for RSEM data, the "analog" FPKM file is provided separately (built in prepData)
rna_fpkmDT_file <- "/mnt/ed4/marie/other_datasets/TCGAbrca_lum_bas/fpkmDT.Rdata"
rnaseqDT_file <- "/mnt/ed4/marie/other_datasets/TCGAbrca_lum_bas/rnaseqDT_v2.Rdata"
my_sep <- "\t"
# input is Rdata or txt file ?
# TRUE if the input is Rdata
inRdata <- TRUE
# can be ensemblID, entrezID, geneSymbol
geneID_format <- "entrezID"
stopifnot(geneID_format %in% c("ensemblID", "entrezID", "geneSymbol"))
# are geneID rownames ? -> "rn" or numeric giving the column
geneID_loc <- "rn"
stopifnot(geneID_loc == "rn" | is.numeric(geneID_loc))
removeDupGeneID <- TRUE
# *************************************************************************************************************************
# ************************************ SETTINGS FOR 1_runGeneDE
# *************************************************************************************************************************
# labels for conditions
cond1 <- "lum"
cond2 <- "bas"
# path to sampleID for each condition - should be Rdata ( ! sample1 for cond1, sample2 for cond2 ! )
sample1_file <- "/mnt/ed4/marie/other_datasets/TCGAbrca_lum_bas/lum_ID.Rdata"
sample2_file <- "/mnt/ed4/marie/other_datasets/TCGAbrca_lum_bas/bas_ID.Rdata"
minCpmRatio <- 20/888
inputDataType <- "RSEM"
nCpu <- 20
# number of permutations
nRandomPermut <- 10000
step8_for_permutGenes <- TRUE
step8_for_randomTADsFix <- FALSE
step8_for_randomTADsGaussian <- FALSE
step8_for_randomTADsShuffle <- FALSE
step14_for_randomTADsShuffle <- FALSE
# > file edited: Sat, 22 Feb 2020 09:47:44 +0100
# NOTE(review): this block assumes 'setDir' is defined by the caller before
# this settings file is sourced -- confirm.
# path to output folder:
pipOutFold <- "/mnt/etemp/marie/v2_Yuanlong_Cancer_HiC_data_TAD_DA/PIPELINE/OUTPUT_FOLDER/Barutcu_MCF-7_RANDOMSHIFT_40kb/TCGAbrca_lum_bas"
# OVERWRITE THE DEFAULT SETTINGS FOR INPUT FILES - use TADs from the current Hi-C dataset
TADpos_file <- paste0(setDir, "/mnt/etemp/marie/v2_Yuanlong_Cancer_HiC_data_TAD_DA/Barutcu_MCF-7_RANDOMSHIFT_40kb/genes2tad/all_assigned_regions.txt")
# example rows of the TAD position file:
#chr1 chr1_TAD1 750001 1300000
#chr1 chr1_TAD2 2750001 3650000
#chr1 chr1_TAD3 3650001 4150000
gene2tadDT_file <- paste0(setDir, "/mnt/etemp/marie/v2_Yuanlong_Cancer_HiC_data_TAD_DA/Barutcu_MCF-7_RANDOMSHIFT_40kb/genes2tad/all_genes_positions.txt")
# example rows of the gene-to-TAD assignment file:
#LINC00115 chr1 761586 762902 chr1_TAD1
#FAM41C chr1 803451 812283 chr1_TAD1
#SAMD11 chr1 860260 879955 chr1_TAD1
#NOC2L chr1 879584 894689 chr1_TAD1
# overwrite main_settings.R: nCpu <- 25
nCpu <- 40
# *************************************************************************************************************************
# ************************************ SETTINGS FOR PERMUTATIONS (5#_, 8c_)
# *************************************************************************************************************************
# number of permutations
nRandomPermut <- 100000
gene2tadAssignMethod <- "maxOverlap"
nRandomPermutShuffle <- 100000
step8_for_permutGenes <- TRUE
step8_for_randomTADsFix <- FALSE
step8_for_randomTADsGaussian <- FALSE
step8_for_randomTADsShuffle <- FALSE
step14_for_randomTADsShuffle <- FALSE
# added here 13.08.2019 to change the filtering of min. read counts
# (the earlier minCpmRatio setting is replaced by absolute thresholds below)
rm(minCpmRatio)
min_counts <- 5
min_sampleRatio <- 0.8
# to have compatible versions of Rdata
options(save.defaults = list(version = 2))
| /INPUT_FILES/Barutcu_MCF-7_RANDOMSHIFT_40kb/run_settings_TCGAbrca_lum_bas.R | no_license | marzuf/v2_Yuanlong_Cancer_HiC_data_TAD_DA_PIPELINE_INPUT_FILES | R | false | false | 4,279 | r |
# > file written: Sun, 09 Dec 2018 07:36:20 +0100
# in this file, settings that are specific for a run on a dataset
# gives path to output folder
pipOutFold <- "OUTPUT_FOLDER/TCGAbrca_lum_bas"
# full path (starting with /mnt/...)
# following format expected for the input
# colnames = samplesID
# rownames = geneID
# !!! geneID are expected not difficulted
# *************************************************************************************************************************
# ************************************ SETTINGS FOR 0_prepGeneData
# *************************************************************************************************************************
# UPDATE 07.12.2018: for RSEM data, the "analog" FPKM file is provided separately (built in prepData)
rna_fpkmDT_file <- "/mnt/ed4/marie/other_datasets/TCGAbrca_lum_bas/fpkmDT.Rdata"
rnaseqDT_file <- "/mnt/ed4/marie/other_datasets/TCGAbrca_lum_bas/rnaseqDT_v2.Rdata"
my_sep <- "\t"
# input is Rdata or txt file ?
# TRUE if the input is Rdata
inRdata <- TRUE
# can be ensemblID, entrezID, geneSymbol
geneID_format <- "entrezID"
stopifnot(geneID_format %in% c("ensemblID", "entrezID", "geneSymbol"))
# are geneID rownames ? -> "rn" or numeric giving the column
geneID_loc <- "rn"
stopifnot(geneID_loc == "rn" | is.numeric(geneID_loc))
removeDupGeneID <- TRUE
# *************************************************************************************************************************
# ************************************ SETTINGS FOR 1_runGeneDE
# *************************************************************************************************************************
# labels for conditions
cond1 <- "lum"
cond2 <- "bas"
# path to sampleID for each condition - should be Rdata ( ! sample1 for cond1, sample2 for cond2 ! )
sample1_file <- "/mnt/ed4/marie/other_datasets/TCGAbrca_lum_bas/lum_ID.Rdata"
sample2_file <- "/mnt/ed4/marie/other_datasets/TCGAbrca_lum_bas/bas_ID.Rdata"
minCpmRatio <- 20/888
inputDataType <- "RSEM"
nCpu <- 20
# number of permutations
nRandomPermut <- 10000
step8_for_permutGenes <- TRUE
step8_for_randomTADsFix <- FALSE
step8_for_randomTADsGaussian <- FALSE
step8_for_randomTADsShuffle <- FALSE
step14_for_randomTADsShuffle <- FALSE
# > file edited: Sat, 22 Feb 2020 09:47:44 +0100
# path to output folder:
pipOutFold <- "/mnt/etemp/marie/v2_Yuanlong_Cancer_HiC_data_TAD_DA/PIPELINE/OUTPUT_FOLDER/Barutcu_MCF-7_RANDOMSHIFT_40kb/TCGAbrca_lum_bas"
# OVERWRITE THE DEFAULT SETTINGS FOR INPUT FILES - use TADs from the current Hi-C dataset
TADpos_file <- paste0(setDir, "/mnt/etemp/marie/v2_Yuanlong_Cancer_HiC_data_TAD_DA/Barutcu_MCF-7_RANDOMSHIFT_40kb/genes2tad/all_assigned_regions.txt")
#chr1 chr1_TAD1 750001 1300000
#chr1 chr1_TAD2 2750001 3650000
#chr1 chr1_TAD3 3650001 4150000
gene2tadDT_file <- paste0(setDir, "/mnt/etemp/marie/v2_Yuanlong_Cancer_HiC_data_TAD_DA/Barutcu_MCF-7_RANDOMSHIFT_40kb/genes2tad/all_genes_positions.txt")
#LINC00115 chr1 761586 762902 chr1_TAD1
#FAM41C chr1 803451 812283 chr1_TAD1
#SAMD11 chr1 860260 879955 chr1_TAD1
#NOC2L chr1 879584 894689 chr1_TAD1
# overwrite main_settings.R: nCpu <- 25
nCpu <- 40
# *************************************************************************************************************************
# ************************************ SETTINGS FOR PERMUTATIONS (5#_, 8c_)
# *************************************************************************************************************************
# number of permutations
nRandomPermut <- 100000
gene2tadAssignMethod <- "maxOverlap"
nRandomPermutShuffle <- 100000
step8_for_permutGenes <- TRUE
step8_for_randomTADsFix <- FALSE
step8_for_randomTADsGaussian <- FALSE
step8_for_randomTADsShuffle <- FALSE
step14_for_randomTADsShuffle <- FALSE
# added here 13.08.2019 to change the filtering of min. read counts
rm(minCpmRatio)
min_counts <- 5
min_sampleRatio <- 0.8
# to have compatible versions of Rdata
options(save.defaults = list(version = 2))
|
#' @title IBM Watson Audio Transcriber
#' @description Convert your audio to transcripts with optional keyword
#' detection and profanity cleaning.
#' @param audios Character vector (list) of paths to the audio files to be
#' transcribed; one recognition request is issued per element.
#' @param userpwd Character scalar containing username:password for the service.
#' @param keep_data Character scalar specifying whether to share your data with
#' Watson services for the purpose of training their models.
#' @param callback Function that can be applied to responses to examine http status,
#' headers, and content, to debug or to write a custom parser for content.
#' The default callback parses content into a data.frame while dropping other
#' response values to make the output easily passable to tidyverse packages like
#' dplyr or ggplot2. For further details or debugging one can pass a print or a
#' more complicated function.
#' @param model Character scalar specifying language and bandwidth model. Alternatives
#' are ar-AR_BroadbandModel, en-UK_BroadbandModel, en-UK_NarrowbandModel,
#' en-US_NarrowbandModel, es-ES_BroadbandModel, es-ES_NarrowbandModel,
#' fr-FR_BroadbandModel, ja-JP_BroadbandModel, ja-JP_NarrowbandModel,
#' pt-BR_BroadbandModel, pt-BR_NarrowbandModel, zh-CN_BroadbandModel,
#' zh-CN_NarrowbandModel.
#' @param inactivity_timeout Integer scalar giving the number of seconds after which
#' the result is returned if no speech is detected.
#' @param keywords List of keywords to be detected in the speech stream.
#' @param keywords_threshold Double scalar from 0 to 1 specifying the lower bound on
#' confidence to accept detected keywords in speech.
#' @param max_alternatives Integer scalar giving the maximum number of alternative
#' transcripts to return.
#' @param word_alternatives_threshold Double scalar from 0 to 1 giving lower bound
#' on confidence of possible words.
#' @param word_confidence Logical scalar indicating whether to return confidence for
#' each word.
#' @param timestamps Logical scalar indicating whether to return time alignment for
#' each word.
#' @param profanity_filter Logical scalar indicating whether to censor profane words.
#' @param smart_formatting Logical scalar indicating whether dates, times, numbers, etc.
#' are to be formatted nicely in the transcript.
#' @param content_type Character scalar showing format of the audio file. Alternatives
#' are audio/flac, audio/l16;rate=n;channels=k (16 channel limit),
#' audio/wav (9 channel limit), audio/ogg;codecs=opus,
#' audio/basic (narrowband models only).
#' @param speaker_labels Logical scalar indicating whether to infer speakers on a mono
#' channel. Automatically turns on timestamp collection for each word.
#' @return List of parsed responses, one element per input file; a failed
#' request leaves the raw response object in its slot.
#' @export
audio_text <- function(
audios,
userpwd,
keep_data = "true",
callback = NULL,
model = "en-US_BroadbandModel",
inactivity_timeout = -1,
keywords = list(),
keywords_threshold = NA,
max_alternatives = 1,
word_alternatives_threshold = NA,
word_confidence = FALSE,
timestamps = FALSE,
profanity_filter = TRUE,
smart_formatting = FALSE,
content_type = "audio/wav",
speaker_labels = FALSE)
{
# build the recognize endpoint URL with the chosen model
protocol <- "https://"
service <- "stream.watsonplatform.net/speech-to-text/api/v1/recognize?"
parameters <- paste("model", model, sep = "=")
url <- paste0(protocol, service, parameters)
# request options; NA-valued entries are dropped before serialization
metadata <- list(
"part_content_type" = content_type,
"data_parts_count" = 1,
"inactivity_timeout" = inactivity_timeout,
"keywords" = keywords,
"keywords_threshold" = keywords_threshold,
"max_alternatives" = max_alternatives,
"word_alternatives_threshold" = word_alternatives_threshold,
"word_confidence" = word_confidence,
"timestamps" = timestamps,
"profanity_filter" = profanity_filter,
"smart_formatting" = smart_formatting,
"speaker_labels" = speaker_labels
)
metadata <- toJSON(metadata[!is.na(metadata)], auto_unbox = TRUE)
# default success handler: parse the JSON body into resps[[index]]
# (the index is baked into each request's handler copy via formals() below)
done <- if (is.null(callback)) function(resp, index) {
resps[[index]] <<- fromJSON(rawToChar(resp$content))
invisible(NULL)
} else callback
# failure handler: keep the raw response object for inspection
fail <- function(resp, index) {
resps[[index]] <<- resp
invisible(NULL)
}
resps <- vector("list", length(audios))
# queue one multipart upload per audio file on curl's multi handle
invisible(
lapply(
seq_along(audios),
function(index) {
# bake the current index into the handlers' default arguments
if (is.null(callback)) formals(done)$index <- index
formals(fail)$index <- index
form <- form_file(audios[index], content_type)
new_handle(url = url) %>%
handle_setopt("userpwd" = userpwd) %>%
handle_setheaders(
"X-Watson-Learning-Opt-Out"= keep_data,
"Content-Type" = "multipart/form-data",
"Transfer-Encoding" = "chunked"
) %>%
handle_setform(metadata = metadata, upload = form) %>%
multi_add(done = done, fail = fail)
}
)
)
# block until all queued requests complete, then return collected results
multi_run()
resps
} | /R/audio_cognizers.R | no_license | cspenn/cognizer | R | false | false | 4,947 | r |
#' @title IBM Watson Audio Transcriber
#' @description Convert your audio to transcripts with optional keyword
#' detection and profanity cleaning.
#' @param audios Character vector (list) of paths to images or to .zip files containing
#' upto 100 images.
#' @param userpwd Character scalar containing username:password for the service.
#' @param keep_data Character scalar specifying whether to share your data with
#' Watson services for the purpose of training their models.
#' @param callback Function that can be applied to responses to examine http status,
#' headers, and content, to debug or to write a custom parser for content.
#' The default callback parses content into a data.frame while dropping other
#' response values to make the output easily passable to tidyverse packages like
#' dplyr or ggplot2. For further details or debugging one can pass a print or a
#' more compicated function.
#' @param model Character scalar specifying language and bandwidth model. Alternatives
#' are ar-AR_BroadbandModel, en-UK_BroadbandModel, en-UK_NarrowbandModel,
#' en-US_NarrowbandModel, es-ES_BroadbandModel, es-ES_NarrowbandModel,
#' fr-FR_BroadbandModel, ja-JP_BroadbandModel, ja-JP_NarrowbandModel,
#' pt-BR_BroadbandModel, pt-BR_NarrowbandModel, zh-CN_BroadbandModel,
#' zh-CN_NarrowbandModel.
#' @param inactivity_timeout Integer scalar giving the number of seconds after which
#' the result is returned if no speech is detected.
#' @param keywords List of keywords to be detected in the speech stream.
#' @param keywords_threshold Double scalar from 0 to 1 specifying the lower bound on
#' confidence to accept detected keywords in speech.
#' @param max_alternatives Integer scalar giving the maximum number of alternative
#' transcripts to return.
#' @param word_alternatives_threshold Double scalar from 0 to 1 giving lower bound
#' on confidence of possible words.
#' @param word_confidence Logical scalar indicating whether to return confidence for
#' each word.
#' @param timestamps Logical scalar indicating whether to return time alignment for
#' each word.
#' @param profanity_filter Logical scalar indicating whether to censor profane words.
#' @param smart_formatting Logical scalar indicating whether dates, times, numbers, etc.
#' are to be formatted nicely in the transcript.
#' @param content_type Character scalar showing format of the audio file. Alternatives
#' are audio/flac, audio/l16;rate=n;channels=k (16 channel limit),
#' audio/wav (9 channel limit), audio/ogg;codecs=opus,
#' audio/basic (narrowband models only).
#' @param speaker_labels Logical scalar indicating whether to infer speakers on a mono
#' channel. Automatically turns on timestamp collection for each word.
#' @return List of parsed responses.
#' @export
audio_text <- function(
audios,
userpwd,
keep_data = "true",
callback = NULL,
model = "en-US_BroadbandModel",
inactivity_timeout = -1,
keywords = list(),
keywords_threshold = NA,
max_alternatives = 1,
word_alternatives_threshold = NA,
word_confidence = FALSE,
timestamps = FALSE,
profanity_filter = TRUE,
smart_formatting = FALSE,
content_type = "audio/wav",
speaker_labels = FALSE)
{
protocol <- "https://"
service <- "stream.watsonplatform.net/speech-to-text/api/v1/recognize?"
parameters <- paste("model", model, sep = "=")
url <- paste0(protocol, service, parameters)
metadata <- list(
"part_content_type" = content_type,
"data_parts_count" = 1,
"inactivity_timeout" = inactivity_timeout,
"keywords" = keywords,
"keywords_threshold" = keywords_threshold,
"max_alternatives" = max_alternatives,
"word_alternatives_threshold" = word_alternatives_threshold,
"word_confidence" = word_confidence,
"timestamps" = timestamps,
"profanity_filter" = profanity_filter,
"smart_formatting" = smart_formatting,
"speaker_labels" = speaker_labels
)
metadata <- toJSON(metadata[!is.na(metadata)], auto_unbox = TRUE)
done <- if (is.null(callback)) function(resp, index) {
resps[[index]] <<- fromJSON(rawToChar(resp$content))
invisible(NULL)
} else callback
fail <- function(resp, index) {
resps[[index]] <<- resp
invisible(NULL)
}
resps <- vector("list", length(audios))
invisible(
lapply(
seq_along(audios),
function(index) {
if (is.null(callback)) formals(done)$index <- index
formals(fail)$index <- index
form <- form_file(audios[index], content_type)
new_handle(url = url) %>%
handle_setopt("userpwd" = userpwd) %>%
handle_setheaders(
"X-Watson-Learning-Opt-Out"= keep_data,
"Content-Type" = "multipart/form-data",
"Transfer-Encoding" = "chunked"
) %>%
handle_setform(metadata = metadata, upload = form) %>%
multi_add(done = done, fail = fail)
}
)
)
multi_run()
resps
} |
#' Radial bar plot of use reports (UR) per species
#'
#' Creates a radial bar plot of use reports (UR) per species based on the `UR function`.
#' @param data is an ethnobotany data set with column 1 'informant' and 2 'sp_name' as row identifiers of informants and of species names respectively.
#' The rest of the columns are the identified ethnobotany use categories. The data should be populated with counts of uses per person (should be 0 or 1 values).
#' @param analysis is one of the quantitative ethnobotany functions from ethnobotanyR, i.e. ethnobotanyR::FCs.
#' @return A radial bar (polar coordinate) ggplot object; the plot is also printed.
#' @keywords ethnobotany, cultural value, use report
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr filter summarize select left_join group_by
#' @importFrom assertthat validate_that see_if
#' @importFrom ggplot2 ggplot aes geom_bar coord_polar theme_minimal geom_bar scale_y_continuous
#'
#' @examples
#'
#' #Use built-in ethnobotany data example and Frequency of Citation function FCs()
#' Radial_plot(ethnobotanydata, analysis = FCs)
#'
#' #Generate random dataset of three informants uses for four species
#' eb_data <- data.frame(replicate(10,sample(0:1,20,rep=TRUE)))
#' names(eb_data) <- gsub(x = names(eb_data), pattern = "X", replacement = "Use_")
#' eb_data$informant<-sample(c('User_1', 'User_2', 'User_3'), 20, replace=TRUE)
#' eb_data$sp_name<-sample(c('sp_1', 'sp_2', 'sp_3', 'sp_4'), 20, replace=TRUE)
#' Radial_plot(data = eb_data, analysis = FCs)
#'
#' @export Radial_plot
Radial_plot <- function(data, analysis) {
  if (!requireNamespace("dplyr", quietly = TRUE)) {
    stop("Package \"dplyr\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (!requireNamespace("magrittr", quietly = TRUE)) {
    stop("Package \"magrittr\" needed for this function to work. Please install it.",
         call. = FALSE)
  }

  value <- meltURdata <- URdata <- URs <- sp_name <- informant <- URps <- NULL # Setting the variables to NULL first, appeasing R CMD check

  #add error stops with validate_that
  assertthat::validate_that("informant" %in% colnames(data), msg = "The required column called \"informant\" is missing from your data. Add it.")
  assertthat::validate_that("sp_name" %in% colnames(data), msg = "The required column called \"sp_name\" is missing from your data. Add it.")

  assertthat::validate_that(is.factor(data$informant), msg = "The \"informant\" is not a factor variable. Transform it.")
  assertthat::validate_that(is.factor(data$sp_name), msg = "The \"sp_name\" is not a factor variable. Transform it.")

  assertthat::validate_that(all(sum(dplyr::select(data, -informant, -sp_name)>0)) , msg = "The sum of all UR is not greater than zero. Perhaps not all uses have values or are not numeric.")

  ## Use 'complete.cases' from stats to get to the collection of obs without NA
  data_complete<-data[stats::complete.cases(data), ]
  #message about complete cases
  # BUGFIX: compare row counts rather than length() -- length() on a
  # data.frame counts columns, so the previous check could never detect
  # dropped rows and the NA warning never fired.
  assertthat::see_if(nrow(data_complete) == nrow(data), msg = "Some of your observations included \"NA\" and were removed. Consider using \"0\" instead.")

  # BUGFIX: run the analysis on the NA-filtered data, as the message above
  # promises; previously the unfiltered 'data' was used and 'data_complete'
  # was computed but discarded.
  Radial_plot_data <- analysis(data_complete) #create subset-able data
  # the analysis result's last column holds the index value to plot
  names(Radial_plot_data)[length(names(Radial_plot_data))]<-"value"

  Radial_plot <-
    ggplot2::ggplot(Radial_plot_data, ggplot2::aes(x = sp_name, y = value, fill = sp_name)) +
    ggplot2::geom_bar(width = 1, stat = "identity", color = "white") +
    ggplot2::scale_y_continuous(breaks = 0:nlevels(Radial_plot_data$sp_name), position = "right") +
    ggplot2::coord_polar() +
    ggplot2::theme_minimal() +
    ggplot2::theme(axis.title.x=ggplot2::element_blank())+
    ggplot2::theme(axis.title.y=ggplot2::element_blank(),
                   axis.text.y=ggplot2::element_blank(),
                   axis.ticks.y=ggplot2::element_blank())+
    ggplot2::geom_text(ggplot2::aes(label=value), position=ggplot2::position_dodge(width=0.9), vjust=-0.25)+
    ggplot2::theme(legend.position = "none")

  print(Radial_plot)
}
| /R/Radial_plot.R | no_license | liyan620/ethnobotanyR | R | false | false | 3,977 | r | #' Radial bar plot of use reports (UR) per species
#'
#' Creates a radial bar plot of use reports (UR) per species based on the `UR function`.
#' @param data is an ethnobotany data set with column 1 'informant' and 2 'sp_name' as row identifiers of informants and of species names respectively.
#' The rest of the columns are the identified ethnobotany use categories. The data should be populated with counts of uses per person (should be 0 or 1 values).
#' @param analysis is one of the quantitative ethnobotany functions from ethnobotanyR, i.e. ethnobotanyR::FCs.
#' @keywords ethnobotany, cultural value, use report
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr filter summarize select left_join group_by
#' @importFrom assertthat validate_that see_if
#' @importFrom ggplot2 ggplot aes geom_bar coord_polar theme_minimal geom_bar scale_y_continuous
#'
#' @examples
#'
#' #Use built-in ethnobotany data example and Frequency of Citation function FCs()
#' Radial_plot(ethnobotanydata, analysis = FCs)
#'
#' #Generate random dataset of three informants uses for four species
#' eb_data <- data.frame(replicate(10,sample(0:1,20,rep=TRUE)))
#' names(eb_data) <- gsub(x = names(eb_data), pattern = "X", replacement = "Use_")
#' eb_data$informant<-sample(c('User_1', 'User_2', 'User_3'), 20, replace=TRUE)
#' eb_data$sp_name<-sample(c('sp_1', 'sp_2', 'sp_3', 'sp_4'), 20, replace=TRUE)
#' Radial_plot(data = eb_data, analysis = FCs)
#'
#' @export Radial_plot
Radial_plot <- function(data, analysis) {
  # Fail fast when suggested packages are unavailable; ggplot2 is guarded too
  # since it is called below just like dplyr.
  if (!requireNamespace("dplyr", quietly = TRUE)) {
    stop("Package \"dplyr\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (!requireNamespace("magrittr", quietly = TRUE)) {
    stop("Package \"magrittr\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  value <- meltURdata <- URdata <- URs <- sp_name <- informant <- URps <- NULL # Setting the variables to NULL first, appeasing R CMD check
  # Input validation: required identifier columns must exist and be factors.
  assertthat::validate_that("informant" %in% colnames(data), msg = "The required column called \"informant\" is missing from your data. Add it.")
  assertthat::validate_that("sp_name" %in% colnames(data), msg = "The required column called \"sp_name\" is missing from your data. Add it.")
  assertthat::validate_that(is.factor(data$informant), msg = "The \"informant\" is not a factor variable. Transform it.")
  assertthat::validate_that(is.factor(data$sp_name), msg = "The \"sp_name\" is not a factor variable. Transform it.")
  # At least one use-category cell must be positive or there is nothing to plot.
  # (The original `all(sum(... > 0))` wrapped a scalar in all(); the intended
  # check is simply that the count of positive cells is greater than zero.)
  assertthat::validate_that(sum(dplyr::select(data, -informant, -sp_name) > 0, na.rm = TRUE) > 0, msg = "The sum of all UR is not greater than zero. Perhaps not all uses have values or are not numeric.")
  ## Use 'complete.cases' from stats to get to the collection of obs without NA
  data_complete <- data[stats::complete.cases(data), ]
  # Message about complete cases. Compare row counts: length() on a data frame
  # returns the number of COLUMNS, which never changes when NA rows are
  # dropped, so the original length() comparison could never detect removals.
  assertthat::see_if(nrow(data_complete) == nrow(data), msg = "Some of your observations included \"NA\" and were removed. Consider using \"0\" instead.")
  Radial_plot_data <- analysis(data) # create subset-able data
  # The analysis functions return their statistic in the last column; rename it
  # to a fixed name so the aesthetics below can refer to it.
  names(Radial_plot_data)[length(names(Radial_plot_data))] <- "value"
  Radial_plot <-
    ggplot2::ggplot(Radial_plot_data, ggplot2::aes(x = sp_name, y = value, fill = sp_name)) +
    ggplot2::geom_bar(width = 1, stat = "identity", color = "white") +
    ggplot2::scale_y_continuous(breaks = 0:nlevels(Radial_plot_data$sp_name), position = "right") +
    ggplot2::coord_polar() +
    ggplot2::theme_minimal() +
    ggplot2::theme(axis.title.x = ggplot2::element_blank()) +
    ggplot2::theme(axis.title.y = ggplot2::element_blank(),
                   axis.text.y = ggplot2::element_blank(),
                   axis.ticks.y = ggplot2::element_blank()) +
    ggplot2::geom_text(ggplot2::aes(label = value), position = ggplot2::position_dodge(width = 0.9), vjust = -0.25) +
    ggplot2::theme(legend.position = "none")
  print(Radial_plot)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.