content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
## Fuzzer regression case: invoke ggforce's internal bezierPath() with a
## degenerate input (72 zeroed x-coordinates and an empty y vector) and
## display the structure of whatever it returns.
testlist <- list(detail = 0L, x = rep(0, 72), y = numeric(0))
result <- do.call(ggforce:::bezierPath, testlist)
str(result)
|
/ggforce/inst/testfiles/bezierPath/libFuzzer_bezierPath/bezierPath_valgrind_files/1609955101-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 332
|
r
|
## Fuzzer regression case: invoke ggforce's internal bezierPath() with a
## degenerate input (72 zeroed x-coordinates and an empty y vector) and
## display the structure of whatever it returns.
testlist <- list(detail = 0L, x = rep(0, 72), y = numeric(0))
result <- do.call(ggforce:::bezierPath, testlist)
str(result)
|
## Regularized logistic regression with polynomial features (ML ex2-style).

## Expand two feature vectors into all polynomial terms up to `degree`:
## an intercept column followed by X1^j * X2^(i-j) for i in 1..degree,
## j in 0..i (28 columns for the default degree = 6).
## `degree` is now a parameter (default preserves the original behavior).
mapFeature = function(X1, X2, degree = 6) {
  out = as.matrix(rep(1, length(X1)))
  for (i in 1:degree) {
    for (j in 0:i) {
      out = cbind(out, (X1^j) * (X2^(i - j)))
    }
  }
  return(out)
}

## Sigmoid hypothesis: h_theta(X) = 1 / (1 + exp(-X %*% theta)).
h = function(theta, X) {
  return(1/(1 + exp(-X %*% theta)))
}

## Regularized cross-entropy cost. The intercept theta[1] is excluded from
## the penalty. NOTE(review): the 1/(2*m) factor on the log-likelihood part
## differs from the conventional 1/m -- kept as-is for compatibility.
computeCost = function(theta, X, y, lamda) {
  m = length(y)
  n = length(theta)
  theta = as.matrix(theta)
  J = (1/(2*m)) * ( sum(ifelse(y == 1, -log(h(theta, X)), -log(1 - h(theta, X)))) + lamda*sum(theta[2:n, 1]^2) )
  return(J)
}

## Gradient of the regularized cost; the intercept is not penalized
## (lamda[1] = 0).
## BUG FIX: the original called h(X), but h() requires (theta, X), so every
## gradient evaluation errored. Fixed to h(theta, X).
## NOTE(review): no 1/m scaling is applied here, so this is not the exact
## gradient of computeCost -- confirm the intended update rule. (optim()'s
## default Nelder-Mead method ignores the gradient anyway.)
gradient = function(theta, X, y, lamda) {
  lamda = rep(lamda, length(theta))
  lamda[1] = 0
  return( t(X) %*% (h(theta, X) - y) + lamda*theta )
}

## Classify: 1 when the linear score X %*% theta is positive, else 0.
## NOTE(review): this masks stats::predict() for the rest of the session.
predict = function(theta, X) {
  return(ifelse(X %*% as.matrix(theta) > 0, 1, 0))
}

## Draw the model's decision boundary over [x1, x2] x [y1, y2] using an
## n x n evaluation grid.
plotDecisionBoundary = function(theta, x1, x2, y1, y2) {
  n = 100
  X = seq(x1, x2, length.out = n)
  Y = seq(y1, y2, length.out = n)
  ## Column 1 = x-coordinate, column 2 = y-coordinate of each grid point.
  Z = matrix(rep(0, 2*n*n), ncol = 2, nrow = n*n)
  for (i in 1:n) {
    for (j in 1:n) {
      Z[(i-1)*n + j, 1] = X[i]
      Z[(i-1)*n + j, 2] = Y[j]
    }
  }
  ## Predict once and reuse (the original evaluated the whole grid twice).
  Z1 = predict(theta, mapFeature(Z[,1], Z[,2]))
  #plot(Z[,1], Z[,2], xlab="Exam 1 score", ylab="Exam 2 score", col=ifelse(Z1 == 1,'red','green'))
  Z = matrix(Z1, nrow = n, ncol = n)
  contour(X, Y, Z, xlim = c(x1, x2), ylim = c(y1, y2))
}

## ---- Script: load data, fit the model, and plot ----
A = scan("ex2data2.txt", sep=",")  # flat vector of (x1, x2, label) triples
A = matrix(A, nrow=length(A)/3, ncol=3, byrow=TRUE)
m = length(A[,1])
X = A[,1:2]
y = A[,3]
x1 = min(X[,1])
x2 = max(X[,1])
y1 = min(X[,2])
y2 = max(X[,2])
plot(X[,1], X[,2], xlab="X1", ylab="X2", col=ifelse(y == 1,'red','green'), xlim=c(x1,x2), ylim=c(y1,y2))
par(new=TRUE)  # overlay the decision-boundary contour on the scatter plot
X = mapFeature(X[,1], X[,2])
initial_theta = rep(0, length(X[1,]))
lamda = 1
iterations = 23000
## X, y, lamda are passed through optim()'s `...` to computeCost/gradient.
result = optim(initial_theta, computeCost, gradient, X, y, lamda, control = list(maxit=iterations))
print(result)
theta = result$par
cat("\nTheta found by gradient descent: \n")
print(theta)
p = predict(theta, X)
plotDecisionBoundary(theta, x1, x2, y1, y2)
|
/solR/exercise2/ex2_reg.R
|
no_license
|
AbheekG/machineLearningAssignments
|
R
| false
| false
| 1,886
|
r
|
## Regularized logistic regression with polynomial features (ML ex2-style).

## Expand two feature vectors into all polynomial terms up to `degree`:
## an intercept column followed by X1^j * X2^(i-j) for i in 1..degree,
## j in 0..i (28 columns for the default degree = 6).
## `degree` is now a parameter (default preserves the original behavior).
mapFeature = function(X1, X2, degree = 6) {
  out = as.matrix(rep(1, length(X1)))
  for (i in 1:degree) {
    for (j in 0:i) {
      out = cbind(out, (X1^j) * (X2^(i - j)))
    }
  }
  return(out)
}

## Sigmoid hypothesis: h_theta(X) = 1 / (1 + exp(-X %*% theta)).
h = function(theta, X) {
  return(1/(1 + exp(-X %*% theta)))
}

## Regularized cross-entropy cost. The intercept theta[1] is excluded from
## the penalty. NOTE(review): the 1/(2*m) factor on the log-likelihood part
## differs from the conventional 1/m -- kept as-is for compatibility.
computeCost = function(theta, X, y, lamda) {
  m = length(y)
  n = length(theta)
  theta = as.matrix(theta)
  J = (1/(2*m)) * ( sum(ifelse(y == 1, -log(h(theta, X)), -log(1 - h(theta, X)))) + lamda*sum(theta[2:n, 1]^2) )
  return(J)
}

## Gradient of the regularized cost; the intercept is not penalized
## (lamda[1] = 0).
## BUG FIX: the original called h(X), but h() requires (theta, X), so every
## gradient evaluation errored. Fixed to h(theta, X).
## NOTE(review): no 1/m scaling is applied here, so this is not the exact
## gradient of computeCost -- confirm the intended update rule. (optim()'s
## default Nelder-Mead method ignores the gradient anyway.)
gradient = function(theta, X, y, lamda) {
  lamda = rep(lamda, length(theta))
  lamda[1] = 0
  return( t(X) %*% (h(theta, X) - y) + lamda*theta )
}

## Classify: 1 when the linear score X %*% theta is positive, else 0.
## NOTE(review): this masks stats::predict() for the rest of the session.
predict = function(theta, X) {
  return(ifelse(X %*% as.matrix(theta) > 0, 1, 0))
}

## Draw the model's decision boundary over [x1, x2] x [y1, y2] using an
## n x n evaluation grid.
plotDecisionBoundary = function(theta, x1, x2, y1, y2) {
  n = 100
  X = seq(x1, x2, length.out = n)
  Y = seq(y1, y2, length.out = n)
  ## Column 1 = x-coordinate, column 2 = y-coordinate of each grid point.
  Z = matrix(rep(0, 2*n*n), ncol = 2, nrow = n*n)
  for (i in 1:n) {
    for (j in 1:n) {
      Z[(i-1)*n + j, 1] = X[i]
      Z[(i-1)*n + j, 2] = Y[j]
    }
  }
  ## Predict once and reuse (the original evaluated the whole grid twice).
  Z1 = predict(theta, mapFeature(Z[,1], Z[,2]))
  #plot(Z[,1], Z[,2], xlab="Exam 1 score", ylab="Exam 2 score", col=ifelse(Z1 == 1,'red','green'))
  Z = matrix(Z1, nrow = n, ncol = n)
  contour(X, Y, Z, xlim = c(x1, x2), ylim = c(y1, y2))
}

## ---- Script: load data, fit the model, and plot ----
A = scan("ex2data2.txt", sep=",")  # flat vector of (x1, x2, label) triples
A = matrix(A, nrow=length(A)/3, ncol=3, byrow=TRUE)
m = length(A[,1])
X = A[,1:2]
y = A[,3]
x1 = min(X[,1])
x2 = max(X[,1])
y1 = min(X[,2])
y2 = max(X[,2])
plot(X[,1], X[,2], xlab="X1", ylab="X2", col=ifelse(y == 1,'red','green'), xlim=c(x1,x2), ylim=c(y1,y2))
par(new=TRUE)  # overlay the decision-boundary contour on the scatter plot
X = mapFeature(X[,1], X[,2])
initial_theta = rep(0, length(X[1,]))
lamda = 1
iterations = 23000
## X, y, lamda are passed through optim()'s `...` to computeCost/gradient.
result = optim(initial_theta, computeCost, gradient, X, y, lamda, control = list(maxit=iterations))
print(result)
theta = result$par
cat("\nTheta found by gradient descent: \n")
print(theta)
p = predict(theta, X)
plotDecisionBoundary(theta, x1, x2, y1, y2)
|
## configure_repo.r
## configure_repo.r ensures all files in your repo are properly configured.
## It must be sourced before calculating OHI scores with ohicore::CalculateAll();
## it can be sourced here or is also sourced from calculate_scores.r.
## You are encouraged to use this script when developing individual goal models. A good workflow is:
## 1. prepare data layers in the /prep folders (script as much as possible in R)
## 2. register data layers in layers.csv and save them in /layers folder
## 3. source configure_repo.r to ensure proper configuration
## 4. develop goal models in functions.r, running individual goal models line by line

## Load ohicore and the libraries used by the goal models in functions.r,
## but only when ohicore is not already attached to the search path.
if (!"ohicore" %in% (.packages())) {
suppressWarnings(require(ohicore))
library(tidyr) # install.packages('tidyr')
library(dplyr) # install.packages('dplyr')
library(stringr) # install.packages('stringr')
}

## Set the working directory to the scenario folder that contains the
## conf/ and layers/ directories.
## NOTE(review): hard-coded user path; only valid for this repo's layout.
setwd('~/github/cnc/eez2016')

## Load the scenario configuration from conf/.
conf = ohicore::Conf('conf')

## Check that the scenario layer files in the /layers folder match their
## registration in layers.csv. Layer files are not modified.
ohicore::CheckLayers('layers.csv', 'layers', flds_id=conf$config$layers_id_fields)

## Load the scenario layers for ohicore to access. Layer files are not modified.
layers = ohicore::Layers('layers.csv', 'layers')
|
/eez2016/configure_toolbox.r
|
no_license
|
OHI-Science/cnc
|
R
| false
| false
| 1,451
|
r
|
## configure_repo.r
## configure_repo.r ensures all files in your repo are properly configured.
## It must be sourced before calculating OHI scores with ohicore::CalculateAll();
## it can be sourced here or is also sourced from calculate_scores.r.
## You are encouraged to use this script when developing individual goal models. A good workflow is:
## 1. prepare data layers in the /prep folders (script as much as possible in R)
## 2. register data layers in layers.csv and save them in /layers folder
## 3. source configure_repo.r to ensure proper configuration
## 4. develop goal models in functions.r, running individual goal models line by line

## Load ohicore and the libraries used by the goal models in functions.r,
## but only when ohicore is not already attached to the search path.
if (!"ohicore" %in% (.packages())) {
suppressWarnings(require(ohicore))
library(tidyr) # install.packages('tidyr')
library(dplyr) # install.packages('dplyr')
library(stringr) # install.packages('stringr')
}

## Set the working directory to the scenario folder that contains the
## conf/ and layers/ directories.
## NOTE(review): hard-coded user path; only valid for this repo's layout.
setwd('~/github/cnc/eez2016')

## Load the scenario configuration from conf/.
conf = ohicore::Conf('conf')

## Check that the scenario layer files in the /layers folder match their
## registration in layers.csv. Layer files are not modified.
ohicore::CheckLayers('layers.csv', 'layers', flds_id=conf$config$layers_id_fields)

## Load the scenario layers for ohicore to access. Layer files are not modified.
layers = ohicore::Layers('layers.csv', 'layers')
|
library(HiddenMarkov)
library(fitdistrplus)
library(beepr)

## ---- Data loading ----
## Bare section strings ("Lectura de datos", "2 Estados", ...) auto-print
## when the script is run, acting as progress markers; kept as-is.
"Lectura de datos"
data <- read.csv(file="/home/yordan/YORDAN/UNAL/TESIS_MAESTRIA/25_expo_2018/datos_PP_NovAbr_anom_925.csv"
                 , header=FALSE, sep=",")
spd = c(data$V1)
## Rescale to (0, 1): the series must never take the value 1 exactly,
## because the later goodness-of-fit computation cannot handle it.
spd = spd/max(spd) * 0.9999999
#spd_cal = spd[1:5075]        # calibration data, up to 2007-04-30
#spd_val = tail(spd, n=-5075) # validation data, from 2007-11-01

## ---- Marginal gamma fit, used to seed every HMM state ----
"Gamma"
fl <- fitdist(spd, "gamma")

## Fit a stationary hidden Markov model with gamma emissions to `spd`.
##
## Pi:      initial transition matrix (k x k); k sets the number of states.
## maxiter: Baum-Welch iteration cap.
##
## Every state starts from the same marginal gamma parameters in `fl`; the
## chain always starts in state 1. Prints the fitted log-likelihood, beeps
## when done, and returns the fitted model.
## (Refactored from nine near-identical copy-pasted blocks.)
fit_gamma_hmm <- function(Pi, maxiter) {
  k <- nrow(Pi)
  delta <- c(1, rep(0, k - 1))
  pm <- list(shape = rep(fl$estimate[1], k),
             rate  = rep(fl$estimate[2], k))
  x <- dthmm(NULL, Pi, delta, "gamma", pm, discrete = FALSE)
  x$x <- spd
  x$nonstat <- FALSE
  y <- BaumWelch(x, bwcontrol(maxiter = maxiter, posdiff = FALSE))
  beep()
  print(y$LL)
  y
}

"2 Estados"
y <- fit_gamma_hmm(rbind(c(0.7, 0.3),
                         c(0.4, 0.6)), maxiter = 1000)

"3 Estados"
y <- fit_gamma_hmm(rbind(c(0.7, 0.2, 0.1),
                         c(0.3, 0.6, 0.1),
                         c(0.2, 0.2, 0.6)), maxiter = 1000)

"4 Estados"
y <- fit_gamma_hmm(rbind(c(0.8, 0.1, 0.05, 0.05),
                         c(0.1, 0.6, 0.25, 0.05),
                         c(0.1, 0.15, 0.5, 0.25),
                         c(0.2, 0.05, 0.2, 0.55)), maxiter = 10000)

"5 Estados"
y <- fit_gamma_hmm(rbind(c(0.7, 0.1, 0.05, 0.05, 0.1),
                         c(0.05, 0.6, 0.25, 0.05, 0.05),
                         c(0.1, 0.1, 0.5, 0.25, 0.05),
                         c(0.2, 0.05, 0.15, 0.55, 0.05),
                         c(0.05, 0.3, 0.1, 0.05, 0.5)), maxiter = 10000)

"6 Estados"
y <- fit_gamma_hmm(rbind(c(0.65, 0.1, 0.05, 0.05, 0.07, 0.08),
                         c(0.02, 0.6, 0.2, 0.08, 0.05, 0.05),
                         c(0.1, 0.15, 0.5, 0.15, 0.05, 0.05),
                         c(0.07, 0.13, 0.05, 0.55, 0.15, 0.05),
                         c(0.05, 0.23, 0.1, 0.05, 0.5, 0.07),
                         c(0.15, 0.1, 0.03, 0.07, 0.15, 0.5)), maxiter = 10000)

"7 estados"
y <- fit_gamma_hmm(rbind(c(0.65, 0.1, 0.05, 0.02, 0.05, 0.07, 0.06),
                         c(0.02, 0.4, 0.2, 0.08, 0.05, 0.2, 0.05),
                         c(0.1, 0.1, 0.5, 0.15, 0.05, 0.05, 0.05),
                         c(0.03, 0.07, 0.13, 0.52, 0.05, 0.15, 0.05),
                         c(0.05, 0.2, 0.1, 0.03, 0.5, 0.05, 0.07),
                         c(0.15, 0.1, 0.1, 0.03, 0.07, 0.4, 0.15),
                         c(0.17, 0.2, 0.05, 0.03, 0.07, 0.15, 0.33)), maxiter = 10000)

"8 estados"
y <- fit_gamma_hmm(rbind(c(0.45, 0.1, 0.07, 0.12, 0.03, 0.07, 0.07, 0.09),
                         c(0.02, 0.4, 0.15, 0.05, 0.08, 0.05, 0.2, 0.05),
                         c(0.1, 0.1, 0.5, 0.07, 0.08, 0.05, 0.05, 0.05),
                         c(0.02, 0.03, 0.07, 0.5, 0.13, 0.05, 0.15, 0.05),
                         c(0.05, 0.2, 0.1, 0.03, 0.3, 0.05, 0.07, 0.2),
                         c(0.13, 0.1, 0.05, 0.1, 0.03, 0.4, 0.07, 0.12),
                         c(0.1, 0.1, 0.1, 0.03, 0.07, 0.15, 0.33, 0.12),
                         c(0.13, 0.07, 0.1, 0.02, 0.03, 0.15, 0.08, 0.42)), maxiter = 10000)

"9 estados"
y <- fit_gamma_hmm(rbind(c(0.36, 0.09, 0.075, 0.13, 0.04, 0.08, 0.07, 0.09, 0.065),
                         c(0.02, 0.4, 0.13, 0.05, 0.02, 0.08, 0.05, 0.2, 0.05),
                         c(0.07, 0.07, 0.5, 0.07, 0.06, 0.08, 0.05, 0.05, 0.05),
                         c(0.02, 0.03, 0.07, 0.35, 0.13, 0.05, 0.1, 0.05, 0.2),
                         c(0.05, 0.25, 0.05, 0.03, 0.3, 0.05, 0.07, 0.18, 0.02),
                         c(0.13, 0.09, 0.05, 0.1, 0.03, 0.4, 0.07, 0.1, 0.03),
                         c(0.1, 0.05, 0.12, 0.03, 0.07, 0.15, 0.3, 0.07, 0.11),
                         c(0.13, 0.07, 0.1, 0.08, 0.02, 0.03, 0.12, 0.37, 0.08),
                         c(0.11, 0.07, 0.09, 0.08, 0.02, 0.03, 0.11, 0.06, 0.43)), maxiter = 10000)

"10 estados"
y <- fit_gamma_hmm(rbind(c(0.33, 0.06, 0.07, 0.11, 0.09, 0.04, 0.08, 0.07, 0.09, 0.06),
                         c(0.07, 0.35, 0.02, 0.12, 0.04, 0.02, 0.08, 0.05, 0.2, 0.05),
                         c(0.07, 0.05, 0.48, 0.07, 0.06, 0.085, 0.065, 0.02, 0.05, 0.05),
                         c(0.02, 0.03, 0.07, 0.34, 0.13, 0.05, 0.1, 0.05, 0.2, 0.01),
                         c(0.05, 0.23, 0.05, 0.03, 0.3, 0.05, 0.07, 0.15, 0.05, 0.02),
                         c(0.11, 0.09, 0.05, 0.1, 0.03, 0.37, 0.07, 0.1, 0.03, 0.05),
                         c(0.1, 0.05, 0.12, 0.03, 0.07, 0.14, 0.29, 0.07, 0.11, 0.02),
                         c(0.1, 0.09, 0.07, 0.1, 0.07, 0.02, 0.03, 0.33, 0.11, 0.08),
                         c(0.11, 0.07, 0.1, 0.09, 0.08, 0.02, 0.03, 0.11, 0.33, 0.06),
                         c(0.09, 0.17, 0.05, 0.11, 0.04, 0.01, 0.03, 0.09, 0.05, 0.36)), maxiter = 10000)
|
/26_expo_2018/Validation/validation_NovAbr_PP_Gamma.R
|
no_license
|
yordanarango/MASTER_THESIS
|
R
| false
| false
| 8,014
|
r
|
library(HiddenMarkov)
library(fitdistrplus)
library(beepr)

## ---- Data loading ----
## Bare section strings ("Lectura de datos", "2 Estados", ...) auto-print
## when the script is run, acting as progress markers; kept as-is.
"Lectura de datos"
data <- read.csv(file="/home/yordan/YORDAN/UNAL/TESIS_MAESTRIA/25_expo_2018/datos_PP_NovAbr_anom_925.csv"
                 , header=FALSE, sep=",")
spd = c(data$V1)
## Rescale to (0, 1): the series must never take the value 1 exactly,
## because the later goodness-of-fit computation cannot handle it.
spd = spd/max(spd) * 0.9999999
#spd_cal = spd[1:5075]        # calibration data, up to 2007-04-30
#spd_val = tail(spd, n=-5075) # validation data, from 2007-11-01

## ---- Marginal gamma fit, used to seed every HMM state ----
"Gamma"
fl <- fitdist(spd, "gamma")

## Fit a stationary hidden Markov model with gamma emissions to `spd`.
##
## Pi:      initial transition matrix (k x k); k sets the number of states.
## maxiter: Baum-Welch iteration cap.
##
## Every state starts from the same marginal gamma parameters in `fl`; the
## chain always starts in state 1. Prints the fitted log-likelihood, beeps
## when done, and returns the fitted model.
## (Refactored from nine near-identical copy-pasted blocks.)
fit_gamma_hmm <- function(Pi, maxiter) {
  k <- nrow(Pi)
  delta <- c(1, rep(0, k - 1))
  pm <- list(shape = rep(fl$estimate[1], k),
             rate  = rep(fl$estimate[2], k))
  x <- dthmm(NULL, Pi, delta, "gamma", pm, discrete = FALSE)
  x$x <- spd
  x$nonstat <- FALSE
  y <- BaumWelch(x, bwcontrol(maxiter = maxiter, posdiff = FALSE))
  beep()
  print(y$LL)
  y
}

"2 Estados"
y <- fit_gamma_hmm(rbind(c(0.7, 0.3),
                         c(0.4, 0.6)), maxiter = 1000)

"3 Estados"
y <- fit_gamma_hmm(rbind(c(0.7, 0.2, 0.1),
                         c(0.3, 0.6, 0.1),
                         c(0.2, 0.2, 0.6)), maxiter = 1000)

"4 Estados"
y <- fit_gamma_hmm(rbind(c(0.8, 0.1, 0.05, 0.05),
                         c(0.1, 0.6, 0.25, 0.05),
                         c(0.1, 0.15, 0.5, 0.25),
                         c(0.2, 0.05, 0.2, 0.55)), maxiter = 10000)

"5 Estados"
y <- fit_gamma_hmm(rbind(c(0.7, 0.1, 0.05, 0.05, 0.1),
                         c(0.05, 0.6, 0.25, 0.05, 0.05),
                         c(0.1, 0.1, 0.5, 0.25, 0.05),
                         c(0.2, 0.05, 0.15, 0.55, 0.05),
                         c(0.05, 0.3, 0.1, 0.05, 0.5)), maxiter = 10000)

"6 Estados"
y <- fit_gamma_hmm(rbind(c(0.65, 0.1, 0.05, 0.05, 0.07, 0.08),
                         c(0.02, 0.6, 0.2, 0.08, 0.05, 0.05),
                         c(0.1, 0.15, 0.5, 0.15, 0.05, 0.05),
                         c(0.07, 0.13, 0.05, 0.55, 0.15, 0.05),
                         c(0.05, 0.23, 0.1, 0.05, 0.5, 0.07),
                         c(0.15, 0.1, 0.03, 0.07, 0.15, 0.5)), maxiter = 10000)

"7 estados"
y <- fit_gamma_hmm(rbind(c(0.65, 0.1, 0.05, 0.02, 0.05, 0.07, 0.06),
                         c(0.02, 0.4, 0.2, 0.08, 0.05, 0.2, 0.05),
                         c(0.1, 0.1, 0.5, 0.15, 0.05, 0.05, 0.05),
                         c(0.03, 0.07, 0.13, 0.52, 0.05, 0.15, 0.05),
                         c(0.05, 0.2, 0.1, 0.03, 0.5, 0.05, 0.07),
                         c(0.15, 0.1, 0.1, 0.03, 0.07, 0.4, 0.15),
                         c(0.17, 0.2, 0.05, 0.03, 0.07, 0.15, 0.33)), maxiter = 10000)

"8 estados"
y <- fit_gamma_hmm(rbind(c(0.45, 0.1, 0.07, 0.12, 0.03, 0.07, 0.07, 0.09),
                         c(0.02, 0.4, 0.15, 0.05, 0.08, 0.05, 0.2, 0.05),
                         c(0.1, 0.1, 0.5, 0.07, 0.08, 0.05, 0.05, 0.05),
                         c(0.02, 0.03, 0.07, 0.5, 0.13, 0.05, 0.15, 0.05),
                         c(0.05, 0.2, 0.1, 0.03, 0.3, 0.05, 0.07, 0.2),
                         c(0.13, 0.1, 0.05, 0.1, 0.03, 0.4, 0.07, 0.12),
                         c(0.1, 0.1, 0.1, 0.03, 0.07, 0.15, 0.33, 0.12),
                         c(0.13, 0.07, 0.1, 0.02, 0.03, 0.15, 0.08, 0.42)), maxiter = 10000)

"9 estados"
y <- fit_gamma_hmm(rbind(c(0.36, 0.09, 0.075, 0.13, 0.04, 0.08, 0.07, 0.09, 0.065),
                         c(0.02, 0.4, 0.13, 0.05, 0.02, 0.08, 0.05, 0.2, 0.05),
                         c(0.07, 0.07, 0.5, 0.07, 0.06, 0.08, 0.05, 0.05, 0.05),
                         c(0.02, 0.03, 0.07, 0.35, 0.13, 0.05, 0.1, 0.05, 0.2),
                         c(0.05, 0.25, 0.05, 0.03, 0.3, 0.05, 0.07, 0.18, 0.02),
                         c(0.13, 0.09, 0.05, 0.1, 0.03, 0.4, 0.07, 0.1, 0.03),
                         c(0.1, 0.05, 0.12, 0.03, 0.07, 0.15, 0.3, 0.07, 0.11),
                         c(0.13, 0.07, 0.1, 0.08, 0.02, 0.03, 0.12, 0.37, 0.08),
                         c(0.11, 0.07, 0.09, 0.08, 0.02, 0.03, 0.11, 0.06, 0.43)), maxiter = 10000)

"10 estados"
y <- fit_gamma_hmm(rbind(c(0.33, 0.06, 0.07, 0.11, 0.09, 0.04, 0.08, 0.07, 0.09, 0.06),
                         c(0.07, 0.35, 0.02, 0.12, 0.04, 0.02, 0.08, 0.05, 0.2, 0.05),
                         c(0.07, 0.05, 0.48, 0.07, 0.06, 0.085, 0.065, 0.02, 0.05, 0.05),
                         c(0.02, 0.03, 0.07, 0.34, 0.13, 0.05, 0.1, 0.05, 0.2, 0.01),
                         c(0.05, 0.23, 0.05, 0.03, 0.3, 0.05, 0.07, 0.15, 0.05, 0.02),
                         c(0.11, 0.09, 0.05, 0.1, 0.03, 0.37, 0.07, 0.1, 0.03, 0.05),
                         c(0.1, 0.05, 0.12, 0.03, 0.07, 0.14, 0.29, 0.07, 0.11, 0.02),
                         c(0.1, 0.09, 0.07, 0.1, 0.07, 0.02, 0.03, 0.33, 0.11, 0.08),
                         c(0.11, 0.07, 0.1, 0.09, 0.08, 0.02, 0.03, 0.11, 0.33, 0.06),
                         c(0.09, 0.17, 0.05, 0.11, 0.04, 0.01, 0.03, 0.09, 0.05, 0.36)), maxiter = 10000)
|
library(Gmisc)
### Name: mergeLists
### Title: Merging of multiple lists
### Aliases: mergeLists

### ** Examples

## Two lists sharing top-level names, with nested sublists of unequal
## length (v2 has an extra `three` entry) -- presumably to demonstrate how
## mergeLists() handles both matched and unmatched elements; see ?mergeLists.
v1 <- list("a"=c(1,2), b="test 1", sublist=list(one=20:21, two=21:22))
v2 <- list("a"=c(3,4), b="test 2", sublist=list(one=10:11, two=11:12, three=1:2))
mergeLists(v1, v2)
|
/data/genthat_extracted_code/Gmisc/examples/mergeLists.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 292
|
r
|
library(Gmisc)
### Name: mergeLists
### Title: Merging of multiple lists
### Aliases: mergeLists

### ** Examples

## Two lists sharing top-level names, with nested sublists of unequal
## length (v2 has an extra `three` entry) -- presumably to demonstrate how
## mergeLists() handles both matched and unmatched elements; see ?mergeLists.
v1 <- list("a"=c(1,2), b="test 1", sublist=list(one=20:21, two=21:22))
v2 <- list("a"=c(3,4), b="test 2", sublist=list(one=10:11, two=11:12, three=1:2))
mergeLists(v1, v2)
|
## Fuzzer regression case for DLMtool::LBSPRgen (from the package's
## valgrind/libFuzzer test files, per the repo path): replays a pathological
## argument list -- extreme magnitudes, NA/Inf entries in LenMids, denormal
## floats, and huge nage/nlen counts -- and inspects whatever is returned.
## The literal below is intentionally kept byte-for-byte as captured.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118 ), LenMids = c(-5.15510035957975e+44, 2.59028521047075e+149, NA, 1.74851929178852e+35, NA, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, NA, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, Inf, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21, 4.82715169897616e-30, 2.46619966623764e+181, -4.54780390412952e-306, 2.30289211325379e+223, 4.95323535309608e-36, 2.69586614324169e+251, 1.24829282321313e-264, -6.93344245698241e-248, 1.66480219708517e-305, -2.08633459786369e-239, 2.81991272491703e-308, -4.83594859887756e+25, -9.41849315083338e-162, 4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -9.01717072844665e+119, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126, 0), Linf = -5.85373311417744e-255, MK = 9.00286239024321e+218, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(-7.75783099525155e+59, 1.18313663502752e-314, 0, 0, 0, 0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615835342-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 2,078
|
r
|
## Fuzzer regression case for DLMtool::LBSPRgen (from the package's
## valgrind/libFuzzer test files, per the repo path): replays a pathological
## argument list -- extreme magnitudes, NA/Inf entries in LenMids, denormal
## floats, and huge nage/nlen counts -- and inspects whatever is returned.
## The literal below is intentionally kept byte-for-byte as captured.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118 ), LenMids = c(-5.15510035957975e+44, 2.59028521047075e+149, NA, 1.74851929178852e+35, NA, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, NA, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, Inf, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21, 4.82715169897616e-30, 2.46619966623764e+181, -4.54780390412952e-306, 2.30289211325379e+223, 4.95323535309608e-36, 2.69586614324169e+251, 1.24829282321313e-264, -6.93344245698241e-248, 1.66480219708517e-305, -2.08633459786369e-239, 2.81991272491703e-308, -4.83594859887756e+25, -9.41849315083338e-162, 4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -9.01717072844665e+119, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126, 0), Linf = -5.85373311417744e-255, MK = 9.00286239024321e+218, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(-7.75783099525155e+59, 1.18313663502752e-314, 0, 0, 0, 0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
## Submission script: write each prediction in `answers` to its own file,
## problem_results/problem_id_<i>.txt, one value per file.
answers <- result  # `result` must already exist in the calling environment

## Write each element of x to problem_results/problem_id_<i>.txt.
## x: vector (or list) of answers; one output file per element.
pml_write_files <- function(x){
  n = length(x)
  ## Create the output directory if needed so write.table does not fail
  ## with "cannot open the connection".
  if (!dir.exists("problem_results")) {
    dir.create("problem_results")
  }
  ## seq_len(n) is safe for empty input (1:n would yield c(1, 0)).
  for(i in seq_len(n)){
    filename = paste0("problem_results/problem_id_", i, ".txt")
    write.table(x[i], file=filename, quote=FALSE,
                row.names=FALSE, col.names=FALSE)
  }
}
pml_write_files(answers)
|
/submission script.R
|
no_license
|
pratikshr/project
|
R
| false
| false
| 330
|
r
|
## Submission script: write each prediction in `answers` to its own file,
## problem_results/problem_id_<i>.txt, one value per file.
answers <- result  # `result` must already exist in the calling environment

## Write each element of x to problem_results/problem_id_<i>.txt.
## x: vector (or list) of answers; one output file per element.
pml_write_files <- function(x){
  n = length(x)
  ## Create the output directory if needed so write.table does not fail
  ## with "cannot open the connection".
  if (!dir.exists("problem_results")) {
    dir.create("problem_results")
  }
  ## seq_len(n) is safe for empty input (1:n would yield c(1, 0)).
  for(i in seq_len(n)){
    filename = paste0("problem_results/problem_id_", i, ".txt")
    write.table(x[i], file=filename, quote=FALSE,
                row.names=FALSE, col.names=FALSE)
  }
}
pml_write_files(answers)
|
#' Spin a specially annotated SAS script into a knitr document.
#'
#' Marker conventions recognized at the start of a statement:
#'   "** " -> prose (converted to roxygen-style "#' " lines)
#'   "*+ " -> knitr chunk header (converted to "#+ " lines)
#'   "*R " -> raw R code (marker and trailing ";" stripped)
#'   anything else -> SAS code (kept, with its ";" terminator restored)
#'
#' The source is re-sliced on ";\n" statement boundaries, each piece is
#' rewritten to knitr::spin() markup, and the result is spun.
#' NOTE(review): the transformation steps below are order-dependent;
#' left byte-identical on purpose.
#'
#' @param sasfile path to a .sas file (ignored when `text` is supplied)
#' @param text    optional character vector of SAS source to convert instead
#' @param keep    keep the intermediate .r file / spun products?
#' @param ...     forwarded to knitr::spin()
spinsas <- function(sasfile, text=NULL, keep=FALSE, ...) {
  # stopifnot((length(sasfile)==1 && file.exists(sasfile))||length(text)==1)
  if (is.null(text)) {
    vtext <- readLines(sasfile, warn=FALSE)
  } else {
    vtext <- text
  }
  # reshape from R to SAS: join everything, then split on ";\n" statement ends
  vtext <- unlist(strsplit(paste(vtext, collapse="\n"), ";\\n[[:blank:]]*"))
  # track and strip leading line breaks (restored verbatim near the end)
  pre <- rep(0,length(vtext))
  while (any(br <- grepl(x=vtext, pattern="^[[:space:]]*\\n"))) {
    pre <- pre + br
    vtext <- sub(pattern="^[[:space:]]*\\n", replacement="", x=vtext)
  }
  # classify each statement by its leading marker
  docl <- grepl(pattern="^[[:space:]]*\\*\\* ", x=vtext) # normal text lines
  chunkl <- grepl(pattern="^[[:space:]]*\\*\\+ ", x=vtext) # chunk lines
  Rl <- grepl(pattern="^[[:space:]]*\\*R ", x=vtext) # R code lines
  sasl <- !grepl(pattern="^[[:space:]]*\\*[*+R] ", x=vtext) # SAS code lines
  # Normal text (document)
  vtext <- sub("^[[:space:]]*\\*\\* ", "#' ", vtext) # convert leading "** " to "#'"
  vtext[docl] <- gsub("\\n", "\\\n#' ", vtext[docl]) # continue "#' " on internal new lines
  vtext[docl] <- sub(";$", "", vtext[docl]) # strip trailing ";"
  # Chunk header
  vtext <- sub("^[[:space:]]*\\*\\+ ", "#\\+ ", vtext) # convert leading "*+" to "#+"
  vtext[chunkl] <- sub(";$", "", vtext[chunkl]) # strip trailing ";"
  vtext[chunkl] <- gsub("\\n", "", vtext[chunkl]) # strip internal \n
  vtext[chunkl] <- paste0(vtext[chunkl], "\n") # ensure trailing \n
  # R code
  vtext[Rl] <- sub("^[[:space:]]*\\*R ", "", vtext[Rl]) # strip the leading "*R " marker
  vtext[Rl] <- sub(";$", "", vtext[Rl]) # strip trailing ";"
  # SAS code: restore the ";" terminator removed by the split above
  vtext[sasl] <- paste0(vtext[sasl], ";\n")
  # ensure chunk headers start on new lines
  vtext[(pre & chunkl)!=chunkl] <- paste0("\n", vtext[(pre & chunkl)!=chunkl])
  # restore leading line breaks recorded in `pre`
  lb <- vector("character", length=length(pre))
  while (any(pre>0)) {
    lb[pre>0] <- paste0(lb[pre>0], "\n")
    pre[pre>0] <- pre[pre>0]-1
  }
  vtext <- paste0(lb, vtext)
  # reshape from SAS to R: rejoin and split into physical lines
  vtext <- unlist(strsplit(paste(vtext, collapse=""), "\n"))
  if (is.null(text)) {
    # file mode: write the converted .r next to the .sas source and spin it
    rfile <- sub("[.]sas$", ".r", sasfile)
    writeLines(vtext, rfile)
    if (!keep)
      on.exit(unlink(rfile), add=TRUE) # clean up the intermediate .r file
    knitr::spin(rfile, precious=keep, comment=c("^/[*][*]", "^.*[*]/[*] *$"), ...)
  } else {
    # text mode: spin in memory and return the result
    return(knitr::spin(text=vtext, precious=keep, comment=c("^/[*][*]", "^.*[*]/[*] *$"), ...))
  }
}
|
/R/spinsas.r
|
no_license
|
mlamias/SASmarkdown
|
R
| false
| false
| 2,704
|
r
|
#' Spin a specially annotated SAS script into a knitr document.
#'
#' Marker conventions recognized at the start of a statement:
#'   "** " -> prose (converted to roxygen-style "#' " lines)
#'   "*+ " -> knitr chunk header (converted to "#+ " lines)
#'   "*R " -> raw R code (marker and trailing ";" stripped)
#'   anything else -> SAS code (kept, with its ";" terminator restored)
#'
#' The source is re-sliced on ";\n" statement boundaries, each piece is
#' rewritten to knitr::spin() markup, and the result is spun.
#' NOTE(review): the transformation steps below are order-dependent;
#' left byte-identical on purpose.
#'
#' @param sasfile path to a .sas file (ignored when `text` is supplied)
#' @param text    optional character vector of SAS source to convert instead
#' @param keep    keep the intermediate .r file / spun products?
#' @param ...     forwarded to knitr::spin()
spinsas <- function(sasfile, text=NULL, keep=FALSE, ...) {
  # stopifnot((length(sasfile)==1 && file.exists(sasfile))||length(text)==1)
  if (is.null(text)) {
    vtext <- readLines(sasfile, warn=FALSE)
  } else {
    vtext <- text
  }
  # reshape from R to SAS: join everything, then split on ";\n" statement ends
  vtext <- unlist(strsplit(paste(vtext, collapse="\n"), ";\\n[[:blank:]]*"))
  # track and strip leading line breaks (restored verbatim near the end)
  pre <- rep(0,length(vtext))
  while (any(br <- grepl(x=vtext, pattern="^[[:space:]]*\\n"))) {
    pre <- pre + br
    vtext <- sub(pattern="^[[:space:]]*\\n", replacement="", x=vtext)
  }
  # classify each statement by its leading marker
  docl <- grepl(pattern="^[[:space:]]*\\*\\* ", x=vtext) # normal text lines
  chunkl <- grepl(pattern="^[[:space:]]*\\*\\+ ", x=vtext) # chunk lines
  Rl <- grepl(pattern="^[[:space:]]*\\*R ", x=vtext) # R code lines
  sasl <- !grepl(pattern="^[[:space:]]*\\*[*+R] ", x=vtext) # SAS code lines
  # Normal text (document)
  vtext <- sub("^[[:space:]]*\\*\\* ", "#' ", vtext) # convert leading "** " to "#'"
  vtext[docl] <- gsub("\\n", "\\\n#' ", vtext[docl]) # continue "#' " on internal new lines
  vtext[docl] <- sub(";$", "", vtext[docl]) # strip trailing ";"
  # Chunk header
  vtext <- sub("^[[:space:]]*\\*\\+ ", "#\\+ ", vtext) # convert leading "*+" to "#+"
  vtext[chunkl] <- sub(";$", "", vtext[chunkl]) # strip trailing ";"
  vtext[chunkl] <- gsub("\\n", "", vtext[chunkl]) # strip internal \n
  vtext[chunkl] <- paste0(vtext[chunkl], "\n") # ensure trailing \n
  # R code
  vtext[Rl] <- sub("^[[:space:]]*\\*R ", "", vtext[Rl]) # strip the leading "*R " marker
  vtext[Rl] <- sub(";$", "", vtext[Rl]) # strip trailing ";"
  # SAS code: restore the ";" terminator removed by the split above
  vtext[sasl] <- paste0(vtext[sasl], ";\n")
  # ensure chunk headers start on new lines
  vtext[(pre & chunkl)!=chunkl] <- paste0("\n", vtext[(pre & chunkl)!=chunkl])
  # restore leading line breaks recorded in `pre`
  lb <- vector("character", length=length(pre))
  while (any(pre>0)) {
    lb[pre>0] <- paste0(lb[pre>0], "\n")
    pre[pre>0] <- pre[pre>0]-1
  }
  vtext <- paste0(lb, vtext)
  # reshape from SAS to R: rejoin and split into physical lines
  vtext <- unlist(strsplit(paste(vtext, collapse=""), "\n"))
  if (is.null(text)) {
    # file mode: write the converted .r next to the .sas source and spin it
    rfile <- sub("[.]sas$", ".r", sasfile)
    writeLines(vtext, rfile)
    if (!keep)
      on.exit(unlink(rfile), add=TRUE) # clean up the intermediate .r file
    knitr::spin(rfile, precious=keep, comment=c("^/[*][*]", "^.*[*]/[*] *$"), ...)
  } else {
    # text mode: spin in memory and return the result
    return(knitr::spin(text=vtext, precious=keep, comment=c("^/[*][*]", "^.*[*]/[*] *$"), ...))
  }
}
|
context("Custom Vision project and image creation")

## Live-service integration tests: resource details come from the
## environment, and everything is skipped when any of them is unset.
custvis_url <- Sys.getenv("AZ_TEST_CUSTOMVISION_URL")
custvis_key <- Sys.getenv("AZ_TEST_CUSTOMVISION_KEY")
storage <- Sys.getenv("AZ_TEST_STORAGE_ACCT")
custvis_sas <- Sys.getenv("AZ_TEST_CUSTOMVISION_SAS")
if(custvis_url == "" || custvis_key == "" || storage == "" || custvis_sas == "")
  skip("Tests skipped: resource details not set")

## Random 10-letter project name so runs do not collide with existing projects.
projname <- paste0(sample(letters, 10, TRUE), collapse="")

test_that("Custom Vision project creation works",
{
  endp <- customvision_training_endpoint(custvis_url, key=custvis_key)
  expect_is(endp, c("customvision_training_endpoint", "cognitive_endpoint"))
  # assumes the test resource starts with no projects -- the next assertion
  # fails on a shared/dirty resource
  expect_true(is_empty(list_projects(endp)))
  proj <- create_classification_project(endp, projname)
  expect_is(proj, "classification_project")
  expect_true(!is_empty(list_projects(endp)))
})

test_that("Adding and tagging images works",
{
  endp <- customvision_training_endpoint(custvis_url, key=custvis_key)
  proj <- get_project(endp, projname)  # project created by the previous test
  ## Remote images: 5 "can" and 5 "carton" blobs, addressed via SAS token.
  cans <- paste0(storage, "customvision/", 1:5, ".jpg", custvis_sas)
  cartons <- paste0(storage, "customvision/", 33:37, ".jpg", custvis_sas)
  tags <- rep(c("can", "carton"), each=5)
  img_ids <- add_images(proj, c(cans, cartons), tags)
  expect_type(img_ids, "character")
  img_df <- list_images(proj, "tagged", as="dataframe")
  expect_is(img_df, "data.frame")
  ## Realign rows to img_ids before comparing tags (result order may differ).
  img_df <- img_df[match(img_ids, img_df$id), ]
  img_tags <- do.call(rbind.data.frame, img_df$tags)$tagName
  expect_identical(img_tags, tags)
  ## Local images are uploaded without tags, then tagged afterwards.
  img_loc <- add_images(proj, paste0("../../inst/images/", c("can1.jpg", "carton1.jpg")))
  expect_type(img_loc, "character")
  untagged_ids <- list_images(proj, "untagged")
  expect_type(untagged_ids, "character")
  expect_identical(sort(untagged_ids), sort(img_loc))
  tagged_ids <- add_image_tags(proj, img_loc, list(c("can", "object"), c("carton", "object")))
  expect_identical(tagged_ids, img_loc)
  tags <- list_tags(proj)
  expect_identical(sort(tags), c("can", "carton", "object"))
  ## Negative tags are listed alongside regular ones.
  tagdf <- add_negative_tag(proj, "negtag")
  expect_true("negtag" %in% tagdf$name)
  tags <- list_tags(proj)
  expect_true("negtag" %in% tags)
  ## Removing every tag leaves all images untagged again.
  untagged_ids <- remove_image_tags(proj, list_images(proj))
  expect_type(untagged_ids, "character")
  expect_true(is_empty(list_images(proj, "tagged")))
})
endp <- customvision_training_endpoint(custvis_url, key=custvis_key)
delete_project(endp, projname, confirm=FALSE)
|
/tests/testthat/test02_custvis.R
|
permissive
|
isabella232/AzureVision
|
R
| false
| false
| 2,510
|
r
|
# Integration tests for Azure Custom Vision: project creation, image upload
# and tag management. These hit live Azure services; the resource details
# come from environment variables, and the whole file is skipped if any of
# them is unset.
context("Custom Vision project and image creation")
custvis_url <- Sys.getenv("AZ_TEST_CUSTOMVISION_URL")
custvis_key <- Sys.getenv("AZ_TEST_CUSTOMVISION_KEY")
storage <- Sys.getenv("AZ_TEST_STORAGE_ACCT")
custvis_sas <- Sys.getenv("AZ_TEST_CUSTOMVISION_SAS")
if(custvis_url == "" || custvis_key == "" || storage == "" || custvis_sas == "")
    skip("Tests skipped: resource details not set")
# Random project name so repeated/concurrent runs don't collide.
projname <- paste0(sample(letters, 10, TRUE), collapse="")
test_that("Custom Vision project creation works",
{
    endp <- customvision_training_endpoint(custvis_url, key=custvis_key)
    expect_is(endp, c("customvision_training_endpoint", "cognitive_endpoint"))
    # Assumes the test resource starts with no projects -- TODO confirm.
    expect_true(is_empty(list_projects(endp)))
    proj <- create_classification_project(endp, projname)
    expect_is(proj, "classification_project")
    expect_true(!is_empty(list_projects(endp)))
})
test_that("Adding and tagging images works",
{
    endp <- customvision_training_endpoint(custvis_url, key=custvis_key)
    proj <- get_project(endp, projname)
    # Remote test images: blob-storage URLs authorised by the SAS token.
    cans <- paste0(storage, "customvision/", 1:5, ".jpg", custvis_sas)
    cartons <- paste0(storage, "customvision/", 33:37, ".jpg", custvis_sas)
    tags <- rep(c("can", "carton"), each=5)
    img_ids <- add_images(proj, c(cans, cartons), tags)
    expect_type(img_ids, "character")
    img_df <- list_images(proj, "tagged", as="dataframe")
    expect_is(img_df, "data.frame")
    # Re-order rows to match the upload order before comparing tags.
    img_df <- img_df[match(img_ids, img_df$id), ]
    img_tags <- do.call(rbind.data.frame, img_df$tags)$tagName
    expect_identical(img_tags, tags)
    # Local files are uploaded untagged, then tagged explicitly below.
    img_loc <- add_images(proj, paste0("../../inst/images/", c("can1.jpg", "carton1.jpg")))
    expect_type(img_loc, "character")
    untagged_ids <- list_images(proj, "untagged")
    expect_type(untagged_ids, "character")
    expect_identical(sort(untagged_ids), sort(img_loc))
    tagged_ids <- add_image_tags(proj, img_loc, list(c("can", "object"), c("carton", "object")))
    expect_identical(tagged_ids, img_loc)
    tags <- list_tags(proj)
    expect_identical(sort(tags), c("can", "carton", "object"))
    tagdf <- add_negative_tag(proj, "negtag")
    expect_true("negtag" %in% tagdf$name)
    tags <- list_tags(proj)
    expect_true("negtag" %in% tags)
    # Removing every tag should leave the project with no tagged images.
    untagged_ids <- remove_image_tags(proj, list_images(proj))
    expect_type(untagged_ids, "character")
    expect_true(is_empty(list_images(proj, "tagged")))
})
# Clean-up: delete the throwaway project without an interactive prompt.
endp <- customvision_training_endpoint(custvis_url, key=custvis_key)
delete_project(endp, projname, confirm=FALSE)
|
#reference: https://github.com/kbroman/pkg_primer/blob/gh-pages/example/stage3/R/plot_crayons.R
#' Annotates HiCcompare interaction data output.
#'
#' @param filename filename of the HiCcompare output, in csv format. If NULL, then the provided dataframe is used instead.
#'
#' @param df dataframe containing HiCcompare output, containing fields "chr1" "start1" "end1" "chr2" "start2" "end2" "IF1" "IF2" "D" "M" "adj.IF1" "adj.IF2" "adj.M" "mc" "A" "p.value" "fold.change"
#' @param is Interactionset containing the above. Please note: only one of the three options may be used.
#' @return annotatedInteractionSet An annotated interaction set, containing HGNC symbols
#' @author James L. T. Dalgleish
#' @seealso \pkg{GIChipSeqCompare}
#' @keywords chipseq HiC HiChip GInteractions
#'
#' @examples
#' afunction()
#'
#' @export
#' @importFrom readr read_csv
#' @importFrom GenomicInteractions
#'
#'
|
/R/annotation_GInt.R
|
no_license
|
seandavi/HiClink
|
R
| false
| false
| 1,013
|
r
|
#reference: https://github.com/kbroman/pkg_primer/blob/gh-pages/example/stage3/R/plot_crayons.R
#' Annotates HiCcompare interaction data output.
#'
#' @param filename filename of the HiCcompare output, in csv format. If NULL, then the provided dataframe is used instead.
#'
#' @param df dataframe containing HiCcompare output, containing fields "chr1" "start1" "end1" "chr2" "start2" "end2" "IF1" "IF2" "D" "M" "adj.IF1" "adj.IF2" "adj.M" "mc" "A" "p.value" "fold.change"
#' @param is Interactionset containing the above. Please note: only one of the three options may be used.
#' @return annotatedInteractionSet An annotated interaction set, containing HGNC symbols
#' @author James L. T. Dalgleish
#' @seealso \pkg{GIChipSeqCompare}
#' @keywords chipseq HiC HiChip GInteractions
#'
#' @examples
#' afunction()
#'
#' @export
#' @importFrom readr read_csv
#' @importFrom GenomicInteractions
#'
#'
|
# Test entry point: runs every testthat test under tests/testthat/ for the
# CoreComp package (standard R CMD check harness).
library(testthat)
library(CoreComp)
test_check("CoreComp")
|
/tests/testthat.R
|
permissive
|
keocorak/CoreComp
|
R
| false
| false
| 60
|
r
|
# Test entry point: runs every testthat test under tests/testthat/ for the
# CoreComp package (standard R CMD check harness).
library(testthat)
library(CoreComp)
test_check("CoreComp")
|
\name{wdSubsubsection}
\Rdversion{1.1}
\alias{wdSubsubsection}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Start a new Word section, add a heading and a bookmark.
}
\description{
Start a sub-sub-section (see \code{\link{wdSection}} for details).
}
\usage{
wdSubsubsection(title, label = gsub("[.,-:?!@#+* ]", "_",
paste("subsec", title, sep = "_")),
newpage = FALSE, wdapp = .R2wd)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{title}{
the sub-sub-section title.
}
\item{label}{
the bookmark.
}
\item{newpage}{
whether the section should start on a new page.
}
\item{wdapp}{
the handle to the Word Application (usually not needed).
}
}
\author{
Christian Ritter
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
\dontrun{
wdGet()
wdTitle("Title")
wdSection("Section 1",newpage=TRUE)
wdSubsection("Sub-section 1.1")
wdSubsubsection("Sub-sub-section 1.1.a")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{IO}
\keyword{connection}
|
/man/wdSubsubsection.Rd
|
no_license
|
cran/R2wd
|
R
| false
| false
| 1,130
|
rd
|
\name{wdSubsubsection}
\Rdversion{1.1}
\alias{wdSubsubsection}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Start a new Word section, add a heading and a bookmark.
}
\description{
Start a sub-sub-section (see \code{\link{wdSection}} for details).
}
\usage{
wdSubsubsection(title, label = gsub("[.,-:?!@#+* ]", "_",
paste("subsec", title, sep = "_")),
newpage = FALSE, wdapp = .R2wd)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{title}{
the sub-sub-section title.
}
\item{label}{
the bookmark.
}
\item{newpage}{
whether the section should start on a new page.
}
\item{wdapp}{
the handle to the Word Application (usually not needed).
}
}
\author{
Christian Ritter
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
\dontrun{
wdGet()
wdTitle("Title")
wdSection("Section 1",newpage=TRUE)
wdSubsection("Sub-section 1.1")
wdSubsubsection("Sub-sub-section 1.1.a")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{IO}
\keyword{connection}
|
#Day_4_BioStats
#19/04/2018
#Matthew_Arendse
#3440197
#Day 4 of the BSc BCB(Hons) BioStats Module 2018: ANOVA Tests
# Load Libraries ----------------------------------------------------------
library(tidyverse)
library(ggpubr)
# Run a T-Test ------------------------------------------------------------
# First load the data
chicks <- as_tibble(ChickWeight)
# Then subset out only the sample sets to be compared
chicks_sub <- chicks %>%
filter(Diet %in% c(1, 2), Time == 21) # used to filter out and retain only the data specified
# Run the T-test
t.test(weight ~ Diet, data = chicks_sub, var.equal = FALSE)
# Run a one-way ANOVA -----------------------------------------------------
# Research Question: Is there a difference in chicken mass after 21 days based on diet type?
# Hypothesis (0): There is no difference in mean chicken mass after 21 days based on diet type.
# Hypothesis (1): A difference exists in mean chicken mass after 21 days based on diet type.
# subset the data for Day 21 only
chicks21 <- chicks %>%
filter(Time == 21)
# Run the ANOVA
chicks.aov1 <- aov(weight ~ Diet, data = chicks21)
summary(chicks.aov1)
# F statistic < 0.05 therefore we reject the Null Hypothesis
# A significant difference does exist in mean chicken mass after 21 days based on diet type.
chick21_av <- chicks21 %>%
group_by(Diet) %>%
summarise(mn.wt = mean(weight))
ggplot(data = chicks21, aes( x = Diet , y = weight))+
geom_boxplot(aes(fill = Diet), notch = TRUE)+
geom_segment(aes(x = Diet, xend = Diet, y = weight, yend = weight+2))+#This plot allows us to see which diet types are significantly different from one another
geom_point(data = chick21_av, size = 2, shape = 16,
aes(y = mn.wt), colour = "yellow") +
labs(title = "Variation in mass of chickens",
subtitle = "Based on diet type", y = "Weight (g)",x = "Diet Type")
# To test where the significant difference lies after an ANOVA has been done
TukeyHSD(chicks.aov1)
# Try to visualise
?TukeyHSD
chicks.aov1 <- aov(weight ~ Diet, data = chicks21)
TukeyHSD(chicks.aov1, "Diet", ordered = TRUE)
# In Base R
plot(TukeyHSD(chicks.aov1, "Diet"))
# Using ggplot
chicks_Tukey <- as.data.frame(TukeyHSD(aov(weight ~ Diet, data = chicks21))$Diet)
chicks_Tukey$pairs <- as.factor(row.names(chicks_Tukey))
ggplot(chicks_Tukey, aes(x = pairs, y = diff))+ #group = 1 states that all points plotted belong to one group and that said group needs to be connected by a line.
geom_errorbar(aes(ymin = lwr,
ymax = upr))+
geom_point(aes(colour = pairs))+
geom_hline(yintercept = 0, linetype = "dashed")+
labs(title = "TukeyHSD for one-tail ANOVA",
subtitle = "For variation in mean chick mass by diet type",
y = "Difference",x = "Diet Pairs")+
theme_pubr()+
theme(legend.position = "right")
# RWS: Very nice!
# Play Own Data
Vipers <- read_csv("C:/R Workshop 2018/Vipers(Max SVL).csv")
Av_all_vipers <- Vipers %>%
na.omit() %>%
group_by(SF) %>%
summarise(mn.TL = mean(TL))
vipers.aov1 <- aov(TL ~ SF, data = Vipers)
summary(vipers.aov1)
TukeyHSD(vipers.aov1, "SF", ordered = TRUE)
#Base R plot
plot(TukeyHSD(vipers.aov1, "SF"))
#Tidy R plot
vipers_Tukey <- as.data.frame(TukeyHSD(aov(TL ~ SF, data = Vipers))$SF)
vipers_Tukey$SF <- as.factor(row.names(vipers_Tukey))
ggplot(vipers_Tukey, aes(x = SF, y = diff))+
geom_errorbar(aes(ymin = lwr,
ymax = upr))+
geom_point(aes(colour = SF))+
geom_hline(yintercept = 0, linetype = "dashed")+
labs(title = "TukeyHSD for one-tail ANOVA",
subtitle = "For variation in mean body size by Clade",
y = "Difference",x = "Diet Pairs")+
theme_pubr()+
theme(legend.position = "right")
# Here we see that viperines are significantly smaller (on average) than crotalines
# Run a Multiple Factor ANOVA ---------------------------------------------------
# does time have an effect on chicken mass
chicks.aov2 <- aov(weight ~ as.factor(Time), data = filter(chicks, Time %in% c(0, 2, 10, 21)))
summary(chicks.aov2)
#Tukey post-hoc plot
plot(TukeyHSD(chicks.aov2))
# How to run a Multiple Factor ANOVA
summary(aov(weight ~ Diet * as.factor(Time), #this is where you add more factors
data = filter(chicks, Time %in% c(4, 21)))) #this is used to specify the time range
TukeyHSD(aov(weight ~ Diet * as.factor(Time), data = filter(chicks, Time %in% c(0, 21))))
#Tukey post-hoc plot
plot(TukeyHSD(aov(weight ~ Diet * as.factor(Time), data = filter(chicks, Time %in% c(0, 21)))))
#Better Visualise the Data
chicks_mn <- ChickWeight %>%
group_by(Diet, Time) %>%
summarise(mn.wt = mean(weight,na.rm = TRUE))
ggplot(data = chicks_mn, aes(x = Time, y = mn.wt))+
geom_point(aes(colour = Diet))+
geom_line(aes(colour = Diet))+
theme_pubr()+
theme(legend.position = "right")+
labs(title = "Variation in Mean Chicken weight by Diet",
subtitle = "Observing changes over time",
y = "Mean weight (g)",x = "No. of Days")
|
/Day_4_BioStats.R
|
no_license
|
MatthewArendse/BioStats_R_2018
|
R
| false
| false
| 5,449
|
r
|
#Day_4_BioStats
#19/04/2018
#Matthew_Arendse
#3440197
#Day 4 of the BSc BCB(Hons) BioStats Module 2018: ANOVA Tests
# Load Libraries ----------------------------------------------------------
library(tidyverse)
library(ggpubr)
# Run a T-Test ------------------------------------------------------------
# First load the data
chicks <- as_tibble(ChickWeight)
# Then subset out only the sample sets to be compared
chicks_sub <- chicks %>%
filter(Diet %in% c(1, 2), Time == 21) # used to filter out and retain only the data specified
# Run the T-test
t.test(weight ~ Diet, data = chicks_sub, var.equal = FALSE)
# Run a one-way ANOVA -----------------------------------------------------
# Research Question: Is there a difference in chicken mass after 21 days based on diet type?
# Hypothesis (0): There is no difference in mean chicken mass after 21 days based on diet type.
# Hypothesis (1): A difference exists in mean chicken mass after 21 days based on diet type.
# subset the data for Day 21 only
chicks21 <- chicks %>%
filter(Time == 21)
# Run the ANOVA
chicks.aov1 <- aov(weight ~ Diet, data = chicks21)
summary(chicks.aov1)
# F statistic < 0.05 therefore we reject the Null Hypothesis
# A significant difference does exist in mean chicken mass after 21 days based on diet type.
chick21_av <- chicks21 %>%
group_by(Diet) %>%
summarise(mn.wt = mean(weight))
ggplot(data = chicks21, aes( x = Diet , y = weight))+
geom_boxplot(aes(fill = Diet), notch = TRUE)+
geom_segment(aes(x = Diet, xend = Diet, y = weight, yend = weight+2))+#This plot allows us to see which diet types are significantly different from one another
geom_point(data = chick21_av, size = 2, shape = 16,
aes(y = mn.wt), colour = "yellow") +
labs(title = "Variation in mass of chickens",
subtitle = "Based on diet type", y = "Weight (g)",x = "Diet Type")
# To test where the significant difference lies after an ANOVA has been done
TukeyHSD(chicks.aov1)
# Try to visualise
?TukeyHSD
chicks.aov1 <- aov(weight ~ Diet, data = chicks21)
TukeyHSD(chicks.aov1, "Diet", ordered = TRUE)
# In Base R
plot(TukeyHSD(chicks.aov1, "Diet"))
# Using ggplot
chicks_Tukey <- as.data.frame(TukeyHSD(aov(weight ~ Diet, data = chicks21))$Diet)
chicks_Tukey$pairs <- as.factor(row.names(chicks_Tukey))
ggplot(chicks_Tukey, aes(x = pairs, y = diff))+ #group = 1 states that all points plotted belong to one group and that said group needs to be connected by a line.
geom_errorbar(aes(ymin = lwr,
ymax = upr))+
geom_point(aes(colour = pairs))+
geom_hline(yintercept = 0, linetype = "dashed")+
labs(title = "TukeyHSD for one-tail ANOVA",
subtitle = "For variation in mean chick mass by diet type",
y = "Difference",x = "Diet Pairs")+
theme_pubr()+
theme(legend.position = "right")
# RWS: Very nice!
# Play Own Data
Vipers <- read_csv("C:/R Workshop 2018/Vipers(Max SVL).csv")
Av_all_vipers <- Vipers %>%
na.omit() %>%
group_by(SF) %>%
summarise(mn.TL = mean(TL))
vipers.aov1 <- aov(TL ~ SF, data = Vipers)
summary(vipers.aov1)
TukeyHSD(vipers.aov1, "SF", ordered = TRUE)
#Base R plot
plot(TukeyHSD(vipers.aov1, "SF"))
#Tidy R plot
vipers_Tukey <- as.data.frame(TukeyHSD(aov(TL ~ SF, data = Vipers))$SF)
vipers_Tukey$SF <- as.factor(row.names(vipers_Tukey))
ggplot(vipers_Tukey, aes(x = SF, y = diff))+
geom_errorbar(aes(ymin = lwr,
ymax = upr))+
geom_point(aes(colour = SF))+
geom_hline(yintercept = 0, linetype = "dashed")+
labs(title = "TukeyHSD for one-tail ANOVA",
subtitle = "For variation in mean body size by Clade",
y = "Difference",x = "Diet Pairs")+
theme_pubr()+
theme(legend.position = "right")
# Here we see that viperines are significantly smaller (on average) than crotalines
# Run a Multiple Factor ANOVA ---------------------------------------------------
# does time have an effect on chicken mass
chicks.aov2 <- aov(weight ~ as.factor(Time), data = filter(chicks, Time %in% c(0, 2, 10, 21)))
summary(chicks.aov2)
#Tukey post-hoc plot
plot(TukeyHSD(chicks.aov2))
# How to run a Multiple Factor ANOVA
summary(aov(weight ~ Diet * as.factor(Time), #this is where you add more factors
data = filter(chicks, Time %in% c(4, 21)))) #this is used to specify the time range
TukeyHSD(aov(weight ~ Diet * as.factor(Time), data = filter(chicks, Time %in% c(0, 21))))
#Tukey post-hoc plot
plot(TukeyHSD(aov(weight ~ Diet * as.factor(Time), data = filter(chicks, Time %in% c(0, 21)))))
#Better Visualise the Data
chicks_mn <- ChickWeight %>%
group_by(Diet, Time) %>%
summarise(mn.wt = mean(weight,na.rm = TRUE))
ggplot(data = chicks_mn, aes(x = Time, y = mn.wt))+
geom_point(aes(colour = Diet))+
geom_line(aes(colour = Diet))+
theme_pubr()+
theme(legend.position = "right")+
labs(title = "Variation in Mean Chicken weight by Diet",
subtitle = "Observing changes over time",
y = "Mean weight (g)",x = "No. of Days")
|
library(shiny)

# Custom Shiny input that measures typing speed.
#
# Renders a labelled <textarea> plus an HTML dependency ("typing.js", looked
# up relative to the app directory) whose JavaScript binding reports back to
# Shiny under `input[[inputId]]`.
#
# Args:
#   inputId:     id of the input; the JS binding reports under this id.
#   label:       label text displayed above the textarea.
#   placeholder: optional placeholder text shown while the textarea is empty.
typingSpeedInput <- function(inputId, label, placeholder = NULL) {
  div(
    class = "form-group typing-speed",
    # BUG FIX: `label` was accepted but never rendered, leaving an empty
    # <label> element; pass it as the element's text content.
    tags$label(class = "control-label", `for` = inputId, label),
    tags$textarea(id = inputId, class = "form-control", placeholder = placeholder),
    htmltools::htmlDependency(
      name = "typing-speed",
      version = "0.0.1",
      src = ".",
      script = "typing.js",
      all_files = FALSE
    )
  )
}

ui <- fluidPage(
  # textAreaInput("typing", "Type here..."),
  typingSpeedInput("typing", "Type here..."),
  verbatimTextOutput("debug")
)

# Echo whatever the custom input binding sends, for debugging.
server <- function(input, output, session) {
  output$debug <- renderPrint(input$typing)
}

shinyApp(ui, server)
|
/frappeCharts/type-racer-2/app.R
|
no_license
|
holmesjoli/rconf
|
R
| false
| false
| 692
|
r
|
library(shiny)

# Custom Shiny input that measures typing speed.
#
# Renders a labelled <textarea> plus an HTML dependency ("typing.js", looked
# up relative to the app directory) whose JavaScript binding reports back to
# Shiny under `input[[inputId]]`.
#
# Args:
#   inputId:     id of the input; the JS binding reports under this id.
#   label:       label text displayed above the textarea.
#   placeholder: optional placeholder text shown while the textarea is empty.
typingSpeedInput <- function(inputId, label, placeholder = NULL) {
  div(
    class = "form-group typing-speed",
    # BUG FIX: `label` was accepted but never rendered, leaving an empty
    # <label> element; pass it as the element's text content.
    tags$label(class = "control-label", `for` = inputId, label),
    tags$textarea(id = inputId, class = "form-control", placeholder = placeholder),
    htmltools::htmlDependency(
      name = "typing-speed",
      version = "0.0.1",
      src = ".",
      script = "typing.js",
      all_files = FALSE
    )
  )
}

ui <- fluidPage(
  # textAreaInput("typing", "Type here..."),
  typingSpeedInput("typing", "Type here..."),
  verbatimTextOutput("debug")
)

# Echo whatever the custom input binding sends, for debugging.
server <- function(input, output, session) {
  output$debug <- renderPrint(input$typing)
}

shinyApp(ui, server)
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{plot_sizecomp_res}
\alias{plot_sizecomp_res}
\title{Plot size composition residuals}
\usage{
plot_sizecomp_res(replist, which_plots = "all")
}
\arguments{
\item{replist}{List object created by read_admb function}
\item{which_plots}{Which plots to produce; defaults to \code{"all"}.}
}
\value{
Bubble plot of size composition residuals
}
\description{
TODO: Insert more information here.
}
|
/Rsrc/man/plot_sizecomp_res.Rd
|
no_license
|
LarryJacobson/gmacs
|
R
| false
| false
| 379
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{plot_sizecomp_res}
\alias{plot_sizecomp_res}
\title{Plot size composition residuals}
\usage{
plot_sizecomp_res(replist, which_plots = "all")
}
\arguments{
\item{replist}{List object created by read_admb function}
\item{which_plots}{Which plots to produce; defaults to \code{"all"}.}
}
\value{
Bubble plot of size composition residuals
}
\description{
TODO: Insert more information here.
}
|
#' Template Functionalities.
#'
#' @rdname TemplatesFunctions
#' @name TemplatesFunctions
#'
#' @description
#' \describe{
#' \item{\code{adjust_binding_regions}}{Adjusts the existing annotation
#' of binding regions by specifying
#' a new binding interval relative to the existing binding region.}
#' \item{\code{assign_binding_regions}}{Assigns the primer target binding
#' regions to a set of template sequences.}
#' \item{\code{update_template_cvg}}{Annotates the template coverage.}
#' \item{\code{select_regions_by_conservation}}{Computes Shannon entropy
#' for the defined binding regions and determines the most conserved regions.}
#' }
#'
#' @param region.fw Interval of new binding regions relative to the forward binding region defined in \code{template.df}.
#' @param region.rev Interval of new binding regions relative to the reverse binding region defined in \code{template.df}
#' @param template.df An object of class \code{Templates}.
#' @param fw Binding regions for forward primers. Either a numeric interval indicating a uniform
#' binding range relative to the template 5' end or a path to a FASTA file providing
#' binding sequences for every template. If \code{fw} is missing, only
#' \code{rev} is considered.
#' @param rev Binding regions for reverse primers. Either a numeric interval indicating a uniform
#' binding range relative to the template 3' end or the path to a FASTA file providing
#' binding sequences for every template. If \code{rev} is missing,
#' only \code{fw} is considered.
#' @param optimize.region If \code{TRUE}, the binding regions
#' specified via \code{fw} and \code{rev} are
#' adjusted such that binding regions that may form secondary structures are
#' avoided. This feature requires ViennaRNA (see notes). If \code{FALSE}
#' (the default), the input binding regions are not modified.
#' @param primer.length A numeric scalar providing the probe length that is used for
#' adjusting the primer binding regions when \code{optimize.region} is \code{TRUE}.
#' @param gap.char The character in the input file representing gaps.
#' @param primer.df An object of class \code{Primers} containing
#' primers with annotated coverage that are to be used to update
#' the template coverage in \code{template.df}.
#' @param mode.directionality The directionality of primers/templates.
#' @param win.len The extent of the desired primer binding region.
#' This should be smaller than the \code{allowed.region}. The default is 30.
#' @param by.group Shall the determination of binding regions be stratified
#' according to the groups defined in \code{template.df}. By default,
#' this is set to \code{TRUE}.
NULL
|
/R/TemplatesDoc.R
|
no_license
|
dfajar2/openPrimeR
|
R
| false
| false
| 2,660
|
r
|
#' Template Functionalities.
#'
#' @rdname TemplatesFunctions
#' @name TemplatesFunctions
#'
#' @description
#' \describe{
#' \item{\code{adjust_binding_regions}}{Adjusts the existing annotation
#' of binding regions by specifying
#' a new binding interval relative to the existing binding region.}
#' \item{\code{assign_binding_regions}}{Assigns the primer target binding
#' regions to a set of template sequences.}
#' \item{\code{update_template_cvg}}{Annotates the template coverage.}
#' \item{\code{select_regions_by_conservation}}{Computes Shannon entropy
#' for the defined binding regions and determines the most conserved regions.}
#' }
#'
#' @param region.fw Interval of new binding regions relative to the forward binding region defined in \code{template.df}.
#' @param region.rev Interval of new binding regions relative to the reverse binding region defined in \code{template.df}
#' @param template.df An object of class \code{Templates}.
#' @param fw Binding regions for forward primers. Either a numeric interval indicating a uniform
#' binding range relative to the template 5' end or a path to a FASTA file providing
#' binding sequences for every template. If \code{fw} is missing, only
#' \code{rev} is considered.
#' @param rev Binding regions for reverse primers. Either a numeric interval indicating a uniform
#' binding range relative to the template 3' end or the path to a FASTA file providing
#' binding sequences for every template. If \code{rev} is missing,
#' only \code{fw} is considered.
#' @param optimize.region If \code{TRUE}, the binding regions
#' specified via \code{fw} and \code{rev} are
#' adjusted such that binding regions that may form secondary structures are
#' avoided. This feature requires ViennaRNA (see notes). If \code{FALSE}
#' (the default), the input binding regions are not modified.
#' @param primer.length A numeric scalar providing the probe length that is used for
#' adjusting the primer binding regions when \code{optimize.region} is \code{TRUE}.
#' @param gap.char The character in the input file representing gaps.
#' @param primer.df An object of class \code{Primers} containing
#' primers with annotated coverage that are to be used to update
#' the template coverage in \code{template.df}.
#' @param mode.directionality The directionality of primers/templates.
#' @param win.len The extent of the desired primer binding region.
#' This should be smaller than the \code{allowed.region}. The default is 30.
#' @param by.group Shall the determination of binding regions be stratified
#' according to the groups defined in \code{template.df}. By default,
#' this is set to \code{TRUE}.
NULL
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 24778
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 24778
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/incrementer-encoder/incrementer-enc02-nonuniform-depth-15.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9553
c no.of clauses 24778
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 24778
c
c QBFLIB/Miller-Marin/incrementer-encoder/incrementer-enc02-nonuniform-depth-15.qdimacs 9553 24778 E1 [] 0 32 9445 24778 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/incrementer-encoder/incrementer-enc02-nonuniform-depth-15/incrementer-enc02-nonuniform-depth-15.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 707
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 24778
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 24778
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/incrementer-encoder/incrementer-enc02-nonuniform-depth-15.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9553
c no.of clauses 24778
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 24778
c
c QBFLIB/Miller-Marin/incrementer-encoder/incrementer-enc02-nonuniform-depth-15.qdimacs 9553 24778 E1 [] 0 32 9445 24778 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{melt.gct}
\alias{melt.gct}
\title{Transform a GCT object in to a long form \code{\link{data.table}} (aka 'melt')}
\usage{
melt.gct(g, suffixes = NULL, remove_symmetries = F, keep_rdesc = T,
keep_cdesc = T, ...)
}
\arguments{
\item{g}{the GCT object}
\item{suffixes}{the character suffixes to be applied if there are
collisions between the names of the row and column descriptors}
\item{remove_symmetries}{boolean indicating whether to remove
the lower triangle of the matrix (only applies if \code{g@mat} is symmetric)}
\item{keep_rdesc}{boolean indicating whether to keep the row
descriptors in the final result}
\item{keep_cdesc}{boolean indicating whether to keep the column
descriptors in the final result}
\item{...}{further arguments passed along to \code{data.table::merge}}
}
\value{
a \code{\link{data.table}} object with the row and column ids and the matrix
values and (optionally) the row and column descriptors
}
\description{
Utilizes the \code{\link[data.table]{melt}} function to transform the
matrix into long form. Optionally can include the row and column
annotations in the transformed \code{\link{data.table}}.
}
\examples{
# simple melt, keeping both row and column meta
head(melt.gct(ds))
# update row/colum suffixes to indicate rows are genes, columns experiments
head(melt.gct(ds, suffixes = c("_gene", "_experiment")))
# ignore row/column meta
head(melt.gct(ds, keep_rdesc = FALSE, keep_cdesc = FALSE))
}
\seealso{
Other GCT utilities: \code{\link{annotate.gct}},
\code{\link{merge.gct}}, \code{\link{rank.gct}},
\code{\link{subset.gct}}
}
\concept{GCT utilities}
|
/man/melt.gct.Rd
|
permissive
|
wangdi2014/cmapR
|
R
| false
| true
| 1,701
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{melt.gct}
\alias{melt.gct}
\title{Transform a GCT object in to a long form \code{\link{data.table}} (aka 'melt')}
\usage{
melt.gct(g, suffixes = NULL, remove_symmetries = F, keep_rdesc = T,
keep_cdesc = T, ...)
}
\arguments{
\item{g}{the GCT object}
\item{suffixes}{the character suffixes to be applied if there are
collisions between the names of the row and column descriptors}
\item{remove_symmetries}{boolean indicating whether to remove
the lower triangle of the matrix (only applies if \code{g@mat} is symmetric)}
\item{keep_rdesc}{boolean indicating whether to keep the row
descriptors in the final result}
\item{keep_cdesc}{boolean indicating whether to keep the column
descriptors in the final result}
\item{...}{further arguments passed along to \code{data.table::merge}}
}
\value{
a \code{\link{data.table}} object with the row and column ids and the matrix
values and (optionally) the row and column descriptors
}
\description{
Utilizes the \code{\link[data.table]{melt}} function to transform the
matrix into long form. Optionally can include the row and column
annotations in the transformed \code{\link{data.table}}.
}
\examples{
# simple melt, keeping both row and column meta
head(melt.gct(ds))
# update row/colum suffixes to indicate rows are genes, columns experiments
head(melt.gct(ds, suffixes = c("_gene", "_experiment")))
# ignore row/column meta
head(melt.gct(ds, keep_rdesc = FALSE, keep_cdesc = FALSE))
}
\seealso{
Other GCT utilities: \code{\link{annotate.gct}},
\code{\link{merge.gct}}, \code{\link{rank.gct}},
\code{\link{subset.gct}}
}
\concept{GCT utilities}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/soccerStandardiseCols.R
\name{soccerStandardiseCols}
\alias{soccerStandardiseCols}
\title{Rename columns in a dataframe for easier use with other {soccermatics} functions}
\usage{
soccerStandardiseCols(df, method = c("statsbomb"))
}
\arguments{
\item{df}{a dataframe of Statsbomb event data}
\item{method}{source of data; only \code{"statsbomb"} currently supported}
}
\value{
a dataframe with column names x, y, distance, angle, player_id, player_name, team_name, event_name
}
\description{
Rename columns (e.g. \code{"location.x"} -> \code{"x"}, \code{"team.name"} -> \code{"team"}, etc...) to interface directly with other soccermatics functions without having to explicitly define column names as arguments. Currently only supports Statsbomb data.
}
\examples{
library(dplyr)
data(statsbomb)
# transform x,y-coords, standardise column names
my_df <- statsbomb \%>\%
soccerTransform(method = 'statsbomb') \%>\%
soccerStandardiseCols(method = 'statsbomb')
# feed to other functions without defining variables,
# x, y, id,distance, angle, etc...
soccerHeatmap(my_df)
}
|
/man/soccerStandardiseCols.Rd
|
no_license
|
mthesen/soccermatics
|
R
| false
| true
| 1,160
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/soccerStandardiseCols.R
\name{soccerStandardiseCols}
\alias{soccerStandardiseCols}
\title{Rename columns in a dataframe for easier use with other {soccermatics} functions}
\usage{
soccerStandardiseCols(df, method = c("statsbomb"))
}
\arguments{
\item{df}{a dataframe of Statsbomb event data}
\item{method}{source of data; only \code{"statsbomb"} currently supported}
}
\value{
a dataframe with column names x, y, distance, angle, player_id, player_name, team_name, event_name
}
\description{
Rename columns (e.g. \code{"location.x"} -> \code{"x"}, \code{"team.name"} -> \code{"team"}, etc...) to interface directly with other soccermatics functions without having to explicitly define column names as arguments. Currently only supports Statsbomb data.
}
\examples{
library(dplyr)
data(statsbomb)
# transform x,y-coords, standardise column names
my_df <- statsbomb \%>\%
soccerTransform(method = 'statsbomb') \%>\%
soccerStandardiseCols(method = 'statsbomb')
# feed to other functions without defining variables,
# x, y, id,distance, angle, etc...
soccerHeatmap(my_df)
}
|
library(tidyverse)
# Quick interactive sanity checks on ggplot2's built-in diamonds data
# (printed only, not stored).
head(diamonds)
nrow(diamonds)
# Work on a copy so the original dataset is left untouched.
diamond<-diamonds
# Collapse clarity's 8 ordered grades (I1 < SI2 < SI1 < VS2 < VS1 < VVS2 <
# VVS1 < IF) into 4 coarse bins: worst / okay / good / top.
levels(diamond$clarity)[levels(diamond$clarity) == "I1"] <- "worst"
levels(diamond$clarity)[levels(diamond$clarity) == "SI2"] <- "worst"
levels(diamond$clarity)[levels(diamond$clarity) == "SI1"] <- "okay"
levels(diamond$clarity)[levels(diamond$clarity) == "VS2"] <- "okay"
levels(diamond$clarity)[levels(diamond$clarity) == "VS1"] <- "good"
levels(diamond$clarity)[levels(diamond$clarity) == "VVS2"] <- "good"
levels(diamond$clarity)[levels(diamond$clarity) == "VVS1"] <- "top"
levels(diamond$clarity)[levels(diamond$clarity) == "IF"] <- "top"
# Merge the "Good" cut grade into "Fair", reducing cut by one level.
levels(diamond$cut)[levels(diamond$cut) == "Good"] <- "Fair"
# Collapse color's 7 grades (D = best ... J = worst) into 3 bins.
levels(diamond$color)[levels(diamond$color) == "D"] <- "best"
levels(diamond$color)[levels(diamond$color) == "E"] <- "best"
levels(diamond$color)[levels(diamond$color) == "F"] <- "well"
levels(diamond$color)[levels(diamond$color) == "G"] <- "well"
levels(diamond$color)[levels(diamond$color) == "H"] <- "well"
levels(diamond$color)[levels(diamond$color) == "I"] <- "worst"
levels(diamond$color)[levels(diamond$color) == "J"] <- "worst"
### put y variable to the last
# NOTE(review): the comment above says "put y variable to the last", but the
# mutate() below does not reorder columns -- it converts the three factors to
# character and log-transforms price and carat. Confirm the intent.
diam.mut <- diamond %>%
  mutate(cut = as.character(cut), color = as.character(color), clarity = as.character(clarity), price = log(price), carat = log(carat))
# Approximate statistical leverage scores via a two-sided random sketch.
#
# Builds the model matrix of `price ~ . - 1` from `data`, compresses its rows
# with an r1 x n random +/-1 (Rademacher) projection to obtain a cheap R
# factor, then compresses the (approximately orthonormalised) columns with a
# p x r2 random +/-1 projection. The result is the vector of leverage scores
# of the sketched matrix divided by r2, usable as sampling probabilities.
#
# Args:
#   r1:   rows in the row sketch (must be >= p so qr.R() is invertible).
#   r2:   columns in the column sketch.
#   p:    number of columns of the model matrix (validated below).
#   data: data frame with a `price` column plus predictors.
# Returns:
#   Numeric vector of length nrow(data): non-negative approximate
#   leverage-based probabilities summing to rank/r2.
BFast <- function(r1, r2, p, data) {
  mm <- model.matrix(price ~ . - 1, data)
  # PI2 multiplies against the p columns of the sketched design; fail fast
  # on a mismatch instead of a cryptic non-conformable-arguments error.
  stopifnot(ncol(mm) == p)
  # Row sketch: n rows -> r1 rows.
  PI1 <- matrix(sample(c(-1, 1), r1 * nrow(data), replace = TRUE),
                nrow = r1, ncol = nrow(data))
  # Column sketch: p columns -> r2 columns.
  PI2 <- matrix(sample(c(-1, 1), p * r2, replace = TRUE),
                nrow = p, ncol = r2)
  # R factor of the row-sketched design; mm %*% solve(R) is approximately
  # orthonormal, so leverages of the sketch approximate the true ones.
  R <- qr.R(qr(PI1 %*% mm))
  B <- mm %*% solve(R) %*% PI2
  # Diagonal of the hat matrix B (B'B)^{-1} B', scaled by r2.
  rowSums(B %*% solve(crossprod(B)) * B) / r2
}
#### MSE ###
MSEaverage<-function(n, loop, r, r1, r2, p, method){
  #n is nrow(data), p # parameter, r size of subsample, loop # of loops for error term
  #dataType: GA, T3, T1, method: UNIF, LEV, BFSLEV, BFLEV, BFLEVUNW, BFDLW
  # r1, r2 are for Bfast leverage
  # Returns the average in-sample MSE over `loop` replications of fitting
  # price ~ . - 1 to a size-r subsample of the *global* `diam.mut`, where the
  # subsample is drawn according to `method`:
  #   UNIF     - uniform sampling, unweighted fit
  #   LEV      - exact leverage-score sampling, 1/pi weighted fit
  #   BFLEV    - BFast approximate-leverage sampling, 1/prob weighted fit
  #   BFSLEV   - shrunk sampling and weights (0.9*prob + 0.1*uniform)
  #   BFLEVUNW - BFast sampling, unweighted fit
  #   BFDLEV   - BFast sampling, prob-weighted fit (spelled "BFDLW" in the
  #              header comment above -- the code uses "BFDLEV")
  #   true     - fit on the full data, no subsampling
  # NOTE(review): the argument `n` is never used in the body (the data size
  # is fixed by nrow(diam.mut)) -- confirm whether it can be dropped.
  storeMSE<-matrix(NA, nrow=loop, ncol=1)
  for (i in 1:loop){
    # Full-data fit, used only to obtain exact leverage scores for "LEV".
    fit <- lm(price ~ .-1, x=TRUE,data = diam.mut)
    leverage<-hatvalues(fit)
    pi<-leverage/sum(leverage)
    if (method=="UNIF"){
      SampInd <- sample(1:nrow(diam.mut), size = r, replace = T)
      slr<-lm(price ~ .-1, x=TRUE,data = diam.mut[SampInd,])
    } else if (method=="LEV"){
      SampInd <- sample(1:nrow(diam.mut), size = r, prob = pi, rep = T)
      slr<-lm(price ~ .-1, x=TRUE,data = diam.mut[SampInd,], weights = 1/pi[SampInd])
    } else if (method=="BFLEV"){
      # `dat` appends the sampling probabilities as the last column; the fits
      # below drop that column again via 1:(ncol(dat)-1).
      BFprob<-BFast(r1, r2, p, diam.mut)
      dat<-cbind(diam.mut, BFprob)
      SampInd <- sample(1:nrow(dat), size = r, prob = dat$BFprob, rep = T)
      slr<-lm(price ~ .-1, x=TRUE,data = dat[SampInd, 1:(ncol(dat)-1)], weights = 1/dat$BFprob[SampInd])
    } else if (method=="BFSLEV"){
      BFprob<-BFast(r1, r2, p, diam.mut)
      dat<-cbind(diam.mut, BFprob)
      SampInd <- sample(1:nrow(dat), size = r, prob = (0.9*dat$BFprob+0.1*(1/nrow(dat))), rep = T)
      slr<-lm(price ~ .-1, x=TRUE,data = dat[SampInd, 1:(ncol(dat)-1)], weights=1/(0.9*dat$BFprob+0.1*(1/nrow(dat)))[SampInd])
    } else if (method=="BFLEVUNW"){
      BFprob<-BFast(r1, r2, p, diam.mut)
      dat<-cbind(diam.mut, BFprob)
      SampInd <- sample(1:nrow(dat), size = r, prob = dat$BFprob, rep = T)
      slr<-lm(price ~ .-1, x=TRUE,data = dat[SampInd, 1:(ncol(dat)-1)])
    } else if (method=="BFDLEV"){
      BFprob<-BFast(r1, r2, p, diam.mut)
      dat<-cbind(diam.mut, BFprob)
      SampInd <- sample(1:nrow(dat), size = r, prob = dat$BFprob, rep = T)
      slr<-lm(price ~ .-1, x=TRUE,data = dat[SampInd, 1:(ncol(dat)-1)], weights = dat$BFprob[SampInd])
    } else if (method=="true"){
      slr<-lm(price ~ .-1, x=TRUE,data = diam.mut)
    }
    # MSE of the subsample fit, evaluated on the full data.
    storeMSE[i, ]<-mean((predict(slr, diam.mut)-diam.mut$price)^2)
  }
  return(mean(storeMSE))
}
# Results table: one row per (data, method, n, r) combination.
# NOTE(review): rows are appended one at a time by index, which copies the
# data frame on every assignment -- acceptable for 60 rows, but
# preallocating would be preferable if the grid grows.
diamondMSE<-as.data.frame(matrix(NA, ncol=5))
names(diamondMSE)<-c("data", "method","data size", "subsample size", "MSE")
route<-0
# p = number of model-matrix columns of price ~ . - 1 on diam.mut;
# r1 = 2p is the first-sketch size passed through to BFast().
p=15
r1=2*p
for (n in c(nrow(diam.mut))){
# Subsample sizes r swept over; r2 is the second-sketch size for BFast().
subsamplesize<-c(150, 200, 250, 300, 400, 500, 1000, 5000, 10000, 50000)
r2=4
for (r in subsamplesize){
for (method in c("UNIF", "LEV", "BFLEV", "BFSLEV", "BFLEVUNW", "BFDLEV")){ #### test which warning is it ####
print(method)
route<-route+1
diamondMSE[route,1]<-"diamonds"
diamondMSE[route, 2]<-method
diamondMSE[route, 3]<-n
diamondMSE[route, 4]<-r
# 1000 Monte-Carlo replicates per cell (see MSEaverage()).
diamondMSE[route, 5]<-MSEaverage(n, 1000, r, r1, r2, p, method=method)
}
}
}
# MSE vs subsample size, one line per subsampling method.  The axis limits
# clip points outside [0, 5000] x [0.025, 0.10] (ggplot warns about them).
ggplot(data=diamondMSE, aes(x=`subsample size`, y=MSE, color=method))+
geom_line()+
ggtitle("diamonds")+
ylim(0.025, 0.10)+
xlim(0, 5000)
### with sim and diamonds, make nice plot, and put into latex, and email Dr. Smucker (black and white)
### change n=1000 from diamond dataset
### n=1000, p=10, 50, 100
### n=5000, p=50, 100, 500
### n=10,000, p=50, 100, 500
|
/BFast on real data/Bfast on diamonds.R
|
no_license
|
wangy63/Leverage-Subsampling
|
R
| false
| false
| 5,019
|
r
|
library(tidyverse)
head(diamonds)
nrow(diamonds)
diamond<-diamonds
levels(diamond$clarity)[levels(diamond$clarity) == "I1"] <- "worst"
levels(diamond$clarity)[levels(diamond$clarity) == "SI2"] <- "worst"
levels(diamond$clarity)[levels(diamond$clarity) == "SI1"] <- "okay"
levels(diamond$clarity)[levels(diamond$clarity) == "VS2"] <- "okay"
levels(diamond$clarity)[levels(diamond$clarity) == "VS1"] <- "good"
levels(diamond$clarity)[levels(diamond$clarity) == "VVS2"] <- "good"
levels(diamond$clarity)[levels(diamond$clarity) == "VVS1"] <- "top"
levels(diamond$clarity)[levels(diamond$clarity) == "IF"] <- "top"
levels(diamond$cut)[levels(diamond$cut) == "Good"] <- "Fair"
levels(diamond$color)[levels(diamond$color) == "D"] <- "best"
levels(diamond$color)[levels(diamond$color) == "E"] <- "best"
levels(diamond$color)[levels(diamond$color) == "F"] <- "well"
levels(diamond$color)[levels(diamond$color) == "G"] <- "well"
levels(diamond$color)[levels(diamond$color) == "H"] <- "well"
levels(diamond$color)[levels(diamond$color) == "I"] <- "worst"
levels(diamond$color)[levels(diamond$color) == "J"] <- "worst"
### put y variable to the last
diam.mut <- diamond %>%
mutate(cut = as.character(cut), color = as.character(color), clarity = as.character(clarity), price = log(price), carat = log(carat))
BFast<-function(r1, r2, p, data){
formula = as.formula("price ~ . -1")
dt = model.matrix(formula, data)
PI1.BF <- matrix(sample(c(-1, 1), r1 * nrow(data), replace = TRUE), nrow = r1, ncol = nrow(data))
PI2.BF <- matrix(sample(c(-1, 1), p * r2, replace = TRUE), nrow = p, ncol = r2) ### dummy variable?
R.BF <- qr.R(qr((PI1.BF)%*%(dt)))
Rinv.BF <- solve(R.BF)
dtfrm.BF <- (dt)%*%(Rinv.BF)%*%(PI2.BF)
Xpinv.BF <- solve(t(dtfrm.BF)%*%(dtfrm.BF))
prob.BF <- rowSums((dtfrm.BF)%*%(Xpinv.BF) * (dtfrm.BF))/r2
return(prob.BF)
}
#### MSE ###
MSEaverage<-function(n, loop, r, r1, r2, p, method){
#n is nrow(data), p # parameter, r size of subsample, loop # of loops for error term
#dataType: GA, T3, T1, method: UNIF, LEV, BFSLEV, BFLEV, BFLEVUNW, BFDLW
# r1, r2 are for Bfast leverage
storeMSE<-matrix(NA, nrow=loop, ncol=1)
for (i in 1:loop){
fit <- lm(price ~ .-1, x=TRUE,data = diam.mut)
leverage<-hatvalues(fit)
pi<-leverage/sum(leverage)
if (method=="UNIF"){
SampInd <- sample(1:nrow(diam.mut), size = r, replace = T)
slr<-lm(price ~ .-1, x=TRUE,data = diam.mut[SampInd,])
} else if (method=="LEV"){
SampInd <- sample(1:nrow(diam.mut), size = r, prob = pi, rep = T)
slr<-lm(price ~ .-1, x=TRUE,data = diam.mut[SampInd,], weights = 1/pi[SampInd])
} else if (method=="BFLEV"){
BFprob<-BFast(r1, r2, p, diam.mut)
dat<-cbind(diam.mut, BFprob)
SampInd <- sample(1:nrow(dat), size = r, prob = dat$BFprob, rep = T)
slr<-lm(price ~ .-1, x=TRUE,data = dat[SampInd, 1:(ncol(dat)-1)], weights = 1/dat$BFprob[SampInd])
} else if (method=="BFSLEV"){
BFprob<-BFast(r1, r2, p, diam.mut)
dat<-cbind(diam.mut, BFprob)
SampInd <- sample(1:nrow(dat), size = r, prob = (0.9*dat$BFprob+0.1*(1/nrow(dat))), rep = T)
slr<-lm(price ~ .-1, x=TRUE,data = dat[SampInd, 1:(ncol(dat)-1)], weights=1/(0.9*dat$BFprob+0.1*(1/nrow(dat)))[SampInd])
} else if (method=="BFLEVUNW"){
BFprob<-BFast(r1, r2, p, diam.mut)
dat<-cbind(diam.mut, BFprob)
SampInd <- sample(1:nrow(dat), size = r, prob = dat$BFprob, rep = T)
slr<-lm(price ~ .-1, x=TRUE,data = dat[SampInd, 1:(ncol(dat)-1)])
} else if (method=="BFDLEV"){
BFprob<-BFast(r1, r2, p, diam.mut)
dat<-cbind(diam.mut, BFprob)
SampInd <- sample(1:nrow(dat), size = r, prob = dat$BFprob, rep = T)
slr<-lm(price ~ .-1, x=TRUE,data = dat[SampInd, 1:(ncol(dat)-1)], weights = dat$BFprob[SampInd])
} else if (method=="true"){
slr<-lm(price ~ .-1, x=TRUE,data = diam.mut)
}
storeMSE[i, ]<-mean((predict(slr, diam.mut)-diam.mut$price)^2)
}
return(mean(storeMSE))
}
diamondMSE<-as.data.frame(matrix(NA, ncol=5))
names(diamondMSE)<-c("data", "method","data size", "subsample size", "MSE")
route<-0
p=15
r1=2*p
for (n in c(nrow(diam.mut))){
subsamplesize<-c(150, 200, 250, 300, 400, 500, 1000, 5000, 10000, 50000)
r2=4
for (r in subsamplesize){
for (method in c("UNIF", "LEV", "BFLEV", "BFSLEV", "BFLEVUNW", "BFDLEV")){ #### test which warning is it ####
print(method)
route<-route+1
diamondMSE[route,1]<-"diamonds"
diamondMSE[route, 2]<-method
diamondMSE[route, 3]<-n
diamondMSE[route, 4]<-r
diamondMSE[route, 5]<-MSEaverage(n, 1000, r, r1, r2, p, method=method)
}
}
}
ggplot(data=diamondMSE, aes(x=`subsample size`, y=MSE, color=method))+
geom_line()+
ggtitle("diamonds")+
ylim(0.025, 0.10)+
xlim(0, 5000)
### with sim and diamonds, make nice plot, and put into latex, and email Dr. Smucker (black and white)
### change n=1000 from diamond dataset
### n=1000, p=10, 50, 100
### n=5000, p=50, 100, 500
### n=10,000, p=50, 100, 500
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eurostat-package.R
\docType{package}
\name{eurostat-package}
\alias{eurostat-package}
\alias{eurostat}
\title{R Tools for Eurostat open data}
\description{
Brief summary of the eurostat package
}
\details{
\tabular{ll}{
\tab \cr
\strong{Package} \tab eurostat \cr
\strong{Type} \tab Package \cr
\strong{Version} \tab 3.8.3 \cr
\strong{Date} \tab 2014-2022 \cr
\strong{License} \tab BSD_2_clause + file LICENSE \cr
\strong{LazyLoad} \tab yes \cr
}
R Tools for Eurostat Open Data
}
\section{regions functions}{
For working with sub-national statistics the basic functions of the
regions package are imported \url{https://regions.dataobservatory.eu/}.
}
\examples{
library(eurostat)
}
\references{
See \code{citation("eurostat")}:
\if{html}{\out{<div class="sourceCode">}}\preformatted{#
# Kindly cite the eurostat R package as follows:
#
# (C) Leo Lahti, Janne Huovari, Markus Kainu, Przemyslaw Biecek.
# Retrieval and analysis of Eurostat open data with the eurostat
# package. R Journal 9(1):385-392, 2017. doi: 10.32614/RJ-2017-019
# Package URL: http://ropengov.github.io/eurostat Article URL:
# https://journal.r-project.org/archive/2017/RJ-2017-019/index.html
#
# A BibTeX entry for LaTeX users is
#
# @Article\{,
# title = \{Retrieval and Analysis of Eurostat Open Data with the eurostat Package\},
# author = \{Leo Lahti and Janne Huovari and Markus Kainu and Przemyslaw Biecek\},
# journal = \{The R Journal\},
# volume = \{9\},
# number = \{1\},
# pages = \{385--392\},
# year = \{2017\},
# doi = \{10.32614/RJ-2017-019\},
# url = \{https://doi.org/10.32614/RJ-2017-019\},
# \}
}\if{html}{\out{</div>}}
}
\seealso{
\code{help("regions")}, \url{https://regions.dataobservatory.eu/}
}
\author{
Leo Lahti, Janne Huovari, Markus Kainu, Przemyslaw Biecek
}
\keyword{package}
|
/man/eurostat-package.Rd
|
permissive
|
rOpenGov/eurostat
|
R
| false
| true
| 1,938
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eurostat-package.R
\docType{package}
\name{eurostat-package}
\alias{eurostat-package}
\alias{eurostat}
\title{R Tools for Eurostat open data}
\description{
Brief summary of the eurostat package
}
\details{
\tabular{ll}{
\tab \cr
\strong{Package} \tab eurostat \cr
\strong{Type} \tab Package \cr
\strong{Version} \tab 3.8.3 \cr
\strong{Date} \tab 2014-2022 \cr
\strong{License} \tab BSD_2_clause + file LICENSE \cr
\strong{LazyLoad} \tab yes \cr
}
R Tools for Eurostat Open Data
}
\section{regions functions}{
For working with sub-national statistics the basic functions of the
regions package are imported \url{https://regions.dataobservatory.eu/}.
}
\examples{
library(eurostat)
}
\references{
See \code{citation("eurostat")}:
\if{html}{\out{<div class="sourceCode">}}\preformatted{#
# Kindly cite the eurostat R package as follows:
#
# (C) Leo Lahti, Janne Huovari, Markus Kainu, Przemyslaw Biecek.
# Retrieval and analysis of Eurostat open data with the eurostat
# package. R Journal 9(1):385-392, 2017. doi: 10.32614/RJ-2017-019
# Package URL: http://ropengov.github.io/eurostat Article URL:
# https://journal.r-project.org/archive/2017/RJ-2017-019/index.html
#
# A BibTeX entry for LaTeX users is
#
# @Article\{,
# title = \{Retrieval and Analysis of Eurostat Open Data with the eurostat Package\},
# author = \{Leo Lahti and Janne Huovari and Markus Kainu and Przemyslaw Biecek\},
# journal = \{The R Journal\},
# volume = \{9\},
# number = \{1\},
# pages = \{385--392\},
# year = \{2017\},
# doi = \{10.32614/RJ-2017-019\},
# url = \{https://doi.org/10.32614/RJ-2017-019\},
# \}
}\if{html}{\out{</div>}}
}
\seealso{
\code{help("regions")}, \url{https://regions.dataobservatory.eu/}
}
\author{
Leo Lahti, Janne Huovari, Markus Kainu, Przemyslaw Biecek
}
\keyword{package}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{getYtdCumSharpeCpp}
\alias{getYtdCumSharpeCpp}
\title{Get the cumulative year-to-date Sharpe ratio}
\usage{
getYtdCumSharpeCpp(x)
}
\arguments{
\item{x}{numvec}
}
\description{
Computes the cumulative year-to-date Sharpe ratio of a numeric return series.
}
|
/man/getYtdCumSharpeCpp.Rd
|
no_license
|
lukas1421/chinaTrading
|
R
| false
| true
| 272
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{getYtdCumSharpeCpp}
\alias{getYtdCumSharpeCpp}
\title{get cumu ytd sharpe}
\usage{
getYtdCumSharpeCpp(x)
}
\arguments{
\item{x}{numvec}
}
\description{
get cumu ytd sharpe
}
|
### making maps and preparing shapefiles
## load libraries
library(fields)
library(dplyr)
library(dismo)
library(maptools)
## using file made from Ownbey textbook source
# importing species csv files into R
alliumcanadense <- read.csv("alliumdataset_map_data.csv")
# remove missing data in alliumcanadense
alliumcanadense <- na.omit(alliumcanadense)
# Extract the occurrence records for one variety from the combined table.
# Returns a data frame with Taxon/Latitude/Longitude rows for `taxon` only.
filter_taxon <- function(occurrences, taxon) {
  occurrences %>%
    select(Taxon, Latitude, Longitude) %>%
    filter(Taxon == taxon)
}
alliumcanadense1 <- filter_taxon(alliumcanadense, "Allium canadense var. canadense")
# remove points outside of the climate layer extent (hand-picked row indices
# tied to this exact CSV -- revisit if the input file changes)
alliumcanadense1 <- alliumcanadense1[c(2:5, 8, 10:21), ]
alliumcanadense2 <- filter_taxon(alliumcanadense, "Allium canadense var. ecristatum")
alliumcanadense3 <- filter_taxon(alliumcanadense, "Allium canadense var. Fraseri")
alliumcanadense4 <- filter_taxon(alliumcanadense, "Allium canadense var. hyacinthoides")
alliumcanadense5 <- filter_taxon(alliumcanadense, "Allium canadense var. lavendulare")
# remove point outside of the climate layer extent
alliumcanadense5 <- alliumcanadense5[c(1:6), ]
alliumcanadense6 <- filter_taxon(alliumcanadense, "Allium canadense var. mobilense")
# Overplot all six varieties on the current map device with a fixed colour
# per variety, so every map below uses the same colour coding.
add_variety_points <- function() {
  datasets <- list(alliumcanadense1, alliumcanadense2, alliumcanadense3,
                   alliumcanadense4, alliumcanadense5, alliumcanadense6)
  cols <- c("purple", "orange", "blue", "gray", "red", "dark green")
  for (k in seq_along(datasets)) {
    points(datasets[[k]]$Longitude, datasets[[k]]$Latitude,
           col = cols[k], pch = 20, cex = 2)
  }
}
## quick and dirty plot on map (could also plot points first and add map)
# NOTE(review): xlim = c(-85, -77) frames the US southeast, but the state
# maps below target TX/OK/KS/MO (longitudes roughly -103..-89), so many
# points may fall outside this window -- confirm the intended extent.
US(xlim = c(-85, -77), ylim = c(26, 37))
add_variety_points()
## a slightly more refined map (using built-in state outlines)
midUS <- c("texas", "oklahoma", "kansas", "missouri")
map(database = "state", regions = midUS, interior = T, lwd = 2)
add_variety_points()
## using US census shapefiles, save custom (best) shapefile for modeling later
# download, unzip all state shapefiles to new directory
download.file("http://www2.census.gov/geo/tiger/GENZ2015/shp/cb_2015_us_state_20m.zip", "cb_2015_us_state_20m.zip")
dir.create("shapefiles")
unzip("cb_2015_us_state_20m.zip", exdir = "shapefiles")
# load shapefiles and set projection
# NOTE(review): readShapePoly()/writeSpatialShape() are deprecated in
# maptools; migrate to sf::st_read()/st_write() when this pipeline is next
# touched.
state <- readShapePoly("shapefiles/cb_2015_us_state_20m.shp")
projection(state) <- CRS("+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs")
# extract shapefiles of interest and save to file
midUSCap <- c("Texas", "Oklahoma", "Kansas", "Missouri")
middleUS <- state[as.character(state@data$NAME) %in% midUSCap, ]
writeSpatialShape(middleUS, "shapefiles/middleUS")
# map using the custom shapefile and save the figure to file
dir.create("figures")
pdf(file = "figures/midUSmapping.pdf")
map(middleUS)
add_variety_points()
dev.off()
|
/mapping.R
|
no_license
|
aortiz24/Allium_nicheModeling
|
R
| false
| false
| 4,732
|
r
|
### making maps and preparing shapefiles
## load libraries
library(fields)
library(dplyr)
library(dismo)
library(maptools)
##using file made from Ownbey textbook source
#importing species csv files into R
alliumcanadense<-read.csv("alliumdataset_map_data.csv")
#remove missing data in alliumcanadense
alliumcanadense <- na.omit(alliumcanadense)
#assign scientific name to an object
target1<-c("Allium canadense var. canadense")
#filtered allium canadense canadense csv file
alliumcanadense1<-alliumcanadense %>%
select(Taxon,Latitude,Longitude) %>%
filter(Taxon == target1)
#remove point outside of climate layer extent
alliumcanadense1<-alliumcanadense1[c(2:5,8,10:21),]
#assign scientific name to an object
target2<-c("Allium canadense var. ecristatum")
#filtered allium canadense ecristatum csv file
alliumcanadense2<-alliumcanadense %>%
select(Taxon,Latitude,Longitude) %>%
filter(Taxon == target2)
#assign scientific name to an object
target3<-c("Allium canadense var. Fraseri")
#filtered allium canadense Fraseri csv file
alliumcanadense3<-alliumcanadense %>%
select(Taxon,Latitude,Longitude) %>%
filter(Taxon == target3)
#assign scientific name to an object
target4<-c("Allium canadense var. hyacinthoides")
#filtered allium canadense hyacinthoides csv file
alliumcanadense4<-alliumcanadense %>%
select(Taxon,Latitude,Longitude) %>%
filter(Taxon == target4)
#assign scientific name to an object
target5<-c("Allium canadense var. lavendulare")
#filtered allium canadense lavendulare csv file
alliumcanadense5<-alliumcanadense %>%
select(Taxon,Latitude,Longitude) %>%
filter(Taxon == target5)
#remove point outside of climate layer extent
alliumcanadense5<-alliumcanadense5[c(1:6),]
#assign scientific name to an object
target6<-c("Allium canadense var. mobilense")
#filtered allium canadense mobilense csv file
alliumcanadense6<-alliumcanadense %>%
select(Taxon,Latitude,Longitude) %>%
filter(Taxon == target6)
## quick and dirty plot on map (could also plot points first and add map)
US(xlim=c(-85,-77), ylim=c(26,37))
points(alliumcanadense1$Longitude, alliumcanadense1$Latitude, col='purple', pch=20, cex=2)
points(alliumcanadense2$Longitude, alliumcanadense2$Latitude, col='orange', pch=20, cex=2)
points(alliumcanadense3$Longitude, alliumcanadense3$Latitude, col='blue', pch=20, cex=2)
points(alliumcanadense4$Longitude, alliumcanadense4$Latitude, col='gray', pch=20, cex=2)
points(alliumcanadense5$Longitude, alliumcanadense5$Latitude, col='red', pch=20, cex=2)
points(alliumcanadense6$Longitude, alliumcanadense6$Latitude, col='dark green', pch=20, cex=2)
## a slightly more refined map (using built-in state outlines)
midUS <- c("texas","oklahoma","kansas","missouri")
map(database="state", regions = midUS, interior=T, lwd=2)
points(alliumcanadense1$Longitude, alliumcanadense1$Latitude, col='purple', pch=20, cex=2)
points(alliumcanadense2$Longitude, alliumcanadense2$Latitude, col='orange', pch=20, cex=2)
points(alliumcanadense3$Longitude, alliumcanadense3$Latitude, col='blue', pch=20, cex=2)
points(alliumcanadense4$Longitude, alliumcanadense4$Latitude, col='gray', pch=20, cex=2)
points(alliumcanadense5$Longitude, alliumcanadense5$Latitude, col='red', pch=20, cex=2)
points(alliumcanadense6$Longitude, alliumcanadense6$Latitude, col='dark green', pch=20, cex=2)
## using US census shapefiles, save custom (best) shapefile for modeling later
# download, unzip all state shapefiles to new directory
download.file("http://www2.census.gov/geo/tiger/GENZ2015/shp/cb_2015_us_state_20m.zip", "cb_2015_us_state_20m.zip")
dir.create("shapefiles")
unzip("cb_2015_us_state_20m.zip", exdir="shapefiles")
# load shapefiles and set projection
state <- readShapePoly("shapefiles/cb_2015_us_state_20m.shp")
projection(state) <- CRS("+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs")
# extract shapefiles of interest and save to file
midUSCap <- c("Texas","Oklahoma","Kansas","Missouri")
middleUS <- state[as.character(state@data$NAME) %in% midUSCap, ]
writeSpatialShape(middleUS, "shapefiles/middleUS")
# map using custom shapefile and save to file
dir.create("figures")
pdf(file="figures/midUSmapping.pdf")
map(middleUS)
points(alliumcanadense1$Longitude, alliumcanadense1$Latitude, col='purple', pch=20, cex=2)
points(alliumcanadense2$Longitude, alliumcanadense2$Latitude, col='orange', pch=20, cex=2)
points(alliumcanadense3$Longitude, alliumcanadense3$Latitude, col='blue', pch=20, cex=2)
points(alliumcanadense4$Longitude, alliumcanadense4$Latitude, col='gray', pch=20, cex=2)
points(alliumcanadense5$Longitude, alliumcanadense5$Latitude, col='red', pch=20, cex=2)
points(alliumcanadense6$Longitude, alliumcanadense6$Latitude, col='dark green', pch=20, cex=2)
dev.off()
|
# Downloaded data from strain collection. Cleaned up by hand first.
gluc_conc_comp_datatable <- read.delim(
  "~/Desktop/gluc_conc_comp_datatable.txt", header=FALSE, na.strings=c("","NA"),
  strip.white=TRUE)
# Rows to discard: species field (V2) is a placeholder ("unknown"/"redo" in
# the capitalisations present in the file) or flags a bacterial
# contaminant, or the glucose column (V3) is missing.  One logical vector
# replaces the original six which() temporaries (one of which shadowed
# base::c).  The removal is guarded because df[-integer(0), ] selects ZERO
# rows in R, so the unguarded original would silently empty the table if
# nothing matched.
drop_row <- gluc_conc_comp_datatable$V2 %in% c("unknown", "Unknown", "UNKNOWN", "redo", "REDO") |
  grepl("bacteria", gluc_conc_comp_datatable$V2) |
  grepl("Bacteria", gluc_conc_comp_datatable$V2) |
  is.na(gluc_conc_comp_datatable$V3)
if (any(drop_row)) {
  gluc_conc_comp_datatable <- gluc_conc_comp_datatable[!drop_row, ]
}
colnames(gluc_conc_comp_datatable) <- c("StrainID", "Species", "gluc_percent")
length(which(gluc_conc_comp_datatable$gluc_percent==8))
#1097 at 8%
length(which(gluc_conc_comp_datatable$gluc_percent==0.8))
#2238 at 0.8%
### Looking for assns in ALL data - not just the dataset we're publishing.
# Run_WY_association_permutations() is a project helper defined elsewhere;
# it returns permutation p-values for the requested column pair.
gluc_conc_perms<-Run_WY_association_permutations(
  gluc_conc_comp_datatable, permutations = 10000,
  colnames_to_permute = c("Species", "gluc_percent"))
gluc_conc_sigs<-gluc_conc_perms[c(1:4)]
# Benjamini-Hochberg FDR correction across all tested associations.
gluc_conc_sigs$FDR<-p.adjust(gluc_conc_sigs$pval, method="BH")
gluc_conc_sigs<-gluc_conc_sigs[order(gluc_conc_sigs$FDR), ]
## TAXA ARE NOT UNIFORMLY DISTRIBUTED AMONGST GLUCOSE CONCENTRATIONS ##
######
raw_WY_dataframe<-read.delim("~/SurveyPaper/data/WY_df_2018-02-08.tsv",
                             header=TRUE, stringsAsFactors=FALSE,
                             na.strings=c("","NA"),
                             strip.white=TRUE)
# merged into raw wy dataframe
gluc_comp<-merge(raw_WY_dataframe,
                 gluc_conc_comp_datatable[c(1,3)], by="StrainID", all=TRUE)
# 496 total isolations (about 25%) of isolations are accompanied by gluc percent data
length(which(gluc_comp$gluc_percent==8))
# only 2 instances of 8% in the entire dataset
length(which(gluc_comp$gluc_percent==0.8))
# 494 instances of .8%
# I can't really do anything with these data.
# Can I assume that isolates without a sugar indication are 8%?
x<-gluc_conc_comp_datatable[which(gluc_conc_comp_datatable$StrainID %in% raw_WY_dataframe$StrainID), ]
gluc_comp<-merge(raw_WY_dataframe,
                 x[c(1,3)], by="StrainID", all=TRUE)
# Impute 8% for isolates with no recorded glucose concentration.
gluc_comp$gluc_percent[which(is.na(gluc_comp$gluc_percent))]<-8
length(which(gluc_comp$gluc_percent==8))
#1468 isolates
length(which(gluc_comp$gluc_percent==0.8))
#still 494 instances of .8%
#assuming 8% for no data isolates - performing permutations for spp-sugar
# NOTE(review): columns are selected by position (22, 29, 32, ...), which
# is fragile if the TSV schema changes -- confirm the indices still map to
# Species / Plant.Genus / Specific / gluc_percent in the current file.
species_gluc_conc_assns<-Run_WY_association_permutations(
  all_observations_dataframe = unique(gluc_comp[c(22, 29, 32)]),
  permutations = 10000,colnames_to_permute = c("Species", "gluc_percent"))
species_gluc_conc_assns<-species_gluc_conc_assns[c(1:4)]
species_gluc_conc_assns$FDR<-p.adjust(species_gluc_conc_assns$pval, method="BH")
species_gluc_conc_assns<-species_gluc_conc_assns[order(species_gluc_conc_assns$FDR), ]
# In this analysis only Torulaspora delbrueckii is significantly ass
# w/ sugar percent (8%)
#Sacch cerevisiae no enrichment in this dataset.
# what about quercus? equally sampled in both?
plntgns_gluc_conc_assns<-Run_WY_association_permutations(
  all_observations_dataframe = unique(gluc_comp[c(22, 15, 32)]),
  permutations = 10000,colnames_to_permute = c("Plant.Genus", "gluc_percent"))
plntgns_gluc_conc_assns<-plntgns_gluc_conc_assns[c(1:4)]
plntgns_gluc_conc_assns$FDR<-p.adjust(plntgns_gluc_conc_assns$pval, method="BH")
plntgns_gluc_conc_assns<-plntgns_gluc_conc_assns[order(plntgns_gluc_conc_assns$FDR), ]
#quercus is equally sampled in both. there are 3 plant genera that are not
#Fagus 8
# Acer 8
# Malus 0.8
#none of these genera are in the significant assn's we found.
spcsub_gluc_conc_assns<-Run_WY_association_permutations(
  all_observations_dataframe = unique(gluc_comp[c(22, 10, 32)]),
  permutations = 10000,colnames_to_permute = c("Specific", "gluc_percent"))
spcsub_gluc_conc_assns<-spcsub_gluc_conc_assns[c(1:4)]
spcsub_gluc_conc_assns$FDR<-p.adjust(spcsub_gluc_conc_assns$pval, method="BH")
spcsub_gluc_conc_assns<-spcsub_gluc_conc_assns[order(spcsub_gluc_conc_assns$FDR), ]
# Bark enriched for 8% samples
# Bark does show up 2x in specific substrate associations
|
/scripts/troubleshooting_gluc_conc_issues.r
|
no_license
|
KatieFish/SurveyPaper
|
R
| false
| false
| 4,355
|
r
|
#Downloaded data from strain collection. Cleaned up by hand first.
gluc_conc_comp_datatable <- read.delim(
"~/Desktop/gluc_conc_comp_datatable.txt", header=FALSE, na.strings=c("","NA"),
strip.white=TRUE)
x<-which(gluc_conc_comp_datatable$V2=="unknown")
y<-which(gluc_conc_comp_datatable$V2=="Unknown")
z<-which(gluc_conc_comp_datatable$V2=="UNKNOWN")
a<-which(gluc_conc_comp_datatable$V2=="redo")
b<-which(gluc_conc_comp_datatable$V2=="REDO")
c<-which(grepl("bacteria", gluc_conc_comp_datatable$V2))
d<-which(grepl("Bacteria", gluc_conc_comp_datatable$V2))
e<-which(is.na(gluc_conc_comp_datatable$V3))
torm<-c(x,y,z,a,b,c,d,e)
gluc_conc_comp_datatable<-gluc_conc_comp_datatable[-torm, ]
colnames(gluc_conc_comp_datatable)<-c("StrainID","Species","gluc_percent")
length(which(gluc_conc_comp_datatable$gluc_percent==8))
#1097 at 8%
length(which(gluc_conc_comp_datatable$gluc_percent==0.8))
#2238 at 0.8%
### Looking for assns in ALL data - not just the dataset we're publishing.
gluc_conc_perms<-Run_WY_association_permutations(
gluc_conc_comp_datatable, permutations = 10000,
colnames_to_permute = c("Species", "gluc_percent"))
gluc_conc_sigs<-gluc_conc_perms[c(1:4)]
gluc_conc_sigs$FDR<-p.adjust(gluc_conc_sigs$pval, method="BH")
gluc_conc_sigs<-gluc_conc_sigs[order(gluc_conc_sigs$FDR), ]
## TAXA ARE NOT UNIFORMLY DISTRIBUTED AMONGST GLUCOSE CONCENTRATIONS ##
######
raw_WY_dataframe<-read.delim("~/SurveyPaper/data/WY_df_2018-02-08.tsv",
header=TRUE, stringsAsFactors=FALSE,
na.strings=c("","NA"),
strip.white=TRUE)
#merged into raw wy dataframe
gluc_comp<-merge(raw_WY_dataframe,
gluc_conc_comp_datatable[c(1,3)], by="StrainID", all=TRUE)
#496 total isolations (about 25%) of isolations are accompanied by gluc percent data
length(which(gluc_comp$gluc_percent==8))
#only 2 instances of 8% in the entire dataset
length(which(gluc_comp$gluc_percent==0.8))
#494 instances of .8%
# I can't really do anything with these data.
#Can I assume that isolates without a sugar indication are 8%?
x<-gluc_conc_comp_datatable[which(gluc_conc_comp_datatable$StrainID %in% raw_WY_dataframe$StrainID), ]
gluc_comp<-merge(raw_WY_dataframe,
x[c(1,3)], by="StrainID", all=TRUE)
gluc_comp$gluc_percent[which(is.na(gluc_comp$gluc_percent))]<-8
length(which(gluc_comp$gluc_percent==8))
#1468 isolates
length(which(gluc_comp$gluc_percent==0.8))
#still 494 instances of .8%
#assuming 8% for no data isolates - performing permutations for spp-sugar
species_gluc_conc_assns<-Run_WY_association_permutations(
all_observations_dataframe = unique(gluc_comp[c(22, 29, 32)]),
permutations = 10000,colnames_to_permute = c("Species", "gluc_percent"))
species_gluc_conc_assns<-species_gluc_conc_assns[c(1:4)]
species_gluc_conc_assns$FDR<-p.adjust(species_gluc_conc_assns$pval, method="BH")
species_gluc_conc_assns<-species_gluc_conc_assns[order(species_gluc_conc_assns$FDR), ]
# In this analysis only Torulaspora delbrueckii is significantly ass
# w/ sugar percent (8%)
#Sacch cerevisiae no enrichment in this dataset.
# what about quercus? equally sampled in both?
plntgns_gluc_conc_assns<-Run_WY_association_permutations(
all_observations_dataframe = unique(gluc_comp[c(22, 15, 32)]),
permutations = 10000,colnames_to_permute = c("Plant.Genus", "gluc_percent"))
plntgns_gluc_conc_assns<-plntgns_gluc_conc_assns[c(1:4)]
plntgns_gluc_conc_assns$FDR<-p.adjust(plntgns_gluc_conc_assns$pval, method="BH")
plntgns_gluc_conc_assns<-plntgns_gluc_conc_assns[order(plntgns_gluc_conc_assns$FDR), ]
#quercus is equally sampled in both. there are 3 plant genera that are not
#Fagus 8
# Acer 8
# Malus 0.8
#none of these genera are in the significant assn's we found.
spcsub_gluc_conc_assns<-Run_WY_association_permutations(
all_observations_dataframe = unique(gluc_comp[c(22, 10, 32)]),
permutations = 10000,colnames_to_permute = c("Specific", "gluc_percent"))
spcsub_gluc_conc_assns<-spcsub_gluc_conc_assns[c(1:4)]
spcsub_gluc_conc_assns$FDR<-p.adjust(spcsub_gluc_conc_assns$pval, method="BH")
spcsub_gluc_conc_assns<-spcsub_gluc_conc_assns[order(spcsub_gluc_conc_assns$FDR), ]
# Bark enriched for 8% samples
# Bark does show up 2x in specific substrate associations
|
# Exploratory Data Analysis -- plot 1: histogram of Global Active Power for
# 2007-02-01 and 2007-02-02 of the UCI household power consumption data.
# The raw file is ";"-separated, marks missing values with "?", and its
# first line is a header (skipped here; explicit names assigned below).
# Declaring na.strings = "?" lets read.table() parse the power columns as
# numeric directly, instead of reading them as text and coercing later
# (the original as.numeric(as.character(...)) round-trip with warnings).
hpcdata <- read.table("household_power_consumption.txt", skip = 1, sep = ";",
                      na.strings = "?")
names(hpcdata) <- c("Date", "Time", "Global_active_power",
                    "Global_reactive_power", "Voltage", "Global_intensity",
                    "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# keep only the two target days (dates are d/m/yyyy strings in the raw file)
subsetdates <- subset(hpcdata, hpcdata$Date == "1/2/2007" | hpcdata$Date == "2/2/2007")
# Red 12-bin histogram of Global Active Power (kilowatts), saved as a
# 480x480 PNG.  NAs are dropped by hist() automatically.
png("plot1.png", width = 480, height = 480)
hist(subsetdates$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", breaks = 12)
dev.off()
|
/plot1.R
|
no_license
|
AnnaHarrington/ExData_Plotting1
|
R
| false
| false
| 746
|
r
|
# plot1.R -- builds a histogram of Global Active Power for 1-2 Feb 2007
# from the UCI household power consumption data set.
# Read the raw data; the header row is skipped and names are re-assigned below.
# NOTE(review): the raw file encodes missing readings as "?", which is not
# declared here via na.strings -- those entries make the power columns read
# as character/factor, hence the as.character/as.numeric round-trip below.
hpcdata <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(hpcdata) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two target dates (dates are stored as d/m/yyyy strings).
subsetdates <- subset(hpcdata,hpcdata$Date=="1/2/2007" | hpcdata$Date =="2/2/2007")
#histogram
#main = "Global Active Power"
#x title= "Global Active Power (kilowatts)" 0 to 6
#y title = "Frequency" 1 to 1200
#12 red bars
# Open the PNG graphics device (480x480 px) before drawing.
png("plot1.png", width=480, height=480)
#plot1
# Coerce to numeric; any "?" entries become NA (hist() drops non-finite values).
activenumeric <- as.numeric(as.character(subsetdates$Global_active_power))
hist(activenumeric, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)", breaks = 12)
# Close the device so plot1.png is written to disk.
dev.off()
|
\encoding{utf-8}
\name{power.scABEL}
\alias{power.scABEL}
\title{
(Empirical) Power of BE decision via scaled (widened) BE acceptance limits
}
\description{
These function performs the power calculation of the BE decision via
scaled (widened) BE acceptance limits by simulations.
}
\usage{
power.scABEL(alpha = 0.05, theta1, theta2, theta0, CV, n,
design = c("2x3x3", "2x2x4", "2x2x3"), regulator,
nsims, details = FALSE, setseed = TRUE)
}
\arguments{
\item{alpha}{
Type I error probability, significance level. Conventionally mostly set to 0.05.
}
\item{theta1}{
Conventional lower ABE limit to be applied in the mixed procedure if
\code{CVsWR <= CVswitch}. Also lower limit for the point estimate constraint.\cr
Defaults to 0.8 if not given explicitly.
}
\item{theta2}{
Conventional upper ABE limit to be applied in the mixed procedure if
\code{CVsWR <= CVswitch}. Also upper limit for the point estimate constraint.\cr
Defaults to 1.25 if not given explicitly.
}
\item{theta0}{
\sQuote{True} or assumed T/R ratio. \cr
Defaults to 0.90 according to the two \enc{Laszlós}{Laszlos} if not given explicitly.
}
\item{CV}{
Intra-subject coefficient(s) of variation as ratio (not percent).
\itemize{
\item If given as a scalar (\code{length(CV)==1}) the \emph{same} CV of Test
and Reference is assumed (homoscedasticity, \code{CVwT==CVwR}).
\item If given as a vector (\code{length(CV)==2}), \emph{i.e.}, assuming
heteroscedasticity, the CV of the Test \strong{must} be given in \code{CV[1]} and the one of the Reference in the \code{CV[2]}.
}
}
\item{n}{
Number of subjects under study.\cr
May be given as vector. In that case it is assumed that \code{n} contains the number
of subjects in the sequence groups.\cr
If \code{n} is given as single number (total sample size) and this number is not
divisible by the number of sequences of the design an unbalanced design is
assumed. A corresponding message is thrown showing the numbers of subjects
in sequence groups.\cr
Attention! In case of the \code{"2x2x3"} (TRT|RTR) design the order of sample sizes is important
if given as vector. \code{n[1]} is for sequence group 'TRT' and \code{n[2]} is for
sequence group 'RTR'.
}
\item{design}{
Design of the study.\cr
\code{"2x3x3"} is the partial replicate design.\cr
\code{"2x2x4"} is a full replicate design with 2 sequences and 4 periods.\cr
\code{"2x2x3"} is a full replicate design with 2 sequences and 3 periods.\cr
Defaults to \code{"2x3x3"}. Details are given in the section about Designs.
}
\item{regulator}{
Regulatory settings for the widening of the BE acceptance limits.\cr
May be given as character from the choices \code{"EMA"}, \code{"HC"}, \code{"FDA"} or as an object of
class 'regSet' (see \code{\link{reg_const}}).\cr
Defaults to \code{regulator="EMA"} if missing.\cr
This argument may be given also in lower case if given as character.\cr
The former \code{regulator="ANVISA"} is no longer allowed. Since 2016 the ANVISA recommends the EMA\enc{’}{'} regulatory settings.
}
\item{nsims}{
Number of simulations to be performed to obtain the empirical power.
Defaults to 100,000 = 1e+05.\cr
If not given and \code{theta0} equals one of the expanded limits (\emph{i.e.}, simulating empirical alpha), defaults to 1e+06.
}
\item{details}{
If set to \code{TRUE} the computational time is shown as well as the components
for the BE decision.\cr
p(BE-wABEL) is the probability that the CI is within (widened) limits.\cr
p(BE-PE) is the probability that the point estimate is within theta1 ... theta2.\cr
p(BE-ABE) is the simulated probability for the conventional ABE test.
}
\item{setseed}{
Simulations are dependent on the starting point of the (pseudo) random number
generator. To avoid differences in power for different runs a \code{set.seed()}
is issued if \code{setseed=TRUE}, the default.
}
}
\details{
The methods rely on the analysis of log-transformed data, \emph{i.e.}, assume a
log-normal distribution on the original scale.\cr\cr
The widened BE acceptance limits will be calculated by the formula\cr
\verb{ [L, U] = exp(-/+ r_const * sWR)}\cr
with \code{r_const} the regulatory constant and \code{sWR} the standard deviation of the within
subjects variability of the Reference. \code{r_const = 0.76} (~log(1.25)/0.29356) is used
in case of \code{regulator="EMA"} or \code{regulator="HC"} and in case of
\code{regulator="FDA"} \code{r_const = 0.89257...} (log(1.25)/0.25).
If the CVwR of the Reference is < CVswitch=0.3 the conventional ABE limits
apply (mixed procedure).\cr\cr
In case of \code{regulator="EMA"} a cap is placed on the widened limits if
CVwR>0.5, \emph{i.e.}, the widened limits are held at value calculated for CVwR=0.5.
In case of \code{regulator="HC"} the capping is done such that the acceptance
limits are 0.6666 ... 1.5 at maximum.\cr\cr
The simulations are done via the distributional properties of the statistical
quantities necessary for deciding BE based on widened ABEL.\cr
For more details see the document \verb{Implementation_scaledABE_simsVx.yy.pdf} in the
\code{/doc} sub-directory of the package.\cr\cr
Function \code{power.scABEL()} implements the simulation via distributional
characteristics of the \sQuote{key} statistics obtained from the EMA recommended
evaluation via ANOVA if \code{regulator="EMA"} or if the regulator component
\code{est_method} is set to \code{"ANOVA"} if regulator is an object of class 'regSet'.\cr
Otherwise the simulations are based on the distributional characteristics of the
\sQuote{key} statistics obtained from evaluation via intra-subject contrasts (ISC),
as recommended by the FDA.
}
\value{
Returns the value of the (empirical) power if argument \code{details=FALSE}.\cr\cr
Returns a named vector if argument \code{details=TRUE}.\cr
p(BE) is the power, p(BE-ABEL) is the power of the widened ABEL criterion alone
and p(BE-pe) is the power of the criterion \sQuote{point estimate within acceptance
range} alone. p(BE-ABE) is the power of the conventional ABE test given for
comparative purposes.
}
\references{
\enc{Tóthfalusi}{Tothfalusi} L, \enc{Endrényi}{Endrenyi} L. \emph{Sample Sizes for Designing Bioequivalence Studies for Highly Variable Drugs.} J Pharm Pharmaceut Sci. 2011;15(1):73--84. \href{https://ejournals.library.ualberta.ca/index.php/JPPS/article/download/11612/9489}{open source}
}
\author{
D. Labes
}
\note{
In case of \code{regulator="FDA"} the (empirical) power is only approximate since
the BE decision method is not exactly what is expected by the FDA. But the \dQuote{Two \enc{Laszlós}{Laszlos}} state that the scABEL method should be \sQuote{operational equivalent} to the
FDA method.\cr
To get the power for the FDA favored method via linearized scaled ABE criterion
use function \code{\link{power.RSABE}}.\cr\cr
In case of \code{regulator="HC"} (based on ISC), power is also only approximate since Health Canada recommends an evaluation via a mixed-model approach. This could only be implemented via
subject data simulations which are very time consuming. But ISC may be a good
substitute.
}
\section{Designs}{
Although some designs are more \sQuote{popular} than others, power calculations are valid for \emph{all} of the following designs:
\tabular{ll}{
\code{"2x2x4"} \tab TRTR | RTRT\cr
\tab TRRT | RTTR\cr
\tab TTRR | RRTT\cr
\code{"2x2x3"} \tab TRT | RTR\cr
\tab TRR | RTT\cr
\code{"2x3x3"} \tab TRR | RTR | RRT
}
}
\section{Warning }{
Cross-validation of the simulations as implemented here and via the \sQuote{classical}
subject data simulation have shown somewhat unsatisfactory results for the
2x3x3 design if the variabilities for Test and Reference are different and/or sequences are extremely unbalanced.\cr
The function \code{power.scABEL()} therefore gives a warning if calculations
with different CVwT and CVwR are requested for the 2x3x3 partial replicate design. For \code{"EMA"} subject simulations are provided in \code{\link{power.scABEL.sdsims}}.
For more details see the above mentioned document \verb{Implementation_scaledABE_simsVy.xx.pdf}.
}
\seealso{
\code{\link{sampleN.scABEL}, \link{power.RSABE}, \link{reg_const}}
}
\examples{
# using all the defaults:
# design="2x3x3", EMA regulatory settings
# PE constraint 0.8-1.25, cap on widening if CV>0.5
# true ratio=0.90, 1E+6 simulations
power.scABEL(CV = 0.4, n = 29)
# should give:
# Unbalanced design. n(i)=10/10/9 assumed.
# [1] 0.66113
#
# with details=TRUE to view the computational time and components
power.scABEL(CV = 0.5, n = 54, theta0 = 1.15, details = TRUE)
# should give (times may differ depending on your machine):
# 1e+05sims. Time elapsed (sec): 0.07
#
# p(BE) p(BE-wABEL) p(BE-pe) p(BE-ABE)
# 0.81727 0.82078 0.85385 0.27542
#
# exploring 'pure ABEL' with the EMA regulatory constant
# (without mixed method, without capping, without pe constraint)
rs <- reg_const("EMA")
rs$CVswitch <- 0
rs$CVcap <- Inf
rs$pe_constr <- FALSE
power.scABEL(CV = 0.5, n = 54, theta0 = 1.15, regulator = rs)
# should give
# [1] 0.8519
}
|
/man/power.scABEL.Rd
|
no_license
|
ShuguangSun/PowerTOST
|
R
| false
| false
| 9,109
|
rd
|
\encoding{utf-8}
\name{power.scABEL}
\alias{power.scABEL}
\title{
(Empirical) Power of BE decision via scaled (widened) BE acceptance limits
}
\description{
This function performs the power calculation of the BE decision via
scaled (widened) BE acceptance limits by simulations.
}
\usage{
power.scABEL(alpha = 0.05, theta1, theta2, theta0, CV, n,
design = c("2x3x3", "2x2x4", "2x2x3"), regulator,
nsims, details = FALSE, setseed = TRUE)
}
\arguments{
\item{alpha}{
Type I error probability, significance level. Conventionally mostly set to 0.05.
}
\item{theta1}{
Conventional lower ABE limit to be applied in the mixed procedure if
\code{CVsWR <= CVswitch}. Also lower limit for the point estimate constraint.\cr
Defaults to 0.8 if not given explicitly.
}
\item{theta2}{
Conventional upper ABE limit to be applied in the mixed procedure if
\code{CVsWR <= CVswitch}. Also upper limit for the point estimate constraint.\cr
Defaults to 1.25 if not given explicitly.
}
\item{theta0}{
\sQuote{True} or assumed T/R ratio. \cr
Defaults to 0.90 according to the two \enc{Laszlós}{Laszlos} if not given explicitly.
}
\item{CV}{
Intra-subject coefficient(s) of variation as ratio (not percent).
\itemize{
\item If given as a scalar (\code{length(CV)==1}) the \emph{same} CV of Test
and Reference is assumed (homoscedasticity, \code{CVwT==CVwR}).
\item If given as a vector (\code{length(CV)==2}), \emph{i.e.}, assuming
heteroscedasticity, the CV of the Test \strong{must} be given in \code{CV[1]} and the one of the Reference in the \code{CV[2]}.
}
}
\item{n}{
Number of subjects under study.\cr
May be given as vector. In that case it is assumed that \code{n} contains the number
of subjects in the sequence groups.\cr
If \code{n} is given as single number (total sample size) and this number is not
divisible by the number of sequences of the design an unbalanced design is
assumed. A corresponding message is thrown showing the numbers of subjects
in sequence groups.\cr
Attention! In case of the \code{"2x2x3"} (TRT|RTR) design the order of sample sizes is important
if given as vector. \code{n[1]} is for sequence group 'TRT' and \code{n[2]} is for
sequence group 'RTR'.
}
\item{design}{
Design of the study.\cr
\code{"2x3x3"} is the partial replicate design.\cr
\code{"2x2x4"} is a full replicate design with 2 sequences and 4 periods.\cr
\code{"2x2x3"} is a full replicate design with 2 sequences and 3 periods.\cr
Defaults to \code{"2x3x3"}. Details are given in the section about Designs.
}
\item{regulator}{
Regulatory settings for the widening of the BE acceptance limits.\cr
May be given as character from the choices \code{"EMA"}, \code{"HC"}, \code{"FDA"} or as an object of
class 'regSet' (see \code{\link{reg_const}}).\cr
Defaults to \code{regulator="EMA"} if missing.\cr
This argument may be given also in lower case if given as character.\cr
The former \code{regulator="ANVISA"} is no longer allowed. Since 2016 the ANVISA recommends the EMA\enc{’}{'} regulatory settings.
}
\item{nsims}{
Number of simulations to be performed to obtain the empirical power.
Defaults to 100,000 = 1e+05.\cr
If not given and \code{theta0} equals one of the expanded limits (\emph{i.e.}, simulating empirical alpha), defaults to 1e+06.
}
\item{details}{
If set to \code{TRUE} the computational time is shown as well as the components
for the BE decision.\cr
p(BE-wABEL) is the probability that the CI is within (widened) limits.\cr
p(BE-PE) is the probability that the point estimate is within theta1 ... theta2.\cr
p(BE-ABE) is the simulated probability for the conventional ABE test.
}
\item{setseed}{
Simulations are dependent on the starting point of the (pseudo) random number
generator. To avoid differences in power for different runs a \code{set.seed()}
is issued if \code{setseed=TRUE}, the default.
}
}
\details{
The methods rely on the analysis of log-transformed data, \emph{i.e.}, assume a
log-normal distribution on the original scale.\cr\cr
The widened BE acceptance limits will be calculated by the formula\cr
\verb{ [L, U] = exp(-/+ r_const * sWR)}\cr
with \code{r_const} the regulatory constant and \code{sWR} the standard deviation of the within
subjects variability of the Reference. \code{r_const = 0.76} (~log(1.25)/0.29356) is used
in case of \code{regulator="EMA"} or \code{regulator="HC"} and in case of
\code{regulator="FDA"} \code{r_const = 0.89257...} (log(1.25)/0.25).
If the CVwR of the Reference is < CVswitch=0.3 the conventional ABE limits
apply (mixed procedure).\cr\cr
In case of \code{regulator="EMA"} a cap is placed on the widened limits if
CVwR>0.5, \emph{i.e.}, the widened limits are held at value calculated for CVwR=0.5.
In case of \code{regulator="HC"} the capping is done such that the acceptance
limits are 0.6666 ... 1.5 at maximum.\cr\cr
The simulations are done via the distributional properties of the statistical
quantities necessary for deciding BE based on widened ABEL.\cr
For more details see the document \verb{Implementation_scaledABE_simsVx.yy.pdf} in the
\code{/doc} sub-directory of the package.\cr\cr
Function \code{power.scABEL()} implements the simulation via distributional
characteristics of the \sQuote{key} statistics obtained from the EMA recommended
evaluation via ANOVA if \code{regulator="EMA"} or if the regulator component
\code{est_method} is set to \code{"ANOVA"} if regulator is an object of class 'regSet'.\cr
Otherwise the simulations are based on the distributional characteristics of the
\sQuote{key} statistics obtained from evaluation via intra-subject contrasts (ISC),
as recommended by the FDA.
}
\value{
Returns the value of the (empirical) power if argument \code{details=FALSE}.\cr\cr
Returns a named vector if argument \code{details=TRUE}.\cr
p(BE) is the power, p(BE-ABEL) is the power of the widened ABEL criterion alone
and p(BE-pe) is the power of the criterion \sQuote{point estimate within acceptance
range} alone. p(BE-ABE) is the power of the conventional ABE test given for
comparative purposes.
}
\references{
\enc{Tóthfalusi}{Tothfalusi} L, \enc{Endrényi}{Endrenyi} L. \emph{Sample Sizes for Designing Bioequivalence Studies for Highly Variable Drugs.} J Pharm Pharmaceut Sci. 2011;15(1):73--84. \href{https://ejournals.library.ualberta.ca/index.php/JPPS/article/download/11612/9489}{open source}
}
\author{
D. Labes
}
\note{
In case of \code{regulator="FDA"} the (empirical) power is only approximate since
the BE decision method is not exactly what is expected by the FDA. But the \dQuote{Two \enc{Laszlós}{Laszlos}} state that the scABEL method should be \sQuote{operational equivalent} to the
FDA method.\cr
To get the power for the FDA favored method via linearized scaled ABE criterion
use function \code{\link{power.RSABE}}.\cr\cr
In case of \code{regulator="HC"} (based on ISC), power is also only approximate since Health Canada recommends an evaluation via a mixed-model approach. This could only be implemented via
subject data simulations which are very time consuming. But ISC may be a good
substitute.
}
\section{Designs}{
Although some designs are more \sQuote{popular} than others, power calculations are valid for \emph{all} of the following designs:
\tabular{ll}{
\code{"2x2x4"} \tab TRTR | RTRT\cr
\tab TRRT | RTTR\cr
\tab TTRR | RRTT\cr
\code{"2x2x3"} \tab TRT | RTR\cr
\tab TRR | RTT\cr
\code{"2x3x3"} \tab TRR | RTR | RRT
}
}
\section{Warning }{
Cross-validation of the simulations as implemented here and via the \sQuote{classical}
subject data simulation have shown somewhat unsatisfactory results for the
2x3x3 design if the variabilities for Test and Reference are different and/or sequences are extremely unbalanced.\cr
The function \code{power.scABEL()} therefore gives a warning if calculations
with different CVwT and CVwR are requested for the 2x3x3 partial replicate design. For \code{"EMA"} subject simulations are provided in \code{\link{power.scABEL.sdsims}}.
For more details see the above mentioned document \verb{Implementation_scaledABE_simsVy.xx.pdf}.
}
\seealso{
\code{\link{sampleN.scABEL}, \link{power.RSABE}, \link{reg_const}}
}
\examples{
# using all the defaults:
# design="2x3x3", EMA regulatory settings
# PE constraint 0.8-1.25, cap on widening if CV>0.5
# true ratio=0.90, 1E+6 simulations
power.scABEL(CV = 0.4, n = 29)
# should give:
# Unbalanced design. n(i)=10/10/9 assumed.
# [1] 0.66113
#
# with details=TRUE to view the computational time and components
power.scABEL(CV = 0.5, n = 54, theta0 = 1.15, details = TRUE)
# should give (times may differ depending on your machine):
# 1e+05sims. Time elapsed (sec): 0.07
#
# p(BE) p(BE-wABEL) p(BE-pe) p(BE-ABE)
# 0.81727 0.82078 0.85385 0.27542
#
# exploring 'pure ABEL' with the EMA regulatory constant
# (without mixed method, without capping, without pe constraint)
rs <- reg_const("EMA")
rs$CVswitch <- 0
rs$CVcap <- Inf
rs$pe_constr <- FALSE
power.scABEL(CV = 0.5, n = 54, theta0 = 1.15, regulator = rs)
# should give
# [1] 0.8519
}
|
# Determine covariate distributions for Nivo Population
# -----------------------------------------------------------------------------
# Create a population that is representative of the data used to develop the
# Nivolumab models shown in:
# Bajaj G, Wang X, Agrawal S, Gupta M, Roy A, Feng Y. Model-Based Population
# Pharmacokinetic Analysis of Nivolumab in Patients With Solid Tumors. CPT
# Pharmacometrics Syst Pharmacol. 2017;6(1):58-66.
# Liu C, Yu J, Li H, Liu J, Xu Y, Song P, et al. Association of time-varying
# clearance of nivolumab with disease dynamics and its implications on
# exposure response analysis. Clin Pharmacol Ther. 2017;101(5):657-66.
# This population is required to meet the following criteria:
# - Must include weight, height, age, sex, ECOG, serum creatinine and albumin
# - Albumin variability representative of metastatic melanoma patients
# - Weight and height are appropriately correlated
# - eGFR as determined by CKD-EPI representative of metastatic melanoma patients
# - Cancer type as metastatic melanoma
# - Unknown ADA assay results
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Prepare work environment
# Clear workspace
rm(list=ls(all=TRUE))
graphics.off()
# Load libraries
library(MASS)
library(MBESS)
library(ggplot2)
# Define colourblind palette and custom palette
cbPalette <- data.frame(
grey = "#999999",
orange = "#E69F00",
skyblue = "#56B4E9",
green = "#009E73",
yellow = "#F0E442",
blue = "#0072B2",
red = "#D55E00",
pink = "#CC79A7",
stringsAsFactors = F
)
# Set ggplot2 theme
theme_bw2 <- theme_set(theme_bw(base_size = 14))
theme_update(plot.title = element_text(hjust = 0.5))
# Load functions utility
source("scripts/functions_utility.R")
# Define known demographic data from Bajaj et al.
# Age - 61.12 years (11.12) [23 - 87]
# Weight - 79.09 kg (19.28) [34 - 168]
# Sex - 66.7% male, 33.3% female
# ECOG - 38.73% 0, 61.26% >0
# Number of individuals
nid <- 100000
# Continuous
mean.AGE <- 61.12
sd.AGE <- 11.12
range.AGE <- c(23, 87)
mean.WT <- 79.09
sd.WT <- 19.28
range.WT <- c(34, 168)
# Categorical
sex.prob <- 0.667 # males
ecog.prob <- 0.6126 # >0
# To determine eGFR height and SeCr must be determined
# eGFR - 78.49 mL/min/1.73m^2 (21.63)
# To determine albumin, values from Datta M et al. used
# Stage III Melanoma - 4.15 g/dl (0.33) [2.5 - 5.1]
# Stage IV Melanoma - 3.92 g/dl (0.45) [2.5 - 5.1]
mean.ALB <- 3.92
sd.ALB <- 0.45
range.ALB <- c(2.5, 5.1)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Determine mean height
# Based off of weight and BMI from Cortellini et al.
# Weight - 71 kg [35 - 139]
# BMI - 24.9 [13.5 - 46.6]
# BMI Category - Underweight 4.1%, Normal 46.3%, Overweight 38.6%, Obese 11%
# As BMI = wt/ht^2; ht = sqrt(wt/bmi)
mean.HT <- sqrt(71/24.9)*100
# Determine standard deviation of height distribution
# Create vectors for mean, sd and range (upper and lower)
# Range required as a truncated multivariate distribution is used
mean.WT.HT <- c(71, mean.HT)
sd.WT.HT <- c(16, 10)
lower.WT.HT <- c(range.WT[1], 150)
upper.WT.HT <- c(range.WT[2], 200)
# Set up correlation matrix for height and weight
corr.WT.HT <- matrix(c(
1.0, 0.7,
0.7, 1.0
), nrow = 2, ncol = 2) # symmetrical correlation matrix
# Use trunc.mvrnorm function
corrsim <- trunc.mvrnorm(n = nid, mean = mean.WT.HT, sd = sd.WT.HT,
corr_mat = corr.WT.HT, lower = lower.WT.HT, upper = upper.WT.HT, log = T)
# Determine BMI for samples
samp.bmi <- apply(corrsim, 1, bmi.fn)
# Look at results
hist(corrsim[,2])
# Sanity check: echo the target moments used for the weight/height simulation.
c(
  mean.WT = 71,
  sd.WT = 16,
  mean.HT = mean.HT,
  sd.HT = sd.WT.HT[2]
)
# Percentage of simulated subjects in each BMI category, to be compared with
# Cortellini et al.: Underweight 4.1%, Normal 46.3%, Overweight 38.6%, Obese 11%.
# Generalized: proportions are computed from the sample itself via mean() of a
# logical mask rather than the hard-coded divisor 1000, which was only a
# percentage when nid == 100000. Values are identical at the current nid but
# stay correct if nid is changed.
c(
  "UW" = 100 * mean(samp.bmi <= 18.5),
  "N"  = 100 * mean(samp.bmi > 18.5 & samp.bmi <= 25),
  "OW" = 100 * mean(samp.bmi > 25 & samp.bmi < 30),
  "O"  = 100 * mean(samp.bmi >= 30)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Determine mean serum creatinine
# CKD-EPI equation used to determine eGFR
# eGFR = 141 * min(\frac{[Cr]}{\kappa}, 1)^{\alpha} *
# max(\frac{[Cr]}{\kappa},1)^{-1.209} * 0.993^{Age} *
# 1.018^{female} * 1.159^{black} * 1.73/BSA
# When substituting [Cr] for 0.9 and 0.7 for males and females respectively
# the mean eGFR is underpredicted, allowing for the following simplification
# eGFR = 141 * \bigg(\frac{[Cr]}{\kappa}\bigg)^{-1.209} *
# 0.993^{age} * 1.018^{female} * 1.159^{black}
# And therefore you can solve for [Cr]
# [Cr] = \kappa * \bigg(\frac{eGFR}
# {141*0.993^{age}*1.018^{female}*1.159^{black}}\bigg)^{-\frac{1}{1.209}}
# The weighted mean can be determined using the mean serum creatinine determined
# for both males and females along with the proportion of males to females
# from Bajaj et al.
# While determination of the mean for serum creatinine can be analytical, the
# standard deviation and range are determined empirically, by altering values
# for SECR and HT until eGFR matches the distributions from Bajaj et al.
# Define demographic data
# Some objects have been defined above
mean.SECR <- 78.3
sd.SECR <- 30
range.SECR <- c(0, 200)
mean.eGFR <- 78.49
sd.eGFR <- 21.63
mean.HT <- 168.86
sd.HT <- 10
# Weight and Height - multivariate log-normal distribution
# Some objects have been defined above
mean.WT.HT <- c(mean.WT, mean.HT)
sd.WT.HT <- c(sd.WT, sd.HT)
corrsim <- trunc.mvrnorm(n = nid, mean = mean.WT.HT, sd = sd.WT.HT,
corr_mat = corr.WT.HT, lower = lower.WT.HT, upper = upper.WT.HT, log = T)
WT <- corrsim[,1]
HT <- corrsim[,2]
# Age - normal distribution, truncated to the observed range from Bajaj et al.
AGE <- trunc.rnorm(n = nid, mean = mean.AGE, sd = sd.AGE, range = range.AGE)
# Sex - binomial distribution
# NOTE(review): sex.prob is documented above as the proportion of MALES, so
# SEX == 1 presumably codes male here. Verify that ckdepi.fn() below expects
# that coding for its sex argument -- CKD-EPI applies a 1.018 factor for
# females, and a flipped indicator would bias every simulated eGFR.
SEX <- rbinom(nid, 1, sex.prob)
# Serum Creatinine - log-normal distribution (log = T), truncated to 0-200
SECR <- trunc.rnorm(n = nid, mean = mean.SECR, sd = sd.SECR,
range = range.SECR, log = T)
# eGFR computed per subject via CKD-EPI. The final argument 0 presumably
# encodes the "black" race indicator as absent -- confirm against the
# ckdepi.fn() definition in scripts/functions_utility.R.
cov.df <- data.frame(WT, HT, AGE, SEX, SECR)
cov.df$eGFR <- apply(cov.df, 1, function(df) {
ckdepi.fn(df["SECR"], df["AGE"], df["SEX"], 0)
})
cov.df$GFR <- trunc.rnorm(n = nid, mean = mean.eGFR, sd = sd.eGFR,
range = c(0, 150))
p <- NULL
p <- ggplot(data = cov.df)
p <- p + geom_histogram(aes(eGFR, fill = cbPalette$red),
bins = 30, alpha = 0.5)
p <- p + geom_histogram(aes(GFR, fill = cbPalette$blue),
bins = 30, alpha = 0.5)
p <- p + scale_fill_identity(name = "Dist", guide = "legend",
labels = c("Samp. GFR", "Calc. GFR"))
p
# So what does the relationship between serum creatinine and eGFR look like
p <- NULL
p <- ggplot(data = cov.df[cov.df$SEX == 0,])
p <- p + geom_point(aes(x = SECR, y = eGFR), colour = cbPalette$red,
shape = 1, alpha = 0.25)
p <- p + scale_x_continuous("Serum Creatinine (umol/L)")
p
# Maybe the correlation between required covariates and eGFR can be obtained
# from this work and used to sample from a large multivariate distribution
with(cov.df, cor.test(WT, eGFR))
|
/scripts/190318_Demog_Sampling.R
|
no_license
|
jhhughes256/nivo_sim
|
R
| false
| false
| 7,187
|
r
|
# Determine covariate distributions for Nivo Population
# -----------------------------------------------------------------------------
# Create a population that is representative of the data used to develop the
# Nivolumab models shown in:
# Bajaj G, Wang X, Agrawal S, Gupta M, Roy A, Feng Y. Model-Based Population
# Pharmacokinetic Analysis of Nivolumab in Patients With Solid Tumors. CPT
# Pharmacometrics Syst Pharmacol. 2017;6(1):58-66.
# Liu C, Yu J, Li H, Liu J, Xu Y, Song P, et al. Association of time-varying
# clearance of nivolumab with disease dynamics and its implications on
# exposure response analysis. Clin Pharmacol Ther. 2017;101(5):657-66.
# This population is required to meet the following criteria:
# - Must include weight, height, age, sex, ECOG, serum creatinine and albumin
# - Albumin variability representative of metastatic melanoma patients
# - Weight and height are appropriately correlated
# - eGFR as determined by CKD-EPI representative of metastatic melanoma patients
# - Cancer type as metastatic melanoma
# - Unknown ADA assay results
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Prepare work environment
# Clear workspace
rm(list=ls(all=TRUE))
graphics.off()
# Load libraries
library(MASS)
library(MBESS)
library(ggplot2)
# Define colourblind palette and custom palette
cbPalette <- data.frame(
grey = "#999999",
orange = "#E69F00",
skyblue = "#56B4E9",
green = "#009E73",
yellow = "#F0E442",
blue = "#0072B2",
red = "#D55E00",
pink = "#CC79A7",
stringsAsFactors = F
)
# Set ggplot2 theme
theme_bw2 <- theme_set(theme_bw(base_size = 14))
theme_update(plot.title = element_text(hjust = 0.5))
# Load functions utility
source("scripts/functions_utility.R")
# Define known demographic data from Bajaj et al.
# Age - 61.12 years (11.12) [23 - 87]
# Weight - 79.09 kg (19.28) [34 - 168]
# Sex - 66.7% male, 33.3% female
# ECOG - 38.73% 0, 61.26% >0
# Number of individuals
nid <- 100000
# Continuous
mean.AGE <- 61.12
sd.AGE <- 11.12
range.AGE <- c(23, 87)
mean.WT <- 79.09
sd.WT <- 19.28
range.WT <- c(34, 168)
# Categorical
sex.prob <- 0.667 # males
ecog.prob <- 0.6126 # >0
# To determine eGFR height and SeCr must be determined
# eGFR - 78.49 mL/min/1.73m^2 (21.63)
# To determine albumin, values from Datta M et al. used
# Stage III Melanoma - 4.15 g/dl (0.33) [2.5 - 5.1]
# Stage IV Melanoma - 3.92 g/dl (0.45) [2.5 - 5.1]
mean.ALB <- 3.92
sd.ALB <- 0.45
range.ALB <- c(2.5, 5.1)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Determine mean height
# Based off of weight and BMI from Cortellini et al.
# Weight - 71 kg [35 - 139]
# BMI - 24.9 [13.5 - 46.6]
# BMI Category - Underweight 4.1%, Normal 46.3%, Overweight 38.6%, Obese 11%
# As BMI = wt/ht^2; ht = sqrt(wt/bmi)
mean.HT <- sqrt(71/24.9)*100
# Determine standard deviation of height distribution
# Create vectors for mean, sd and range (upper and lower)
# Range required as a truncated multivariate distribution is used
mean.WT.HT <- c(71, mean.HT)
sd.WT.HT <- c(16, 10)
lower.WT.HT <- c(range.WT[1], 150)
upper.WT.HT <- c(range.WT[2], 200)
# Set up correlation matrix for height and weight
corr.WT.HT <- matrix(c(
1.0, 0.7,
0.7, 1.0
), nrow = 2, ncol = 2) # symmetrical correlation matrix
# Use trunc.mvrnorm function
corrsim <- trunc.mvrnorm(n = nid, mean = mean.WT.HT, sd = sd.WT.HT,
corr_mat = corr.WT.HT, lower = lower.WT.HT, upper = upper.WT.HT, log = T)
# Determine BMI for samples
samp.bmi <- apply(corrsim, 1, bmi.fn)
# Look at results
hist(corrsim[,2])
# Sanity check: echo the target moments used for the weight/height simulation.
c(
  mean.WT = 71,
  sd.WT = 16,
  mean.HT = mean.HT,
  sd.HT = sd.WT.HT[2]
)
# Percentage of simulated subjects in each BMI category, to be compared with
# Cortellini et al.: Underweight 4.1%, Normal 46.3%, Overweight 38.6%, Obese 11%.
# Generalized: proportions are computed from the sample itself via mean() of a
# logical mask rather than the hard-coded divisor 1000, which was only a
# percentage when nid == 100000. Values are identical at the current nid but
# stay correct if nid is changed.
c(
  "UW" = 100 * mean(samp.bmi <= 18.5),
  "N"  = 100 * mean(samp.bmi > 18.5 & samp.bmi <= 25),
  "OW" = 100 * mean(samp.bmi > 25 & samp.bmi < 30),
  "O"  = 100 * mean(samp.bmi >= 30)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Determine mean serum creatinine
# CKD-EPI equation used to determine eGFR
# eGFR = 141 * min(\frac{[Cr]}{\kappa}, 1)^{\alpha} *
# max(\frac{[Cr]}{\kappa},1)^{-1.209} * 0.993^{Age} *
# 1.018^{female} * 1.159^{black} * 1.73/BSA
# When substituting [Cr] for 0.9 and 0.7 for males and females respectively
# the mean eGFR is underpredicted, allowing for the following simplification
# eGFR = 141 * \bigg(\frac{[Cr]}{\kappa}\bigg)^{-1.209} *
# 0.993^{age} * 1.018^{female} * 1.159^{black}
# And therefore you can solve for [Cr]
# [Cr] = \kappa * \bigg(\frac{eGFR}
# {141*0.993^{age}*1.018^{female}*1.159^{black}}\bigg)^{-\frac{1}{1.209}}
# The weighted mean can be determined using the mean serum creatinine determined
# for both males and females along with the proportion of males to females
# from Bajaj et al.
# While determination of the mean for serum creatinine can be analytical, the
# standard deviation and range are determined empirically, by altering values
# for SECR and HT until eGFR matches the distributions from Bajaj et al.
# Define demographic data
# Some objects have been defined above
mean.SECR <- 78.3
sd.SECR <- 30
range.SECR <- c(0, 200)
mean.eGFR <- 78.49
sd.eGFR <- 21.63
mean.HT <- 168.86
sd.HT <- 10
# Weight and Height - multivariate log-normal distribution
# Some objects have been defined above
mean.WT.HT <- c(mean.WT, mean.HT)
sd.WT.HT <- c(sd.WT, sd.HT)
corrsim <- trunc.mvrnorm(n = nid, mean = mean.WT.HT, sd = sd.WT.HT,
corr_mat = corr.WT.HT, lower = lower.WT.HT, upper = upper.WT.HT, log = T)
WT <- corrsim[,1]
HT <- corrsim[,2]
# Age - normal distribution
AGE <- trunc.rnorm(n = nid, mean = mean.AGE, sd = sd.AGE, range = range.AGE)
# Sex - binomial distribution
SEX <- rbinom(nid, 1, sex.prob)
# Serum Creatinine - log-normal distribution
SECR <- trunc.rnorm(n = nid, mean = mean.SECR, sd = sd.SECR,
range = range.SECR, log = T)
# eGFR
cov.df <- data.frame(WT, HT, AGE, SEX, SECR)
cov.df$eGFR <- apply(cov.df, 1, function(df) {
ckdepi.fn(df["SECR"], df["AGE"], df["SEX"], 0)
})
cov.df$GFR <- trunc.rnorm(n = nid, mean = mean.eGFR, sd = sd.eGFR,
range = c(0, 150))
p <- NULL
p <- ggplot(data = cov.df)
p <- p + geom_histogram(aes(eGFR, fill = cbPalette$red),
bins = 30, alpha = 0.5)
p <- p + geom_histogram(aes(GFR, fill = cbPalette$blue),
bins = 30, alpha = 0.5)
p <- p + scale_fill_identity(name = "Dist", guide = "legend",
labels = c("Samp. GFR", "Calc. GFR"))
p
# So what does the relationship between serum creatinine and eGFR look like
p <- NULL
p <- ggplot(data = cov.df[cov.df$SEX == 0,])
p <- p + geom_point(aes(x = SECR, y = eGFR), colour = cbPalette$red,
shape = 1, alpha = 0.25)
p <- p + scale_x_continuous("Serum Creatinine (umol/L)")
p
# Maybe the correlation between required covariates and eGFR can be obtained
# from this work and used to sample from a large multivariate distribution
with(cov.df, cor.test(WT, eGFR))
|
#' Fitted functional sample from FPCA object
#'
#' Combine the zero-meaned fitted values and the interpolated mean to get the fitted values for the trajectories or the derivatives of these trajectories.
#'
#' @param object A object of class FPCA returned by the function FPCA().
#' @param k The integer number of the first k components used for the representation. (default: length(fpcaObj$lambda ))
#' @param derOptns A list of options to control the derivation parameters specified by \code{list(name=value)}. See `Details'. (default = NULL)
#'
#' @details Available derivation control options are
#' \describe{
#' \item{p}{The order of the derivatives returned (default: 0, max: 2)}
#' \item{method}{The method used to produce the sample of derivatives ('EIG' (default) or 'QUO'). See Liu and Mueller (2009) for more details}
#' \item{bw}{Bandwidth for smoothing the derivatives (default: p * 0.10 * S)}
#'  \item{kernelType}{Smoothing kernel choice; the same types as are available in FPCA(). default('epan')}
#' }
#' @param ... Additional arguments
#'
#' @examples
#' set.seed(1)
#' n <- 20
#' pts <- seq(0, 1, by=0.05)
#' sampWiener <- Wiener(n, pts)
#' sampWiener <- Sparsify(sampWiener, pts, 10)
#' res <- FPCA(sampWiener$yList, sampWiener$tList,
#' list(dataType='Sparse', error=FALSE, kernel='epan', verbose=TRUE))
#' fittedY <- fitted(res)
#' @references
#' \cite{Liu, Bitao, and Hans-Georg Mueller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)}
#' @export
fitted.FPCA <- function (object, k = NULL, derOptns = list(), ...) {
  # Resolve the derivation options (p, method, bw, kernelType) against the
  # defaults stored in / derived from the FPCA object.
  derOptns <- SetDerOptions(fpcaObject = object, derOptns)
  p <- derOptns[['p']]            # derivative order requested (0, 1 or 2)
  method <- derOptns[['method']]  # 'EIG' or 'QUO'
  bw <- derOptns[['bw']]          # bandwidth for smoothing the derivatives
  kernelType <- derOptns[['kernelType']]

  fpcaObj <- object
  # inherits() instead of class(x) != 'FPCA': class() may return a vector of
  # classes, in which case the `!=` comparison warns/misbehaves.
  if (!inherits(fpcaObj, 'FPCA')) {
    stop("fitted.FPCA() requires an FPCA class object as basic input")
  }

  # Default k to all available components; otherwise validate the request.
  if (is.null(k)) {
    k <- length(fpcaObj$lambda)
  } else if ((round(k) >= 1) && (round(k) <= length(fpcaObj$lambda))) {
    k <- round(k)
  } else {
    stop("'fitted.FPCA()' is requested to use more components than it currently has available. (or 'k' is smaller than 1)")
  }

  if (!(p %in% c(0, 1, 2))) {
    stop("'fitted.FPCA()' is requested to use a derivative order other than p = {0,1,2}!")
  }

  if (p < 1) {
    # p == 0: fitted trajectories = interpolated mean + scores x eigenfunctions.
    ZMFV <- fpcaObj$xiEst[, 1:k, drop = FALSE] %*% t(fpcaObj$phi[, 1:k, drop = FALSE])
    IM <- fpcaObj$mu
    return(t(apply(ZMFV, 1, function(x) x + IM)))
  } else { # Derivative is not zero
    if (k > SelectK(fpcaObj, FVEthreshold = 0.95, criterion = 'FVE')$k) {
      warning("Potentially you use too many components to estimate derivatives. \n Consider using SelectK() to find a more informed estimate for 'k'.")
    }
    if (is.null(method)) {
      method <- 'EIG'
    }

    mu <- fpcaObj$mu
    phi <- fpcaObj$phi
    workGrid <- fpcaObj$workGrid

    if (method == 'EIG') {
      # Differentiate the eigenfunctions and the mean, then recombine.
      phi <- apply(phi, 2, function(phiI) Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)),
        xin = workGrid, yin = phiI, xout = workGrid, npoly = p, nder = p))
      mu <- Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)), xin = workGrid, yin = mu, xout = workGrid, npoly = p, nder = p)
      ZMFV <- fpcaObj$xiEst[, 1:k, drop = FALSE] %*% t(phi[, 1:k, drop = FALSE])
      IM <- mu
      return(t(apply(ZMFV, 1, function(x) x + IM)))
    }
    if (method == 'QUO') {
      # Differentiate the fitted curves themselves (recursive call with p = 0).
      impSample <- fitted(fpcaObj, k = k)
      return(t(apply(impSample, 1, function(curve) Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)),
        xin = workGrid, yin = curve, xout = workGrid, npoly = p, nder = p))))
    }
  }
  # Reached only when 'method' is neither 'EIG' nor 'QUO'.
  stop('You asked for a derivation scheme that is not implemented.')
}
getEnlargedGrid <- function(x) {
  # Pad the grid with one extra point on each side, offset by 10% of the
  # adjacent grid spacing, so derivatives are well-defined at the endpoints.
  n <- length(x)
  left <- x[1] - 0.1 * (x[2] - x[1])
  right <- x[n] + 0.1 * (x[n] - x[n - 1])
  c(left, x, right)
}
getDerivative <- function(y, t, ord=1){ # Consider using the smoother to get the derivatives
  # Numerical derivative of y observed on grid t, via a spline interpolant.
  # ord: 1 for the first derivative, 2 for the second derivative.
  if (length(y) != length(t)) {
    stop("getDerivative y/t lengths are unequal.")
  }
  # Fail fast on unsupported orders: the original fell through with 'der'
  # undefined and raised an obscure "object 'der' not found" error.
  if (!(ord %in% c(1, 2))) {
    stop("getDerivative supports only ord = 1 or ord = 2.")
  }
  # Enlarge the grid slightly so first derivatives exist at both endpoints.
  newt <- getEnlargedGrid(t)
  newy <- Hmisc::approxExtrap(x = t, y = y, xout = newt)$y
  if (ord == 1) {
    der <- numDeriv::grad(stats::splinefun(newt, newy), x = t)
  } else {
    der <- sapply(t, function(t0)
      numDeriv::hessian(stats::splinefun(newt, newy), x = t0)
    )
  }
  return(der)
}
getSmoothCurve <- function(t, ft, GCV = FALSE, kernelType = 'epan', mult = 1){
  # Smooth ft over t with a local linear smoother; bandwidth is chosen by
  # GCV when GCV = TRUE, otherwise by 10-fold cross-validation.
  # Plain if/else instead of ifelse(): the condition is a scalar flag, and
  # ifelse() on scalars is the wrong tool (it is vectorized and strips
  # attributes from its result).
  if (GCV) {
    myBw <- GCVLwls1D1(yy = ft, tt = t, npoly = 1, nder = 0, dataType = 'Sparse', kernel = kernelType)[['bOpt']]
  } else {
    myBw <- CVLwls1D(y = ft, t = t, npoly = 1, nder = 0, dataType = 'Sparse', kernel = kernelType, kFolds = 10)
  }
  # mult lets the caller inflate/deflate the automatically selected bandwidth.
  myBw <- myBw * mult
  smoothCurve <- Lwls1D(bw = myBw, kernel_type = kernelType, win = rep(1, length(t)), yin = ft, xout = t, xin = t)
  return(smoothCurve)
}
|
/fdapace/R/fitted.FPCA.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 5,223
|
r
|
#' Fitted functional sample from FPCA object
#'
#' Combine the zero-meaned fitted values and the interpolated mean to get the fitted values for the trajectories or the derivatives of these trajectories.
#'
#' @param object A object of class FPCA returned by the function FPCA().
#' @param k The integer number of the first k components used for the representation. (default: length(fpcaObj$lambda ))
#' @param derOptns A list of options to control the derivation parameters specified by \code{list(name=value)}. See `Details'. (default = NULL)
#'
#' @details Available derivation control options are
#' \describe{
#' \item{p}{The order of the derivatives returned (default: 0, max: 2)}
#' \item{method}{The method used to produce the sample of derivatives ('EIG' (default) or 'QUO'). See Liu and Mueller (2009) for more details}
#' \item{bw}{Bandwidth for smoothing the derivatives (default: p * 0.10 * S)}
#'  \item{kernelType}{Smoothing kernel choice; the same types as are available in FPCA(). default('epan')}
#' }
#' @param ... Additional arguments
#'
#' @examples
#' set.seed(1)
#' n <- 20
#' pts <- seq(0, 1, by=0.05)
#' sampWiener <- Wiener(n, pts)
#' sampWiener <- Sparsify(sampWiener, pts, 10)
#' res <- FPCA(sampWiener$yList, sampWiener$tList,
#' list(dataType='Sparse', error=FALSE, kernel='epan', verbose=TRUE))
#' fittedY <- fitted(res)
#' @references
#' \cite{Liu, Bitao, and Hans-Georg Mueller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)}
#' @export
fitted.FPCA <- function (object, k = NULL, derOptns = list(), ...) {
  # Resolve the derivation options (p, method, bw, kernelType) against the
  # defaults stored in / derived from the FPCA object.
  derOptns <- SetDerOptions(fpcaObject = object, derOptns)
  p <- derOptns[['p']]            # derivative order requested (0, 1 or 2)
  method <- derOptns[['method']]  # 'EIG' or 'QUO'
  bw <- derOptns[['bw']]          # bandwidth for smoothing the derivatives
  kernelType <- derOptns[['kernelType']]

  fpcaObj <- object
  # inherits() instead of class(x) != 'FPCA': class() may return a vector of
  # classes, in which case the `!=` comparison warns/misbehaves.
  if (!inherits(fpcaObj, 'FPCA')) {
    stop("fitted.FPCA() requires an FPCA class object as basic input")
  }

  # Default k to all available components; otherwise validate the request.
  if (is.null(k)) {
    k <- length(fpcaObj$lambda)
  } else if ((round(k) >= 1) && (round(k) <= length(fpcaObj$lambda))) {
    k <- round(k)
  } else {
    stop("'fitted.FPCA()' is requested to use more components than it currently has available. (or 'k' is smaller than 1)")
  }

  if (!(p %in% c(0, 1, 2))) {
    stop("'fitted.FPCA()' is requested to use a derivative order other than p = {0,1,2}!")
  }

  if (p < 1) {
    # p == 0: fitted trajectories = interpolated mean + scores x eigenfunctions.
    ZMFV <- fpcaObj$xiEst[, 1:k, drop = FALSE] %*% t(fpcaObj$phi[, 1:k, drop = FALSE])
    IM <- fpcaObj$mu
    return(t(apply(ZMFV, 1, function(x) x + IM)))
  } else { # Derivative is not zero
    if (k > SelectK(fpcaObj, FVEthreshold = 0.95, criterion = 'FVE')$k) {
      warning("Potentially you use too many components to estimate derivatives. \n Consider using SelectK() to find a more informed estimate for 'k'.")
    }
    if (is.null(method)) {
      method <- 'EIG'
    }

    mu <- fpcaObj$mu
    phi <- fpcaObj$phi
    workGrid <- fpcaObj$workGrid

    if (method == 'EIG') {
      # Differentiate the eigenfunctions and the mean, then recombine.
      phi <- apply(phi, 2, function(phiI) Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)),
        xin = workGrid, yin = phiI, xout = workGrid, npoly = p, nder = p))
      mu <- Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)), xin = workGrid, yin = mu, xout = workGrid, npoly = p, nder = p)
      ZMFV <- fpcaObj$xiEst[, 1:k, drop = FALSE] %*% t(phi[, 1:k, drop = FALSE])
      IM <- mu
      return(t(apply(ZMFV, 1, function(x) x + IM)))
    }
    if (method == 'QUO') {
      # Differentiate the fitted curves themselves (recursive call with p = 0).
      impSample <- fitted(fpcaObj, k = k)
      return(t(apply(impSample, 1, function(curve) Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)),
        xin = workGrid, yin = curve, xout = workGrid, npoly = p, nder = p))))
    }
  }
  # Reached only when 'method' is neither 'EIG' nor 'QUO'.
  stop('You asked for a derivation scheme that is not implemented.')
}
getEnlargedGrid <- function(x) {
  # Pad the grid with one extra point on each side, offset by 10% of the
  # adjacent grid spacing, so derivatives are well-defined at the endpoints.
  n <- length(x)
  left <- x[1] - 0.1 * (x[2] - x[1])
  right <- x[n] + 0.1 * (x[n] - x[n - 1])
  c(left, x, right)
}
getDerivative <- function(y, t, ord=1){ # Consider using the smoother to get the derivatives
  # Numerical derivative of y observed on grid t, via a spline interpolant.
  # ord: 1 for the first derivative, 2 for the second derivative.
  if (length(y) != length(t)) {
    stop("getDerivative y/t lengths are unequal.")
  }
  # Fail fast on unsupported orders: the original fell through with 'der'
  # undefined and raised an obscure "object 'der' not found" error.
  if (!(ord %in% c(1, 2))) {
    stop("getDerivative supports only ord = 1 or ord = 2.")
  }
  # Enlarge the grid slightly so first derivatives exist at both endpoints.
  newt <- getEnlargedGrid(t)
  newy <- Hmisc::approxExtrap(x = t, y = y, xout = newt)$y
  if (ord == 1) {
    der <- numDeriv::grad(stats::splinefun(newt, newy), x = t)
  } else {
    der <- sapply(t, function(t0)
      numDeriv::hessian(stats::splinefun(newt, newy), x = t0)
    )
  }
  return(der)
}
getSmoothCurve <- function(t, ft, GCV = FALSE, kernelType = 'epan', mult = 1){
  # Smooth ft over t with a local linear smoother; bandwidth is chosen by
  # GCV when GCV = TRUE, otherwise by 10-fold cross-validation.
  # Plain if/else instead of ifelse(): the condition is a scalar flag, and
  # ifelse() on scalars is the wrong tool (it is vectorized and strips
  # attributes from its result).
  if (GCV) {
    myBw <- GCVLwls1D1(yy = ft, tt = t, npoly = 1, nder = 0, dataType = 'Sparse', kernel = kernelType)[['bOpt']]
  } else {
    myBw <- CVLwls1D(y = ft, t = t, npoly = 1, nder = 0, dataType = 'Sparse', kernel = kernelType, kFolds = 10)
  }
  # mult lets the caller inflate/deflate the automatically selected bandwidth.
  myBw <- myBw * mult
  smoothCurve <- Lwls1D(bw = myBw, kernel_type = kernelType, win = rep(1, length(t)), yin = ft, xout = t, xin = t)
  return(smoothCurve)
}
|
# Download and unpack the raw data only if it is not already present
if (!file.exists("household_power_consumption.txt")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="household_power_consumption.zip", method="curl")
  unzip("household_power_consumption.zip")
}
# Read in the dataset (kind of big, so it will take a minute).
# read.csv2 matches the file's ";" delimiter; NOTE(review): it also assumes
# dec = "," while this file uses "." decimals, so numeric columns arrive as
# character/factor -- hence the as.numeric(as.character(...)) calls below.
epc <- read.csv2("household_power_consumption.txt", na.strings="?")
# Subset the two dates we are interested in (1-2 Feb 2007)
targetdata <- epc[(epc$Date == "1/2/2007" | epc$Date == "2/2/2007"),]
# Create the 4th graph: a 2x2 panel of four time-series plots
par(mfrow=c(2,2))
# Panel 1: global active power
# NOTE(review): the /500 rescaling looks like a workaround for as.numeric()
# on a factor returning level indices rather than values -- confirm.
plot(as.numeric(targetdata$Global_active_power)/500, ylab="Global Active Power", xlab="", type="l", xaxt="n")
# Label the x axis with day names at the start, middle and end of the range
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
# Panel 2: voltage
plot(as.numeric(as.character(targetdata$Voltage)), type="l", ylab="Voltage", xlab="datetime", xaxt="n")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
# Panel 3: the three energy sub-metering series
plot(as.numeric(as.character(targetdata$Sub_metering_1)), col="black", type="l", xlab="", ylab="Energy sub metering", xaxt="n")
# overlay second series
lines(as.numeric(as.character(targetdata$Sub_metering_2)), col="red")
# overlay third series
lines(as.numeric(as.character(targetdata$Sub_metering_3)), col="blue")
# label the x axis
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
# add legend identifying the three series
legend("topright", col= c("black", "red", "blue"), lty="solid", bty="n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Panel 4: global reactive power
plot(as.numeric(as.character(targetdata$Global_reactive_power)), type="l", ylab="Global_reactive_power", xlab="datetime", xaxt="n")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
# Re-draw the same four panels into a PNG device
png(filename="plot4.png")
par(mfrow=c(2,2))
plot(as.numeric(targetdata$Global_active_power)/500, ylab="Global Active Power", xlab="", type="l", xaxt="n")
# Fix: the original PNG section omitted the day-name axis on panel 1,
# making the PNG differ from the on-screen version.
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
plot(as.numeric(as.character(targetdata$Voltage)), type="l", ylab="Voltage", xlab="datetime", xaxt="n")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
plot(as.numeric(as.character(targetdata$Sub_metering_1)), col="black", type="l", xlab="", ylab="Energy sub metering", xaxt="n")
lines(as.numeric(as.character(targetdata$Sub_metering_2)), col="red")
lines(as.numeric(as.character(targetdata$Sub_metering_3)), col="blue")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
legend("topright", col= c("black", "red", "blue"), lty="solid", bty="n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(as.numeric(as.character(targetdata$Global_reactive_power)), type="l", ylab="Global_reactive_power", xlab="datetime", xaxt="n")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
dev.off()
|
/plot4.R
|
no_license
|
eldonjenkins/ExData_Plotting1
|
R
| false
| false
| 2,913
|
r
|
# Download and unpack the raw data only if it is not already present
if (!file.exists("household_power_consumption.txt")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="household_power_consumption.zip", method="curl")
  unzip("household_power_consumption.zip")
}
# Read in the dataset (kind of big, so it will take a minute).
# read.csv2 matches the file's ";" delimiter; NOTE(review): it also assumes
# dec = "," while this file uses "." decimals, so numeric columns arrive as
# character/factor -- hence the as.numeric(as.character(...)) calls below.
epc <- read.csv2("household_power_consumption.txt", na.strings="?")
# Subset the two dates we are interested in (1-2 Feb 2007)
targetdata <- epc[(epc$Date == "1/2/2007" | epc$Date == "2/2/2007"),]
# Create the 4th graph: a 2x2 panel of four time-series plots
par(mfrow=c(2,2))
# Panel 1: global active power
# NOTE(review): the /500 rescaling looks like a workaround for as.numeric()
# on a factor returning level indices rather than values -- confirm.
plot(as.numeric(targetdata$Global_active_power)/500, ylab="Global Active Power", xlab="", type="l", xaxt="n")
# Label the x axis with day names at the start, middle and end of the range
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
# Panel 2: voltage
plot(as.numeric(as.character(targetdata$Voltage)), type="l", ylab="Voltage", xlab="datetime", xaxt="n")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
# Panel 3: the three energy sub-metering series
plot(as.numeric(as.character(targetdata$Sub_metering_1)), col="black", type="l", xlab="", ylab="Energy sub metering", xaxt="n")
# overlay second series
lines(as.numeric(as.character(targetdata$Sub_metering_2)), col="red")
# overlay third series
lines(as.numeric(as.character(targetdata$Sub_metering_3)), col="blue")
# label the x axis
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
# add legend identifying the three series
legend("topright", col= c("black", "red", "blue"), lty="solid", bty="n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Panel 4: global reactive power
plot(as.numeric(as.character(targetdata$Global_reactive_power)), type="l", ylab="Global_reactive_power", xlab="datetime", xaxt="n")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
# Re-draw the same four panels into a PNG device
png(filename="plot4.png")
par(mfrow=c(2,2))
plot(as.numeric(targetdata$Global_active_power)/500, ylab="Global Active Power", xlab="", type="l", xaxt="n")
# Fix: the original PNG section omitted the day-name axis on panel 1,
# making the PNG differ from the on-screen version.
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
plot(as.numeric(as.character(targetdata$Voltage)), type="l", ylab="Voltage", xlab="datetime", xaxt="n")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
plot(as.numeric(as.character(targetdata$Sub_metering_1)), col="black", type="l", xlab="", ylab="Energy sub metering", xaxt="n")
lines(as.numeric(as.character(targetdata$Sub_metering_2)), col="red")
lines(as.numeric(as.character(targetdata$Sub_metering_3)), col="blue")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
legend("topright", col= c("black", "red", "blue"), lty="solid", bty="n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(as.numeric(as.character(targetdata$Global_reactive_power)), type="l", ylab="Global_reactive_power", xlab="datetime", xaxt="n")
axis(1, at=c(1,nrow(targetdata)/2,nrow(targetdata)),labels=c("Thu","Fri","Sat"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input.R
\name{readinput}
\alias{readinput}
\title{Read the target dataset}
\usage{
readinput(inputfile, raw = FALSE, verbose = TRUE)
}
\arguments{
\item{inputfile}{file root to read}
\item{raw}{if TRUE, treat the input as a text file as output by plink --convert 12. Otherwise (default) read the .bim/.bed/.fam}
\item{verbose}{whether to report file reading progress}
}
\value{
See readbed
}
\description{
Read a bim/bed
}
|
/man/readinput.Rd
|
no_license
|
danjlawson/pcapred
|
R
| false
| true
| 503
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input.R
\name{readinput}
\alias{readinput}
\title{Read the target dataset}
\usage{
readinput(inputfile, raw = FALSE, verbose = TRUE)
}
\arguments{
\item{inputfile}{file root to read}
\item{raw}{if TRUE, treat the input as a text file as output by plink --convert 12. Otherwise (default) read the .bim/.bed/.fam}
\item{verbose}{whether to report file reading progress}
}
\value{
See readbed
}
\description{
Read a bim/bed
}
|
# ===========================================================
# Title : EBQI function
# Author : Filippo Ferrario
# Date : 30/10/2019
# Version : 1.2
# Aim : Allow flexibility in the function to change both model (i.e. box components) and scenario (i.e. box wheights)
# Changes :
#
# ===========================================================
# # Bench
# scores<-read.xlsx('./R_output/datasets/08a-indices-EBQI-box_scores-v0_3-2019-10-30.xlsx', sheetName='Scores', colClasses=NA)
# weights<-read.xlsx('./data/EBQI-literature_search.xlsx', sheetName='WEIGHTs')
# models<-read.xlsx('./data/EBQI-literature_search.xlsx', sheetName='EBQI_models-boxes')
# score_dataset= scores
# weights_dataset=filter(weights, Scenario=='ALGAE', model=='mod_1' )
# model_description= filter(models, model=='mod_1' )
# SITE='site'
# # BOX='box'
# DATA_SP='sp'
# SCORE='status'
# W_BOX='box'
# WEIGHTS='weight_1_10'
# MOD_BOX='box'
# MODEL_SP='sp'
# PERTURBATION=0
# #PERTURBATION=ptb1[,1]
# EBQI(score_dataset= scores ,weights_dataset=filter(weights, Scenario=='ALGAE', model=='mod_1'), model_description= filter(models, model=='mod_1' ),
# SITE='site',DATA_SP='sp',SCORE='status',
# W_BOX='box' ,WEIGHTS='weight_1_10' ,
# MOD_BOX='box',MODEL_SP='sp')
# score_dataset=epi_scores;
# weights_dataset=filter(W, Scenario=='ALGAE', model=='mod_1');
# model_description=filter(models, model=='mod_1')
# SITE='site'
# DATA_SP='sp'
# SCORE='status'
# W_BOX='box'
# WEIGHTS='weight_1_10'
# MOD_BOX='box'
# MODEL_SP='sp'
# ==================================
# Function EBQI
# ==================================
#' Calculate the Ecosystem-Based Quality Index
#'
#' Calculate the Ecosystem-Based Quality Index using the formula given in Personnic et al (2014).
#'
#' @param score_dataset dataframe with the scores of each box in the EBQI model. `score_dataset` must include the following columns: site name, species name, score value for each species.
#' @param weights_dataset dataframe with the weights of each box in the EBQI in the model. Different models and weighting scenarios can be specified. `weights_dataset` must include the following columns: model name, boxes' names, weighting scenario and weight value.
#' @param model_description dataframe specifying which species are considered in each box of the EBQI model. Different models can be specified.`model_description` must include the following columns: model name, boxes' names and species names.
#' @param SITE character. Name of the column in `score_dataset` storing the name of the site for which to calculate the EBQI.
#' @param DATA_SP character. Name of the column in`score_dataset` storing the names of the species.
#' @param SCORE character. Name of the column in `score_dataset` storing the species' scores.
#' @param W_BOX character. Name of the column in `weights_dataset` storing the box names.
#' @param WEIGHTS character. Name of the column in `weights_dataset` storing the numerical weights.
#' @param MAX_weight numeric. Maximum value of the weights. So far it is assumed that the weight values for the different model*scenario have a common range.
#' @param MOD_BOX character. Name of the column in `model_description` storing the box names.
#' @param MODEL_SP character. Name of the column in`model_description` storing the names of the species.
#' @param PERTURBATION numerical value to be added to the weights to introduce perturbations. Either one number or a vector of length equal to the number of boxes considered in the model in use.
#'
#' @return
#' A named list of two elements. `.$ebqi` stores the numerical value of the EBQI index. `.$scores` stores the (mean) scores used to calculate the EBQI.
#'
#' @details
#' The three input datasets `score_dataset`,`weights_dataset`,`model_description` can be used to calculate EBQI based on different model and weighting scheme specifications. A model is specified by the boxes considered. Different weighting scenario can be specified for the same model.
#' The names of the model boxes must match between input dataframes.
#' Only boxes for which there are species scored in the `score_dataset` will be actually used.
#' When 2 or more species are included in the same box their scores are averaged.
# `MAX_weight` is used to avoid that perturbations will result in weights exceeding the range decided for the scenario.
#'
#' For a real case study using this package please see \url{https://dataverse. } and the related paper.
#'
#' @author Filippo Ferrario, \email{filippo.ferrario.1@@ulaval.ca}
#' @references
#' Personnic, S., Boudouresque, C.F., Astruch, P., Ballesteros, E., Blouet, S., Bellan-Santini, D., Bonhomme, P., Thibault-Botha, D., Feunteun, E., Harmelin-Vivien, M., Pergent, G., Pergent-Martini, C., Pastor, J., Poggiale, J.-C., Renaud, F., Thibaut, T., Ruitton, S., 2014. An Ecosystem-Based Approach to Assess the Status of a Mediterranean Ecosystem, the Posidonia oceanica Seagrass Meadow. PLoS One 9, e98994. https://doi.org/10.1371/journal.pone.0098994
#'
#' @seealso \code{\link{classify}}
#'
#' @examples
#'
#' # create input datasets
#' scores<-data.frame(site=rep('s1',9),
#' sp=c('MPO','arenicola_marina','asteriidae','cancer_irroratus','echinarachnius_parma','strongylocentrotus_droebachiensis','OT-Invertivorous_Invertebrates','Suspension_feeders','Bioturbation'),
#' parameter=rep('density',9), unit='ind/m-2',
#' status=c(2,0,2,4,2,4,4,0,3))
#'
#' weights<-data.frame(model='m1', scenario='scen1',box=c('MPO','Herbivore','Detritus feeder','Invertivorous Invertebrates','Infauna','OT-Invertivorous Invertebrates','Bioturbation','Suspension feeder'), weight_1_10=c(10,7,3,6,4,1,1,1))
#'
#' mods<- data.frame(model='m1',
#' species=c('MPO','arenicola_marina','asteriidae','cancer_irroratus','echinarachnius_parma','strongylocentrotus_droebachiensis','OT-Invertivorous_Invertebrates','Suspension_feeders','Bioturbation','Nereis_sp'),
#' boxes=c('MPO','Detritus feeder','Invertivorous Invertebrates','Invertivorous Invertebrates','Detritus feeder','Herbivore','OT-Invertivorous Invertebrates','Suspension feeder','Bioturbation','Infauna'),
#' box_ID=NA)
#' # run the function
#' EBQI(score_dataset= scores ,weights_dataset=weights, model_description= mods,
#' SITE='site',DATA_SP='sp',SCORE='status',
#' W_BOX='box' ,WEIGHTS='weight_1_10' ,
#' MOD_BOX='boxes',MODEL_SP='species')
#'
#'
#' @export
EBQI <- function(score_dataset, weights_dataset, model_description, SITE=NULL, DATA_SP=NULL, SCORE=NULL, W_BOX=NULL, WEIGHTS=NULL, MAX_weight=10, MOD_BOX=NULL, MODEL_SP=NULL, PERTURBATION=0)
{
  # Version 1.2
  # SITE, DATA_SP and SCORE name the columns of 'score_dataset' holding the
  # sites, species and status scores. W_BOX and WEIGHTS name the columns of
  # 'weights_dataset' holding box names and weights; MOD_BOX and MODEL_SP do
  # the same for 'model_description'.

  # Simplify names
  P <- PERTURBATION
  BX <- W_BOX
  W <- WEIGHTS
  S <- SCORE
  names(model_description)[names(model_description) == MOD_BOX] <- BX
  wei_set <- weights_dataset
  score_set <- score_dataset

  # Check consistency of box names between the weights and the model.
  chk <- sapply(wei_set[, BX], function(x) { as.character(x) %in% as.character(model_description[, BX]) })
  if (sum(chk) < length(chk)) {
    I <- which(chk == FALSE)
    stop('BOX labels not coherent between input datasets! Check for: ', paste(wei_set[I, BX], collapse = ' '))
  }

  # Assign a box to each species in the score dataset.
  score_set <- merge(score_set, model_description, by.x = DATA_SP, by.y = MODEL_SP, all.x = TRUE, all.y = FALSE)

  # 1) Wi x Si: take the mean species score per site per box.
  score_set <- aggregate(score_set[, S], by = list(score_set[, BX], score_set[, SITE]), FUN = 'mean')
  names(score_set) <- c(BX, SITE, 'mean_score')
  # NOTE: scores are deliberately NOT rounded; rounding artificially created
  # differences in the final EBQI when a box was split into its components.

  # Add the perturbation to the weights, clamped to [1, MAX_weight] so a
  # perturbed weight cannot leave the range chosen for the scenario.
  wei_set[, W] <- wei_set[, W] + P
  wei_set[wei_set[, W] < 1, W] <- 1
  wei_set[wei_set[, W] > MAX_weight, W] <- MAX_weight

  scores_box <- merge(score_set, wei_set, by.x = BX, by.y = W_BOX)
  scores_box$WxS <- scores_box[, 'mean_score'] * scores_box[, W]
  scores_box <- scores_box[order(scores_box[, SITE], scores_box[, BX]), ]

  # 2) Numerator: sum of weight x score per site.
  agg <- list(scores_box[, SITE])
  aggWxS <- aggregate(scores_box[, 'WxS'], by = agg, FUN = 'sum')
  names(aggWxS) <- c(SITE, 'WxS')

  # 3) Denominator: 4 x the sum of weights per site.
  EI_Denom <- aggregate(scores_box[, W], by = agg, FUN = 'sum')
  EI_Denom$x <- EI_Denom$x * 4
  # Use the caller-supplied SITE name (the original hard-coded 'site', which
  # broke the later merge whenever SITE != "site").
  names(EI_Denom) <- c(SITE, 'denom')

  # The denominator must be identical for all sites within a scenario+model;
  # otherwise some boxes/species are missing at some site.
  # Fixed: the original evaluated length(unique(EI_Denom$x) > 1) -- a
  # misplaced parenthesis -- on a column already renamed to 'denom', so the
  # check could never fire.
  if (length(unique(EI_Denom$denom)) > 1) {
    stop('DENOMINATOR IS NOT THE SAME FOR ALL SITES: possibly boxes/species missing at some site')
  }

  # 4) Merge numerator and denominator and compute the EBQI on a 0-10 scale.
  EI <- merge(aggWxS, EI_Denom, by = SITE)
  EI$EBQI <- round((EI$WxS / EI$denom) * 10, 1)

  # 5) Output: the index per site plus the mean scores used to compute it.
  res <- list(ebqi = EI, scores = scores_box[, c(SITE, BX, 'mean_score')])
  return(res)
}
|
/R/EBQI-v1_2.R
|
no_license
|
filippo-ferrario/EzBQI
|
R
| false
| false
| 9,643
|
r
|
# ===========================================================
# Title : EBQI function
# Author : Filippo Ferrario
# Date : 30/10/2019
# Version : 1.2
# Aim : Allow flexibility in the function to change both model (i.e. box components) and scenario (i.e. box wheights)
# Changes :
#
# ===========================================================
# # Bench
# scores<-read.xlsx('./R_output/datasets/08a-indices-EBQI-box_scores-v0_3-2019-10-30.xlsx', sheetName='Scores', colClasses=NA)
# weights<-read.xlsx('./data/EBQI-literature_search.xlsx', sheetName='WEIGHTs')
# models<-read.xlsx('./data/EBQI-literature_search.xlsx', sheetName='EBQI_models-boxes')
# score_dataset= scores
# weights_dataset=filter(weights, Scenario=='ALGAE', model=='mod_1' )
# model_description= filter(models, model=='mod_1' )
# SITE='site'
# # BOX='box'
# DATA_SP='sp'
# SCORE='status'
# W_BOX='box'
# WEIGHTS='weight_1_10'
# MOD_BOX='box'
# MODEL_SP='sp'
# PERTURBATION=0
# #PERTURBATION=ptb1[,1]
# EBQI(score_dataset= scores ,weights_dataset=filter(weights, Scenario=='ALGAE', model=='mod_1'), model_description= filter(models, model=='mod_1' ),
# SITE='site',DATA_SP='sp',SCORE='status',
# W_BOX='box' ,WEIGHTS='weight_1_10' ,
# MOD_BOX='box',MODEL_SP='sp')
# score_dataset=epi_scores;
# weights_dataset=filter(W, Scenario=='ALGAE', model=='mod_1');
# model_description=filter(models, model=='mod_1')
# SITE='site'
# DATA_SP='sp'
# SCORE='status'
# W_BOX='box'
# WEIGHTS='weight_1_10'
# MOD_BOX='box'
# MODEL_SP='sp'
# ==================================
# Function EBQI
# ==================================
#' Calculate the Ecosystem-Based Quality Index
#'
#' Calculate the Ecosystem-Based Quality Index using the formula given in Personnic et al (2014).
#'
#' @param score_dataset dataframe with the scores of each box in the EBQI model. `score_dataset` must include the following columns: site name, species name, score value for each species.
#' @param weights_dataset dataframe with the weights of each box in the EBQI in the model. Different models and weighting scenarios can be specified. `weights_dataset` must include the following columns: model name, boxes' names, weighting scenario and weight value.
#' @param model_description dataframe specifying which species are considered in each box of the EBQI model. Different models can be specified.`model_description` must include the following columns: model name, boxes' names and species names.
#' @param SITE character. Name of the column in `score_dataset` storing the name of the site for which to calculate the EBQI.
#' @param DATA_SP character. Name of the column in`score_dataset` storing the names of the species.
#' @param SCORE character. Name of the column in `score_dataset` storing the species' scores.
#' @param W_BOX character. Name of the column in `weights_dataset` storing the box names.
#' @param WEIGHTS character. Name of the column in `weights_dataset` storing the numerical weights.
#' @param MAX_weight numeric. Maximum value of the weights. So far it is assumed that the weight values for the different model*scenario have a common range.
#' @param MOD_BOX character. Name of the column in `model_description` storing the box names.
#' @param MODEL_SP character. Name of the column in`model_description` storing the names of the species.
#' @param PERTURBATION numerical value to be added to the weights to introduce perturbations. Either one number or a vector of length equal to the number of boxes considered in the model in use.
#'
#' @return
#' A named list of two elements. `.$ebqi` stores the numerical value of the EBQI index. `.$scores` stores the (mean) scores used to calculate the EBQI.
#'
#' @details
#' The three input datasets `score_dataset`,`weights_dataset`,`model_description` can be used to calculate EBQI based on different model and weighting scheme specifications. A model is specified by the boxes considered. Different weighting scenario can be specified for the same model.
#' The names of the model boxes must match between input dataframes.
#' Only boxes for which there are species scored in the `score_dataset` will be actually used.
#' When 2 or more species are included in the same box their scores are averaged.
# `MAX_weight` is used to avoid that perturbations will result in weights exceeding the range decided for the scenario.
#'
#' For a real case study using this package please see \url{https://dataverse. } and the related paper.
#'
#' @author Filippo Ferrario, \email{filippo.ferrario.1@@ulaval.ca}
#' @references
#' Personnic, S., Boudouresque, C.F., Astruch, P., Ballesteros, E., Blouet, S., Bellan-Santini, D., Bonhomme, P., Thibault-Botha, D., Feunteun, E., Harmelin-Vivien, M., Pergent, G., Pergent-Martini, C., Pastor, J., Poggiale, J.-C., Renaud, F., Thibaut, T., Ruitton, S., 2014. An Ecosystem-Based Approach to Assess the Status of a Mediterranean Ecosystem, the Posidonia oceanica Seagrass Meadow. PLoS One 9, e98994. https://doi.org/10.1371/journal.pone.0098994
#'
#' @seealso \code{\link{classify}}
#'
#' @examples
#'
#' # create input datasets
#' scores<-data.frame(site=rep('s1',9),
#' sp=c('MPO','arenicola_marina','asteriidae','cancer_irroratus','echinarachnius_parma','strongylocentrotus_droebachiensis','OT-Invertivorous_Invertebrates','Suspension_feeders','Bioturbation'),
#' parameter=rep('density',9), unit='ind/m-2',
#' status=c(2,0,2,4,2,4,4,0,3))
#'
#' weights<-data.frame(model='m1', scenario='scen1',box=c('MPO','Herbivore','Detritus feeder','Invertivorous Invertebrates','Infauna','OT-Invertivorous Invertebrates','Bioturbation','Suspension feeder'), weight_1_10=c(10,7,3,6,4,1,1,1))
#'
#' mods<- data.frame(model='m1',
#' species=c('MPO','arenicola_marina','asteriidae','cancer_irroratus','echinarachnius_parma','strongylocentrotus_droebachiensis','OT-Invertivorous_Invertebrates','Suspension_feeders','Bioturbation','Nereis_sp'),
#' boxes=c('MPO','Detritus feeder','Invertivorous Invertebrates','Invertivorous Invertebrates','Detritus feeder','Herbivore','OT-Invertivorous Invertebrates','Suspension feeder','Bioturbation','Infauna'),
#' box_ID=NA)
#' # run the function
#' EBQI(score_dataset= scores ,weights_dataset=weights, model_description= mods,
#' SITE='site',DATA_SP='sp',SCORE='status',
#' W_BOX='box' ,WEIGHTS='weight_1_10' ,
#' MOD_BOX='boxes',MODEL_SP='species')
#'
#'
#' @export
EBQI <- function(score_dataset, weights_dataset, model_description, SITE=NULL, DATA_SP=NULL, SCORE=NULL, W_BOX=NULL, WEIGHTS=NULL, MAX_weight=10, MOD_BOX=NULL, MODEL_SP=NULL, PERTURBATION=0)
{
  # Version 1.2
  # SITE, DATA_SP and SCORE name the columns of 'score_dataset' holding the
  # sites, species and status scores. W_BOX and WEIGHTS name the columns of
  # 'weights_dataset' holding box names and weights; MOD_BOX and MODEL_SP do
  # the same for 'model_description'.

  # Simplify names
  P <- PERTURBATION
  BX <- W_BOX
  W <- WEIGHTS
  S <- SCORE
  names(model_description)[names(model_description) == MOD_BOX] <- BX
  wei_set <- weights_dataset
  score_set <- score_dataset

  # Check consistency of box names between the weights and the model.
  chk <- sapply(wei_set[, BX], function(x) { as.character(x) %in% as.character(model_description[, BX]) })
  if (sum(chk) < length(chk)) {
    I <- which(chk == FALSE)
    stop('BOX labels not coherent between input datasets! Check for: ', paste(wei_set[I, BX], collapse = ' '))
  }

  # Assign a box to each species in the score dataset.
  score_set <- merge(score_set, model_description, by.x = DATA_SP, by.y = MODEL_SP, all.x = TRUE, all.y = FALSE)

  # 1) Wi x Si: take the mean species score per site per box.
  score_set <- aggregate(score_set[, S], by = list(score_set[, BX], score_set[, SITE]), FUN = 'mean')
  names(score_set) <- c(BX, SITE, 'mean_score')
  # NOTE: scores are deliberately NOT rounded; rounding artificially created
  # differences in the final EBQI when a box was split into its components.

  # Add the perturbation to the weights, clamped to [1, MAX_weight] so a
  # perturbed weight cannot leave the range chosen for the scenario.
  wei_set[, W] <- wei_set[, W] + P
  wei_set[wei_set[, W] < 1, W] <- 1
  wei_set[wei_set[, W] > MAX_weight, W] <- MAX_weight

  scores_box <- merge(score_set, wei_set, by.x = BX, by.y = W_BOX)
  scores_box$WxS <- scores_box[, 'mean_score'] * scores_box[, W]
  scores_box <- scores_box[order(scores_box[, SITE], scores_box[, BX]), ]

  # 2) Numerator: sum of weight x score per site.
  agg <- list(scores_box[, SITE])
  aggWxS <- aggregate(scores_box[, 'WxS'], by = agg, FUN = 'sum')
  names(aggWxS) <- c(SITE, 'WxS')

  # 3) Denominator: 4 x the sum of weights per site.
  EI_Denom <- aggregate(scores_box[, W], by = agg, FUN = 'sum')
  EI_Denom$x <- EI_Denom$x * 4
  # Use the caller-supplied SITE name (the original hard-coded 'site', which
  # broke the later merge whenever SITE != "site").
  names(EI_Denom) <- c(SITE, 'denom')

  # The denominator must be identical for all sites within a scenario+model;
  # otherwise some boxes/species are missing at some site.
  # Fixed: the original evaluated length(unique(EI_Denom$x) > 1) -- a
  # misplaced parenthesis -- on a column already renamed to 'denom', so the
  # check could never fire.
  if (length(unique(EI_Denom$denom)) > 1) {
    stop('DENOMINATOR IS NOT THE SAME FOR ALL SITES: possibly boxes/species missing at some site')
  }

  # 4) Merge numerator and denominator and compute the EBQI on a 0-10 scale.
  EI <- merge(aggWxS, EI_Denom, by = SITE)
  EI$EBQI <- round((EI$WxS / EI$denom) * 10, 1)

  # 5) Output: the index per site plus the mean scores used to compute it.
  res <- list(ebqi = EI, scores = scores_box[, c(SITE, BX, 'mean_score')])
  return(res)
}
|
# *************************************
# Multiscalar Territorial Analysis   *
# (Tunisia, 2014 census data)        *
# *************************************
library("cartography")
library("MTA")
# [1] Load the pre-built spatial layers (delegations, gouvernorats, coastline, ...)
load("data/geometriesTN.RData")
# [2] Import census data already formatted with SNUTS unit codes
my.df<-read.csv( "data/data_carto_census2014.csv",header=TRUE,sep=";",dec=",",encoding="utf-8")
# [3] Global deviation: each delegation relative to the national average (relative, %)
my.df$globaldev <- gdev(my.df, "log_t_2014", "pop_t_2014", type = "rel")
# [3b] Territorial deviation: each delegation relative to its gouvernorat average
#      (gouvernorat id = first 4 characters of the delegation id)
my.df$idgouv <- substr(my.df$id,1,4)
my.df$territorialdev <- tdev(my.df, "log_t_2014", "pop_t_2014", type = "rel",key="idgouv")
# [4] Local deviation: each delegation relative to its order-1 contiguous neighbours
my.df$localdev <- sdev(my.df, "log_t_2014", "pop_t_2014", type = "rel", delegations.spdf, spdfid = "del_id", xid = "id", order = 1)
bks <- c(min(my.df$localdev),75,100,125,150,max(my.df$localdev))
# [5] Multiscalar typology: flag units above the threshold `seuil` at each scale
seuil <- 110
synthesis <- mst(spdf = delegations.spdf,
                 x = my.df,
                 spdfid = "del_id",
                 xid = "id",
                 var1 = "log_t_2014",
                 var2 = "pop_t_2014",
                 dist = NULL,
                 key = "idgouv",
                 order = 1,
                 mat = NULL,
                 threshold = seuil,
                 superior = TRUE)
synthesis
# [6] Build a 2x2 panel of maps
opar <- par(mar = c(0, 0, 1.2, 0), mfrow = c(2, 2))
# map 1: deviation from the national average (choropleth)
bks <- c(min(synthesis$gdevrel),75,100,125,150,max(synthesis$gdevrel))
bks <- sort(bks)
cols <- carto.pal(pal1 = "blue.pal", n1 = 2,pal2 = "wine.pal", n2 = 3)
plot(delegations.spdf, border = NA, col = NA, bg = "#A6CAE0")
plot(countries.spdf,border="white",col="#eadac1", lwd=0.6, add=T)
plot(shadow.spdf,col="#2D3F4580",border="NA",add=T)
plot(delegations.spdf, border = "white", col = "#cca992",lwd=0.2,add=T)
choroLayer(spdf = delegations.spdf, df = synthesis, var = "gdevrel",
           legend.pos = "bottomleft",
           legend.title.txt = "Ecart à la\nmoyenne\nnationale",
           breaks = bks, border = "white",lwd=0.2,
           col = cols, add=T)
plot(gouvernorats.spdf, border = "white", col = NA,lwd=0.4,add=T)
plot(coastlines.sp,col="#0e7aa5", lwd=1, add=T)
plot(others.spdf,col="#15629630",border=NA, add=T)
layoutLayer(title = "Population totale, 2014", # change the map title here
            author = "UMS RIATE / Universit? de Sfax",
            sources = "sources : INS, 2014", # change the data sources here
            scale = 100, theme = "taupe.pal",
            north = TRUE, frame = TRUE) # add a north arrow and frame
# map 2: deviation from the regional (gouvernorat) average
head(synthesis)
bks <- c(min(synthesis$tdevrel),75,100,125,150,max(synthesis$tdevrel))
bks <- sort(bks)
plot(delegations.spdf, border = NA, col = NA, bg = "#A6CAE0")
plot(countries.spdf,border="white",col="#eadac1", lwd=0.6, add=T)
plot(shadow.spdf,col="#2D3F4580",border="NA",add=T)
plot(delegations.spdf, border = "white", col = "#cca992",lwd=0.2,add=T)
choroLayer(spdf = delegations.spdf, df = synthesis, var = "tdevrel",
           legend.pos = "bottomleft",
           legend.title.txt = "Ecart à la\nmoyenne\nrégionale\n(gouvernorats)",
           breaks = bks, border = "white",lwd=0.2,
           col = cols, add=T)
plot(gouvernorats.spdf, border = "black", col = NA,lwd=0.4,add=T)
plot(coastlines.sp,col="#0e7aa5", lwd=1, add=T)
plot(others.spdf,col="#15629630",border=NA, add=T)
layoutLayer(title = "Population totale, 2014", # change the map title here
            author = "UMS RIATE / Universit? de Sfax",
            sources = "sources : INS, 2014", # change the data sources here
            scale = 100, theme = "taupe.pal",
            north = TRUE, frame = TRUE) # add a north arrow and frame
# map 3: deviation from the local (contiguity) average
bks <- c(min(synthesis$sdevrel),75,100,125,150,max(synthesis$sdevrel))
bks <- sort(bks)
plot(delegations.spdf, border = NA, col = NA, bg = "#A6CAE0")
plot(countries.spdf,border="white",col="#eadac1", lwd=0.6, add=T)
plot(shadow.spdf,col="#2D3F4580",border="NA",add=T)
plot(delegations.spdf, border = "white", col = "#cca992",lwd=0.2,add=T)
choroLayer(spdf = delegations.spdf, df = synthesis, var = "sdevrel",
           legend.pos = "bottomleft",
           legend.title.txt = "Ecart à la\nmoyenne\nlocale\n(contiguité)",
           breaks = bks, border = "white",lwd=0.2,
           col = cols, add=T)
plot(gouvernorats.spdf, border = "white", col = NA,lwd=0.4,add=T)
plot(coastlines.sp,col="#0e7aa5", lwd=1, add=T)
plot(others.spdf,col="#15629630",border=NA, add=T)
layoutLayer(title = "Population totale, 2014", # change the map title here
            author = "UMS RIATE / Universit? de Sfax",
            sources = "sources : INS, 2014", # change the data sources here
            scale = 100, theme = "taupe.pal",
            north = TRUE, frame = TRUE) # add a north arrow and frame
# map 4: multiscalar typology (which scales is each unit above threshold on?)
cols <- c("#f0f0f0", "#fdc785","#ffffab","#fba9b0","#addea6","#ffa100","#fff226","#e30020")
# legend labels: [global] [territorial] [local] above-threshold flags
rVal<-c(" . . . ",
        "[X] . . ",
        " . [X] . ",
        "[X] [X] . ",
        " . . [X]",
        "[X] . [X]",
        " . [X] [X]",
        "[X] [X] [X]")
plot(delegations.spdf, border = NA, col = NA, bg = "#A6CAE0")
plot(countries.spdf,border="white",col="#eadac1", lwd=0.6, add=T)
plot(shadow.spdf,col="#2D3F4580",border="NA",add=T)
plot(delegations.spdf, border = "white", col = "#cca992",lwd=0.2,add=T)
unique(synthesis$mst)
# NOTE(review): 8 colours/labels but only 7 class values in legend.values.order
# (class 6 is presumably absent from the data, see unique() above) -- verify.
typoLayer(spdf = delegations.spdf, df = synthesis, var = "mst",
          border = "#D9D9D9",legend.values.order = c(0,1,2,3,4,5,7),
          col = cols,
          lwd = 0.25,
          legend.pos = "n",
          add=T)
legendTypo(col = cols, categ = rVal,
           title.txt = paste("Typologie\nmultiscalaire\n(>",seuil,")"),
           nodata = FALSE, pos = "bottomleft")
|
/prg/MTA.R
|
no_license
|
chaymare/tunisie
|
R
| false
| false
| 5,987
|
r
|
# *************************************
# Multiscalar Territorial Analysis   *
# (Tunisia, 2014 census data)        *
# *************************************
library("cartography")
library("MTA")
# [1] Load the pre-built spatial layers (delegations, gouvernorats, coastline, ...)
load("data/geometriesTN.RData")
# [2] Import census data already formatted with SNUTS unit codes
my.df<-read.csv( "data/data_carto_census2014.csv",header=TRUE,sep=";",dec=",",encoding="utf-8")
# [3] Global deviation: each delegation relative to the national average (relative, %)
my.df$globaldev <- gdev(my.df, "log_t_2014", "pop_t_2014", type = "rel")
# [3b] Territorial deviation: each delegation relative to its gouvernorat average
#      (gouvernorat id = first 4 characters of the delegation id)
my.df$idgouv <- substr(my.df$id,1,4)
my.df$territorialdev <- tdev(my.df, "log_t_2014", "pop_t_2014", type = "rel",key="idgouv")
# [4] Local deviation: each delegation relative to its order-1 contiguous neighbours
my.df$localdev <- sdev(my.df, "log_t_2014", "pop_t_2014", type = "rel", delegations.spdf, spdfid = "del_id", xid = "id", order = 1)
bks <- c(min(my.df$localdev),75,100,125,150,max(my.df$localdev))
# [5] Multiscalar typology: flag units above the threshold `seuil` at each scale
seuil <- 110
synthesis <- mst(spdf = delegations.spdf,
                 x = my.df,
                 spdfid = "del_id",
                 xid = "id",
                 var1 = "log_t_2014",
                 var2 = "pop_t_2014",
                 dist = NULL,
                 key = "idgouv",
                 order = 1,
                 mat = NULL,
                 threshold = seuil,
                 superior = TRUE)
synthesis
# [6] Build a 2x2 panel of maps
opar <- par(mar = c(0, 0, 1.2, 0), mfrow = c(2, 2))
# map 1: deviation from the national average (choropleth)
bks <- c(min(synthesis$gdevrel),75,100,125,150,max(synthesis$gdevrel))
bks <- sort(bks)
cols <- carto.pal(pal1 = "blue.pal", n1 = 2,pal2 = "wine.pal", n2 = 3)
plot(delegations.spdf, border = NA, col = NA, bg = "#A6CAE0")
plot(countries.spdf,border="white",col="#eadac1", lwd=0.6, add=T)
plot(shadow.spdf,col="#2D3F4580",border="NA",add=T)
plot(delegations.spdf, border = "white", col = "#cca992",lwd=0.2,add=T)
choroLayer(spdf = delegations.spdf, df = synthesis, var = "gdevrel",
           legend.pos = "bottomleft",
           legend.title.txt = "Ecart à la\nmoyenne\nnationale",
           breaks = bks, border = "white",lwd=0.2,
           col = cols, add=T)
plot(gouvernorats.spdf, border = "white", col = NA,lwd=0.4,add=T)
plot(coastlines.sp,col="#0e7aa5", lwd=1, add=T)
plot(others.spdf,col="#15629630",border=NA, add=T)
layoutLayer(title = "Population totale, 2014", # change the map title here
            author = "UMS RIATE / Universit? de Sfax",
            sources = "sources : INS, 2014", # change the data sources here
            scale = 100, theme = "taupe.pal",
            north = TRUE, frame = TRUE) # add a north arrow and frame
# map 2: deviation from the regional (gouvernorat) average
head(synthesis)
bks <- c(min(synthesis$tdevrel),75,100,125,150,max(synthesis$tdevrel))
bks <- sort(bks)
plot(delegations.spdf, border = NA, col = NA, bg = "#A6CAE0")
plot(countries.spdf,border="white",col="#eadac1", lwd=0.6, add=T)
plot(shadow.spdf,col="#2D3F4580",border="NA",add=T)
plot(delegations.spdf, border = "white", col = "#cca992",lwd=0.2,add=T)
choroLayer(spdf = delegations.spdf, df = synthesis, var = "tdevrel",
           legend.pos = "bottomleft",
           legend.title.txt = "Ecart à la\nmoyenne\nrégionale\n(gouvernorats)",
           breaks = bks, border = "white",lwd=0.2,
           col = cols, add=T)
plot(gouvernorats.spdf, border = "black", col = NA,lwd=0.4,add=T)
plot(coastlines.sp,col="#0e7aa5", lwd=1, add=T)
plot(others.spdf,col="#15629630",border=NA, add=T)
layoutLayer(title = "Population totale, 2014", # change the map title here
            author = "UMS RIATE / Universit? de Sfax",
            sources = "sources : INS, 2014", # change the data sources here
            scale = 100, theme = "taupe.pal",
            north = TRUE, frame = TRUE) # add a north arrow and frame
# map 3: deviation from the local (contiguity) average
bks <- c(min(synthesis$sdevrel),75,100,125,150,max(synthesis$sdevrel))
bks <- sort(bks)
plot(delegations.spdf, border = NA, col = NA, bg = "#A6CAE0")
plot(countries.spdf,border="white",col="#eadac1", lwd=0.6, add=T)
plot(shadow.spdf,col="#2D3F4580",border="NA",add=T)
plot(delegations.spdf, border = "white", col = "#cca992",lwd=0.2,add=T)
choroLayer(spdf = delegations.spdf, df = synthesis, var = "sdevrel",
           legend.pos = "bottomleft",
           legend.title.txt = "Ecart à la\nmoyenne\nlocale\n(contiguité)",
           breaks = bks, border = "white",lwd=0.2,
           col = cols, add=T)
plot(gouvernorats.spdf, border = "white", col = NA,lwd=0.4,add=T)
plot(coastlines.sp,col="#0e7aa5", lwd=1, add=T)
plot(others.spdf,col="#15629630",border=NA, add=T)
layoutLayer(title = "Population totale, 2014", # change the map title here
            author = "UMS RIATE / Universit? de Sfax",
            sources = "sources : INS, 2014", # change the data sources here
            scale = 100, theme = "taupe.pal",
            north = TRUE, frame = TRUE) # add a north arrow and frame
# map 4: multiscalar typology (which scales is each unit above threshold on?)
cols <- c("#f0f0f0", "#fdc785","#ffffab","#fba9b0","#addea6","#ffa100","#fff226","#e30020")
# legend labels: [global] [territorial] [local] above-threshold flags
rVal<-c(" . . . ",
        "[X] . . ",
        " . [X] . ",
        "[X] [X] . ",
        " . . [X]",
        "[X] . [X]",
        " . [X] [X]",
        "[X] [X] [X]")
plot(delegations.spdf, border = NA, col = NA, bg = "#A6CAE0")
plot(countries.spdf,border="white",col="#eadac1", lwd=0.6, add=T)
plot(shadow.spdf,col="#2D3F4580",border="NA",add=T)
plot(delegations.spdf, border = "white", col = "#cca992",lwd=0.2,add=T)
unique(synthesis$mst)
# NOTE(review): 8 colours/labels but only 7 class values in legend.values.order
# (class 6 is presumably absent from the data, see unique() above) -- verify.
typoLayer(spdf = delegations.spdf, df = synthesis, var = "mst",
          border = "#D9D9D9",legend.values.order = c(0,1,2,3,4,5,7),
          col = cols,
          lwd = 0.25,
          legend.pos = "n",
          add=T)
legendTypo(col = cols, categ = rVal,
           title.txt = paste("Typologie\nmultiscalaire\n(>",seuil,")"),
           nodata = FALSE, pos = "bottomleft")
|
#' @title Combine SKAT-O analyses from one or more studies.
#'
#' @description Takes as input `seqMeta` objects (from e.g.
#' \code{\link{prepScores}}), and meta analyzes them, using SKAT-O. See the
#' package vignette for more extensive documentation.
#'
#' @inheritParams singlesnpMeta
#' @inheritParams burdenMeta
#' @param skat.wts Either a function to calculate testing weights for SKAT, or a
#' character specifying a vector of weights in the SNPInfo file. For skatOMeta
#' the default are the `beta' weights.
#' @param burden.wts Either a function to calculate weights for the burden test,
#' or a character specifying a vector of weights in the SNPInfo file. For
#' skatOMeta the default are the T1 weights.
#' @param rho A sequence of values that specify combinations of SKAT and a burden test to be considered. Default is c(0,1), which considers SKAT and a burden test.
#' @param method p-value calculation method. Should be one of 'saddlepoint', 'integration', or 'liu'.
#'
#' @details \code{skatOMeta()} implements the SKAT-Optimal test, which picks the
#' `best' combination of SKAT and a burden test, and then corrects for the
#' flexibility afforded by this choice. Specifically, if the SKAT statistic is
#' Q1, and the squared score for a burden test is Q2, SKAT-O considers tests
#' of the form (1-rho)*Q1 + rho*Q2, where rho between 0 and 1. The values of
#' rho are specified by the user using the argument \code{rho}. In the
#' simplest form, which is the default, SKAT-O computes a SKAT test and a T1
#' test, and reports the minimum p-value, corrected for multiple testing. See
#' the vignette or the accompanying references for more details.
#'
#' If there is a single variant in the gene, or the burden test is undefined
#' (e.g. there are no rare alleles for the T1 test), SKAT is reported (i.e.
#' rho=0).
#'
#' Note 1: the SKAT package uses the same weights for both SKAT and the burden
#' test, which this function does not.
#'
#' Note 2: all studies must use coordinated SNP Info files - that is, the SNP
#' names and gene definitions must be the same.
#'
#' Note 3: The method of p-value calculation is much more important here than
#' in SKAT. The `integration' method is fast and typically accurate for
#' p-values larger than 1e-9. The saddlepoint method is slower, but has higher
#' relative accuracy.
#'
#' Note 4: Since p-value calculation can be slow for SKAT-O, and less accurate
#' for small p-values, a reasonable alternative would be to first calculate
#' SKAT and a burden test, and record the minimum p-value, which is a lower
#' bound for the SKAT-O p-value. This can be done quickly and accurately.
#' Then, one would only need to perform SKAT-O on the small subset of genes
#' that are potentially interesting.
#'
#' Please see the package vignette for more details.
#'
#' @return a data frame with the following columns:
#' \item{gene}{Name of the gene or unit of aggregation being meta analyzed}
#' \item{p}{p-value of the SKAT-O test.}
#' \item{pmin}{The minimum of the p-values considered by SKAT-O (not corrected for multiple testing!).}
#' \item{rho}{The value of rho which gave the smallest p-value.}
#' \item{cmaf}{The cumulative minor allele frequency.}
#' \item{nmiss}{The number of `missing` SNPs. For a gene with a single SNP
#' this is the number of individuals which do not contribute to the analysis,
#' due to studies that did not report results for that SNP. For a gene with
#' multiple SNPs, is totalled over the gene. }
#' \item{nsnps}{The number of SNPs in the gene.}
#' \item{errflag}{An indicator of possible error: 0 suggests no error, > 0
#' indicates probable loss of accuracy.}
#'
#' @references Wu, M.C., Lee, S., Cai, T., Li, Y., Boehnke, M., and Lin, X.
#' (2011) Rare Variant Association Testing for Sequencing Data Using the
#' Sequence Kernel Association Test (SKAT). American Journal of Human
#' Genetics.
#'
#' Lee, S. and Wu, M.C. and Lin, X. (2012) Optimal tests for rare variant
#' effects in sequencing association studies. Biostatistics.
#'
#' @author Arie Voorman, Jennifer Brody
#' @seealso
#' \code{\link{skatOMeta}}
#' \code{\link{prepScores}}
#' \code{\link{burdenMeta}}
#' \code{\link{singlesnpMeta}}
#'
#' @examples
#' \dontrun{
#' ### load example data for 2 studies
#' data(seqMetaExample)
#'
#' ####run on each study:
#' cohort1 <- prepScores(Z=Z1, y~sex+bmi, SNPInfo = SNPInfo, data =pheno1)
#' cohort2 <- prepScores(Z=Z2, y~sex+bmi, SNPInfo = SNPInfo, kins=kins, data=pheno2)
#'
#' #### combine results:
#' ##skat-O with default settings:
#' out1 <- skatOMeta(cohort1, cohort2, SNPInfo = SNPInfo, method = "int")
#' head(out1)
#'
#' ##skat-O, using a large number of combinations between SKAT and T1 tests:
#' out2 <- skatOMeta(cohort1, cohort2, rho=seq(0,1,length=11), SNPInfo=SNPInfo, method="int")
#' head(out2)
#'
#' #rho = 0 indicates SKAT gave the smaller p-value (or the T1 is undefined)
#' #rho=1 indicates the burden test was chosen
#' # 0 < rho < 1 indicates some other value was chosen
#' #notice that most of the time either the SKAT or T1 is chosen
#' table(out2$rho)
#'
#' ##skat-O with beta-weights used in the burden test:
#' out3 <- skatOMeta(cohort1,cohort2, burden.wts = function(maf){dbeta(maf,1,25) },
#' rho=seq(0,1,length=11),SNPInfo = SNPInfo, method="int")
#' head(out3)
#' table(out3$rho)
#'
#' ########################
#' ####binary data
#' cohort1 <- prepScores(Z=Z1, ybin~1, family=binomial(), SNPInfo=SNPInfo, data=pheno1)
#' out.bin <- skatOMeta(cohort1, SNPInfo = SNPInfo, method="int")
#' head(out.bin)
#'
#' ####################
#' ####survival data
#' cohort1 <- prepCox(Z=Z1, Surv(time,status)~strata(sex)+bmi, SNPInfo=SNPInfo,
#' data=pheno1)
#' out.surv <- skatOMeta(cohort1, SNPInfo = SNPInfo, method="int")
#' head(out.surv)
#'
#' ##########################################
#' ###Compare with SKAT and T1 tests on their own:
#' cohort1 <- prepScores(Z=Z1, y~sex+bmi, SNPInfo=SNPInfo, data=pheno1)
#' cohort2 <- prepScores(Z=Z2, y~sex+bmi, SNPInfo=SNPInfo, kins=kins, data=pheno2)
#'
#' out.skat <- skatMeta(cohort1,cohort2,SNPInfo=SNPInfo)
#' out.t1 <- burdenMeta(cohort1,cohort2, wts= function(maf){as.numeric(maf <= 0.01)},
#' SNPInfo=SNPInfo)
#'
#' #plot results
#' #We compare the minimum p-value of SKAT and T1, adjusting for multiple tests
#' #using the Sidak correction, to that of SKAT-O.
#'
#' par(mfrow=c(1,3))
#' pseq <- seq(0,1,length=100)
#' plot(y=out.skat$p, x=out1$p,xlab="SKAT-O p-value", ylab="SKAT p-value", main ="SKAT-O vs SKAT")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#'
#' plot(y=out.t1$p, x=out1$p,xlab="SKAT-O p-value", ylab="T1 p-value", main ="SKAT-O vs T1")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#'
#' plot(y=pmin(out.t1$p, out.skat$p,na.rm=T), x=out1$p,xlab="SKAT-O p-value",
#' ylab="min(T1,SKAT) p-value", main ="min(T1,SKAT) vs SKAT-O")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#' legend("bottomright", lwd=2,lty=2,col=2,legend="Bonferroni correction")
#' }
#'
#' @export
skatOMeta <- function(..., SNPInfo=NULL, skat.wts=function(maf){stats::dbeta(maf,1,25)}, burden.wts=function(maf){as.numeric(maf <= 0.01) }, rho=c(0,1), method=c("integration", "saddlepoint", "liu"), snpNames="Name", aggregateBy="gene", mafRange=c(0,0.5), verbose=FALSE) {
  # Capture the unevaluated call so the seqMeta objects passed via `...` can be
  # looked up BY NAME in the calling frame below (avoids copying large objects).
  cl <- match.call(expand.dots = FALSE)
  if(is.null(SNPInfo)){
    warning("No SNP Info file provided: loading the Illumina HumanExome BeadChip. See ?SNPInfo for more details")
    load(paste(find.package("seqMeta"), "data", "SNPInfo.rda",sep = "/"))
    aggregateBy = "SKATgene"
  } else {
    # Validate / deduplicate the SNPInfo map (also checks character weight columns).
    SNPInfo <- prepSNPInfo(SNPInfo, snpNames, aggregateBy, wt1=skat.wts, wt2=burden.wts)
  }
  if(any(rho >1 | rho < 0 ) ) stop("rho must be between 0 and 1")
  method <- match.arg(method)
  #if( !(method %in% c("davies","farebrother","imhof","liu")) ) stop("Method specified is not valid! See documentation")
  genelist <- stats::na.omit(unique(SNPInfo[,aggregateBy]))
  # Names of the cohort objects supplied through `...`.
  cohortNames <- lapply(cl[[2]],as.character)
  ncohort <- length(cohortNames)
  ev <- parent.frame()
  classes <- unlist(lapply(cohortNames,function(name){class(get(name,envir=ev))}))
  if(!all(classes == "seqMeta" | classes == "skatCohort") ){
    stop("an argument to ... is not a seqMeta object!")
  }
  # Result skeleton: one row per gene; numeric columns filled in the main loop.
  res.strings <- data.frame("gene"=genelist,stringsAsFactors=F)
  res.numeric <- matrix(NA, nrow= nrow(res.strings),ncol = length(c("p","pmin","rho","cmaf","nmiss", "nsnps", "errflag")))
  colnames(res.numeric) <- c("p","pmin","rho","cmaf","nmiss", "nsnps","errflag")
  if(verbose){
    cat("\n Meta Analyzing... Progress:\n")
    pb <- utils::txtProgressBar(min = 0, max = length(genelist), style = 3)
    pb.i <- 0
  }
  ri <- 0
  snp.names.list <- split(SNPInfo[,snpNames],SNPInfo[,aggregateBy])
  for(gene in genelist){
    ri <- ri+1
    nsnps.sub <- length(snp.names.list[[gene]])
    # Pooled quantities for this gene, accumulated across all cohorts.
    mscores <- maf <- numeric(nsnps.sub)
    big.cov <- Matrix(0, nsnps.sub,nsnps.sub)
    n.total <- numeric(nsnps.sub)
    n.miss <- numeric(nsnps.sub)
    vary.ave <- 0
    for(cohort.k in 1:ncohort){
      cohort.gene <- get(cohortNames[[cohort.k]],envir=ev)[[gene]]
      if(!is.null(cohort.gene)){
        # Align the cohort's SNPs with the SNPInfo ordering; SNPs the cohort
        # did not report get zero score/covariance and maf flagged as -1.
        sub <- match(snp.names.list[[gene]],colnames(cohort.gene$cov))
        if(any(is.na(sub)) | any(sub != 1:length(sub), na.rm=TRUE) | length(cohort.gene$maf) > nsnps.sub){
          #if(any(is.na(sub))) warning("Some SNPs were not in SNPInfo file for gene ", gene," and cohort ",names(cohorts)[cohort.k])
          cohort.gene$cov <- as.matrix(cohort.gene$cov)[sub,sub,drop=FALSE]
          cohort.gene$cov[is.na(sub),] <- cohort.gene$cov[,is.na(sub)] <- 0
          cohort.gene$maf <- cohort.gene$maf[sub]
          cohort.gene$maf[is.na(sub)] <- -1
          cohort.gene$scores <- cohort.gene$scores[sub]
          cohort.gene$scores[is.na(sub)] <- 0
        }
        # maf == -1 marks SNPs missing in this cohort: count their n as missing.
        n.total[cohort.gene$maf >= 0] <- n.total[cohort.gene$maf >= 0]+cohort.gene$n
        n.miss[cohort.gene$maf < 0] <- n.miss[cohort.gene$maf < 0] + cohort.gene$n
        cohort.gene$maf[cohort.gene$maf < 0] <- 0
        # Inverse-variance pooling: scores and covariance scaled by 1/sey^2;
        # maf accumulates allele COUNTS (2 * maf * n), normalized after the loop.
        mscores <- mscores + cohort.gene$scores/cohort.gene$sey^2
        maf <- maf + 2*cohort.gene$maf*(cohort.gene$n)
        big.cov <- big.cov + cohort.gene$cov/cohort.gene$sey^2
        vary.ave <- vary.ave + max(cohort.gene$n,na.rm=T)*cohort.gene$sey^2
      }else{
        # Cohort did not report this gene at all: its whole sample is missing.
        n.miss <- n.miss + get(cohortNames[[cohort.k]],envir=parent.frame())[[1]]$n
      }
    }
    if(any(maf >0)){
      # Allele counts -> pooled allele frequency, folded to the MINOR allele.
      maf <- maf/(2*n.total)
      maf[is.nan(maf)] <- 0
      maf <- sapply(maf, function(x){min(x,1-x)})
      if( !all(mafRange == c(0,0.5))){
        # Keep only SNPs whose pooled MAF lies in the requested range.
        keep <- (maf >= min(mafRange)) & (maf <= max(mafRange))
        big.cov <- big.cov[keep,keep]
        mscores <- mscores[keep]
        maf <- maf[keep]
      }
    }
    if(length(maf)> 0){
      # Per-SNP weights: w1 for the SKAT component, w2 for the burden component.
      # Each may be a function of MAF, a column name in SNPInfo, or default to 1.
      if(is.function(skat.wts)){
        w1 <- skat.wts(maf)
      } else if(is.character(skat.wts)){
        w1 <- as.numeric(SNPInfo[SNPInfo[,aggregateBy]==gene,skat.wts])
      } else {
        w1 <- rep(1,length(maf))
      }
      if(is.function(burden.wts)){
        w2 <- burden.wts(maf)
      } else if(is.character(burden.wts)){
        w2 <- as.numeric(SNPInfo[SNPInfo[,aggregateBy]==gene,burden.wts])
      } else {
        w2 <- rep(1,length(maf))
      }
      # Monomorphic SNPs (maf == 0) get zero weight in both tests.
      w1 <- ifelse(maf >0, w1,0)
      w2 <- ifelse(maf >0, w2,0)
      ## SKAT statistic Q1 = sum((w1*U)^2) and burden statistic Q2 = (w2'U)^2,
      ## with their corresponding weighted covariance matrices.
      Q.skat <- sum((w1*mscores)^2, na.rm=TRUE)
      V.skat <- (w1)*t(t(big.cov)*as.vector(w1))
      Q.burden <- sum(w2*mscores, na.rm=TRUE)^2
      V.burden <- as.numeric(t(w2)%*%big.cov%*%w2)
      #If burden test is 0, or only 1 SNP in the gene, do SKAT:
      if(sum(maf > 0) ==1 | V.burden ==0){
        lambda <- eigen(zapsmall(V.skat), symmetric = TRUE)$values
        if(any(lambda > 0) & length(lambda) >1) {
          # p-value of a weighted sum of chi-square(1) variables.
          tmpP <- pchisqsum2(Q.skat,lambda=lambda,method=method, acc=1e-7)
          if(tmpP$errflag !=0 ){
            res.numeric[ri,"errflag"] = 1
          } else {
            res.numeric[ri,"errflag"] = 0
          }
          p <- tmpP$p
        } else {
          # Single eigenvalue: exact chi-square(1); otherwise degenerate -> p = 1.
          p <- ifelse(length(lambda) == 1 & all(lambda > 0), stats::pchisq(Q.skat/lambda,df=1,lower.tail=FALSE),1)
          res.numeric[ri,"errflag"] = 0
        }
        res.numeric[ri,"pmin"] = res.numeric[ri,"p"] = p
        res.numeric[ri,"rho"] = 0
      #Else do SKAT-O
      } else {
        skato.res <- skatO_getp(mscores, big.cov, diag(w1), w2, rho, method= method, gene=gene)
        res.numeric[ri,"p"] <- skato.res$actualp
        res.numeric[ri,"pmin"] = skato.res$minp
        res.numeric[ri,"rho"] = skato.res$rho
        res.numeric[ri, "errflag"] = skato.res$errflag
      }
    } else {
      # No polymorphic SNPs left for this gene: report a null result.
      res.numeric[ri,"p"] <- res.numeric[ri,"pmin"] <- 1
      res.numeric[ri,"rho"] <- 0
      res.numeric[ri, "errflag"] <- 0
    }
    res.numeric[ri,"cmaf"] = sum(maf,na.rm=TRUE)
    res.numeric[ri,"nsnps"] = sum(maf!= 0, na.rm =T)
    res.numeric[ri,"nmiss"] = sum(n.miss, na.rm =T)
    if(verbose){
      pb.i <- pb.i+1
      utils::setTxtProgressBar(pb, pb.i)
    }
  }
  if(verbose) close(pb)
  return(cbind(res.strings,res.numeric))
}
skatO_getp <- function(U,V, R, w, rho,method = "davies", gene=NULL){
  ## Internal: compute the SKAT-O p-value for one gene.
  ##Input:
  #U: score vector (length p)
  #V: p x p covariance matrix of the scores
  #R: p x p weight matrix for skat
  #w: burden weights
  #rho: vector of rhos in [0,1]
  #method: method for calculating Normal quadratic form distribution
  #gene: The name of the region - used for error reporting
  ##Output: a list with elements
  #minp: the minimum p-value
  #actualp: the actual p-value
  #rho: the value of rho which gave the minp
  #ps: the whole vector of p-values
  #errflag: 0 if no problem, 1 if quantile issue, 2 if integration issue,
  #         3 if the integration fell back to the Liu approximation
  # Satterthwaite moment-matching: approximate a weighted chi-square sum by a
  # single scaled chi-square (used to bound the root search below).
  satterthwaite <- function(a, df) {
    if (any(df > 1)) {
      a <- rep(a, df)
    }
    tr <- mean(a)
    tr2 <- mean(a^2)/(tr^2)
    list(scale = tr * tr2, df = length(a)/tr2)
  }
  errflag = 0
  Q.skat <- crossprod(R%*%U) # SKAT
  Q.burden <- (t(w)%*%U)^2 # burden
  # SKAT-O family of statistics: convex combinations of SKAT and burden.
  Qs <- (1-rho)*Q.skat + rho*Q.burden
  lambdas <- ps <- NULL
  ps <- numeric(length(rho))
  # For each rho, get the eigenvalues of the corresponding quadratic form and
  # its p-value (falling back to Liu's approximation on numerical failure).
  for(i in 1:length(rho)){
    PC <- eigen((1-rho[i])*crossprod(R)+ rho[i]*outer(w,w),symmetric=TRUE)
    v.sqrt <- with(PC,{ values[values < 0] <- 0; (vectors)%*%diag(sqrt(values))%*%t(vectors) })
    lam <- eigen( zapsmall(v.sqrt%*%V%*%v.sqrt),only.values=TRUE,symmetric=TRUE)$values
    lam <- lam[lam != 0]
    lambdas <- c(lambdas, list( lam ))
    tmpP <- pchisqsum2(Qs[i],lambda=lambdas[[i]],method=method, acc=1e-7)
    if(tmpP$errflag != 0){
      errflag <- 1
      ps[i] <- pchisqsum2(Qs[i],lambda=lambdas[[i]],method="liu")$p
    } else {
      ps[i] <- tmpP$p
    }
  }
  minp <- min(ps)
  # For each rho, find the quantile Ts[i] whose p-value equals minp; these
  # thresholds define the rejection region used in the correction integral.
  Ts <- numeric(length(rho))
  for(i in 1:length(rho)){
    sat <- satterthwaite(lambdas[[i]],rep(1,length(lambdas[[i]])))
    upper <- stats::qchisq(minp/20,df=sat$df,lower.tail=FALSE)*sat$scale
    tmpT <- try(stats::uniroot(function(x){pchisqsum2(x,lambda=lambdas[[i]],method=method,acc=1e-5)$p- minp }, interval=c(1e-10,upper))$root, silent = TRUE)
    # FIX: use inherits() instead of `class(x) == "try-error"` -- class() may
    # return a vector of length > 1, which errors in if() since R 4.2.
    if(inherits(tmpT, "try-error")){
      #warning(paste0("Problem finding quantiles in gene ", gene, ", p-value may not be accurate"))
      Ts[i] <- Qs[i]
      errflag <- 2
    } else {
      Ts[i] <- tmpT
    }
  }
  # Distribution of the SKAT component conditional on the burden component:
  # V.cond is the Schur complement of v22 in the joint covariance.
  v11 <- R%*%V%*%R
  v12 <- R%*%V%*%w
  v22 <- as.numeric(t(w)%*%V%*%w)
  V.cond <- v11 - outer( v12, v12 )/v22
  lambda.cond <- eigen(V.cond,only.values=TRUE,symmetric=TRUE)$values
  EDec <- eigen(V.cond,symmetric=TRUE)
  # Pseudo-inverse square root of V.cond (zero out non-positive eigenvalues).
  D <- zapsmall(diag(EDec$values))
  diag(D)[zapsmall(diag(D)) > 0] <- 1/sqrt(diag(D)[zapsmall(diag(D)) > 0])
  diag(D)[diag(D) <= 0 ] <- 0
  #meanvec <- t(EDec$vectors)%*%D%*%(EDec$vectors)%*%(v12)/c(v22)
  meanvec <- as.numeric(D%*%t(EDec$vectors)%*%(v12)/c(v22))
  # Conditional survival function: P(all statistics below their thresholds
  # | burden component = x), evaluated via a non-central chi-square sum.
  Fcond <- function(x,method){
    pp <- qmax <- numeric(length(x))
    for(i in 1:length(x)){
      qmax[i] <- min( ( (Ts[rho !=1 ] - rho[rho != 1]*x[i])/(1-rho)[rho !=1]) )
      if(any(x[i] > Ts[rho == 1]) ){
        pp[i] <- 1
      } else {
        p.tmp <- pchisqsum2(qmax[i], lambda=lambda.cond, delta = meanvec^2*x[i], method = method, acc=min(minp,1e-5) )
        if(p.tmp$errflag != 0) stop("Error in integration! using Liu p-value")
        pp[i] = p.tmp$p
      }
    }
    return(pp)
  }
  # Integrate the conditional probability over the chi-square(1) density of the
  # burden component to get the multiple-testing-corrected SKAT-O p-value.
  if(any(lambda.cond > 0)){
    integrand <- function(x){stats::dchisq(x,1)*Fcond(x*v22,method=method)}
    integral <- try(stats::integrate(Vectorize(integrand),lower=0,upper=Inf, subdivisions = 200L, rel.tol=min(minp/100,1e-4)), silent = TRUE)
    # FIX: inherits() for the same R >= 4.2 condition-length reason as above.
    if (inherits(integral, "try-error")) {
      integrand <- function(x){stats::dchisq(x,1)*Fcond(x*v22,method="liu")}
      integral <- stats::integrate(Vectorize(integrand),lower=0,upper=Inf)
      errflag <- 3
    } else {
      if(integral$message != "OK") errflag <- 2
    }
    actualp <- integral[1]$value
  } else {
    # Degenerate conditional distribution: no correction possible.
    actualp = minp
  }
  return(list("actualp"= actualp, "minp" = minp, "rho" = rho[which.min(ps)], "ps" = ps, "errflag" = errflag))
}
|
/R/skatOMeta.R
|
no_license
|
DavisBrian/seqMeta
|
R
| false
| false
| 16,959
|
r
|
#' @title Combine SKAT-O analyses from one or more studies.
#'
#' @description Takes as input `seqMeta` objects (from e.g.
#' \code{\link{prepScores}}), and meta analyzes them, using SKAT-O. See the
#' package vignette for more extensive documentation.
#'
#' @inheritParams singlesnpMeta
#' @inheritParams burdenMeta
#' @param skat.wts Either a function to calculate testing weights for SKAT, or a
#' character specifying a vector of weights in the SNPInfo file. For skatOMeta
#' the default are the `beta' weights.
#' @param burden.wts Either a function to calculate weights for the burden test,
#' or a character specifying a vector of weights in the SNPInfo file. For
#' skatOMeta the default are the T1 weights.
#' @param rho A sequence of values that specify combinations of SKAT and a burden test to be considered. Default is c(0,1), which considers SKAT and a burden test.
#' @param method p-value calculation method. Should be one of 'saddlepoint', 'integration', or 'liu'.
#'
#' @details \code{skatOMeta()} implements the SKAT-Optimal test, which picks the
#' `best' combination of SKAT and a burden test, and then corrects for the
#' flexibility afforded by this choice. Specifically, if the SKAT statistic is
#' Q1, and the squared score for a burden test is Q2, SKAT-O considers tests
#' of the form (1-rho)*Q1 + rho*Q2, where rho between 0 and 1. The values of
#' rho are specified by the user using the argument \code{rho}. In the
#' simplest form, which is the default, SKAT-O computes a SKAT test and a T1
#' test, and reports the minimum p-value, corrected for multiple testing. See
#' the vignette or the accompanying references for more details.
#'
#' If there is a single variant in the gene, or the burden test is undefined
#' (e.g. there are no rare alleles for the T1 test), SKAT is reported (i.e.
#' rho=0).
#'
#' Note 1: the SKAT package uses the same weights for both SKAT and the burden
#' test, which this function does not.
#'
#' Note 2: all studies must use coordinated SNP Info files - that is, the SNP
#' names and gene definitions must be the same.
#'
#' Note 3: The method of p-value calculation is much more important here than
#' in SKAT. The `integration' method is fast and typically accurate for
#' p-values larger than 1e-9. The saddlepoint method is slower, but has higher
#' relative accuracy.
#'
#' Note 4: Since p-value calculation can be slow for SKAT-O, and less accurate
#' for small p-values, a reasonable alternative would be to first calculate
#' SKAT and a burden test, and record the minimum p-value, which is a lower
#' bound for the SKAT-O p-value. This can be done quickly and accurately.
#' Then, one would only need to perform SKAT-O on the small subset of genes
#' that are potentially interesting.
#'
#' Please see the package vignette for more details.
#'
#' @return a data frame with the following columns:
#' \item{gene}{Name of the gene or unit of aggregation being meta analyzed}
#' \item{p}{p-value of the SKAT-O test.}
#' \item{pmin}{The minimum of the p-values considered by SKAT-O (not corrected for multiple testing!).}
#' \item{rho}{The value of rho which gave the smallest p-value.}
#' \item{cmaf}{The cumulative minor allele frequency.}
#' \item{nmiss}{The number of `missing` SNPs. For a gene with a single SNP
#' this is the number of individuals which do not contribute to the analysis,
#' due to studies that did not report results for that SNP. For a gene with
#' multiple SNPs, is totalled over the gene. }
#' \item{nsnps}{The number of SNPs in the gene.}
#' \item{errflag}{An indicator of possible error: 0 suggests no error, > 0
#' indicates probable loss of accuracy.}
#'
#' @references Wu, M.C., Lee, S., Cai, T., Li, Y., Boehnke, M., and Lin, X.
#' (2011) Rare Variant Association Testing for Sequencing Data Using the
#' Sequence Kernel Association Test (SKAT). American Journal of Human
#' Genetics.
#'
#' Lee, S. and Wu, M.C. and Lin, X. (2012) Optimal tests for rare variant
#' effects in sequencing association studies. Biostatistics.
#'
#' @author Arie Voorman, Jennifer Brody
#' @seealso
#' \code{\link{skatOMeta}}
#' \code{\link{prepScores}}
#' \code{\link{burdenMeta}}
#' \code{\link{singlesnpMeta}}
#'
#' @examples
#' \dontrun{
#' ### load example data for 2 studies
#' data(seqMetaExample)
#'
#' ####run on each study:
#' cohort1 <- prepScores(Z=Z1, y~sex+bmi, SNPInfo = SNPInfo, data =pheno1)
#' cohort2 <- prepScores(Z=Z2, y~sex+bmi, SNPInfo = SNPInfo, kins=kins, data=pheno2)
#'
#' #### combine results:
#' ##skat-O with default settings:
#' out1 <- skatOMeta(cohort1, cohort2, SNPInfo = SNPInfo, method = "int")
#' head(out1)
#'
#' ##skat-O, using a large number of combinations between SKAT and T1 tests:
#' out2 <- skatOMeta(cohort1, cohort2, rho=seq(0,1,length=11), SNPInfo=SNPInfo, method="int")
#' head(out2)
#'
#' #rho = 0 indicates SKAT gave the smaller p-value (or the T1 is undefined)
#' #rho=1 indicates the burden test was chosen
#' # 0 < rho < 1 indicates some other value was chosen
#' #notice that most of the time either the SKAT or T1 is chosen
#' table(out2$rho)
#'
#' ##skat-O with beta-weights used in the burden test:
#' out3 <- skatOMeta(cohort1,cohort2, burden.wts = function(maf){dbeta(maf,1,25) },
#' rho=seq(0,1,length=11),SNPInfo = SNPInfo, method="int")
#' head(out3)
#' table(out3$rho)
#'
#' ########################
#' ####binary data
#' cohort1 <- prepScores(Z=Z1, ybin~1, family=binomial(), SNPInfo=SNPInfo, data=pheno1)
#' out.bin <- skatOMeta(cohort1, SNPInfo = SNPInfo, method="int")
#' head(out.bin)
#'
#' ####################
#' ####survival data
#' cohort1 <- prepCox(Z=Z1, Surv(time,status)~strata(sex)+bmi, SNPInfo=SNPInfo,
#' data=pheno1)
#' out.surv <- skatOMeta(cohort1, SNPInfo = SNPInfo, method="int")
#' head(out.surv)
#'
#' ##########################################
#' ###Compare with SKAT and T1 tests on their own:
#' cohort1 <- prepScores(Z=Z1, y~sex+bmi, SNPInfo=SNPInfo, data=pheno1)
#' cohort2 <- prepScores(Z=Z2, y~sex+bmi, SNPInfo=SNPInfo, kins=kins, data=pheno2)
#'
#' out.skat <- skatMeta(cohort1,cohort2,SNPInfo=SNPInfo)
#' out.t1 <- burdenMeta(cohort1,cohort2, wts= function(maf){as.numeric(maf <= 0.01)},
#' SNPInfo=SNPInfo)
#'
#' #plot results
#' #We compare the minimum p-value of SKAT and T1, adjusting for multiple tests
#' #using the Sidak correction, to that of SKAT-O.
#'
#' par(mfrow=c(1,3))
#' pseq <- seq(0,1,length=100)
#' plot(y=out.skat$p, x=out1$p,xlab="SKAT-O p-value", ylab="SKAT p-value", main ="SKAT-O vs SKAT")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#'
#' plot(y=out.t1$p, x=out1$p,xlab="SKAT-O p-value", ylab="T1 p-value", main ="SKAT-O vs T1")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#'
#' plot(y=pmin(out.t1$p, out.skat$p,na.rm=T), x=out1$p,xlab="SKAT-O p-value",
#' ylab="min(T1,SKAT) p-value", main ="min(T1,SKAT) vs SKAT-O")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#' legend("bottomright", lwd=2,lty=2,col=2,legend="Bonferroni correction")
#' }
#'
#' @export
skatOMeta <- function(..., SNPInfo=NULL, skat.wts=function(maf){stats::dbeta(maf,1,25)}, burden.wts=function(maf){as.numeric(maf <= 0.01) }, rho=c(0,1), method=c("integration", "saddlepoint", "liu"), snpNames="Name", aggregateBy="gene", mafRange=c(0,0.5), verbose=FALSE) {
# Capture the unevaluated call so the names of the seqMeta objects passed via
# ... can be recovered and looked up in the caller's environment below.
cl <- match.call(expand.dots = FALSE)
if(is.null(SNPInfo)){
warning("No SNP Info file provided: loading the Illumina HumanExome BeadChip. See ?SNPInfo for more details")
load(paste(find.package("seqMeta"), "data", "SNPInfo.rda",sep = "/"))
aggregateBy = "SKATgene"
} else {
# Validate/deduplicate the SNPInfo map; weight columns are kept when the
# weights were supplied as column names rather than functions.
SNPInfo <- prepSNPInfo(SNPInfo, snpNames, aggregateBy, wt1=skat.wts, wt2=burden.wts)
}
if(any(rho >1 | rho < 0 ) ) stop("rho must be between 0 and 1")
method <- match.arg(method)
#if( !(method %in% c("davies","farebrother","imhof","liu")) ) stop("Method specified is not valid! See documentation")
genelist <- stats::na.omit(unique(SNPInfo[,aggregateBy]))
# Recover the seqMeta objects by name from the calling frame and check classes.
cohortNames <- lapply(cl[[2]],as.character)
ncohort <- length(cohortNames)
ev <- parent.frame()
classes <- unlist(lapply(cohortNames,function(name){class(get(name,envir=ev))}))
if(!all(classes == "seqMeta" | classes == "skatCohort") ){
stop("an argument to ... is not a seqMeta object!")
}
# One result row per gene: p-values, optimal rho, cumulative MAF, SNP counts,
# and an error flag describing any numerical issues.
res.strings <- data.frame("gene"=genelist,stringsAsFactors=FALSE)
res.numeric <- matrix(NA, nrow= nrow(res.strings),ncol = length(c("p","pmin","rho","cmaf","nmiss", "nsnps", "errflag")))
colnames(res.numeric) <- c("p","pmin","rho","cmaf","nmiss", "nsnps","errflag")
if(verbose){
cat("\n Meta Analyzing... Progress:\n")
pb <- utils::txtProgressBar(min = 0, max = length(genelist), style = 3)
pb.i <- 0
}
ri <- 0
snp.names.list <- split(SNPInfo[,snpNames],SNPInfo[,aggregateBy])
for(gene in genelist){
ri <- ri+1
# Accumulators for pooling this gene's scores/covariances across cohorts.
nsnps.sub <- length(snp.names.list[[gene]])
mscores <- maf <- numeric(nsnps.sub)
big.cov <- Matrix(0, nsnps.sub,nsnps.sub)
n.total <- numeric(nsnps.sub)
n.miss <- numeric(nsnps.sub)
vary.ave <- 0
for(cohort.k in 1:ncohort){
cohort.gene <- get(cohortNames[[cohort.k]],envir=ev)[[gene]]
if(!is.null(cohort.gene)){
# Align this cohort's SNPs with the SNPInfo order; SNPs absent from the
# cohort get zero scores/covariance and a sentinel MAF of -1 (missing).
sub <- match(snp.names.list[[gene]],colnames(cohort.gene$cov))
if(any(is.na(sub)) | any(sub != 1:length(sub), na.rm=TRUE) | length(cohort.gene$maf) > nsnps.sub){
#if(any(is.na(sub))) warning("Some SNPs were not in SNPInfo file for gene ", gene," and cohort ",names(cohorts)[cohort.k])
cohort.gene$cov <- as.matrix(cohort.gene$cov)[sub,sub,drop=FALSE]
cohort.gene$cov[is.na(sub),] <- cohort.gene$cov[,is.na(sub)] <- 0
cohort.gene$maf <- cohort.gene$maf[sub]
cohort.gene$maf[is.na(sub)] <- -1
cohort.gene$scores <- cohort.gene$scores[sub]
cohort.gene$scores[is.na(sub)] <- 0
}
n.total[cohort.gene$maf >= 0] <- n.total[cohort.gene$maf >= 0]+cohort.gene$n
n.miss[cohort.gene$maf < 0] <- n.miss[cohort.gene$maf < 0] + cohort.gene$n
cohort.gene$maf[cohort.gene$maf < 0] <- 0
# Pool scores and covariances, inverse-variance weighted by the cohort sey.
mscores <- mscores + cohort.gene$scores/cohort.gene$sey^2
maf <- maf + 2*cohort.gene$maf*(cohort.gene$n)
big.cov <- big.cov + cohort.gene$cov/cohort.gene$sey^2
vary.ave <- vary.ave + max(cohort.gene$n,na.rm=TRUE)*cohort.gene$sey^2
}else{
# Gene entirely absent from this cohort: count all its subjects as missing.
n.miss <- n.miss + get(cohortNames[[cohort.k]],envir=parent.frame())[[1]]$n
}
}
if(any(maf >0)){
# Convert pooled allele counts to a MAF, folded to the minor allele.
maf <- maf/(2*n.total)
maf[is.nan(maf)] <- 0
maf <- sapply(maf, function(x){min(x,1-x)})
if( !all(mafRange == c(0,0.5))){
keep <- (maf >= min(mafRange)) & (maf <= max(mafRange))
big.cov <- big.cov[keep,keep]
mscores <- mscores[keep]
maf <- maf[keep]
}
}
if(length(maf)> 0){
# Resolve SKAT (w1) and burden (w2) weights: a function of MAF, a SNPInfo
# column name, or flat weights as the fallback.
if(is.function(skat.wts)){
w1 <- skat.wts(maf)
} else if(is.character(skat.wts)){
w1 <- as.numeric(SNPInfo[SNPInfo[,aggregateBy]==gene,skat.wts])
} else {
w1 <- rep(1,length(maf))
}
if(is.function(burden.wts)){
w2 <- burden.wts(maf)
} else if(is.character(burden.wts)){
w2 <- as.numeric(SNPInfo[SNPInfo[,aggregateBy]==gene,burden.wts])
} else {
w2 <- rep(1,length(maf))
}
# Monomorphic SNPs contribute nothing to either statistic.
w1 <- ifelse(maf >0, w1,0)
w2 <- ifelse(maf >0, w2,0)
##
Q.skat <- sum((w1*mscores)^2, na.rm=TRUE)
V.skat <- (w1)*t(t(big.cov)*as.vector(w1))
Q.burden <- sum(w2*mscores, na.rm=TRUE)^2
V.burden <- as.numeric(t(w2)%*%big.cov%*%w2)
#If burden test is 0, or only 1 SNP in the gene, do SKAT:
if(sum(maf > 0) ==1 | V.burden ==0){
lambda <- eigen(zapsmall(V.skat), symmetric = TRUE)$values
if(any(lambda > 0) & length(lambda) >1) {
tmpP <- pchisqsum2(Q.skat,lambda=lambda,method=method, acc=1e-7)
if(tmpP$errflag !=0 ){
res.numeric[ri,"errflag"] = 1
} else {
res.numeric[ri,"errflag"] = 0
}
p <- tmpP$p
} else {
p <- ifelse(length(lambda) == 1 & all(lambda > 0), stats::pchisq(Q.skat/lambda,df=1,lower.tail=FALSE),1)
res.numeric[ri,"errflag"] = 0
}
res.numeric[ri,"pmin"] = res.numeric[ri,"p"] = p
res.numeric[ri,"rho"] = 0
#Else do SKAT-O
} else {
skato.res <- skatO_getp(mscores, big.cov, diag(w1), w2, rho, method= method, gene=gene)
res.numeric[ri,"p"] <- skato.res$actualp
res.numeric[ri,"pmin"] = skato.res$minp
res.numeric[ri,"rho"] = skato.res$rho
res.numeric[ri, "errflag"] = skato.res$errflag
}
} else {
# No polymorphic SNPs left after filtering: the test is degenerate.
res.numeric[ri,"p"] <- res.numeric[ri,"pmin"] <- 1
res.numeric[ri,"rho"] <- 0
res.numeric[ri, "errflag"] <- 0
}
res.numeric[ri,"cmaf"] = sum(maf,na.rm=TRUE)
res.numeric[ri,"nsnps"] = sum(maf!= 0, na.rm =TRUE)
res.numeric[ri,"nmiss"] = sum(n.miss, na.rm =TRUE)
if(verbose){
pb.i <- pb.i+1
utils::setTxtProgressBar(pb, pb.i)
}
}
if(verbose) close(pb)
return(cbind(res.strings,res.numeric))
}
skatO_getp <- function(U,V, R, w, rho,method = "davies", gene=NULL){
##Input:
#U: score vector (length p)
#V: p x p covariance matrix of the scores
#R: p x p weight matrix for skat
#w: burden weights
#rho: vector of rhos in [0,1]
#method: method for calculating Normal quadratic form distribution
#gene: The name of the region - used for error reporting
##Output: a list with elements
#minp: the minimum p-value
#actualp: the actual p-value
#rho: the value of rho which gave the minp
#ps: the whole vector of p-values
#errflag: 0 if no problem, 1 if quantile issue, 2 if integration issue
# Satterthwaite approximation: match a scaled chi-square to the mixture's
# first two moments (used only to bracket the quantile search below).
satterthwaite <- function(a, df) {
if (any(df > 1)) {
a <- rep(a, df)
}
tr <- mean(a)
tr2 <- mean(a^2)/(tr^2)
list(scale = tr * tr2, df = length(a)/tr2)
}
errflag = 0
Q.skat <- crossprod(R%*%U) # SKAT
Q.burden <- (t(w)%*%U)^2 # burden
Qs <- (1-rho)*Q.skat + rho*Q.burden
lambdas <- ps <- NULL
ps <- numeric(length(rho))
# For each rho, compute the mixture weights (eigenvalues) of the rho-combined
# statistic and its p-value; fall back to the Liu approximation on failure.
for(i in 1:length(rho)){
PC <- eigen((1-rho[i])*crossprod(R)+ rho[i]*outer(w,w),symmetric=TRUE)
v.sqrt <- with(PC,{ values[values < 0] <- 0; (vectors)%*%diag(sqrt(values))%*%t(vectors) })
lam <- eigen( zapsmall(v.sqrt%*%V%*%v.sqrt),only.values=TRUE,symmetric=TRUE)$values
lam <- lam[lam != 0]
lambdas <- c(lambdas, list( lam ))
tmpP <- pchisqsum2(Qs[i],lambda=lambdas[[i]],method=method, acc=1e-7)
if(tmpP$errflag != 0){
errflag <- 1
ps[i] <- pchisqsum2(Qs[i],lambda=lambdas[[i]],method="liu")$p
} else {
ps[i] <- tmpP$p
}
}
minp <- min(ps)
# Invert each per-rho distribution at minp to get the critical values Ts used
# in the SKAT-O minimum-p combination.
Ts <- numeric(length(rho))
for(i in 1:length(rho)){
sat <- satterthwaite(lambdas[[i]],rep(1,length(lambdas[[i]])))
upper <- stats::qchisq(minp/20,df=sat$df,lower.tail=FALSE)*sat$scale
tmpT <- try(stats::uniroot(function(x){pchisqsum2(x,lambda=lambdas[[i]],method=method,acc=1e-5)$p- minp }, interval=c(1e-10,upper))$root, silent = TRUE)
# inherits() is the robust way to detect try() failures: class(x) may have
# length > 1, which makes `class(x) == "try-error"` unreliable (and an error
# inside if() under R >= 4.2).
if(inherits(tmpT, "try-error")){
#warning(paste0("Problem finding quantiles in gene ", gene, ", p-value may not be accurate"))
Ts[i] <- Qs[i]
errflag <- 2
} else {
Ts[i] <- tmpT
}
}
# Conditional distribution of the SKAT component given the burden component.
v11 <- R%*%V%*%R
v12 <- R%*%V%*%w
v22 <- as.numeric(t(w)%*%V%*%w)
V.cond <- v11 - outer( v12, v12 )/v22
lambda.cond <- eigen(V.cond,only.values=TRUE,symmetric=TRUE)$values
EDec <- eigen(V.cond,symmetric=TRUE)
D <- zapsmall(diag(EDec$values))
diag(D)[zapsmall(diag(D)) > 0] <- 1/sqrt(diag(D)[zapsmall(diag(D)) > 0])
diag(D)[diag(D) <= 0 ] <- 0
#meanvec <- t(EDec$vectors)%*%D%*%(EDec$vectors)%*%(v12)/c(v22)
meanvec <- as.numeric(D%*%t(EDec$vectors)%*%(v12)/c(v22))
# Conditional survival function of the combined statistic, evaluated at the
# per-rho thresholds Ts for a given burden value x.
Fcond <- function(x,method){
pp <- qmax <- numeric(length(x))
for(i in 1:length(x)){
qmax[i] <- min( ( (Ts[rho !=1 ] - rho[rho != 1]*x[i])/(1-rho)[rho !=1]) )
if(any(x[i] > Ts[rho == 1]) ){
pp[i] <- 1
} else {
p.tmp <- pchisqsum2(qmax[i], lambda=lambda.cond, delta = meanvec^2*x[i], method = method, acc=min(minp,1e-5) )
if(p.tmp$errflag != 0) stop("Error in integration! using Liu p-value")
pp[i] = p.tmp$p
}
}
return(pp)
}
# Integrate over the burden component to get the overall SKAT-O p-value;
# retry with the cheaper Liu approximation if the integration fails.
if(any(lambda.cond > 0)){
integrand <- function(x){stats::dchisq(x,1)*Fcond(x*v22,method=method)}
integral <- try(stats::integrate(Vectorize(integrand),lower=0,upper=Inf, subdivisions = 200L, rel.tol=min(minp/100,1e-4)), silent = TRUE)
if (inherits(integral, "try-error")) {
integrand <- function(x){stats::dchisq(x,1)*Fcond(x*v22,method="liu")}
integral <- stats::integrate(Vectorize(integrand),lower=0,upper=Inf)
errflag <- 3
} else {
if(integral$message != "OK") errflag <- 2
}
actualp <- integral[1]$value
} else {
#cat(".")
actualp = minp
}
return(list("actualp"= actualp, "minp" = minp, "rho" = rho[which.min(ps)], "ps" = ps, "errflag" = errflag))
}
|
##to run, can change number of sims, year, name of file to save tourneysims
#then run sim brackets file, where you can change numBrackets and file name to save brackets
#then run optimize brackets file where you can optimize brackets
##SET PARAMETERS/READ DATA######
year<-2019
sims<-1000
name<-paste0(year,"/TourneySims_", sims,"sims.Rda")
# Backtest against actual results only for completed (pre-2019) seasons.
backtest <- year != 2019
# TRUE while play-in games are still undecided (affects team labels later).
playInTbd <- TRUE
load("data/game-data.RData")
source("functions.R", encoding = "UTF-8")
#if you use kaggle data files, this is where you could put your own projections...id like to add this to the shiny app but it's just not gonna happen
samplesubmission<-read.csv(paste0(year, "/Kaggle Submission.csv"), stringsAsFactors = FALSE)
# Normalize column-name variants across Kaggle submission formats.
colnames(samplesubmission)[colnames(samplesubmission)%in% c("id", "Id")]<-"ID"
colnames(samplesubmission)[colnames(samplesubmission)%in% c("pred", "PRED")]<-"Pred"
# The ID column has the form "season_team1_team2" (team ids sorted ascending).
samplesubmission$Team<-as.numeric(sapply(strsplit(samplesubmission$ID, "_"), `[[`, 2))
samplesubmission$OPP<-as.numeric(sapply(strsplit(samplesubmission$ID, "_"), `[[`, 3))
samplesubmission$Team_Full<-Teams$Team_Full[match(samplesubmission$Team, Teams$TeamID)]
samplesubmission$OPP_Full<-Teams$Team_Full[match(samplesubmission$OPP, Teams$TeamID)]
head(samplesubmission)
#need to get simulate winners in chronological order
TourneySlots$Round<-ifelse(substring(TourneySlots$Slot, 1, 1)=="R", substring(TourneySlots$Slot, 2, 2), 0)
TourneySlots<-TourneySlots[order(TourneySlots$Round, TourneySlots$Season, decreasing = FALSE), ]
TourneySlots<-TourneySlots[TourneySlots$Season==year,]
#handling play-in games, impute actual into simulation prediction iff game has occured
losing_teams<-c()
if(year==2018){
losing_teams<-c("Ucla", "Long Island", "Arizona State", "North Carolina Central") #
} else if (year==2019){
losing_teams<-c("Prairie View A&m", "Temple", "North Carolina Central", "St Johns")
}
if(length(losing_teams)>=1){
# Force completed play-in results: known losers get win probability 0.
samplesubmission$Pred[samplesubmission$Team_Full%in%losing_teams]<-0
samplesubmission$Pred[samplesubmission$OPP_Full%in%losing_teams]<-1
}
###GET-WINNER FUNCTIONS####
# Simulate the winner of the game feeding slot `row` of TourneySlots, using
# the bracket state accumulated so far in the global `resultDF`.
# Returns list(advances-to slot, winner id, loser id).
getSimResult<-function(row){
# Teams currently occupying the two feeder slots.
team1<-as.numeric(resultDF$Team[resultDF$Slot==TourneySlots$StrongSeed[row]])
team2<-as.numeric(resultDF$Team[resultDF$Slot==TourneySlots$WeakSeed[row]])
# samplesubmission stores each matchup once, keyed with Team < OPP;
# Pred is the win probability of the lower-numbered team.
low<-min(team1, team2)
high<-max(team1, team2)
prob<-samplesubmission$Pred[samplesubmission$Team==low & samplesubmission$OPP==high]
if(runif(1)>prob){
winner<-high
loser<-low
} else {
winner<-low
loser<-high
}
list(TourneySlots$Slot[row], winner, loser)
}
# Look up the actual (historical) winner of the game feeding slot `row`.
# Returns list(advances-to slot, winner id, loser id); stops with a clear
# message if the result table does not contain exactly one matching game
# (the original code would fail with an obscure "object 'winner' not found").
getActualResult<-function(row){
team1<-as.numeric(resultDF$Team[resultDF$Slot==TourneySlots$StrongSeed[row]])
team2<-as.numeric(resultDF$Team[resultDF$Slot==TourneySlots$WeakSeed[row]])
actual<-NCAATourneyDetailedResults[NCAATourneyDetailedResults$WTeamID%in% c(team1, team2)&
NCAATourneyDetailedResults$LTeamID%in% c(team1, team2) & NCAATourneyDetailedResults$Season==year,]
if(nrow(actual)!=1){
stop("Expected exactly one tournament result for teams ", team1, " and ", team2,
" in season ", year, "; found ", nrow(actual), call. = FALSE)
}
if(actual$WTeamID==team1){
winner<-team1
loser<-team2
} else {
winner<-team2
loser<-team1
}
# advances-to, winner,loser
list(TourneySlots$Slot[row], winner,loser)
}
##SIMULATE TOURNAMENT#####
# Run `sims` independent bracket simulations; each one is a data.frame with
# one row per initial seed plus one row per played game.
tourneySims<-list();length(tourneySims)<-sims
for(j in seq_len(sims)){
#go through matchups sequentially and store results to resultDF
resultDF<-data.frame(Slot=TourneySeeds$Seed[TourneySeeds$Season==year],
                     Team=TourneySeeds$Team[TourneySeeds$Season==year],
                     Loser=NA, Payout=NA)
# NOTE(review): growing resultDF one row at a time is O(n^2); acceptable for
# a 63-game bracket but would need preallocation at larger scale.
for(row in seq_len(nrow(TourneySlots))){
result<-getSimResult(row)
resultDF[nrow(resultDF)+1,1:3]<-c(result[[1]], result[[2]], result[[3]])
}
resultDF$Sim<-j
# Lightweight progress indicator every 100 simulations.
if(j%%100==0){
print(j)
}
tourneySims[[j]]<-resultDF
}
# For backtests, append the actual tournament outcome as simulation sims + 1.
if(backtest){
resultDF<-data.frame(Slot=TourneySeeds$Seed[TourneySeeds$Season==year],
                     Team=TourneySeeds$Team[TourneySeeds$Season==year],
                     Loser=NA, Payout=NA)
for(row in seq_len(nrow(TourneySlots))){
result<-getActualResult(row)
resultDF[nrow(resultDF)+1,1:3]<-c(result[[1]], result[[2]], result[[3]])
}
resultDF$Sim<-sims+1
tourneySims[[length(tourneySims)+1]]<-resultDF
}
######SAVE DATA###########
# Flatten the list of per-simulation data.frames into one long data.frame.
# NOTE(review): ldply() is from plyr, which this script never attaches --
# presumably loaded via functions.R or game-data.RData; verify.
tourneySims<-ldply(tourneySims, data.frame)
# Seed numbers for winner and loser (strip region letters, e.g. "W01" -> 1).
tourneySims$team_seed<-as.numeric(gsub("\\D", "",TourneySeeds$Seed[TourneySeeds$Season==year][match(tourneySims$Team,TourneySeeds$TeamID[TourneySeeds$Season==year])] ))
tourneySims$loser_seed<-as.numeric(gsub("\\D", "",TourneySeeds$Seed[TourneySeeds$Season==year][match(tourneySims$Loser,TourneySeeds$TeamID[TourneySeeds$Season==year])] ))
# Round label from the slot prefix; play-in slots (region letters W/X/Y/Z)
# become round 0 and are then dropped, keeping rounds 1-6.
tourneySims$Round<-substr(tourneySims$Slot, 1, 2)
tourneySims$Round[grepl("W|X|Y|Z", tourneySims$Round)]<-0
tourneySims<-tourneySims[as.numeric(gsub("R", "",tourneySims$Round))>=1,]
tourneySims$Team_Full<-Teams$Team_Full[match(tourneySims$Team, Teams$TeamID)]
#line up with espn ownership data: undecided play-in pairs share a combined label
if(playInTbd && year==2018){
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Arizona State", "Syracuse")]<-"Asu/sy"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("St Bonaventure", "Ucla")]<-"Bon/la"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Long Island", "Radford")]<-"Liu/rad"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("North Carolina Central", "Texas Southern")]<-"Ncc/ts"
} else if (playInTbd && year==2019){
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Arizona State", "St Johns")]<-"Asu/sju"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Belmont", "Temple")]<-"Bel/tem"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Fairleigh Dickinson", "Prairie View A&m")]<-"Fdu/pv"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("North Dakota State", "North Carolina Central")]<-"Nds/ncc"
}
save(tourneySims, file=name)
##inspect results: per-team probability of reaching each round
inspect<-as.data.frame.matrix(table(tourneySims$Team_Full[tourneySims$Sim<=sims], tourneySims$Round[tourneySims$Sim<=sims])/sims)
inspect[order(inspect$R6,inspect$R5,inspect$R4,inspect$R3, inspect$R2, decreasing = TRUE), ]
|
/1-simulate-tournament.R
|
no_license
|
cluelessgumshoe/dlm1223-march-madness-1
|
R
| false
| false
| 6,742
|
r
|
##to run, can change number of sims, year, name of file to save tourneysims
#then run sim brackets file, where you can change numBrackets and file name to save brackets
#then run optimize brackets file where you can optimize brackets
##SET PARAMETERS/READ DATA######
year<-2019
sims<-1000
name<-paste0(year,"/TourneySims_", sims,"sims.Rda")
# Backtest against actual results only for completed (pre-2019) seasons.
backtest <- year != 2019
# TRUE while play-in games are still undecided (affects team labels later).
playInTbd <- TRUE
load("data/game-data.RData")
source("functions.R", encoding = "UTF-8")
#if you use kaggle data files, this is where you could put your own projections...id like to add this to the shiny app but it's just not gonna happen
samplesubmission<-read.csv(paste0(year, "/Kaggle Submission.csv"), stringsAsFactors = FALSE)
# Normalize column-name variants across Kaggle submission formats.
colnames(samplesubmission)[colnames(samplesubmission)%in% c("id", "Id")]<-"ID"
colnames(samplesubmission)[colnames(samplesubmission)%in% c("pred", "PRED")]<-"Pred"
# The ID column has the form "season_team1_team2" (team ids sorted ascending).
samplesubmission$Team<-as.numeric(sapply(strsplit(samplesubmission$ID, "_"), `[[`, 2))
samplesubmission$OPP<-as.numeric(sapply(strsplit(samplesubmission$ID, "_"), `[[`, 3))
samplesubmission$Team_Full<-Teams$Team_Full[match(samplesubmission$Team, Teams$TeamID)]
samplesubmission$OPP_Full<-Teams$Team_Full[match(samplesubmission$OPP, Teams$TeamID)]
head(samplesubmission)
#need to get simulate winners in chronological order
TourneySlots$Round<-ifelse(substring(TourneySlots$Slot, 1, 1)=="R", substring(TourneySlots$Slot, 2, 2), 0)
TourneySlots<-TourneySlots[order(TourneySlots$Round, TourneySlots$Season, decreasing = FALSE), ]
TourneySlots<-TourneySlots[TourneySlots$Season==year,]
#handling play-in games, impute actual into simulation prediction iff game has occured
losing_teams<-c()
if(year==2018){
losing_teams<-c("Ucla", "Long Island", "Arizona State", "North Carolina Central") #
} else if (year==2019){
losing_teams<-c("Prairie View A&m", "Temple", "North Carolina Central", "St Johns")
}
if(length(losing_teams)>=1){
# Force completed play-in results: known losers get win probability 0.
samplesubmission$Pred[samplesubmission$Team_Full%in%losing_teams]<-0
samplesubmission$Pred[samplesubmission$OPP_Full%in%losing_teams]<-1
}
###GET-WINNER FUNCTIONS####
# Simulate the winner of the game feeding slot `row` of TourneySlots, using
# the bracket state accumulated so far in the global `resultDF`.
# Returns list(advances-to slot, winner id, loser id).
getSimResult<-function(row){
# Teams currently occupying the two feeder slots.
team1<-as.numeric(resultDF$Team[resultDF$Slot==TourneySlots$StrongSeed[row]])
team2<-as.numeric(resultDF$Team[resultDF$Slot==TourneySlots$WeakSeed[row]])
# samplesubmission stores each matchup once, keyed with Team < OPP;
# Pred is the win probability of the lower-numbered team.
low<-min(team1, team2)
high<-max(team1, team2)
prob<-samplesubmission$Pred[samplesubmission$Team==low & samplesubmission$OPP==high]
if(runif(1)>prob){
winner<-high
loser<-low
} else {
winner<-low
loser<-high
}
list(TourneySlots$Slot[row], winner, loser)
}
# Look up the actual (historical) winner of the game feeding slot `row`.
# Returns list(advances-to slot, winner id, loser id); stops with a clear
# message if the result table does not contain exactly one matching game
# (the original code would fail with an obscure "object 'winner' not found").
getActualResult<-function(row){
team1<-as.numeric(resultDF$Team[resultDF$Slot==TourneySlots$StrongSeed[row]])
team2<-as.numeric(resultDF$Team[resultDF$Slot==TourneySlots$WeakSeed[row]])
actual<-NCAATourneyDetailedResults[NCAATourneyDetailedResults$WTeamID%in% c(team1, team2)&
NCAATourneyDetailedResults$LTeamID%in% c(team1, team2) & NCAATourneyDetailedResults$Season==year,]
if(nrow(actual)!=1){
stop("Expected exactly one tournament result for teams ", team1, " and ", team2,
" in season ", year, "; found ", nrow(actual), call. = FALSE)
}
if(actual$WTeamID==team1){
winner<-team1
loser<-team2
} else {
winner<-team2
loser<-team1
}
# advances-to, winner,loser
list(TourneySlots$Slot[row], winner,loser)
}
##SIMULATE TOURNAMENT#####
# Run `sims` independent bracket simulations; each one is a data.frame with
# one row per initial seed plus one row per played game.
tourneySims<-list();length(tourneySims)<-sims
for(j in seq_len(sims)){
#go through matchups sequentially and store results to resultDF
resultDF<-data.frame(Slot=TourneySeeds$Seed[TourneySeeds$Season==year],
                     Team=TourneySeeds$Team[TourneySeeds$Season==year],
                     Loser=NA, Payout=NA)
# NOTE(review): growing resultDF one row at a time is O(n^2); acceptable for
# a 63-game bracket but would need preallocation at larger scale.
for(row in seq_len(nrow(TourneySlots))){
result<-getSimResult(row)
resultDF[nrow(resultDF)+1,1:3]<-c(result[[1]], result[[2]], result[[3]])
}
resultDF$Sim<-j
# Lightweight progress indicator every 100 simulations.
if(j%%100==0){
print(j)
}
tourneySims[[j]]<-resultDF
}
# For backtests, append the actual tournament outcome as simulation sims + 1.
if(backtest){
resultDF<-data.frame(Slot=TourneySeeds$Seed[TourneySeeds$Season==year],
                     Team=TourneySeeds$Team[TourneySeeds$Season==year],
                     Loser=NA, Payout=NA)
for(row in seq_len(nrow(TourneySlots))){
result<-getActualResult(row)
resultDF[nrow(resultDF)+1,1:3]<-c(result[[1]], result[[2]], result[[3]])
}
resultDF$Sim<-sims+1
tourneySims[[length(tourneySims)+1]]<-resultDF
}
######SAVE DATA###########
# Flatten the list of per-simulation data.frames into one long data.frame.
# NOTE(review): ldply() is from plyr, which this script never attaches --
# presumably loaded via functions.R or game-data.RData; verify.
tourneySims<-ldply(tourneySims, data.frame)
# Seed numbers for winner and loser (strip region letters, e.g. "W01" -> 1).
tourneySims$team_seed<-as.numeric(gsub("\\D", "",TourneySeeds$Seed[TourneySeeds$Season==year][match(tourneySims$Team,TourneySeeds$TeamID[TourneySeeds$Season==year])] ))
tourneySims$loser_seed<-as.numeric(gsub("\\D", "",TourneySeeds$Seed[TourneySeeds$Season==year][match(tourneySims$Loser,TourneySeeds$TeamID[TourneySeeds$Season==year])] ))
# Round label from the slot prefix; play-in slots (region letters W/X/Y/Z)
# become round 0 and are then dropped, keeping rounds 1-6.
tourneySims$Round<-substr(tourneySims$Slot, 1, 2)
tourneySims$Round[grepl("W|X|Y|Z", tourneySims$Round)]<-0
tourneySims<-tourneySims[as.numeric(gsub("R", "",tourneySims$Round))>=1,]
tourneySims$Team_Full<-Teams$Team_Full[match(tourneySims$Team, Teams$TeamID)]
#line up with espn ownership data: undecided play-in pairs share a combined label
if(playInTbd && year==2018){
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Arizona State", "Syracuse")]<-"Asu/sy"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("St Bonaventure", "Ucla")]<-"Bon/la"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Long Island", "Radford")]<-"Liu/rad"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("North Carolina Central", "Texas Southern")]<-"Ncc/ts"
} else if (playInTbd && year==2019){
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Arizona State", "St Johns")]<-"Asu/sju"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Belmont", "Temple")]<-"Bel/tem"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("Fairleigh Dickinson", "Prairie View A&m")]<-"Fdu/pv"
tourneySims$Team_Full[tourneySims$Team_Full%in% c("North Dakota State", "North Carolina Central")]<-"Nds/ncc"
}
save(tourneySims, file=name)
##inspect results: per-team probability of reaching each round
inspect<-as.data.frame.matrix(table(tourneySims$Team_Full[tourneySims$Sim<=sims], tourneySims$Round[tourneySims$Sim<=sims])/sims)
inspect[order(inspect$R6,inspect$R5,inspect$R4,inspect$R3, inspect$R2, decreasing = TRUE), ]
|
# National Autonomous University of Mexico (UNAM)
# Faculty of Economics
# Statistics 2020-2
# Professor: Cesar Hernandez
# Measures of dispersion
# Scatter plots in R: the plot() function
head(mtcars)                 # preview the built-in mtcars data set
plot(mtcars$mpg)             # index plot: mpg against observation number
plot(mtcars$mpg,mtcars$hp)   # scatter plot: mpg vs. horsepower
# Fully labelled version; x-axis label typo fixed ("Milles" -> "Miles").
plot(mtcars$mpg,mtcars$hp, main = "Dispersión", xlab = "Miles per gallon", ylab = "Horse power", col = "gray")
|
/1-graficos-de-dispersion-en-R-funcion-plot.R
|
permissive
|
cghv94/medidas-de-dispersion-en-R
|
R
| false
| false
| 381
|
r
|
# National Autonomous University of Mexico (UNAM)
# Faculty of Economics
# Statistics 2020-2
# Professor: Cesar Hernandez
# Measures of dispersion
# Scatter plots in R: the plot() function
head(mtcars)                 # preview the built-in mtcars data set
plot(mtcars$mpg)             # index plot: mpg against observation number
plot(mtcars$mpg,mtcars$hp)   # scatter plot: mpg vs. horsepower
# Fully labelled version; x-axis label typo fixed ("Milles" -> "Miles").
plot(mtcars$mpg,mtcars$hp, main = "Dispersión", xlab = "Miles per gallon", ylab = "Horse power", col = "gray")
|
library(devtools)
library(dplyr)
library(tm)
library(stringr)
library(knitr)
library(R.utils)
library(ggplot2)
# Install latest version from Github
install_github("comhis/fennica") # or devtools::load_all() if you are working from the clone and modifying it
library(fennica)
# Install latest version from Github
install_github("comhis/comhis")
library(comhis)
# Load misc functions needed for harmonization
source("funcs.R")
# Define and create the output folder (guard avoids a spurious warning when it
# already exists)
output.folder <- "output.tables/"
if (!dir.exists(output.folder)) {
  dir.create(output.folder)
}
# Read the preprocessed data file
df.orig <- read_bibliographic_metadata("fennica_parsed.csv.gz", verbose = TRUE, sep = "|")
# df.orig <- sample_n(df.orig, 1000) # Try with a smaller subset first!
# ------------------------------------------------------------
# Number of top entries shown in summaries/plots
ntop <- 20
author <- "Helsinki Computational History Group (COMHIS)"
# Visualization options
theme_set(theme_bw(20))
|
/inst/examples/init.R
|
permissive
|
JuliaMatveeva217/fennica
|
R
| false
| false
| 932
|
r
|
library(devtools)
library(dplyr)
library(tm)
library(stringr)
library(knitr)
library(R.utils)
library(ggplot2)
# Install latest version from Github
install_github("comhis/fennica") # or devtools::load_all() if you are working from the clone and modifying it
library(fennica)
# Install latest version from Github
install_github("comhis/comhis")
library(comhis)
# Load misc functions needed for harmonization
source("funcs.R")
# Define and create the output folder (guard avoids a spurious warning when it
# already exists)
output.folder <- "output.tables/"
if (!dir.exists(output.folder)) {
  dir.create(output.folder)
}
# Read the preprocessed data file
df.orig <- read_bibliographic_metadata("fennica_parsed.csv.gz", verbose = TRUE, sep = "|")
# df.orig <- sample_n(df.orig, 1000) # Try with a smaller subset first!
# ------------------------------------------------------------
# Number of top entries shown in summaries/plots
ntop <- 20
author <- "Helsinki Computational History Group (COMHIS)"
# Visualization options
theme_set(theme_bw(20))
|
#Alyse, Aug23, 2015, R version 3.1.2 (2014-10-31),x86_64, mingw32
#clean workspace and setup working directory
# NOTE(review): remove(list=ls()) and setwd() are session-level side effects;
# kept to preserve the original script behavior, but avoid in reusable code.
remove(list=ls())
setwd("C:\\Users\\linw24\\Desktop\\WK3")
getwd()
#create a folder and download files
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
files
#Read the Activity files
dataActivityTest <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
#Read the Subject files
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
#Read Features files
dataFeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
#check variables
str(dataActivityTest)
str(dataActivityTrain)
str(dataSubjectTrain)
str(dataSubjectTest)
str(dataFeaturesTest)
str(dataFeaturesTrain)
#merge the training and the test sets (row-wise)
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity<- rbind(dataActivityTrain, dataActivityTest)
dataFeatures<- rbind(dataFeaturesTrain, dataFeaturesTest)
#naming
names(dataSubject)<-c("subject")
names(dataActivity)<- c("activity")
# header spelled out in full (the original relied on partial matching: "head=")
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"),header=FALSE)
names(dataFeatures)<- dataFeaturesNames$V2
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
#keep only mean() and std() feature columns, plus subject and activity
subdataFeaturesNames<-dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames<-c(as.character(subdataFeaturesNames), "subject", "activity" )
Data<-subset(Data,select=selectedNames)
str(Data)
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
head(Data$activity,30)
#labeling: expand abbreviated feature-name fragments to descriptive names
names(Data)<-gsub("^t", "time", names(Data))
names(Data)<-gsub("^f", "frequency", names(Data))
names(Data)<-gsub("Acc", "Accelerometer", names(Data))
names(Data)<-gsub("Gyro", "Gyroscope", names(Data))
names(Data)<-gsub("Mag", "Magnitude", names(Data))
names(Data)<-gsub("BodyBody", "Body", names(Data))
names(Data)
#build the tidy data set: mean of every variable per subject/activity pair
version
library(plyr)
Data2<-aggregate(. ~subject + activity, Data, mean)
Data2<-Data2[order(Data2$subject,Data2$activity),]
# row.names spelled out in full (the original used the abbreviation "row.name")
write.table(Data2, file = "tidydata.txt",row.names=FALSE)
library(knitr)
knit2html("codebook.Rmd")
|
/run_analysis.R
|
no_license
|
FarheenAmaid/Getting-and-Cleaning-Data
|
R
| false
| false
| 2,783
|
r
|
#Alyse, Aug23, 2015, R version 3.1.2 (2014-10-31),x86_64, mingw32
#clean workspace and setup working directory
# NOTE(review): remove(list=ls()) and setwd() are session-level side effects;
# kept to preserve the original script behavior, but avoid in reusable code.
remove(list=ls())
setwd("C:\\Users\\linw24\\Desktop\\WK3")
getwd()
#create a folder and download files
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
files
#Read the Activity files
dataActivityTest <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
#Read the Subject files
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
#Read Features files
dataFeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
#check variables
str(dataActivityTest)
str(dataActivityTrain)
str(dataSubjectTrain)
str(dataSubjectTest)
str(dataFeaturesTest)
str(dataFeaturesTrain)
#merge the training and the test sets (row-wise)
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity<- rbind(dataActivityTrain, dataActivityTest)
dataFeatures<- rbind(dataFeaturesTrain, dataFeaturesTest)
#naming
names(dataSubject)<-c("subject")
names(dataActivity)<- c("activity")
# header spelled out in full (the original relied on partial matching: "head=")
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"),header=FALSE)
names(dataFeatures)<- dataFeaturesNames$V2
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
#keep only mean() and std() feature columns, plus subject and activity
subdataFeaturesNames<-dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames<-c(as.character(subdataFeaturesNames), "subject", "activity" )
Data<-subset(Data,select=selectedNames)
str(Data)
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
head(Data$activity,30)
#labeling: expand abbreviated feature-name fragments to descriptive names
names(Data)<-gsub("^t", "time", names(Data))
names(Data)<-gsub("^f", "frequency", names(Data))
names(Data)<-gsub("Acc", "Accelerometer", names(Data))
names(Data)<-gsub("Gyro", "Gyroscope", names(Data))
names(Data)<-gsub("Mag", "Magnitude", names(Data))
names(Data)<-gsub("BodyBody", "Body", names(Data))
names(Data)
#build the tidy data set: mean of every variable per subject/activity pair
version
library(plyr)
Data2<-aggregate(. ~subject + activity, Data, mean)
Data2<-Data2[order(Data2$subject,Data2$activity),]
# row.names spelled out in full (the original used the abbreviation "row.name")
write.table(Data2, file = "tidydata.txt",row.names=FALSE)
library(knitr)
knit2html("codebook.Rmd")
|
# Grid search / k-fold cross-validation demo on a radial-kernel SVM
# (comments translated from Spanish to English; code unchanged).
# Import the dataset: keep Age, EstimatedSalary, Purchased
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[, 3:5]
dataset$Purchased = factor(dataset$Purchased)
# Split into training and testing sets (stratified 75/25 split)
library(caTools)
set.seed(123)
spit = sample.split(dataset$Purchased, SplitRatio = 0.75)
trainingSet = subset(dataset, spit == TRUE)
testingSet = subset(dataset, spit == FALSE)
# Feature-scale the two numeric predictors
trainingSet[, 1:2] = scale(trainingSet[, 1:2])
testingSet[, 1:2] = scale(testingSet[, 1:2])
# Fit an RBF-kernel SVM classifier on the training set
library(e1071)
classifier = svm(formula = Purchased ~ ., data = trainingSet, type = "C-classification", kernel = "radial")
# Predict on the test set
# (class labels; type = "response" is not used by svm's predict method)
yPred = predict(classifier, type = "response", newdata = testingSet[, -3])
# Confusion matrix: actual vs. predicted
cm = table(testingSet[, 3], yPred)
# Apply k-fold cross-validation (k = 10) to estimate accuracy
library(caret)
folds = createFolds(trainingSet$Purchased, k = 10)
cv = lapply(folds, function(x) {
# x holds the row indices of the held-out fold
trainingFold = trainingSet[-x, ]
testingFold = trainingSet[x, ]
classifier = svm(formula = Purchased ~ ., data = trainingFold, type = "C-classification", kernel = "radial")
yPred = predict(classifier, type = "response", newdata = testingFold[, -3])
cm = table(testingFold[, 3], yPred)
# fold accuracy = correct predictions / all predictions
accurary = (cm[1, 1] + cm[2, 2]) / (cm[1, 1] + cm[2, 2] + cm[1, 2] + cm[2, 1])
return(accurary)
})
accuracy = mean(as.numeric(cv))
accuracyStandardDeviation = sd(as.numeric(cv))
# Apply grid search (caret's default tuning grid) to find good
# hyper-parameters for the radial SVM
library(caret)
classifier = train(form = Purchased ~ ., data = trainingSet, method = 'svmRadial')
# Visualise the decision region over the training set: predict on a
# fine grid of (Age, EstimatedSalary) values and colour each point
library(ElemStatLearn)
set = trainingSet
x1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
x2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
gridSet = expand.grid(x1, x2)
colnames(gridSet) = c('Age', 'EstimatedSalary')
yGrid = predict(classifier, newdata = gridSet)
plot(set[, -3],
main = 'Kernel CVM (Conjunto de Entrenamiento)',
xlab = 'Edad', ylab = 'Sueldo Estimado',
xlim = range(x1), ylim = range(x2))
contour(x1, x2, matrix(as.numeric(yGrid), length(x1), length(x2)), add = TRUE)
points(gridSet, pch = '.', col = ifelse(yGrid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Same visualisation for the test set
set = testingSet
x1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
x2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
gridSet = expand.grid(x1, x2)
colnames(gridSet) = c('Age', 'EstimatedSalary')
yGrid = predict(classifier, newdata = gridSet)
plot(set[, -3],
main = 'Kernel SVM (Conjunto de Testing)',
xlab = 'Edad', ylab = 'Sueldo Estimado',
xlim = range(x1), ylim = range(x2))
contour(x1, x2, matrix(as.numeric(yGrid), length(x1), length(x2)), add = TRUE)
points(gridSet, pch = '.', col = ifelse(yGrid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
/datasets/Part 10 - Model Selection & Boosting/Section 48 - Model Selection/grid_search_jcantero.R
|
permissive
|
canteroferron/machinelearning-az
|
R
| false
| false
| 3,047
|
r
|
# Grid search
# Importar el dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[, 3:5]
dataset$Purchased = factor(dataset$Purchased)
# Dividir el conjunto entre training y testing
library(caTools)
set.seed(123)
spit = sample.split(dataset$Purchased, SplitRatio = 0.75)
trainingSet = subset(dataset, spit == TRUE)
testingSet = subset(dataset, spit == FALSE)
# Escalar el dataset
trainingSet[, 1:2] = scale(trainingSet[, 1:2])
testingSet[, 1:2] = scale(testingSet[, 1:2])
# Ajudtar el modelo de regresion logistica con el conjunto de entrenamiento
library(e1071)
classifier = svm(formula = Purchased ~ ., data = trainingSet, type = "C-classification", kernel = "radial")
# Predicion de los resultado con el conjunto de testing
# Probabilidad de prediccion
yPred = predict(classifier, type = "response", newdata = testingSet[, -3])
# Matrix de confucion
cm = table(testingSet[, 3], yPred)
# Aplicar F Fold Cross Validation
library(caret)
folds = createFolds(trainingSet$Purchased, k = 10)
cv = lapply(folds, function(x) {
trainingFold = trainingSet[-x, ]
testingFold = trainingSet[x, ]
classifier = svm(formula = Purchased ~ ., data = trainingFold, type = "C-classification", kernel = "radial")
yPred = predict(classifier, type = "response", newdata = testingFold[, -3])
cm = table(testingFold[, 3], yPred)
accurary = (cm[1, 1] + cm[2, 2]) / (cm[1, 1] + cm[2, 2] + cm[1, 2] + cm[2, 1])
return(accurary)
})
accuracy = mean(as.numeric(cv))
accuracyStandardDeviation = sd(as.numeric(cv))
# Aplicar grid search para encontrar los parametros optimos
library(caret)
classifier = train(form = Purchased ~ ., data = trainingSet, method = 'svmRadial')
# Visuliazacion del conjunto de entranmiento
library(ElemStatLearn)
set = trainingSet
x1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
x2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
gridSet = expand.grid(x1, x2)
colnames(gridSet) = c('Age', 'EstimatedSalary')
yGrid = predict(classifier, newdata = gridSet)
plot(set[, -3],
main = 'Kernel CVM (Conjunto de Entrenamiento)',
xlab = 'Edad', ylab = 'Sueldo Estimado',
xlim = range(x1), ylim = range(x2))
contour(x1, x2, matrix(as.numeric(yGrid), length(x1), length(x2)), add = TRUE)
points(gridSet, pch = '.', col = ifelse(yGrid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
set = testingSet
x1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
x2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
gridSet = expand.grid(x1, x2)
colnames(gridSet) = c('Age', 'EstimatedSalary')
yGrid = predict(classifier, newdata = gridSet)
plot(set[, -3],
main = 'Kernel SVM (Conjunto de Testing)',
xlab = 'Edad', ylab = 'Sueldo Estimado',
xlim = range(x1), ylim = range(x2))
contour(x1, x2, matrix(as.numeric(yGrid), length(x1), length(x2)), add = TRUE)
points(gridSet, pch = '.', col = ifelse(yGrid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
# Authors: Stuart Keith and Caleb Neale
# Shiny UI definition: compare COVID metrics with stock-market sector data.
library(shiny)

newui <- fluidPage(

  # Page title shown above everything else
  titlePanel("The Effects of COVID on the Stock Market"),

  # Classic sidebar + main-panel layout
  sidebarLayout(

    # Sidebar: the two user controls ------------------------------------
    sidebarPanel(
      # Which COVID time series to plot against the market data
      radioButtons(
        "covid", "Change in COVID Data:",
        c("Global Cases"  = "GlobalCases",
          "Global Deaths" = "GlobalDeaths",
          "US Cases"      = "USCases",
          "US Deaths"     = "USDeaths")
      ),
      # Extra vertical spacing between the two inputs
      br(),
      # Which market sector to display
      selectInput(
        "sector", "Sector:",
        c("Materials"             = "Materials",
          "Communications"        = "Communications",
          "Energy"                = "Energy",
          "Financials"            = "Financials",
          "Industrials"           = "Industrials",
          "Technology"            = "Technology",
          "ConsumerStaples"       = "ConsumerStaples",
          "RealEstate"            = "RealEstate",
          "Utilities"             = "Utilities",
          "HealthCare"            = "HealthCare",
          "ConsumerDiscretionary" = "ConsumerDiscretionary")
      )
    ),

    # Main panel: one tab per view ---------------------------------------
    mainPanel(
      tabsetPanel(
        type = "tabs",
        tabPanel("Summary",
                 br(),
                 plotOutput("plot")),
        tabPanel("Comparative",
                 br(),
                 plotOutput("comparativea", width = 600, height = 300),
                 br(),
                 plotOutput("comparativeb", width = 600, height = 300),
                 br(),
                 plotOutput("comparativec", width = 600, height = 300)),
        tabPanel("Sentiment",
                 br(),
                 plotOutput("sentiment", width = 600, height = 300),
                 br(),
                 plotOutput("sentimentb", width = 600, height = 300))
      )
    )
  )
)
|
/ui.R
|
no_license
|
caneale320/SYS2202Final
|
R
| false
| false
| 2,343
|
r
|
#Authors: Stuart Keith and Caleb Neale
library(shiny)
newui <- fluidPage(
# App title ----
titlePanel("The Effects of COVID on the Stock Market"),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Buttons for COVID data ----
radioButtons("covid", "Change in COVID Data:",
c("Global Cases" = "GlobalCases",
"Global Deaths" = "GlobalDeaths",
"US Cases" = "USCases",
"US Deaths" = "USDeaths")),
# br() element to introduce extra vertical spacing ----
br(),
# Input: Sector Dropdown menu ----
selectInput("sector",
"Sector:",
c("Materials" = "Materials",
"Communications" = "Communications",
"Energy" = "Energy",
"Financials" = "Financials",
"Industrials" = "Industrials",
"Technology" = "Technology",
"ConsumerStaples" = "ConsumerStaples",
"RealEstate" = "RealEstate",
"Utilities" = "Utilities",
"HealthCare" = "HealthCare",
"ConsumerDiscretionary" = "ConsumerDiscretionary" ))
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Tabset w/ plot, summary, and table ----
tabsetPanel(type = "tabs",
tabPanel("Summary",
br(),
plotOutput("plot")),
tabPanel("Comparative",
br(),
plotOutput("comparativea", width = 600, height = 300),
br(),
plotOutput("comparativeb", width = 600, height = 300),
br(),
plotOutput("comparativec", width = 600, height = 300)),
tabPanel("Sentiment",
br(),
plotOutput("sentiment", width = 600, height = 300),
br(),
plotOutput("sentimentb", width = 600, height = 300))
)
)
)
)
|
## ---- inicial_setup
# Initialise the Twitter session: source the helper scripts and then
# perform the OAuth handshake. (Function name spelling kept as-is so
# existing callers keep working.)
twitter_setip <- function(){
  helper_scripts <- c("Rscripts/load_libraries.R",
                      "Rscripts/twitter_handshake.R",
                      "Rscripts/database_conection.R")
  for (script in helper_scripts) {
    source(script)
  }
  twitter_handshake()
}
## ---- get_followers_ids
# Fetch the follower ids of `username` through the twitteR user object.
# `r` is forwarded as retryOnRateLimit so long pulls survive rate limits.
get_followers_ids <- function(username, r = 1000){
  twitter_user <- getUser(username)
  twitter_user$getFollowerIDs(retryOnRateLimit = r)
}
## ---- store_followers_ids
# Persist follower ids to data/<username>_fwids.RData.
# Bug fix: the confirmation message used to be printed *before* save(),
# so it appeared even when saving failed (the original comment flagged
# exactly this). save() now runs first and print() only happens once it
# has succeeded. Function name spelling kept for existing callers.
store_folowers_ids <- function(username, followers_ids){
  file_name <- paste0("data/", username, "_fwids.RData")
  ms <- paste0("Followers ids guardadas en ",
               "data/", username, "_fwids.RData")
  save(followers_ids, file = file_name)
  print(ms)
}
## ---- load_followers_ids
# Read follower ids back from an .RData file created by
# store_folowers_ids() and return them.
# Bug fix: the original called load() inside the function, which loaded
# the object into the function's *local* frame and returned only load()'s
# invisible character name -- the caller never received the data. We now
# load into a private environment and return the object itself.
load_followers_ids <- function(ids_file){
  env <- new.env(parent = emptyenv())
  loaded_names <- load(ids_file, envir = env)  # typically "followers_ids"
  get(loaded_names[[1]], envir = env)
}
## son 100 usuarios por consulta
## , 180 consultas por 15m.
## Cada 15m se puede obtener 180*100 usuarios (un chunck)
## ---- get_followers
# Download full user objects for `followers_ids` in chunks.
# Per the original notes: Twitter allows 100 users per lookup and 180
# lookups per 15 minutes, so we work in chunks of 180*100 = 18000 ids;
# lookupUsers() handles the waiting via retryOnRateLimit = r.
# Each chunk is saved to data/<user>/f<i>.RData and pushed to the DB.
# Bug fixes vs. the original:
#   * chunks no longer overlap at their boundaries (the old
#     indices[i]:indices[i+1] slices shared one id per boundary),
#   * the last chunk no longer indexes past the end of the vector
#     (which produced NA ids),
#   * the output directory is created once, without repeated warnings.
get_followers <- function(user, followers_ids, r = 1000){
  chunk_size <- 180 * 100
  n <- length(followers_ids)
  if (n == 0) return(invisible(NULL))  # nothing to fetch
  dir.create(paste0("data/", user), showWarnings = FALSE, recursive = TRUE)
  chunk_starts <- seq(1, n, by = chunk_size)
  for (i in seq_along(chunk_starts)) {
    lo <- chunk_starts[i]
    hi <- min(lo + chunk_size - 1, n)
    curr_users <- lookupUsers(followers_ids[lo:hi], retryOnRateLimit = r)
    file_name <- paste0("data/", user, "/", "f", i, ".RData")
    save(curr_users, file = file_name)
    store_users_db(curr_users, table = user)
  }
}
## ---- otros
|
/Rscripts/funciones.R
|
permissive
|
DanielSalgado/Statistical-Learning
|
R
| false
| false
| 1,526
|
r
|
## ---- inicial_setup
# Initialise the Twitter session: source the helper scripts and then
# perform the OAuth handshake. (Function name spelling kept as-is so
# existing callers keep working.)
twitter_setip <- function(){
  helper_scripts <- c("Rscripts/load_libraries.R",
                      "Rscripts/twitter_handshake.R",
                      "Rscripts/database_conection.R")
  for (script in helper_scripts) {
    source(script)
  }
  twitter_handshake()
}
## ---- get_followers_ids
# Fetch the follower ids of `username` through the twitteR user object.
# `r` is forwarded as retryOnRateLimit so long pulls survive rate limits.
get_followers_ids <- function(username, r = 1000){
  twitter_user <- getUser(username)
  twitter_user$getFollowerIDs(retryOnRateLimit = r)
}
## ---- store_followers_ids
# Persist follower ids to data/<username>_fwids.RData.
# Bug fix: the confirmation message used to be printed *before* save(),
# so it appeared even when saving failed (the original comment flagged
# exactly this). save() now runs first and print() only happens once it
# has succeeded. Function name spelling kept for existing callers.
store_folowers_ids <- function(username, followers_ids){
  file_name <- paste0("data/", username, "_fwids.RData")
  ms <- paste0("Followers ids guardadas en ",
               "data/", username, "_fwids.RData")
  save(followers_ids, file = file_name)
  print(ms)
}
## ---- load_followers_ids
# Read follower ids back from an .RData file created by
# store_folowers_ids() and return them.
# Bug fix: the original called load() inside the function, which loaded
# the object into the function's *local* frame and returned only load()'s
# invisible character name -- the caller never received the data. We now
# load into a private environment and return the object itself.
load_followers_ids <- function(ids_file){
  env <- new.env(parent = emptyenv())
  loaded_names <- load(ids_file, envir = env)  # typically "followers_ids"
  get(loaded_names[[1]], envir = env)
}
## son 100 usuarios por consulta
## , 180 consultas por 15m.
## Cada 15m se puede obtener 180*100 usuarios (un chunck)
## ---- get_followers
# Download full user objects for `followers_ids` in chunks.
# Per the original notes: Twitter allows 100 users per lookup and 180
# lookups per 15 minutes, so we work in chunks of 180*100 = 18000 ids;
# lookupUsers() handles the waiting via retryOnRateLimit = r.
# Each chunk is saved to data/<user>/f<i>.RData and pushed to the DB.
# Bug fixes vs. the original:
#   * chunks no longer overlap at their boundaries (the old
#     indices[i]:indices[i+1] slices shared one id per boundary),
#   * the last chunk no longer indexes past the end of the vector
#     (which produced NA ids),
#   * the output directory is created once, without repeated warnings.
get_followers <- function(user, followers_ids, r = 1000){
  chunk_size <- 180 * 100
  n <- length(followers_ids)
  if (n == 0) return(invisible(NULL))  # nothing to fetch
  dir.create(paste0("data/", user), showWarnings = FALSE, recursive = TRUE)
  chunk_starts <- seq(1, n, by = chunk_size)
  for (i in seq_along(chunk_starts)) {
    lo <- chunk_starts[i]
    hi <- min(lo + chunk_size - 1, n)
    curr_users <- lookupUsers(followers_ids[lo:hi], retryOnRateLimit = r)
    file_name <- paste0("data/", user, "/", "f", i, ".RData")
    save(curr_users, file = file_name)
    store_users_db(curr_users, table = user)
  }
}
## ---- otros
|
# plot3.R -- reproduce figure 3 of the Exploratory Data Analysis course
# project: the three energy sub-metering series over 2007-02-01/02.
unzip("exdata_data_household_power_consumption.zip")
# ';'-separated file; missing values are coded as "?".
assign_data <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# Keep only the two target days (dates are d/m/Y strings in the raw file)
plot3_data <- subset(assign_data, Date %in% c("1/2/2007","2/2/2007"))
plot3_data$Date <- as.Date(plot3_data$Date, format="%d/%m/%Y")
date_time <- paste(plot3_data$Date, plot3_data$Time)
plot3_data$Date_time <- as.POSIXct(date_time)
# Bug fix: the y-axis label was copy-pasted from plot2
# ("Global Active Power (kilowatts)") -- this plot shows sub-metering.
with(plot3_data, plot(Sub_metering_1~Date_time, type="l", ylab="Energy sub metering", xlab=""))
lines(Sub_metering_2~Date_time,plot3_data, col='Red')
lines(Sub_metering_3~Date_time,plot3_data, col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the screen device to a 480x480 PNG, then close that device
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
/plot3.R
|
no_license
|
tiwaribiplav/ExData_Plotting1
|
R
| false
| false
| 895
|
r
|
# plot3.R -- reproduce figure 3 of the Exploratory Data Analysis course
# project: the three energy sub-metering series over 2007-02-01/02.
unzip("exdata_data_household_power_consumption.zip")
# ';'-separated file; missing values are coded as "?".
assign_data <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# Keep only the two target days (dates are d/m/Y strings in the raw file)
plot3_data <- subset(assign_data, Date %in% c("1/2/2007","2/2/2007"))
plot3_data$Date <- as.Date(plot3_data$Date, format="%d/%m/%Y")
date_time <- paste(plot3_data$Date, plot3_data$Time)
plot3_data$Date_time <- as.POSIXct(date_time)
# Bug fix: the y-axis label was copy-pasted from plot2
# ("Global Active Power (kilowatts)") -- this plot shows sub-metering.
with(plot3_data, plot(Sub_metering_1~Date_time, type="l", ylab="Energy sub metering", xlab=""))
lines(Sub_metering_2~Date_time,plot3_data, col='Red')
lines(Sub_metering_3~Date_time,plot3_data, col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the screen device to a 480x480 PNG, then close that device
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
#' Read population-wide data
#'
#' Thin wrapper around \code{read_data} for population-level output.
#'
#' @param folder Path to the folder containing the output files.
#' @param variable Name of the variable to read from that folder.
#'
#' @return Whatever \code{read_data} returns for this variable
#'   (presumably a vector of values -- confirm against \code{read_data}).
#'
#' @export
read_population <- function(folder, variable) {
read_data(folder, variable)
}
|
/R/read_population.R
|
no_license
|
rscherrer/EGS
|
R
| false
| false
| 199
|
r
|
#' Read population-wide data
#'
#' Thin wrapper around \code{read_data} for population-level output.
#'
#' @param folder Path to the folder containing the output files.
#' @param variable Name of the variable to read from that folder.
#'
#' @return Whatever \code{read_data} returns for this variable
#'   (presumably a vector of values -- confirm against \code{read_data}).
#'
#' @export
read_population <- function(folder, variable) {
read_data(folder, variable)
}
|
# Package test entry point: runs every testthat spec under tests/testthat/.
library(testthat)
library(gformula)
test_check("gformula")
|
/tests/testthat.R
|
no_license
|
GerkeLab/gformula
|
R
| false
| false
| 60
|
r
|
# Package test entry point: runs every testthat spec under tests/testthat/.
library(testthat)
library(gformula)
test_check("gformula")
|
#' Funnel plot for difference in measures
#'
#' Function \code{plot.funnel_measure} creates funnel plot of differences in measures for two models across variable areas.
#' It uses data created with 'funnel_measure' function.
#'
#' @param x - funnel_measure object created with \code{\link{funnel_measure}} function.
#' @param ... - other parameters
#' @param dot_size - size of the dot on plots. Passed to \code{\link[ggplot2]{geom_point}}.
#'
#' @return ggplot object
#'
#' @import ggplot2
#' @rdname plot.funnel_measure
#' @export
#' @examples
#' \donttest{
#' library("mlr")
#' library("DALEXtra")
#' task <- mlr::makeRegrTask(
#' id = "R",
#' data = apartments,
#' target = "m2.price"
#' )
#' learner_lm <- mlr::makeLearner(
#' "regr.lm"
#' )
#' model_lm <- mlr::train(learner_lm, task)
#' explainer_lm <- explain_mlr(model_lm, apartmentsTest, apartmentsTest$m2.price, label = "LM")
#'
#' learner_rf <- mlr::makeLearner(
#' "regr.randomForest"
#' )
#' model_rf <- mlr::train(learner_rf, task)
#' explainer_rf <- explain_mlr(model_rf, apartmentsTest, apartmentsTest$m2.price, label = "RF")
#'
#' learner_gbm <- mlr::makeLearner(
#' "regr.gbm"
#' )
#' model_gbm <- mlr::train(learner_gbm, task)
#' explainer_gbm <- explain_mlr(model_gbm, apartmentsTest, apartmentsTest$m2.price, label = "GBM")
#'
#'
#' plot_data <- funnel_measure(explainer_lm, list(explainer_rf, explainer_gbm),
#' nbins = 5, measure_function = DALEX::loss_root_mean_square)
#' plot(plot_data)
#' }
#' @details Builds one ggplot per Challenger model: for every variable
#' subset, the per-category measure differences are drawn as point
#' ranges anchored at zero, so dots on one side of the violet zero line
#' favour the Champion and dots on the other side favour a Challenger.
plot.funnel_measure <- function(x, ..., dot_size = 0.5){
funnel_measure <- x
# Dummy bindings so R CMD check does not flag ggplot2's NSE column names
Variable <- Measure <- Measure_min <- Measure_max <- Category <- Text <- L <- NULL
if(!"funnel_measure" %in% class(funnel_measure)) stop("Data is not a funnel_measure object")
# Labels of the Champion model and of every Challenger model
champion_label <- funnel_measure$models_info[funnel_measure$models_info$type == "Champion",]$label
challenger_label <- funnel_measure$models_info[funnel_measure$models_info$type == "Challenger",]$label
# Each point range is anchored at zero: it spans min(0, x) .. max(0, x)
funnel_measure$data$Measure_min <- unlist(lapply(funnel_measure$data$Measure, function(x){
list("Measure_min" = min(0, x))
}
))
funnel_measure$data$Measure_max <- unlist(lapply(funnel_measure$data$Measure, function(x){
list("Measure_max" = max(0, x))
}
))
# One plot per Challenger; the list of ggplot objects is returned as-is
p <- lapply(challenger_label, function(y){
data <- funnel_measure$data
data = data[data$Challenger == y, ]
# Flag the extreme rows per variable: 1 = max measure, -1 = min, 0 = rest
data$Text <- unlist(lapply(unique(data$Variable), function(x){
tmp <- data[data$Variable == x, ]
ifelse(tmp$Measure == max(tmp$Measure), 1,
ifelse(tmp$Measure == min(tmp$Measure) & tmp$Measure != max(tmp$Measure), -1, 0))
}
))
# Only the extreme rows receive a text label on the plot
data$L <- ifelse(data$Text == -1 | data$Text == 1, data$Label, "")
ggplot(data, aes(x = Variable, y = Measure)) +
geom_pointrange(aes(ymin = Measure_min, ymax = Measure_max, color = Category),
position = position_dodge2(width = 0.75), size =
dot_size) +
scale_color_manual(values = c(colors_discrete_drwhy(3))) +
# Violet reference line: zero difference between Champion and Challenger
geom_hline(yintercept = 0, color = "#371ea3", size=1) +
# NOTE(review): the +/-20 axis padding assumes measures on a scale where
# 20 is small -- confirm for loss functions with tiny magnitudes
ylim(c(-max(abs(data$Measure))-20, max(abs(data$Measure))+20)) +
ylab(paste("Champion (", champion_label, ") and Challengers measure difference", sep = "")) +
xlab("") +
labs(title = "Funnel Plot",
subtitle = paste("For every colour, dot on the right side of violet line means that Champion (",
champion_label,
") is better. \nDot on the left means that one of the Challengers is better than Champion (",
champion_label,
")",
sep = "")) +
theme_drwhy() +
theme(panel.grid = element_blank()) +
# Dot-dash separators between variable rows (axes are flipped below)
geom_vline(xintercept = seq(1.5, 1.5 + length(unique(data$Variable))), linetype = "dotdash", color = "#371ea3")+
geom_text(aes(y = Text * max(abs(Measure)), label = L),
hjust = 0.5, vjust = 0.5,
position = position_dodge2(width = 0.75),
color = "#371ea3") +
coord_flip()
})
p
}
|
/R/plot.funnel_measure.R
|
no_license
|
kasiapekala/DALEXtra
|
R
| false
| false
| 4,303
|
r
|
#' Funnel plot for difference in measures
#'
#' Function \code{plot.funnel_measure} creates funnel plot of differences in measures for two models across variable areas.
#' It uses data created with 'funnel_measure' function.
#'
#' @param x - funnel_measure object created with \code{\link{funnel_measure}} function.
#' @param ... - other parameters
#' @param dot_size - size of the dot on plots. Passed to \code{\link[ggplot2]{geom_point}}.
#'
#' @return ggplot object
#'
#' @import ggplot2
#' @rdname plot.funnel_measure
#' @export
#' @examples
#' \donttest{
#' library("mlr")
#' library("DALEXtra")
#' task <- mlr::makeRegrTask(
#' id = "R",
#' data = apartments,
#' target = "m2.price"
#' )
#' learner_lm <- mlr::makeLearner(
#' "regr.lm"
#' )
#' model_lm <- mlr::train(learner_lm, task)
#' explainer_lm <- explain_mlr(model_lm, apartmentsTest, apartmentsTest$m2.price, label = "LM")
#'
#' learner_rf <- mlr::makeLearner(
#' "regr.randomForest"
#' )
#' model_rf <- mlr::train(learner_rf, task)
#' explainer_rf <- explain_mlr(model_rf, apartmentsTest, apartmentsTest$m2.price, label = "RF")
#'
#' learner_gbm <- mlr::makeLearner(
#' "regr.gbm"
#' )
#' model_gbm <- mlr::train(learner_gbm, task)
#' explainer_gbm <- explain_mlr(model_gbm, apartmentsTest, apartmentsTest$m2.price, label = "GBM")
#'
#'
#' plot_data <- funnel_measure(explainer_lm, list(explainer_rf, explainer_gbm),
#' nbins = 5, measure_function = DALEX::loss_root_mean_square)
#' plot(plot_data)
#' }
plot.funnel_measure <- function(x, ..., dot_size = 0.5){
funnel_measure <- x
Variable <- Measure <- Measure_min <- Measure_max <- Category <- Text <- L <- NULL
if(!"funnel_measure" %in% class(funnel_measure)) stop("Data is not a funnel_measure object")
champion_label <- funnel_measure$models_info[funnel_measure$models_info$type == "Champion",]$label
challenger_label <- funnel_measure$models_info[funnel_measure$models_info$type == "Challenger",]$label
funnel_measure$data$Measure_min <- unlist(lapply(funnel_measure$data$Measure, function(x){
list("Measure_min" = min(0, x))
}
))
funnel_measure$data$Measure_max <- unlist(lapply(funnel_measure$data$Measure, function(x){
list("Measure_max" = max(0, x))
}
))
p <- lapply(challenger_label, function(y){
data <- funnel_measure$data
data = data[data$Challenger == y, ]
data$Text <- unlist(lapply(unique(data$Variable), function(x){
tmp <- data[data$Variable == x, ]
ifelse(tmp$Measure == max(tmp$Measure), 1,
ifelse(tmp$Measure == min(tmp$Measure) & tmp$Measure != max(tmp$Measure), -1, 0))
}
))
data$L <- ifelse(data$Text == -1 | data$Text == 1, data$Label, "")
ggplot(data, aes(x = Variable, y = Measure)) +
geom_pointrange(aes(ymin = Measure_min, ymax = Measure_max, color = Category),
position = position_dodge2(width = 0.75), size =
dot_size) +
scale_color_manual(values = c(colors_discrete_drwhy(3))) +
geom_hline(yintercept = 0, color = "#371ea3", size=1) +
ylim(c(-max(abs(data$Measure))-20, max(abs(data$Measure))+20)) +
ylab(paste("Champion (", champion_label, ") and Challengers measure difference", sep = "")) +
xlab("") +
labs(title = "Funnel Plot",
subtitle = paste("For every colour, dot on the right side of violet line means that Champion (",
champion_label,
") is better. \nDot on the left means that one of the Challengers is better than Champion (",
champion_label,
")",
sep = "")) +
theme_drwhy() +
theme(panel.grid = element_blank()) +
geom_vline(xintercept = seq(1.5, 1.5 + length(unique(data$Variable))), linetype = "dotdash", color = "#371ea3")+
geom_text(aes(y = Text * max(abs(Measure)), label = L),
hjust = 0.5, vjust = 0.5,
position = position_dodge2(width = 0.75),
color = "#371ea3") +
coord_flip()
})
p
}
|
#load libraries
library("ggplot2"); packageVersion("ggplot2")
library("phyloseq"); packageVersion("phyloseq")
library("plyr"); packageVersion("plyr")
library("dplyr"); packageVersion("dplyr")
library("cowplot"); packageVersion("cowplot")
library("reshape2"); packageVersion("reshape2")
library("rstatix"); packageVersion("rstatix")
library("ggpubr"); packageVersion("ggpubr")
library("venn"); packageVersion("venn")
#load colors
source("scripts/color_palletes.R")
source("scripts/extra_functions.R")
#load RDS object
Dor_ps.prev <-readRDS("data/Dor_ps_prev.rds")
#####################################
#Alpha diversity statistical tests
####################################
# Calculate richness
Dor_ps.alpha.div <- estimate_richness(Dor_ps.prev, split = TRUE, measures = NULL)
#generate data set with all bacterial community characteristics
Dor_comm.char<- data.frame(Sample_number_dada2 = sample_data(Dor_ps.prev)$Sample_number_dada2,
Pool = gsub("\\.","",sample_data(Dor_ps.prev)$location),
Year = sample_data(Dor_ps.prev)$Year,
Month = sample_data(Dor_ps.prev)$Month,
Mic.Season = sample_data(Dor_ps.prev)$Mic.Season,
#Chl.sequences = sample_sums(Dor_ps.chl),
Sequences= sample_sums(Dor_ps.prev),
Observed = Dor_ps.alpha.div$Observed,
Chao1 = Dor_ps.alpha.div$Chao1,
Completness = round(100*Dor_ps.alpha.div$Observed/Dor_ps.alpha.div$Chao1, digits=2),
Shannon = round(Dor_ps.alpha.div$Shannon,digits=2),
InvSimpson = round(Dor_ps.alpha.div$InvSimpson,digits=2),
Evenness = round(Dor_ps.alpha.div$Shannon/log(Dor_ps.alpha.div$Observed),digits=2))
#merge with env. par.
env.par<- c("Temp_degC",
"Ammonia_ug_L", "NO3_NO2_N_L", "TP_ug_L","Food..Kg.","Biomass..kg.","Chl_a_mg_L", "Chl_b_mg_L",
"Diatoxanthin_mg_L","Dinoxanthin_mg_L","Fucoxanthin_mg_L",
"b_caroten_mg_L","MC_ug_L","Lutein_mg_L","Zeaxanthin_mg_L")
Dor_metadata<- left_join(Dor_comm.char, sample_data(Dor_ps.prev), by = c("Sample_number_dada2","Year","Month","Mic.Season")) %>%
dplyr::select(c(names(Dor_comm.char),env.par)) %>%
mutate("N:P" = as.numeric(NO3_NO2_N_L/TP_ug_L)) %>% arrange(Pool,Year,Month)
write.csv(Dor_metadata, "./tables/Dor_alpha_table.csv")
# subset only 2013-2014
Dor_ps.prev_run1<- subset_samples(Dor_ps.prev, Run == "1")
#plot alpha diversity
Dor_alpha <- estimate_richness(Dor_ps.prev_run1, measures = c("Observed", "Chao1","Shannon", "Evenness"))
Dor_alpha$Evenness <- Dor_alpha$Shannon/log(Dor_alpha$Observed)
Dor_alpha <- merge_phyloseq(Dor_ps.prev_run1, sample_data(Dor_alpha))
Dor_alpha.m <- as(sample_data(Dor_alpha), "data.frame")%>%
select(location, Year, Month, Mic.Season, Observed, Chao1, Shannon, Evenness)%>%
melt(id.vars = c("location", "Year","Month", "Mic.Season"))
alpha.p<- ggplot(Dor_alpha.m, aes(x = Month, y = value, group = variable)) +
labs(x = "Year", y = "Alpha diversity")+
geom_point(aes(shape = location), size =3)+
geom_smooth(method = loess, se = TRUE)+
#scale_fill_manual(values =c("yellow","darkgreen"))+
#geom_boxplot(outlier.color = NULL, notch = FALSE)+
facet_grid(variable~Year, scales = "free")+
geom_hline(aes(yintercept=-Inf)) +
geom_vline(aes(xintercept=-Inf)) +
geom_vline(aes(xintercept=Inf))+
coord_cartesian(clip="off")+
theme_classic() +
theme(legend.position = "bottom")
ggsave("./figures/alpha_p.png",
plot = alpha.p,
units = "cm",
width = 30, height = 30,
#scale = 1,
dpi = 300)
alpha_pool.p<- ggplot(Dor_alpha.m, aes (x = location, y = value, group = location, colour = Year))+
geom_boxplot(outlier.color = NULL, notch = FALSE)+
geom_jitter(size = 3)+
facet_wrap(variable~., scales = "free", ncol = 2)+
theme_classic(base_size = 12)+
#geom_signif(comparisons = list(c("D1.", "Res."),c("D1.","V2."),c("Res.","V2.")),
# map_signif_level=TRUE, test = "wilcox.test", color = "black")+
theme(legend.position = "bottom")
ggsave("./figures/alpha_pools.png",
plot = alpha_pool.p,
units = "cm",
width = 30, height = 30,
#scale = 1,
dpi = 300)
alpha_seasons.p<- ggplot(Dor_alpha.m, aes (x = location, y = value, group = interaction(location,Mic.Season),
colour = Mic.Season, shape = as.factor(Year)))+
geom_boxplot(outlier.color = NA, notch = FALSE)+
geom_jitter(size = 3)+
facet_wrap(variable~., scales = "free", ncol = 2)+
scale_colour_manual(values = c("Wet"="darkblue",
"Dry"="orange")) +
#coord_fixed()+
theme_bw()+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"),
text=element_text(size=14),legend.position = "bottom")
ggsave("./figures/alpha_seasons.pdf",
plot = alpha_seasons.p,
units = "cm",
width = 30, height = 30,
#scale = 1,
dpi = 300)
#####################################
#Test statistical differences between Years and Seasons
####################################
shapiro.test(sample_data(Dor_alpha)$Chao1)
#Chao1 richness did not show normal distribution (p < 0.01), thus will be analyzed using Kruskal Wallis test
kruskal.test(Chao1 ~ location, data = data.frame(sample_data(Dor_alpha)))
kruskal.test(Chao1 ~ Mic.Season, data = data.frame(sample_data(Dor_alpha)))
# Pairwise Wilcoxon tests (BH-adjusted p-values) on Chao1 richness.
# Bug fix: both results used to be assigned to Chao1_Wilcox_Season, so
# the season comparison was silently overwritten; the second pipeline
# compares pools (location) and now gets its own name.
Chao1_Wilcox_Season <- as(sample_data(Dor_alpha),"data.frame") %>%
  group_by(location) %>%
  rstatix::wilcox_test(Chao1 ~ Mic.Season, p.adjust.method = "BH") %>%
  add_significance()
Chao1_Wilcox_Location <- as(sample_data(Dor_alpha),"data.frame") %>%
  #group_by(Mic.Season) %>%
  rstatix::wilcox_test(Chao1 ~ location, p.adjust.method = "BH") %>%
  add_significance()
#####################################
#ASVs overlap between pools
####################################
#subset each pool
Dor_ps.D1<- subset_samples(Dor_ps.prev_run1, location =="D1.")
Dor_ps.D1<- prune_taxa(taxa_sums(Dor_ps.D1)>0,Dor_ps.D1)
Dor_ps.V2<- subset_samples(Dor_ps.prev_run1, location =="V2.")
Dor_ps.V2<- prune_taxa(taxa_sums(Dor_ps.V2)>0,Dor_ps.V2)
Dor_ps.Res<- subset_samples(Dor_ps.prev_run1, location =="Res.")
Dor_ps.Res<- prune_taxa(taxa_sums(Dor_ps.Res)>0,Dor_ps.Res)
#generate list of ASVs in each pool
z <- list()
z[["D1"]] <- as.character(row.names(otu_table(Dor_ps.D1)))
z[["V2"]] <- as.character(row.names(otu_table(Dor_ps.V2)))
z[["Res"]] <- as.character(row.names(otu_table(Dor_ps.Res)))
#plot
png(file="figures/venn_pools.png",units = "cm", res = 300,
width=30, height=30)
venn(z, snames = names(z), ilab=TRUE, zcolor = "style",
ilcs = 1, sncs = 2)
dev.off()
|
/Dor_alpha_div.R
|
no_license
|
edfadeev/Dor_ponds_16S
|
R
| false
| false
| 6,944
|
r
|
#load libraries
library("ggplot2"); packageVersion("ggplot2")
library("phyloseq"); packageVersion("phyloseq")
library("plyr"); packageVersion("plyr")
library("dplyr"); packageVersion("dplyr")
library("cowplot"); packageVersion("cowplot")
library("reshape2"); packageVersion("reshape2")
library("rstatix"); packageVersion("rstatix")
library("ggpubr"); packageVersion("ggpubr")
library("venn"); packageVersion("venn")
#load colors
source("scripts/color_palletes.R")
source("scripts/extra_functions.R")
#load RDS object
Dor_ps.prev <-readRDS("data/Dor_ps_prev.rds")
#####################################
#Alpha diversity statistical tests
####################################
# Calculate richness
Dor_ps.alpha.div <- estimate_richness(Dor_ps.prev, split = TRUE, measures = NULL)
#generate data set with all bacterial community characteristics
Dor_comm.char<- data.frame(Sample_number_dada2 = sample_data(Dor_ps.prev)$Sample_number_dada2,
Pool = gsub("\\.","",sample_data(Dor_ps.prev)$location),
Year = sample_data(Dor_ps.prev)$Year,
Month = sample_data(Dor_ps.prev)$Month,
Mic.Season = sample_data(Dor_ps.prev)$Mic.Season,
#Chl.sequences = sample_sums(Dor_ps.chl),
Sequences= sample_sums(Dor_ps.prev),
Observed = Dor_ps.alpha.div$Observed,
Chao1 = Dor_ps.alpha.div$Chao1,
Completness = round(100*Dor_ps.alpha.div$Observed/Dor_ps.alpha.div$Chao1, digits=2),
Shannon = round(Dor_ps.alpha.div$Shannon,digits=2),
InvSimpson = round(Dor_ps.alpha.div$InvSimpson,digits=2),
Evenness = round(Dor_ps.alpha.div$Shannon/log(Dor_ps.alpha.div$Observed),digits=2))
#merge with env. par.
# Environmental / biological covariates to carry into the per-sample table.
env.par<- c("Temp_degC",
            "Ammonia_ug_L", "NO3_NO2_N_L", "TP_ug_L","Food..Kg.","Biomass..kg.","Chl_a_mg_L", "Chl_b_mg_L",
            "Diatoxanthin_mg_L","Dinoxanthin_mg_L","Fucoxanthin_mg_L",
            "b_caroten_mg_L","MC_ug_L","Lutein_mg_L","Zeaxanthin_mg_L")
# Join community summary with sample metadata, derive an N:P ratio, and export.
Dor_metadata<- left_join(Dor_comm.char, sample_data(Dor_ps.prev), by = c("Sample_number_dada2","Year","Month","Mic.Season")) %>%
  dplyr::select(c(names(Dor_comm.char),env.par)) %>%
  mutate("N:P" = as.numeric(NO3_NO2_N_L/TP_ug_L)) %>% arrange(Pool,Year,Month)
write.csv(Dor_metadata, "./tables/Dor_alpha_table.csv")
# subset only 2013-2014
Dor_ps.prev_run1<- subset_samples(Dor_ps.prev, Run == "1")
#plot alpha diversity
# NOTE(review): "Evenness" is not a documented estimate_richness() measure
# (phyloseq supports Observed/Chao1/ACE/Shannon/Simpson/InvSimpson/Fisher);
# verify this call does not error -- Evenness is recomputed manually below anyway.
Dor_alpha <- estimate_richness(Dor_ps.prev_run1, measures = c("Observed", "Chao1","Shannon", "Evenness"))
# Evenness computed as Shannon divided by log(observed richness).
Dor_alpha$Evenness <- Dor_alpha$Shannon/log(Dor_alpha$Observed)
# Attach the diversity estimates back onto the phyloseq object as sample data.
Dor_alpha <- merge_phyloseq(Dor_ps.prev_run1, sample_data(Dor_alpha))
# Long format (one row per sample x diversity metric) for faceted plotting.
Dor_alpha.m <- as(sample_data(Dor_alpha), "data.frame")%>%
  select(location, Year, Month, Mic.Season, Observed, Chao1, Shannon, Evenness)%>%
  melt(id.vars = c("location", "Year","Month", "Mic.Season"))
# Monthly time series of each alpha-diversity metric, faceted by metric x year.
alpha.p<- ggplot(Dor_alpha.m, aes(x = Month, y = value, group = variable)) +
  labs(x = "Year", y = "Alpha diversity")+
  geom_point(aes(shape = location), size =3)+
  geom_smooth(method = loess, se = TRUE)+
  #scale_fill_manual(values =c("yellow","darkgreen"))+
  #geom_boxplot(outlier.color = NULL, notch = FALSE)+
  facet_grid(variable~Year, scales = "free")+
  geom_hline(aes(yintercept=-Inf)) +
  geom_vline(aes(xintercept=-Inf)) +
  geom_vline(aes(xintercept=Inf))+
  coord_cartesian(clip="off")+
  theme_classic() +
  theme(legend.position = "bottom")
ggsave("./figures/alpha_p.png",
       plot = alpha.p,
       units = "cm",
       width = 30, height = 30,
       #scale = 1,
       dpi = 300)
# Boxplots of each metric per sampling location, coloured by year.
alpha_pool.p<- ggplot(Dor_alpha.m, aes (x = location, y = value, group = location, colour = Year))+
  geom_boxplot(outlier.color = NULL, notch = FALSE)+
  geom_jitter(size = 3)+
  facet_wrap(variable~., scales = "free", ncol = 2)+
  theme_classic(base_size = 12)+
  #geom_signif(comparisons = list(c("D1.", "Res."),c("D1.","V2."),c("Res.","V2.")),
  #            map_signif_level=TRUE, test = "wilcox.test", color = "black")+
  theme(legend.position = "bottom")
ggsave("./figures/alpha_pools.png",
       plot = alpha_pool.p,
       units = "cm",
       width = 30, height = 30,
       #scale = 1,
       dpi = 300)
# Boxplots per location split by wet/dry season; point shape encodes year.
alpha_seasons.p<- ggplot(Dor_alpha.m, aes (x = location, y = value, group = interaction(location,Mic.Season),
                                           colour = Mic.Season, shape = as.factor(Year)))+
  geom_boxplot(outlier.color = NA, notch = FALSE)+
  geom_jitter(size = 3)+
  facet_wrap(variable~., scales = "free", ncol = 2)+
  scale_colour_manual(values = c("Wet"="darkblue",
                                 "Dry"="orange")) +
  #coord_fixed()+
  theme_bw()+
  theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
        axis.line = element_line(colour = "black"),
        text=element_text(size=14),legend.position = "bottom")
ggsave("./figures/alpha_seasons.pdf",
       plot = alpha_seasons.p,
       units = "cm",
       width = 30, height = 30,
       #scale = 1,
       dpi = 300)
#####################################
#Test statistical differences between Years and Seasons
####################################
# Normality check on Chao1 before choosing a parametric vs rank-based test.
shapiro.test(sample_data(Dor_alpha)$Chao1)
#Chao1 richness did not show normal distribution (p < 0.01), thus will be analyzed using Kruskal Wallis test
kruskal.test(Chao1 ~ location, data = data.frame(sample_data(Dor_alpha)))
kruskal.test(Chao1 ~ Mic.Season, data = data.frame(sample_data(Dor_alpha)))
# Pairwise Wilcoxon tests with Benjamini-Hochberg correction, within location.
Chao1_Wilcox_Season <- as(sample_data(Dor_alpha),"data.frame") %>%
  group_by(location) %>%
  rstatix::wilcox_test(Chao1 ~ Mic.Season, p.adjust.method = "BH") %>%
  add_significance()
# NOTE(review): this overwrites the previous result object; rename one of the
# two if both the by-season and by-location tables are needed downstream.
Chao1_Wilcox_Season <- as(sample_data(Dor_alpha),"data.frame") %>%
  #group_by(Mic.Season) %>%
  rstatix::wilcox_test(Chao1 ~ location, p.adjust.method = "BH") %>%
  add_significance()
#####################################
#ASVs overlap between pools
####################################
#subset each pool
# For each location, keep only samples from that pool and drop zero-count taxa.
Dor_ps.D1<- subset_samples(Dor_ps.prev_run1, location =="D1.")
Dor_ps.D1<- prune_taxa(taxa_sums(Dor_ps.D1)>0,Dor_ps.D1)
Dor_ps.V2<- subset_samples(Dor_ps.prev_run1, location =="V2.")
Dor_ps.V2<- prune_taxa(taxa_sums(Dor_ps.V2)>0,Dor_ps.V2)
Dor_ps.Res<- subset_samples(Dor_ps.prev_run1, location =="Res.")
Dor_ps.Res<- prune_taxa(taxa_sums(Dor_ps.Res)>0,Dor_ps.Res)
#generate list of ASVs in each pool
z <- list()
z[["D1"]] <- as.character(row.names(otu_table(Dor_ps.D1)))
z[["V2"]] <- as.character(row.names(otu_table(Dor_ps.V2)))
z[["Res"]] <- as.character(row.names(otu_table(Dor_ps.Res)))
#plot
# Venn diagram of ASV membership across the three pools.
png(file="figures/venn_pools.png",units = "cm", res = 300,
    width=30, height=30)
venn(z, snames = names(z), ilab=TRUE, zcolor = "style",
     ilcs = 1, sncs = 2)
dev.off()
|
# needs xcms3, see https://bioconductor.org/packages/release/bioc/html/xcms.html for installation
library(xcms)
### some useful functions ###
getDesign = function(mzml_files, label) {
  # Build a minimal sample-design table for a set of mzML files:
  # sample_name is the file name with its '.mzML' suffix stripped,
  # and every file is assigned to the same group `label`.
  sample_names <- sub(".mzML", "", basename(mzml_files), fixed = TRUE)
  data.frame(sample_name = sample_names,
             sample_group = rep(label, length(mzml_files)),
             stringsAsFactors = FALSE)
}
readData = function(files, design, mode="onDisk", msLevel=1) {
  # Read raw MS data (centroided) with the design table attached as
  # phenotype data, so downstream xcms steps can see the grouping.
  pheno <- new("NAnnotatedDataFrame", design)
  MSnbase::readMSData(files = files, pdata = pheno,
                      mode = mode, msLevel. = msLevel, centroided. = TRUE)
}
pickPeaks = function(raw_data, ppm, peakwidth, snthresh, prefilter, mzdiff) {
  # Detect chromatographic peaks with the centWave algorithm.
  # Parameter meanings: https://rdrr.io/bioc/xcms/man/findChromPeaks-centWave.html
  params <- CentWaveParam(ppm = ppm, peakwidth = peakwidth, snthresh = snthresh,
                          prefilter = prefilter, mzdiff = mzdiff)
  findChromPeaks(raw_data, param = params)
}
extract_peaks <- function(mzml_files, design, ppm, peakwidth, snthresh, prefilter, mzdiff, msLevel) {
  # Read the raw files and run centWave peak picking on them.
  # NOTE: findChromPeaks currently only accepts an OnDiskMSnExp, which is why
  # the data is always loaded with mode = 'onDisk' here.
  raw <- readData(mzml_files, design, mode = 'onDisk', msLevel = msLevel)
  pickPeaks(raw, ppm = ppm, peakwidth = peakwidth, snthresh = snthresh,
            prefilter = prefilter, mzdiff = mzdiff)
}
get_df = function(xdata, msLevel) {
  # Flatten the detected peaks into a data.frame, annotating each row with
  # the MS level and the base name of the file the peak came from.
  peaks <- data.frame(chromPeaks(xdata))
  peaks$msLevel <- msLevel
  peaks$filename <- basename(fileNames(xdata)[peaks$sample])
  peaks
}
write_df <- function(df, filename) {
  # Write `df` as an uncompressed CSV without row names.
  # (A gzip variant was considered: write.csv(df, gzfile(filename), row.names = FALSE))
  utils::write.csv(df, filename, row.names = FALSE)
}
process_dir <- function(mzml_dir, extracted_peaks_out, aligned_features_out, aligned_intensities_out,
                        msLevel, ppm, peakwidth, snthresh, prefilter, mzdiff, binSize, minFraction, bw) {
  # Pick chromatographic peaks for every mzML file under `mzml_dir` and write
  # the combined peak table to <mzml_dir>/<extracted_peaks_out>.
  #
  # BUG FIX: the parameter was previously named `bandwidth` while the body used
  # the free variable `peakwidth`, i.e. it silently depended on a global of
  # that name. The parameter is now `peakwidth`; all callers pass it
  # positionally, so this rename is backward compatible.
  #
  # aligned_features_out / aligned_intensities_out, binSize, minFraction and bw
  # are only consumed by the retention-time alignment / grouping steps, which
  # are currently commented out below.
  mzml_files <- list.files(path = mzml_dir, pattern = '*.mzML', full.names = TRUE)
  design <- getDesign(mzml_files, "group1")
  xdata <- extract_peaks(mzml_files, design, ppm, peakwidth, snthresh, prefilter, mzdiff, msLevel)
  peaks_df <- get_df(xdata, msLevel)
  write_df(peaks_df, paste(mzml_dir, extracted_peaks_out, sep = '/'))
  # # Retention time alignment
  # xdata <- adjustRtime(xdata, param = ObiwarpParam(binSize = binSize))
  #
  # ## Correspondence: group peaks across samples
  # pdp <- PeakDensityParam(sampleGroups = xdata$sample_group,
  #                         minFraction = minFraction, bw = bw)
  # xdata <- groupChromPeaks(xdata, param = pdp)
  # feature_df <- featureDefinitions(xdata)
  # write_df(feature_df, paste(mzml_dir, aligned_features_out, sep = '/'))
  #
  # ## Extract the into column for each feature.
  # into_df <- featureValues(xdata, value = "into")
  # write_df(into_df, paste(mzml_dir, aligned_intensities_out, sep = '/'))
}
### processing starts here ###
# Parameters taken from
# https://www.dropbox.com/home/Meta_clustering/ms2lda/large_study/r/beer_method_3_pos?preview=xcmsPeakPicking.R
ppm <- 3
peakwidth <- c(5, 100)
snthresh <- 3
prefilter <- c(3, 1000)
mzdiff <- 0.001
# alignment and grouping parameters (only used once alignment is re-enabled)
binSize <- 0.6      # ObiWarp bin size
minFraction <- 0.8  # minimum fraction of samples a group must occur in
bw <- 30            # bandwidth for peak density grouping
msLevel <- 1
mzml_dir <- './mzML'
# BUG FIX: the original call passed the undefined variable `bandwidth`
# (only `bw` and `peakwidth` are defined above), which raised
# "object 'bandwidth' not found" at runtime; pass `peakwidth` instead.
process_dir(mzml_dir, 'extracted_peaks_ms1.csv', 'aligned_features_ms1.csv', 'aligned_intensities_ms1.csv',
            msLevel, ppm, peakwidth, snthresh, prefilter, mzdiff, binSize, minFraction, bw)
|
/vimms_django/documents/simple_ms1/example_data/urines/fragmentation/extract_peaks.R
|
no_license
|
Yanjiazhuo/vimms_django
|
R
| false
| false
| 3,780
|
r
|
# needs xcms3, see https://bioconductor.org/packages/release/bioc/html/xcms.html for installation
library(xcms)
### some useful functions ###
getDesign = function(mzml_files, label) {
  # Build a minimal sample-design table for a set of mzML files:
  # sample_name is the file name with its '.mzML' suffix stripped,
  # and every file is assigned to the same group `label`.
  sample_names <- sub(".mzML", "", basename(mzml_files), fixed = TRUE)
  data.frame(sample_name = sample_names,
             sample_group = rep(label, length(mzml_files)),
             stringsAsFactors = FALSE)
}
readData = function(files, design, mode="onDisk", msLevel=1) {
  # Read raw MS data (centroided) with the design table attached as
  # phenotype data, so downstream xcms steps can see the grouping.
  pheno <- new("NAnnotatedDataFrame", design)
  MSnbase::readMSData(files = files, pdata = pheno,
                      mode = mode, msLevel. = msLevel, centroided. = TRUE)
}
pickPeaks = function(raw_data, ppm, peakwidth, snthresh, prefilter, mzdiff) {
  # Detect chromatographic peaks with the centWave algorithm.
  # Parameter meanings: https://rdrr.io/bioc/xcms/man/findChromPeaks-centWave.html
  params <- CentWaveParam(ppm = ppm, peakwidth = peakwidth, snthresh = snthresh,
                          prefilter = prefilter, mzdiff = mzdiff)
  findChromPeaks(raw_data, param = params)
}
extract_peaks <- function(mzml_files, design, ppm, peakwidth, snthresh, prefilter, mzdiff, msLevel) {
  # Read the raw files and run centWave peak picking on them.
  # NOTE: findChromPeaks currently only accepts an OnDiskMSnExp, which is why
  # the data is always loaded with mode = 'onDisk' here.
  raw <- readData(mzml_files, design, mode = 'onDisk', msLevel = msLevel)
  pickPeaks(raw, ppm = ppm, peakwidth = peakwidth, snthresh = snthresh,
            prefilter = prefilter, mzdiff = mzdiff)
}
get_df = function(xdata, msLevel) {
  # Flatten the detected peaks into a data.frame, annotating each row with
  # the MS level and the base name of the file the peak came from.
  peaks <- data.frame(chromPeaks(xdata))
  peaks$msLevel <- msLevel
  peaks$filename <- basename(fileNames(xdata)[peaks$sample])
  peaks
}
write_df <- function(df, filename) {
  # Write `df` as an uncompressed CSV without row names.
  # (A gzip variant was considered: write.csv(df, gzfile(filename), row.names = FALSE))
  utils::write.csv(df, filename, row.names = FALSE)
}
process_dir <- function(mzml_dir, extracted_peaks_out, aligned_features_out, aligned_intensities_out,
                        msLevel, ppm, peakwidth, snthresh, prefilter, mzdiff, binSize, minFraction, bw) {
  # Pick chromatographic peaks for every mzML file under `mzml_dir` and write
  # the combined peak table to <mzml_dir>/<extracted_peaks_out>.
  #
  # BUG FIX: the parameter was previously named `bandwidth` while the body used
  # the free variable `peakwidth`, i.e. it silently depended on a global of
  # that name. The parameter is now `peakwidth`; all callers pass it
  # positionally, so this rename is backward compatible.
  #
  # aligned_features_out / aligned_intensities_out, binSize, minFraction and bw
  # are only consumed by the retention-time alignment / grouping steps, which
  # are currently commented out below.
  mzml_files <- list.files(path = mzml_dir, pattern = '*.mzML', full.names = TRUE)
  design <- getDesign(mzml_files, "group1")
  xdata <- extract_peaks(mzml_files, design, ppm, peakwidth, snthresh, prefilter, mzdiff, msLevel)
  peaks_df <- get_df(xdata, msLevel)
  write_df(peaks_df, paste(mzml_dir, extracted_peaks_out, sep = '/'))
  # # Retention time alignment
  # xdata <- adjustRtime(xdata, param = ObiwarpParam(binSize = binSize))
  #
  # ## Correspondence: group peaks across samples
  # pdp <- PeakDensityParam(sampleGroups = xdata$sample_group,
  #                         minFraction = minFraction, bw = bw)
  # xdata <- groupChromPeaks(xdata, param = pdp)
  # feature_df <- featureDefinitions(xdata)
  # write_df(feature_df, paste(mzml_dir, aligned_features_out, sep = '/'))
  #
  # ## Extract the into column for each feature.
  # into_df <- featureValues(xdata, value = "into")
  # write_df(into_df, paste(mzml_dir, aligned_intensities_out, sep = '/'))
}
### processing starts here ###
# Parameters taken from
# https://www.dropbox.com/home/Meta_clustering/ms2lda/large_study/r/beer_method_3_pos?preview=xcmsPeakPicking.R
ppm <- 3
peakwidth <- c(5, 100)
snthresh <- 3
prefilter <- c(3, 1000)
mzdiff <- 0.001
# alignment and grouping parameters (only used once alignment is re-enabled)
binSize <- 0.6      # ObiWarp bin size
minFraction <- 0.8  # minimum fraction of samples a group must occur in
bw <- 30            # bandwidth for peak density grouping
msLevel <- 1
mzml_dir <- './mzML'
# BUG FIX: the original call passed the undefined variable `bandwidth`
# (only `bw` and `peakwidth` are defined above), which raised
# "object 'bandwidth' not found" at runtime; pass `peakwidth` instead.
process_dir(mzml_dir, 'extracted_peaks_ms1.csv', 'aligned_features_ms1.csv', 'aligned_intensities_ms1.csv',
            msLevel, ppm, peakwidth, snthresh, prefilter, mzdiff, binSize, minFraction, bw)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct_diagonal_arrangement.R
\name{diagonal_arrangement}
\alias{diagonal_arrangement}
\title{Spatial Un-replicated Diagonal Arrangement Design}
\usage{
diagonal_arrangement(
nrows = NULL,
ncols = NULL,
lines = NULL,
checks = NULL,
planter = "serpentine",
l = 1,
plotNumber = 101,
kindExpt = "SUDC",
splitBy = "row",
seed = NULL,
blocks = NULL,
exptName = NULL,
locationNames = NULL,
multiLocationData = FALSE,
data = NULL
)
}
\arguments{
\item{nrows}{Number of rows in the field.}
\item{ncols}{Number of columns in the field.}
\item{lines}{Number of genotypes, experimental lines or treatments.}
\item{checks}{Number of genotypes checks.}
\item{planter}{Option for \code{serpentine} or \code{cartesian} plot arrangement.
By default \code{planter = 'serpentine'}.}
\item{l}{Number of locations or sites. By default \code{l = 1}.}
\item{plotNumber}{Numeric vector with the starting plot number for each location.
By default \code{plotNumber = 101}.}
\item{kindExpt}{Type of diagonal design, with single options: Single Un-replicated Diagonal Checks
\code{'SUDC'} and Decision Blocks Un-replicated Design with Diagonal Checks \code{'DBUDC'}
for multiple experiments. By default \code{kindExpt = 'SUDC'}.}
\item{splitBy}{Option to split the field when \code{kindExpt = 'DBUDC'} is selected.
By default \code{splitBy = 'row'}.}
\item{seed}{(optional) Real number that specifies the starting seed to obtain reproducible designs.}
\item{blocks}{Number of experiments or blocks to generate an \code{DBUDC} design.
If \code{kindExpt = 'DBUDC'} and data is null, \code{blocks} are mandatory.}
\item{exptName}{(optional) Name of the experiment.}
\item{locationNames}{(optional) Names for each location.}
\item{multiLocationData}{(optional) Option to pass an entry list for multiple locations.
By default \code{multiLocationData = FALSE}.}
\item{data}{(optional) Data frame with 2 columns: \code{ENTRY | NAME }.}
}
\value{
A list with five elements.
\itemize{
\item \code{infoDesign} is a list with information on the design parameters.
\item \code{layoutRandom} is a matrix with the randomization layout.
\item \code{plotsNumber} is a matrix with the layout plot number.
\item \code{data_entry} is a data frame with the data input.
\item \code{fieldBook} is a data frame with field book design. This includes the index (Row, Column).
}
}
\description{
Randomly generates a spatial un-replicated diagonal arrangement design.
}
\examples{
# Example 1: Generates a spatial single diagonal arrangement design in one location
# with 270 treatments and 30 check plots for a field with dimensions 15 rows x 20 cols
# in a serpentine arrangement.
spatd <- diagonal_arrangement(
nrows = 15,
ncols = 20,
lines = 270,
checks = 4,
plotNumber = 101,
kindExpt = "SUDC",
planter = "serpentine",
seed = 1987,
exptName = "20WRY1",
locationNames = "MINOT"
)
spatd$infoDesign
spatd$layoutRandom
spatd$plotsNumber
head(spatd$fieldBook, 12)
# Example 2: Generates a spatial decision block diagonal arrangement design in one location
# with 720 treatments allocated in 5 experiments or blocks for a field with dimensions
# 30 rows x 26 cols in a serpentine arrangement. In this case, we show how to set up the data
# option with the entries list.
checks <- 5;expts <- 5
list_checks <- paste("CH", 1:checks, sep = "")
treatments <- paste("G", 6:725, sep = "")
treatment_list <- data.frame(list(ENTRY = 1:725, NAME = c(list_checks, treatments)))
head(treatment_list, 12)
tail(treatment_list, 12)
spatDB <- diagonal_arrangement(
nrows = 30,
ncols = 26,
checks = 5,
plotNumber = 1,
kindExpt = "DBUDC",
planter = "serpentine",
splitBy = "row",
blocks = c(150,155,95,200,120),
data = treatment_list
)
spatDB$infoDesign
spatDB$layoutRandom
spatDB$plotsNumber
head(spatDB$fieldBook,12)
# Example 3: Generates a spatial decision block diagonal arrangement design in one location
# with 270 treatments allocated in 3 experiments or blocks for a field with dimensions
# 20 rows x 15 cols in a serpentine arrangement. Which in turn is an augmented block (3 blocks).
spatAB <- diagonal_arrangement(
nrows = 20,
ncols = 15,
lines = 270,
checks = 4,
plotNumber = c(1,1001,2001),
kindExpt = "DBUDC",
planter = "serpentine",
exptName = c("20WRA", "20WRB", "20WRC"),
blocks = c(90, 90, 90),
splitBy = "column"
)
spatAB$infoDesign
spatAB$layoutRandom
spatAB$plotsNumber
head(spatAB$fieldBook,12)
}
\references{
Clarke, G. P. Y., & Stefanova, K. T. (2011). Optimal design for early-generation plant
breeding trials with unreplicated or partially replicated test lines. Australian & New
Zealand Journal of Statistics, 53(4), 461–480.
}
\author{
Didier Murillo [aut],
Salvador Gezan [aut],
Ana Heilman [ctb],
Thomas Walk [ctb],
Johan Aparicio [ctb],
Richard Horsley [ctb]
}
|
/man/diagonal_arrangement.Rd
|
permissive
|
DidierMurilloF/FielDHub
|
R
| false
| true
| 4,983
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct_diagonal_arrangement.R
\name{diagonal_arrangement}
\alias{diagonal_arrangement}
\title{Spatial Un-replicated Diagonal Arrangement Design}
\usage{
diagonal_arrangement(
nrows = NULL,
ncols = NULL,
lines = NULL,
checks = NULL,
planter = "serpentine",
l = 1,
plotNumber = 101,
kindExpt = "SUDC",
splitBy = "row",
seed = NULL,
blocks = NULL,
exptName = NULL,
locationNames = NULL,
multiLocationData = FALSE,
data = NULL
)
}
\arguments{
\item{nrows}{Number of rows in the field.}
\item{ncols}{Number of columns in the field.}
\item{lines}{Number of genotypes, experimental lines or treatments.}
\item{checks}{Number of genotypes checks.}
\item{planter}{Option for \code{serpentine} or \code{cartesian} plot arrangement.
By default \code{planter = 'serpentine'}.}
\item{l}{Number of locations or sites. By default \code{l = 1}.}
\item{plotNumber}{Numeric vector with the starting plot number for each location.
By default \code{plotNumber = 101}.}
\item{kindExpt}{Type of diagonal design, with single options: Single Un-replicated Diagonal Checks
\code{'SUDC'} and Decision Blocks Un-replicated Design with Diagonal Checks \code{'DBUDC'}
for multiple experiments. By default \code{kindExpt = 'SUDC'}.}
\item{splitBy}{Option to split the field when \code{kindExpt = 'DBUDC'} is selected.
By default \code{splitBy = 'row'}.}
\item{seed}{(optional) Real number that specifies the starting seed to obtain reproducible designs.}
\item{blocks}{Number of experiments or blocks to generate an \code{DBUDC} design.
If \code{kindExpt = 'DBUDC'} and data is null, \code{blocks} are mandatory.}
\item{exptName}{(optional) Name of the experiment.}
\item{locationNames}{(optional) Names for each location.}
\item{multiLocationData}{(optional) Option to pass an entry list for multiple locations.
By default \code{multiLocationData = FALSE}.}
\item{data}{(optional) Data frame with 2 columns: \code{ENTRY | NAME }.}
}
\value{
A list with five elements.
\itemize{
\item \code{infoDesign} is a list with information on the design parameters.
\item \code{layoutRandom} is a matrix with the randomization layout.
\item \code{plotsNumber} is a matrix with the layout plot number.
\item \code{data_entry} is a data frame with the data input.
\item \code{fieldBook} is a data frame with field book design. This includes the index (Row, Column).
}
}
\description{
Randomly generates a spatial un-replicated diagonal arrangement design.
}
\examples{
# Example 1: Generates a spatial single diagonal arrangement design in one location
# with 270 treatments and 30 check plots for a field with dimensions 15 rows x 20 cols
# in a serpentine arrangement.
spatd <- diagonal_arrangement(
nrows = 15,
ncols = 20,
lines = 270,
checks = 4,
plotNumber = 101,
kindExpt = "SUDC",
planter = "serpentine",
seed = 1987,
exptName = "20WRY1",
locationNames = "MINOT"
)
spatd$infoDesign
spatd$layoutRandom
spatd$plotsNumber
head(spatd$fieldBook, 12)
# Example 2: Generates a spatial decision block diagonal arrangement design in one location
# with 720 treatments allocated in 5 experiments or blocks for a field with dimensions
# 30 rows x 26 cols in a serpentine arrangement. In this case, we show how to set up the data
# option with the entries list.
checks <- 5;expts <- 5
list_checks <- paste("CH", 1:checks, sep = "")
treatments <- paste("G", 6:725, sep = "")
treatment_list <- data.frame(list(ENTRY = 1:725, NAME = c(list_checks, treatments)))
head(treatment_list, 12)
tail(treatment_list, 12)
spatDB <- diagonal_arrangement(
nrows = 30,
ncols = 26,
checks = 5,
plotNumber = 1,
kindExpt = "DBUDC",
planter = "serpentine",
splitBy = "row",
blocks = c(150,155,95,200,120),
data = treatment_list
)
spatDB$infoDesign
spatDB$layoutRandom
spatDB$plotsNumber
head(spatDB$fieldBook,12)
# Example 3: Generates a spatial decision block diagonal arrangement design in one location
# with 270 treatments allocated in 3 experiments or blocks for a field with dimensions
# 20 rows x 15 cols in a serpentine arrangement. Which in turn is an augmented block (3 blocks).
spatAB <- diagonal_arrangement(
nrows = 20,
ncols = 15,
lines = 270,
checks = 4,
plotNumber = c(1,1001,2001),
kindExpt = "DBUDC",
planter = "serpentine",
exptName = c("20WRA", "20WRB", "20WRC"),
blocks = c(90, 90, 90),
splitBy = "column"
)
spatAB$infoDesign
spatAB$layoutRandom
spatAB$plotsNumber
head(spatAB$fieldBook,12)
}
\references{
Clarke, G. P. Y., & Stefanova, K. T. (2011). Optimal design for early-generation plant
breeding trials with unreplicated or partially replicated test lines. Australian & New
Zealand Journal of Statistics, 53(4), 461–480.
}
\author{
Didier Murillo [aut],
Salvador Gezan [aut],
Ana Heilman [ctb],
Thomas Walk [ctb],
Johan Aparicio [ctb],
Richard Horsley [ctb]
}
|
##@S This file contains code that generates scripts for running the NER programs.
# ODNB_setup.R is expected to define ZNSPLITS (the number of text splits to
# process) -- TODO confirm; it is the only external symbol used below.
source("../../../../code/ODNB/ODNB_setup.R")
## Script for stanford NER tool processing
# One shell command per split, preceded by a 3-line header
# (shebang, scriptdir helper, blank line).
script = rep("", times = ZNSPLITS+3)
script[1] = "#!/bin/sh"
script[2] = "scriptdir=`dirname $0`"
script[3] = ""
for(i in 1:ZNSPLITS) {
  temp = paste("sh ../../../../software/stanNER/ner.sh ../../../../data/ODNB_intermediate/NER/compiled_raw/",i,".txt > ../../../../data/ODNB_intermediate/NER/proc_STAN/ST_",i,".txt", sep = "")
  script[i+3] = temp
}
writeLines(script, con = "../../../../code/ODNB/text_processing/NER/stanSCRIPT.sh")
## Create folders 'proc' and 'unproc' in lingpipe/demos/generic/bin/.
## Run the following script to copy files over:
## Script for copying text files over to lingpipe directory
script = rep("", times = 3)
script[1] = "#!/bin/sh"
script[2] = ""
script[3] = "cp ../../../../data/ODNB_intermediate/NER/compiled_raw/* ../../../../software/lingpipe/demos/generic/bin/unproc"
writeLines(script, con = "../../../../code/ODNB/text_processing/NER/lingprecopySCRIPT.sh")
## Then, run the following line
## --- code ---
## nice nohup sh cmd_ne_en_news_muc6.sh -inDir=unproc -outDir=proc
## --- code ---
## This should be run from software/lingpipe/demos/generic/bin
## Finally, run this line to fix everything.
## Script for copying processed files back, and cleans up other directories.
script = rep("", times = 5)
script[1] = "#!/bin/sh"
script[2] = ""
script[3] = "cp ../../../../software/lingpipe/demos/generic/bin/proc/* ../../../../data/ODNB_intermediate/NER/proc_LING/"
script[4] = "rm ../../../../software/lingpipe/demos/generic/bin/proc/*"
script[5] = "rm ../../../../software/lingpipe/demos/generic/bin/unproc/*"
writeLines(script, con = "../../../../code/ODNB/text_processing/NER/lingpostcopySCRIPT.sh")
|
/code/ODNB/text_processing/NER/ner_scripts.R
|
no_license
|
blazers/six
|
R
| false
| false
| 1,830
|
r
|
##@S This file contains code that generates scripts for running the NER programs.
# ODNB_setup.R is expected to define ZNSPLITS (the number of text splits to
# process) -- TODO confirm; it is the only external symbol used below.
source("../../../../code/ODNB/ODNB_setup.R")
## Script for stanford NER tool processing
# One shell command per split, preceded by a 3-line header
# (shebang, scriptdir helper, blank line).
script = rep("", times = ZNSPLITS+3)
script[1] = "#!/bin/sh"
script[2] = "scriptdir=`dirname $0`"
script[3] = ""
for(i in 1:ZNSPLITS) {
  temp = paste("sh ../../../../software/stanNER/ner.sh ../../../../data/ODNB_intermediate/NER/compiled_raw/",i,".txt > ../../../../data/ODNB_intermediate/NER/proc_STAN/ST_",i,".txt", sep = "")
  script[i+3] = temp
}
writeLines(script, con = "../../../../code/ODNB/text_processing/NER/stanSCRIPT.sh")
## Create folders 'proc' and 'unproc' in lingpipe/demos/generic/bin/.
## Run the following script to copy files over:
## Script for copying text files over to lingpipe directory
script = rep("", times = 3)
script[1] = "#!/bin/sh"
script[2] = ""
script[3] = "cp ../../../../data/ODNB_intermediate/NER/compiled_raw/* ../../../../software/lingpipe/demos/generic/bin/unproc"
writeLines(script, con = "../../../../code/ODNB/text_processing/NER/lingprecopySCRIPT.sh")
## Then, run the following line
## --- code ---
## nice nohup sh cmd_ne_en_news_muc6.sh -inDir=unproc -outDir=proc
## --- code ---
## This should be run from software/lingpipe/demos/generic/bin
## Finally, run this line to fix everything.
## Script for copying processed files back, and cleans up other directories.
script = rep("", times = 5)
script[1] = "#!/bin/sh"
script[2] = ""
script[3] = "cp ../../../../software/lingpipe/demos/generic/bin/proc/* ../../../../data/ODNB_intermediate/NER/proc_LING/"
script[4] = "rm ../../../../software/lingpipe/demos/generic/bin/proc/*"
script[5] = "rm ../../../../software/lingpipe/demos/generic/bin/unproc/*"
writeLines(script, con = "../../../../code/ODNB/text_processing/NER/lingpostcopySCRIPT.sh")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{amult}
\alias{amult}
\alias{\%X\%}
\title{Generalized array multiplication.}
\usage{
amult(X, Y, FUN = "*", SUM = "sum", BY = NULL, MoreArgs = NULL,
..., SIMPLIFY = TRUE, VECTORIZED = TRUE)
X \%X\% Y
}
\arguments{
\item{X, Y}{Generalized arrays that can be multiplied.}
\item{FUN}{The 'multiply' function.}
\item{SUM}{The 'reduce' function.}
\item{BY}{margins excluded from summary by SUM.}
\item{MoreArgs, SIMPLIFY, VECTORIZED}{Argument used by 'amap()'.}
\item{...}{Argument used by 'areduce()'.}
}
\description{
Default to Einstein summation convention, without explicitly subscripts.
}
\details{
Margins shared by X and Y are parallelly mapped by FUN,
and then reduced by SUM (inner product like \code{\%*\%});
margins in BY and shared by X and Y are simply mapped by FUN
but excluded from reducing (parallel product like \code{*});
other margins are extended repeatedly (outer product like \code{\%o\%}).
Shared margins not to be mapped have to be renamed (like outer product).
For special FUN and SUM, fast algorithms are implemented.
}
\examples{
a <- garray(1:24, c(4,6), list(X=LETTERS[1:4], Y=letters[1:6]),
sdim=list(XX=c(x1=3,x2=1), YY=c(y1=1,y2=2)))
b <- garray(1:20, c(Z=5, X=4))
c <- garray(1:120, c(X=4,Y=6,Z=5))
m1 <- amult(a, b)
m2 <- amult(a, b, `*`, sum)
m3 <- amult(b, a)
all.equal(m1, m2)
all.equal(m1, m3)
all.equal(m1, t(m3))
n1 <- amult(a, c, `*`, sum)
n2 <- a\%X\%c
all.equal(n1, n2)
amult(garray(1:5,margins="I"), garray(1:8,margins="J"))
amult(garray(1:8,c(I=2,J=4)), garray(1:9,c(K=3,L=3)))
}
|
/man/amult.Rd
|
no_license
|
cran/garray
|
R
| false
| true
| 1,620
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{amult}
\alias{amult}
\alias{\%X\%}
\title{Generalized array multiplication.}
\usage{
amult(X, Y, FUN = "*", SUM = "sum", BY = NULL, MoreArgs = NULL,
..., SIMPLIFY = TRUE, VECTORIZED = TRUE)
X \%X\% Y
}
\arguments{
\item{X, Y}{Generalized arrays that can be multiplied.}
\item{FUN}{The 'multiply' function.}
\item{SUM}{The 'reduce' function.}
\item{BY}{margins excluded from summary by SUM.}
\item{MoreArgs, SIMPLIFY, VECTORIZED}{Argument used by 'amap()'.}
\item{...}{Argument used by 'areduce()'.}
}
\description{
Default to Einstein summation convention, without explicitly subscripts.
}
\details{
Margins shared by X and Y are parallelly mapped by FUN,
and then reduced by SUM (inner product like \code{\%*\%});
margins in BY and shared by X and Y are simply mapped by FUN
but excluded from reducing (parallel product like \code{*});
other margins are extended repeatedly (outer product like \code{\%o\%}).
Shared margins not to be mapped have to be renamed (like outer product).
For special FUN and SUM, fast algorithms are implemented.
}
\examples{
a <- garray(1:24, c(4,6), list(X=LETTERS[1:4], Y=letters[1:6]),
sdim=list(XX=c(x1=3,x2=1), YY=c(y1=1,y2=2)))
b <- garray(1:20, c(Z=5, X=4))
c <- garray(1:120, c(X=4,Y=6,Z=5))
m1 <- amult(a, b)
m2 <- amult(a, b, `*`, sum)
m3 <- amult(b, a)
all.equal(m1, m2)
all.equal(m1, m3)
all.equal(m1, t(m3))
n1 <- amult(a, c, `*`, sum)
n2 <- a\%X\%c
all.equal(n1, n2)
amult(garray(1:5,margins="I"), garray(1:8,margins="J"))
amult(garray(1:8,c(I=2,J=4)), garray(1:9,c(K=3,L=3)))
}
|
##### This program web scrapes NBA standings data #####
library(rvest); library(stringr); library(tidyr);library(plyr);library(dplyr)
library(ggplot2)
# Initialize final DataFrame
final <- data.frame()
#--- Loop for years 1956 to 1970 (1 table only) ---#
# Before 1971 basketball-reference publishes a single standings table per year.
# NOTE(review): growing `final` with rbind.fill inside the loop is O(n^2);
# collecting into a list and binding once at the end would be faster.
for (yr in 1956:1970) {
  #--- Define path for data ---#
  webpage <- paste0("https://www.basketball-reference.com/leagues/NBA_",yr,"_standings.html")
  node <- html_nodes(read_html(webpage), 'table')
  #--- Scrape data and format ---#
  # Scrape
  standings <- html_table(node, header=TRUE, fill=TRUE)[[1]]
  names(standings)[1] <- "team"
  # Make year
  standings$year <- yr
  # Stack along
  final <- rbind.fill(final,standings)
}
#--- Loop for years 1971 to 2019 (2 tables) ---#
# From 1971 on there are two tables (presumably East and West conferences --
# verify against the page layout).
for (yr in 1971:2019) {
  #--- Define path for data ---#
  webpage <- paste0("https://www.basketball-reference.com/leagues/NBA_",yr,"_standings.html")
  node <- html_nodes(read_html(webpage), 'table')
  #--- Scrape data and format ---#
  # Scrape
  east <- html_table(node, header=TRUE, fill=TRUE)[[1]]
  names(east)[1] <- "team"
  west <- html_table(node, header=TRUE, fill=TRUE)[[2]]
  names(west)[1] <- "team"
  # Stack east and west
  standings <- rbind.fill(east,west)
  # Make year
  standings$year <- yr
  # Stack with final DataFrame
  final <- rbind.fill(final,standings)
}
# Checks
table(final$year)
table(final$team)
# Exclude the rows with "Division"
# Division header rows appear as pseudo-teams; drop them before saving.
final2 <- filter(final, !grepl("Division", team))
table(final2$year)
# Write to local path
setwd("/Users/nicholashyder/Documents/Side Projects/NBA MVP Analysis/dta/")
saveRDS(final2,"Raw Final Standings.RData")
|
/import/scrape_team_standings.R
|
no_license
|
nhyder/nba_mvp_analysis
|
R
| false
| false
| 1,681
|
r
|
##### This program web scrapes NBA standings data #####
library(rvest); library(stringr); library(tidyr);library(plyr);library(dplyr)
library(ggplot2)
# Initialize final DataFrame
final <- data.frame()
#--- Loop for years 1956 to 1970 (1 table only) ---#
# Before 1971 basketball-reference publishes a single standings table per year.
# NOTE(review): growing `final` with rbind.fill inside the loop is O(n^2);
# collecting into a list and binding once at the end would be faster.
for (yr in 1956:1970) {
  #--- Define path for data ---#
  webpage <- paste0("https://www.basketball-reference.com/leagues/NBA_",yr,"_standings.html")
  node <- html_nodes(read_html(webpage), 'table')
  #--- Scrape data and format ---#
  # Scrape
  standings <- html_table(node, header=TRUE, fill=TRUE)[[1]]
  names(standings)[1] <- "team"
  # Make year
  standings$year <- yr
  # Stack along
  final <- rbind.fill(final,standings)
}
#--- Loop for years 1971 to 2019 (2 tables) ---#
# From 1971 on there are two tables (presumably East and West conferences --
# verify against the page layout).
for (yr in 1971:2019) {
  #--- Define path for data ---#
  webpage <- paste0("https://www.basketball-reference.com/leagues/NBA_",yr,"_standings.html")
  node <- html_nodes(read_html(webpage), 'table')
  #--- Scrape data and format ---#
  # Scrape
  east <- html_table(node, header=TRUE, fill=TRUE)[[1]]
  names(east)[1] <- "team"
  west <- html_table(node, header=TRUE, fill=TRUE)[[2]]
  names(west)[1] <- "team"
  # Stack east and west
  standings <- rbind.fill(east,west)
  # Make year
  standings$year <- yr
  # Stack with final DataFrame
  final <- rbind.fill(final,standings)
}
# Checks
table(final$year)
table(final$team)
# Exclude the rows with "Division"
# Division header rows appear as pseudo-teams; drop them before saving.
final2 <- filter(final, !grepl("Division", team))
table(final2$year)
# Write to local path
setwd("/Users/nicholashyder/Documents/Side Projects/NBA MVP Analysis/dta/")
saveRDS(final2,"Raw Final Standings.RData")
|
#' Nest data in a Spark Dataframe
#'
#' This function is designed to behave similarly to \code{tidyr::nest}.
#'
#' Note that calling \code{sdf_nest} will not aggregate and cannot be done
#' inside of a \code{group_by(...) %>% summarize(..)} operation. To produce
#' a nested array one might use \code{sdf_nest} in conjunction with the
#' \code{collect_list} Spark SQL function:
#'
#' @examples
#' \dontrun{
#' # produces a dataframe with an array of characteristics nested under
#' # each unique species identifier
#' iris2 <- copy_to(sc, iris, name="iris")
#' iris2 %>%
#' sdf_nest(Sepal_Length, Sepal_Width, Petal.Length, Petal.Width, .key="data") %>%
#' group_by(Species) %>%
#' summarize(data=collect_list(data))
#' }
#'
#' @param x A Spark dataframe.
#' @param ... Columns to nest.
#' @param .key Character. A name for the new column containing nested fields
#' @export
sdf_nest <- function(x, ..., .key="data") {
  # Resolve the bare column names given in ... to character strings,
  # then delegate to the standard-evaluation variant.
  nest_cols <- convert_dots_to_strings(...)
  sdf_nest_(x, columns = nest_cols, .key = .key)
}
#' @rdname sdf_nest
#' @param columns Character vector. Columns to nest.
#' @export
sdf_nest_ <- function(x, columns, .key="data") {
  sdf <- spark_dataframe(x)
  sc <- spark_connection(x)
  # Partition the dataframe's columns into those to nest and those to keep.
  nested_cols <- list()
  kept_cols <- list()
  for (nm in colnames(x)) {
    col_ref <- invoke(sdf, "col", nm)
    if (nm %in% columns) {
      nested_cols <- c(nested_cols, col_ref)
    } else {
      kept_cols <- c(kept_cols, col_ref)
    }
  }
  # Pack the nested columns into a single struct column aliased as `.key`.
  struct_col <- invoke_static(sc, method = "struct",
                              class = "org.apache.spark.sql.functions",
                              nested_cols)
  struct_col <- invoke(struct_col, "alias", .key)
  # Select the kept columns plus the struct column.
  out_sdf <- invoke(sdf, "select", c(kept_cols, struct_col))
  # Register the result as a new table and return the tbl reference.
  sdf_register(out_sdf)
}
|
/R/nest.R
|
permissive
|
kashenfelter/sparklyr.nested
|
R
| false
| false
| 1,933
|
r
|
#' Nest data in a Spark Dataframe
#'
#' This function is designed to behave similarly to \code{tidyr::nest}.
#'
#' Note that calling \code{sdf_nest} will not aggregate and cannot be done
#' inside of a \code{group_by(...) %>% summarize(..)} operation. To produce
#' a nested array one might use \code{sdf_nest} in conjunction with the
#' \code{collect_list} Spark SQL function:
#'
#' @examples
#' \dontrun{
#' # produces a dataframe with an array of characteristics nested under
#' # each unique species identifier
#' iris2 <- copy_to(sc, iris, name="iris")
#' iris2 %>%
#' sdf_nest(Sepal_Length, Sepal_Width, Petal.Length, Petal.Width, .key="data") %>%
#' group_by(Species) %>%
#' summarize(data=collect_list(data))
#' }
#'
#' @param x A Spark dataframe.
#' @param ... Columns to nest.
#' @param .key Character. A name for the new column containing nested fields
#' @export
sdf_nest <- function(x, ..., .key="data") {
  # Resolve the bare column names given in ... to character strings,
  # then delegate to the standard-evaluation variant.
  nest_cols <- convert_dots_to_strings(...)
  sdf_nest_(x, columns = nest_cols, .key = .key)
}
#' @rdname sdf_nest
#' @param columns Character vector. Columns to nest.
#' @export
sdf_nest_ <- function(x, columns, .key="data") {
  sdf <- spark_dataframe(x)
  sc <- spark_connection(x)
  # Partition the dataframe's columns into those to nest and those to keep.
  nested_cols <- list()
  kept_cols <- list()
  for (nm in colnames(x)) {
    col_ref <- invoke(sdf, "col", nm)
    if (nm %in% columns) {
      nested_cols <- c(nested_cols, col_ref)
    } else {
      kept_cols <- c(kept_cols, col_ref)
    }
  }
  # Pack the nested columns into a single struct column aliased as `.key`.
  struct_col <- invoke_static(sc, method = "struct",
                              class = "org.apache.spark.sql.functions",
                              nested_cols)
  struct_col <- invoke(struct_col, "alias", .key)
  # Select the kept columns plus the struct column.
  out_sdf <- invoke(sdf, "select", c(kept_cols, struct_col))
  # Register the result as a new table and return the tbl reference.
  sdf_register(out_sdf)
}
|
getwd()
setwd("C:/Users/ktelk/Documents/Coursera/UCI HAR Dataset")
#list.files()
#You should create one R script called run_analysis.R that does the following.
#Merges the training and the test sets to create one data set.
#Extracts only the measurements on the mean and standard deviation for each measurement.
#Uses descriptive activity names to name the activities in the data set
#Appropriately labels the data set with descriptive variable names.
#From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#Good luck!
# Step1. Merges the training and the test sets to create one data set.
# setwd("~/Desktop/Online Coursera/Coursera-Getting-and-Cleaning-Data/peer_assessment/")
trainData <- read.table("./train/X_train.txt")
dim(trainData) # 7352*561
head(trainData)
trainLabel <- read.table("./train/y_train.txt")
table(trainLabel)
trainSubject <- read.table("./train/subject_train.txt")
testData <- read.table("./test/X_test.txt")
dim(testData) # 2947*561
testLabel <- read.table("./test/y_test.txt")
table(testLabel)
testSubject <- read.table("./test/subject_test.txt")
joinData <- rbind(trainData, testData)
dim(joinData) # 10299*561
joinLabel <- rbind(trainLabel, testLabel)
dim(joinLabel) # 10299*1
joinSubject <- rbind(trainSubject, testSubject)
dim(joinSubject) # 10299*1
# Step2. Extracts only the measurements on the mean and standard
# deviation for each measurement.
features <- read.table("./features.txt")
dim(features) # 561*2
meanStdIndices <- grep("mean\\(\\)|std\\(\\)", features[, 2])
length(meanStdIndices) # 66
joinData <- joinData[, meanStdIndices]
dim(joinData) # 10299*66
names(joinData) <- gsub("\\(\\)", "", features[meanStdIndices, 2]) # remove "()"
names(joinData) <- gsub("mean", "Mean", names(joinData)) # capitalize M
names(joinData) <- gsub("std", "Std", names(joinData)) # capitalize S
names(joinData) <- gsub("-", "", names(joinData)) # remove "-" in column names
# Step3. Uses descriptive activity names to name the activities in
# the data set
activity <- read.table("./activity_labels.txt")
activity[, 2] <- tolower(gsub("_", "", activity[, 2]))
substr(activity[2, 2], 8, 8) <- toupper(substr(activity[2, 2], 8, 8))
substr(activity[3, 2], 8, 8) <- toupper(substr(activity[3, 2], 8, 8))
activityLabel <- activity[joinLabel[, 1], 2]
joinLabel[, 1] <- activityLabel
names(joinLabel) <- "activity"
# Step4. Appropriately labels the data set with descriptive activity
# names.
names(joinSubject) <- "subject"
cleanedData <- cbind(joinSubject, joinLabel, joinData)
dim(cleanedData) # 10299*68
write.table(cleanedData, "merged_data.txt") # write out the 1st dataset
# Step5. Creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
subjectLen <- length(table(joinSubject)) # 30
activityLen <- dim(activity)[1] # 6
columnLen <- dim(cleanedData)[2]
result <- matrix(NA, nrow=subjectLen*activityLen, ncol=columnLen)
result <- as.data.frame(result)
colnames(result) <- colnames(cleanedData)
row <- 1
for(i in 1:subjectLen) {
for(j in 1:activityLen) {
result[row, 1] <- sort(unique(joinSubject)[, 1])[i]
result[row, 2] <- activity[j, 2]
bool1 <- i == cleanedData$subject
bool2 <- activity[j, 2] == cleanedData$activity
result[row, 3:columnLen] <- colMeans(cleanedData[bool1&bool2, 3:columnLen])
row <- row + 1
}
}
head(result)
write.table(result, "data_with_means.txt") # write out the 2nd dataset
#data <- read.table("./data_with_means.txt")
#data[1:12, 1:3]
|
/run_analysis.R
|
no_license
|
ktelk/Peer-Assessments
|
R
| false
| false
| 3,585
|
r
|
getwd()
setwd("C:/Users/ktelk/Documents/Coursera/UCI HAR Dataset")  # NOTE(review): machine-specific path
#list.files()

# run_analysis.R -- builds two tidy data sets from the UCI HAR data:
#   1. Merges the training and the test sets to create one data set.
#   2. Extracts only the measurements on the mean and standard deviation
#      for each measurement.
#   3. Uses descriptive activity names to name the activities in the data set.
#   4. Appropriately labels the data set with descriptive variable names.
#   5. From the data set in step 4, creates a second, independent tidy data
#      set with the average of each variable for each activity and subject.

# Step 1. Merge the training and the test sets to create one data set.
trainData <- read.table("./train/X_train.txt")
dim(trainData)  # 7352*561
head(trainData)
trainLabel <- read.table("./train/y_train.txt")
table(trainLabel)
trainSubject <- read.table("./train/subject_train.txt")
testData <- read.table("./test/X_test.txt")
dim(testData)  # 2947*561
testLabel <- read.table("./test/y_test.txt")
table(testLabel)
testSubject <- read.table("./test/subject_test.txt")
joinData <- rbind(trainData, testData)
dim(joinData)  # 10299*561
joinLabel <- rbind(trainLabel, testLabel)
dim(joinLabel)  # 10299*1
joinSubject <- rbind(trainSubject, testSubject)
dim(joinSubject)  # 10299*1

# Step 2. Extract only the measurements on the mean and standard deviation
# for each measurement.
features <- read.table("./features.txt")
dim(features)  # 561*2
meanStdIndices <- grep("mean\\(\\)|std\\(\\)", features[, 2])
length(meanStdIndices)  # 66
joinData <- joinData[, meanStdIndices]
dim(joinData)  # 10299*66
names(joinData) <- gsub("\\(\\)", "", features[meanStdIndices, 2])  # remove "()"
names(joinData) <- gsub("mean", "Mean", names(joinData))  # capitalize M
names(joinData) <- gsub("std", "Std", names(joinData))    # capitalize S
names(joinData) <- gsub("-", "", names(joinData))         # remove "-" in column names

# Step 3. Use descriptive activity names to name the activities in the
# data set (e.g. "walkingUpstairs").
activity <- read.table("./activity_labels.txt")
activity[, 2] <- tolower(gsub("_", "", activity[, 2]))
substr(activity[2, 2], 8, 8) <- toupper(substr(activity[2, 2], 8, 8))
substr(activity[3, 2], 8, 8) <- toupper(substr(activity[3, 2], 8, 8))
activityLabel <- activity[joinLabel[, 1], 2]
joinLabel[, 1] <- activityLabel
names(joinLabel) <- "activity"

# Step 4. Appropriately label the data set with descriptive variable names.
names(joinSubject) <- "subject"
cleanedData <- cbind(joinSubject, joinLabel, joinData)
dim(cleanedData)  # 10299*68
write.table(cleanedData, "merged_data.txt")  # write out the 1st dataset

# Step 5. Create a second, independent tidy data set with the average of
# each variable for each activity and each subject.
subjectLen <- length(table(joinSubject))  # 30
activityLen <- dim(activity)[1]           # 6
columnLen <- dim(cleanedData)[2]
result <- as.data.frame(matrix(NA, nrow = subjectLen * activityLen,
                               ncol = columnLen))
colnames(result) <- colnames(cleanedData)
# Hoist the sorted subject IDs out of the loop. Filtering on the actual
# subject value (rather than the loop index i) keeps the computed means
# aligned with the IDs written to column 1 even if the subject IDs are not
# exactly 1..subjectLen.
subjects <- sort(unique(joinSubject[, 1]))
row <- 1
for (i in seq_len(subjectLen)) {
  for (j in seq_len(activityLen)) {
    result[row, 1] <- subjects[i]
    result[row, 2] <- activity[j, 2]
    bool1 <- cleanedData$subject == subjects[i]
    bool2 <- cleanedData$activity == activity[j, 2]
    result[row, 3:columnLen] <- colMeans(cleanedData[bool1 & bool2, 3:columnLen])
    row <- row + 1
  }
}
head(result)
write.table(result, "data_with_means.txt")  # write out the 2nd dataset
#data <- read.table("./data_with_means.txt")
#data[1:12, 1:3]
|
# Set the working directory based on the user login (either Tejo or Suman).
workingdirectory <- if (Sys.info()["login"] == "sumannooney") {
  "/Users/sumannooney/Documents/Data Science/Data at Scale/Capstone/capstone"
} else {
  "/Users/tejo/UW450/Capstone"
}
setwd(workingdirectory)
cat("\014")  # clear the console
getwd()

# Attach the required packages, installing any that are missing.
# ("corrr" is currently disabled, as in the original configuration.)
pkgs <- c("ggplot2", "dplyr", "tidyverse", "PerformanceAnalytics")
for (p in pkgs) {
  if (!require(p, character.only = TRUE)) {
    install.packages(p)
    require(p, character.only = TRUE)
  }
}
|
/config.R
|
no_license
|
sumannooney/capstone
|
R
| false
| false
| 713
|
r
|
# Set the working directory based on the user login (either Tejo or Suman).
workingdirectory <- if (Sys.info()["login"] == "sumannooney") {
  "/Users/sumannooney/Documents/Data Science/Data at Scale/Capstone/capstone"
} else {
  "/Users/tejo/UW450/Capstone"
}
setwd(workingdirectory)
cat("\014")  # clear the console
getwd()

# Attach the required packages, installing any that are missing.
# ("corrr" is currently disabled, as in the original configuration.)
pkgs <- c("ggplot2", "dplyr", "tidyverse", "PerformanceAnalytics")
for (p in pkgs) {
  if (!require(p, character.only = TRUE)) {
    install.packages(p)
    require(p, character.only = TRUE)
  }
}
|
## There are two main functions - makeCacheMatrix takes a matrix, and sets up methods within it for use by
## the function cacheSolve. makeCacheMatrix puts a null value in variable m to start. It defines a list of
## methods for use by external functions. These are in the comments in the code next to the relevant function (method).
## The second function (cacheSolve) takes the output from makeCacheMatrix as its input. If there's already a value in the
## cache store (m) - i.e. not null - it returns this (i.e. it is used as a cache). If not, it gets the data with x$get(),
## uses solve (if it can) to invert it, which it then assigns to m. It then passes this to x$getinvert() method call,
## which superassigns it to m, i.e. it is cached for future usage.
## Create a special "matrix" wrapper that can cache its inverse.
## Assumes the matrix is invertible, so no invertibility check is performed.
##
## Returns a list of accessor closures sharing the stored matrix `x` and a
## cache slot for its inverse:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   invertmatrix(s) -- store a freshly computed inverse in the cache
##   getinvert()     -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y                  # swap in the new matrix ...
      cached_inverse <<- NULL  # ... and invalidate the now-stale cache
    },
    get = function() x,
    invertmatrix = function(solved) {
      # Superassign so the cached value survives beyond this call.
      cached_inverse <<- solved
    },
    getinvert = function() cached_inverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## On a cache hit the previously computed inverse is returned immediately
## (with a status message). On a miss the inverse is computed with solve(),
## stored in the cache via x$invertmatrix(), and returned. Any extra
## arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinvert()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    mat <- x$get()
    cached <- solve(mat, ...)
    x$invertmatrix(cached)
    return(cached)
  }
  message("getting inverted matrix")
  cached
}
|
/cachematrix_back.R
|
no_license
|
Ginger11/ProgrammingAssignment2
|
R
| false
| false
| 1,975
|
r
|
## There are two main functions - makeCacheMatrix takes a matrix, and sets up methods within it for use by
## the function cacheSolve. makeCacheMatrix puts a null value in variable m to start. It defines a list of
## methods for use by external functions. These are in the comments in the code next to the relevant function (method).
## The second function (cacheSolve) takes the output from makeCacheMatrix as its input. If there's already a value in the
## cache store (m) - i.e. not null - it returns this (i.e. it is used as a cache). If not, it gets the data with x$get(),
## uses solve (if it can) to invert it, which it then assigns to m. It then passes this to x$getinvert() method call,
## which superassigns it to m, i.e. it is cached for future usage.
## Create a special "matrix" wrapper that can cache its inverse.
## Assumes the matrix is invertible, so no invertibility check is performed.
##
## Returns a list of accessor closures sharing the stored matrix `x` and a
## cache slot for its inverse:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   invertmatrix(s) -- store a freshly computed inverse in the cache
##   getinvert()     -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y                  # swap in the new matrix ...
      cached_inverse <<- NULL  # ... and invalidate the now-stale cache
    },
    get = function() x,
    invertmatrix = function(solved) {
      # Superassign so the cached value survives beyond this call.
      cached_inverse <<- solved
    },
    getinvert = function() cached_inverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## On a cache hit the previously computed inverse is returned immediately
## (with a status message). On a miss the inverse is computed with solve(),
## stored in the cache via x$invertmatrix(), and returned. Any extra
## arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinvert()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    mat <- x$get()
    cached <- solve(mat, ...)
    x$invertmatrix(cached)
    return(cached)
  }
  message("getting inverted matrix")
  cached
}
|
### NFL Big Data Bowl 2020-21
# Robert Bernhardt, Andrew Rogan, Daniel Weiss
# December 2020
## Feature Engineering
# Objective: Further clean data & create features which can be used for analysis
# Task
# Make all plays go left to right (from Offense POV)
AnalysisData <- AnalysisData %>%
mutate( x = case_when( playDirection == "right" ~ x ,
playDirection == "left" ~ 120 - x)) %>%
mutate( y = case_when( playDirection == "right" ~ y ,
playDirection == "left" ~ 160/3 - y)) %>%
mutate( dir = case_when( playDirection == "right" ~ dir ,
playDirection == "left" ~ mod(dir+180,360))) %>%
mutate( o = case_when( playDirection == "right" ~ o ,
playDirection == "left" ~ mod(o+180,360)))
# Task
# Create Event 2 (event2) featuring greater standardization
AnalysisData <- AnalysisData %>%
group_by(gameId , playId) %>%
mutate(event2 = case_when(
event == "ball_snap" ~ "ball_snap" ,
event == "pass_forward" | event == "pass_shovel" ~ "pass_thrown" ,
event == "pass_arrived" ~ "pass_arrived" ,
event == "pass_outcome_caught" | event == "pass_outcome_interception" |
event == "pass_outcome_incomplete" | event == "pass_outcome_touchdown" ~ "pass_outcome" ,
event == "tackle" | event == "out_of_bounds" | event == "touchdown" | event == "touchback"
~ "later_final_outcome" ,
TRUE ~ "None"))
# Task
# Create Event 3 (event3) featuring maximal standardization: ball_snap, pass_thrown, pass_arrived
# If no "pass_arrived" event, then "pass_outcome" indicates the arrival for event 3
AnalysisData <- AnalysisData %>%
group_by(gameId , playId) %>%
mutate(no_arrived = sum(event2 == "pass_arrived") == 0) %>%
mutate( event3 = case_when(event2 == "ball_snap" ~ "ball_snap" ,
event2 == "pass_thrown" ~ "pass_thrown" ,
event2 == "pass_arrived" ~ "pass_arrived" ,
event2 == "pass_outcome" & no_arrived == TRUE ~ "pass_arrived" ,
TRUE ~ "None")) %>%
mutate(no_arrived = NULL) # %>%
# Task
# Removes Data without any pass or any arrival in the events
AnalysisData <- AnalysisData %>%
filter(sum(event3 == "pass_thrown") > 0 ) %>%
filter(sum(event3 == "pass_arrived") > 0)
# Task
# Identify relevant teams
AnalysisData <- AnalysisData %>%
group_by(gameId, playId, frameId) %>%
mutate(QBTeam = team[match('QB', position)]) %>%
ungroup
AnalysisData <- AnalysisData %>%
group_by(playId , gameId) %>%
mutate(OffDef = case_when( team == QBTeam ~ "Offense",
team != QBTeam ~ "Defense")) %>%
mutate(OffDef = case_when( displayName != "Football" ~ OffDef,
displayName == "Football" ~ "Football")) %>%
mutate(Offense = case_when( OffDef == "Offense" ~ TRUE ,
OffDef == "Defense" ~ FALSE ,
OffDef == "Football" ~ NA ))
# Task
# Adjust and Create Time Variables
# Creates Date variable R understands
AnalysisData$Date <- as.Date(substr(AnalysisData$time,1,10))
options("digits.secs"=6) # Allows for fractions of seconds.
AnalysisData <- AnalysisData %>% # Creates date-time variable R understands
mutate(TimeClean = paste(substr(time,1,10) , substr(time,12,23))) %>%
mutate(TimeClean = as.POSIXct(TimeClean))
AnalysisData <- AnalysisData %>% # Identifies Critical Frames
group_by(gameId, playId) %>%
mutate(SnapFrame = frameId[match('ball_snap', event3)]) %>%
mutate(PassFrame = frameId[match('pass_thrown',event3)]) %>%
mutate(ArrivedFrame = frameId[match('pass_arrived',event3)])
AnalysisData <- AnalysisData %>%
mutate(FramesSinceSnap = frameId - SnapFrame) %>%
mutate(FramesSincePass = frameId - PassFrame) %>%
mutate(FramesSinceArrive = frameId - ArrivedFrame)
# Task
# Identify Primary Quarterback (player who receives snap)
AnalysisData <- AnalysisData %>%
group_by(gameId , playId) %>%
mutate(ball_snap_y = y[match('Football ball_snap' , paste(displayName, event3))]) %>%
mutate(dist2ball_snap = case_when( event == "ball_snap" ~ abs(y - ball_snap_y) ,
TRUE ~ 999)) %>%
mutate( BackfieldQB = case_when( event == "ball_snap" & position == "QB" & dist2ball_snap < 3 ~ 'yes' ,
TRUE ~ 'no')) %>%
mutate( BackfieldQB = displayName[match('yes',BackfieldQB)]) %>%
mutate(ball_snap_y = NULL) %>% mutate(dist2ball_snap = NULL)
# Task
# Identify Targeted Player
AnalysisData <- left_join(AnalysisData , TargetedReceiver)
AnalysisData <- AnalysisData %>%
mutate(targetName = case_when( nflId == targetNflId ~ "yes" ,
TRUE ~ "no")) %>%
mutate(targetName = displayName[match('yes' , targetName)]) %>%
filter(is.na(targetNflId) == 0) %>% # Removes plays with no targeted player
mutate(targetPosition = case_when( nflId == targetNflId ~ "yes" ,
TRUE ~ "no")) %>%
mutate(targetPosition = position[match('yes',targetPosition)])
# Task
# Identify Primary/Secondary Defenders & Their Distance to Target at Time of Throw
AnalysisData <- AnalysisData %>%
group_by(playId , gameId) %>%
mutate(dist2target_x = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no')) %>%
mutate(dist2target_x = x[match('yes' , dist2target_x)]) %>%
mutate(dist2target_x = case_when( event3 != "pass_thrown" ~ 999 ,
TRUE ~ abs(x - dist2target_x))) %>% # x-distance found
mutate(dist2target_y = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no')) %>%
mutate(dist2target_y = y[match('yes' , dist2target_y)]) %>%
mutate(dist2target_y = case_when( event3 != "pass_thrown" ~ 999 ,
TRUE ~ abs(y - dist2target_y))) %>% # y-distance found
mutate(dist2target = case_when( OffDef == "Defense" & event3 == "pass_thrown" ~
round(sqrt( dist2target_x^2 + dist2target_y^2 ),3) ,
TRUE ~ 999)) %>%
# ^ Identify x, y, and Euclidean distance to targeted players for all defenders
mutate(PrimDefDistThrow = min(dist2target)) %>%
mutate(PrimaryDefName = case_when( PrimDefDistThrow == dist2target ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(PrimaryDefName = displayName[match('yes' , PrimaryDefName)]) %>%
mutate(PrimaryDefPosition = case_when( PrimDefDistThrow == dist2target ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(PrimaryDefPosition = position[match('yes',PrimaryDefPosition)]) %>%
mutate(PrimDefDistThrow = case_when( PrimDefDistThrow < 12 ~ PrimDefDistThrow , # Caps primary def distance at 12 yards
TRUE ~ 12 )) %>%
# ^ Identify Primary defender (min dist to target), his roster position, and his Euclidean distance
mutate(dist2target = case_when( PrimDefDistThrow == dist2target ~ dist2target + 1000 , # Allows ID of 2nd closest defender
TRUE ~ dist2target )) %>%
mutate(SecDefDistThrow = min(dist2target)) %>%
mutate(SecondaryDefName = case_when( SecDefDistThrow == dist2target ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(SecondaryDefName = displayName[match('yes' , SecondaryDefName)]) %>%
mutate(SecondaryDefPosition = case_when( SecDefDistThrow == dist2target ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(SecondaryDefPosition = position[match('yes',SecondaryDefPosition)]) %>%
mutate(SecDefDistThrow = case_when( SecDefDistThrow < 12 ~ SecDefDistThrow ,
TRUE ~ 12)) %>% # Caps secondary def dist at 12 yards
# ^ Identify Secondary defender, his roster position, & his Euclidean distance
mutate(dist2target = NULL) %>% mutate(dist2target_y = NULL) %>% mutate(dist2target_x = NULL)
# Task
# Extract Trajectory Attributes of Targeted Receiver, Primary/Secondary Defender (speed, acceleration, direction, orientation)
# At Time of throw
AnalysisData <- AnalysisData %>%
mutate(TargetSpeedThrow = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(TargetSpeedThrow = s[match('yes',TargetSpeedThrow)]) %>%
mutate(PrimDefSpeedThrow = case_when( displayName == PrimaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(PrimDefSpeedThrow = s[match('yes',PrimDefSpeedThrow)]) %>%
mutate(SecDefSpeedThrow = case_when( displayName == SecondaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(SecDefSpeedThrow = s[match('yes',SecDefSpeedThrow)]) %>%
# ^ Indicates critical player speed
mutate(TargetAccThrow = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(TargetAccThrow = a[match('yes',TargetAccThrow)]) %>%
mutate(PrimDefAccThrow = case_when( displayName == PrimaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(PrimDefAccThrow = a[match('yes',PrimDefAccThrow)]) %>%
mutate(SecDefAccThrow = case_when( displayName == SecondaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(SecDefAccThrow = a[match('yes',SecDefAccThrow)]) %>%
# ^ Indicates critical player acceleration
mutate(TargetDirThrow = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(TargetDirThrow = dir[match('yes',TargetDirThrow)]) %>%
mutate(PrimDefDirThrow = case_when( displayName == PrimaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(PrimDefDirThrow = dir[match('yes',PrimDefDirThrow)]) %>%
mutate(SecDefDirThrow = case_when( displayName == SecondaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(SecDefDirThrow = dir[match('yes',SecDefDirThrow)]) %>%
# ^ Indicates critical player direction
mutate(TargetOThrow = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(TargetOThrow = o[match('yes',TargetOThrow)]) %>%
mutate(PrimDefOThrow = case_when( displayName == PrimaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(PrimDefOThrow = o[match('yes',PrimDefOThrow)]) %>%
mutate(SecDefOThrow = case_when( displayName == SecondaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(SecDefOThrow = o[match('yes',SecDefOThrow)])
# ^ Indicates critical player orientation
# Task
# Identify More Advanced Features relating Targeted Player, Primary/Secondary Defender
# Generating variables indicating positions of ball, target, defenders at time of throw
AnalysisData <- AnalysisData %>% # Generating variables indicating positions of ball, target, defenders at pass arrival
ungroup %>% group_by(gameId , playId) %>%
mutate(ball_throw_x = x[match('Football pass_thrown', paste(displayName, event3))]) %>%
mutate(ball_throw_y = y[match('Football pass_thrown', paste(displayName, event3))]) %>%
# ^ Identify Ball Position
mutate(targ_throw_x = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no')) %>%
mutate(targ_throw_x = x[match('yes' , targ_throw_x)]) %>%
mutate(targ_throw_y = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no')) %>%
mutate(targ_throw_y = y[match('yes' , targ_throw_y)]) %>%
# ^ Identify targeted player position
mutate(prim_throw_x = case_when( displayName == PrimaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(prim_throw_x = x[match('yes' , prim_throw_x)]) %>%
mutate(prim_throw_y = case_when( displayName == PrimaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(prim_throw_y = y[match('yes' , prim_throw_y)]) %>%
# ^ Identify Primary Defender position
mutate(sec_throw_x = case_when( displayName == SecondaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(sec_throw_x = x[match('yes' , sec_throw_x)]) %>%
mutate(sec_throw_y = case_when( displayName == SecondaryDefName & event3 == "pass_thrown" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(sec_throw_y = y[match('yes' , sec_throw_y)])
# ^ Identify Secondary Defender Position
# Generating variables indicating positions of ball, target, defenders at pass arrival
AnalysisData <- AnalysisData %>% # Generating variables indicating positions of ball, target, defenders at pass arrival
ungroup %>% group_by(gameId , playId) %>%
mutate(ball_arrive_x = x[match('Football pass_arrived', paste(displayName, event3))]) %>%
mutate(ball_arrive_y = y[match('Football pass_arrived', paste(displayName, event3))]) %>%
# ^ Identify Ball Position
mutate(targ_arrive_x = case_when( displayName == targetName & event3 == "pass_arrived" ~ 'yes' ,
TRUE ~ 'no')) %>%
mutate(targ_arrive_x = x[match('yes' , targ_arrive_x)]) %>%
mutate(targ_arrive_y = case_when( displayName == targetName & event3 == "pass_arrived" ~ 'yes' ,
TRUE ~ 'no')) %>%
mutate(targ_arrive_y = y[match('yes' , targ_arrive_y)]) %>%
# ^ Identify targeted player position
mutate(prim_arrive_x = case_when( displayName == PrimaryDefName & event3 == "pass_arrived" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(prim_arrive_x = x[match('yes' , prim_arrive_x)]) %>%
mutate(prim_arrive_y = case_when( displayName == PrimaryDefName & event3 == "pass_arrived" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(prim_arrive_y = y[match('yes' , prim_arrive_y)]) %>%
# ^ Identify Primary Defender position
mutate(sec_arrive_x = case_when( displayName == SecondaryDefName & event3 == "pass_arrived" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(sec_arrive_x = x[match('yes' , sec_arrive_x)]) %>%
mutate(sec_arrive_y = case_when( displayName == SecondaryDefName & event3 == "pass_arrived" ~ 'yes' ,
TRUE ~ 'no' )) %>%
mutate(sec_arrive_y = y[match('yes' , sec_arrive_y)])
# ^ Identify Secondary Defender Position
# Distance to eventual Ball Landing/Arrival Point at Time of Throw
AnalysisData <- AnalysisData %>%
mutate( TargDist2BallLandThrow = sqrt( (targ_throw_x - ball_arrive_x)^2 + (targ_throw_y - ball_arrive_y)^2 )) %>%
mutate( PrimDefDist2BallLandThrow = sqrt( (prim_throw_x - ball_arrive_x)^2 + (prim_throw_y - ball_arrive_y)^2 )) %>%
mutate( SecDefDist2BallLandThrow = sqrt( (sec_throw_x - ball_arrive_x)^2 + (sec_throw_y - ball_arrive_y)^2 ))
# Angular Separation between Target & Primary/Secondary Defenders at Throw
# At time of throw, consider the triangle defined by the QB, WR, and each defender
AnalysisData <- AnalysisData %>%
mutate(a1 = sqrt( (ball_throw_x - targ_throw_x)^2 + (ball_throw_y - targ_throw_y)^2) ) %>% # Target Player to Ball/QB
mutate(b1 = sqrt( (ball_throw_x - prim_throw_x)^2 + (ball_throw_y - prim_throw_y)^2) ) %>% # Primary Def. to Ball/QB
mutate(c1 = PrimDefDistThrow) %>% # Target Player to Primary Def.
mutate(PrimDefAngThrow = LawOfCosines(a1,b1,c1)) %>% # Angle between Primary Defender & Target Player
# a1 does not change
mutate(b1 = sqrt( (ball_throw_x - sec_throw_x)^2 + (ball_throw_y - sec_throw_y)^2) ) %>% # Secondary def. to Ball/QB
mutate(c1 = SecDefDistThrow) %>% # Target Played to Secondary Def.
mutate(SecDefAngThrow = LawOfCosines(a1,b1,c1)) # Angle between Secondary Defender & Target Player
# Distance between Target & Primary/Secondary Defenders at Pass Arrival
AnalysisData <- AnalysisData %>% # Obtaining variables for relevant distances
mutate(PrimDefDistArrive =
sqrt( (targ_arrive_x - prim_arrive_x)^2 + (targ_arrive_y - prim_arrive_y)^2 )) %>%
mutate(SecDefDistArrive =
sqrt( (targ_arrive_x - sec_arrive_x)^2 + (targ_arrive_y - sec_arrive_y)^2 ))
# Distance between Target & Primary/Secondary Defenders & Ball at Pass Arrival
AnalysisData <- AnalysisData %>%
mutate( TargDist2BallLandArrive = sqrt( (targ_arrive_x - ball_arrive_x)^2 + (targ_arrive_y - ball_arrive_y)^2 )) %>%
mutate( PrimDefDist2BallLandArrive = sqrt( (prim_arrive_x - ball_arrive_x)^2 + (prim_arrive_y - ball_arrive_y)^2 )) %>%
mutate( SecDefDist2BallLandArrive = sqrt( (sec_arrive_x - ball_arrive_x)^2 + (sec_arrive_y - ball_arrive_y)^2 ))
# Angle between receiver at throw, defenders at throw, and ball landing spot
AnalysisData <- AnalysisData %>%
mutate(a1 = TargDist2BallLandThrow) %>% # Ball (arrive) to Target Receiver (throw)
mutate(b1 = PrimDefDist2BallLandThrow) %>% # Ball (arrive) to Primary Def. (throw)
mutate(c1 = PrimDefDistThrow) %>% # Target Player to Primary Def. (throw)
mutate(PrimBallRecAng = LawOfCosines(a1,b1,c1)) %>% # Primary Def.-Ball-Target Angle
# a1 stays the same
mutate(b1 = SecDefDist2BallLandThrow) %>% # Ball (arrive) to Secondary Def. (throw)
mutate(c1 = SecDefDistThrow) %>% # Target Player to Secondary Def. (throw)
mutate(SecBallRecAng = LawOfCosines(a1,b1,c1)) %>% # Secondary Def.-Ball-Target Angle
mutate(a1 = NULL) %>% mutate(b1 = NULL) %>% mutate(c1 = NULL) # Removing unnecessary variables
# Task
# Target Depth/Throw Distance Variables
AnalysisData <- AnalysisData %>% # Identifies location of ball at various points based on event3
group_by(gameId, playId) %>%
mutate(ball_snap_x = x[match('Football ball_snap' , paste(displayName, event3))]) %>%
mutate(ball_snap_y = y[match('Football ball_snap' , paste(displayName, event3))])
AnalysisData <- AnalysisData %>%
mutate( ThrowDist = # Distance from throw to where ball lands or is contacted by receiver/defender
sqrt( (ball_throw_x-ball_arrive_x)^2 + (ball_throw_y-ball_arrive_y)^2 )) %>%
mutate( Passer2RecDistThrow = # Determines QB-Rec dist when throw is made, assumes QB is where the ball is at throw
sqrt( (ball_throw_x - targ_throw_x)^2 + (ball_throw_y - targ_throw_y)^2 ) ) %>%
mutate( Passer2RecDistArrive = # Determines QB-Rec dist when ball arrives, assumes QB is where the ball is at throw
sqrt( (ball_throw_x - targ_arrive_x)^2 + (ball_throw_y - targ_arrive_y)^2 ) ) %>%
mutate( TargDownfieldDistThrow = targ_throw_x - ball_snap_x ) %>% # Yards Downfield Receiver is at throw
mutate( TargDownfieldDistRec = targ_arrive_x - ball_snap_x ) # Yards Downfield Receiver is at arrival/outcome
# Task
# Pursuit Angle Feature
AnalysisData <- AnalysisData %>%
mutate(est_flight_time = 0.4 + 0.001 * Passer2RecDistThrow^2 + 0.012 * Passer2RecDistThrow) %>%
# Approximates flight time based on various regression results
mutate(targ_future_x = targ_throw_x + est_flight_time * TargetSpeedThrow * cos(TargetDirThrow * pi/180)) %>%
mutate(targ_future_y = targ_throw_y + est_flight_time * TargetSpeedThrow * cos(TargetDirThrow * pi/180)) %>%
mutate(avgDev = sqrt( (targ_throw_x-targ_future_x)^2 + (targ_throw_y-targ_future_y)^2 ))
# Task
# Removes Unnecessary Intermediate Variables
AnalysisData <- AnalysisData %>%
mutate(ball_snap_x = NULL) %>% mutate(ball_snap_y = NULL) %>%
mutate(ball_throw_x = NULL) %>% mutate(ball_throw_y = NULL) %>%
mutate(ball_arrive_x = NULL) %>% mutate(ball_arrive_y = NULL) %>%
mutate(targ_arrive_x = NULL) %>% mutate(targ_arrive_y = NULL) %>%
mutate(targ_throw_x = NULL) %>% mutate(targ_throw_y = NULL) %>%
mutate(prim_throw_x = NULL) %>% mutate(prim_throw_y = NULL) %>%
mutate(prim_arrive_x = NULL) %>% mutate(prim_arrive_y = NULL) %>%
mutate(sec_arrive_x = NULL) %>% mutate(sec_arrive_y = NULL) %>%
mutate(targ_future_x = NULL) %>% mutate(targ_future_y = NULL)
# Task
# Identify Play-Specific Factors of Interest
AnalysisData <- AnalysisData %>%
group_by(gameId , playId) %>%
mutate(AnyShift = (sum(event == "shift") > 1)) %>%
mutate(AnyMotion = (sum(event == "man_in_motion") > 1)) %>%
mutate(AnyPlayAction = (sum(event == "play_action") > 1)) %>%
mutate(AnyTipped = (sum(event == "pass_tipped") > 1)) %>%
mutate(AnyFlag = (sum(event == "penalty_flag") > 1))
# Task
# Identify Number of Times a Player is on the Field
AnalysisData <- AnalysisData %>%
mutate(playerPlayCount = case_when(event3 == "ball_snap" ~ 1 ,
TRUE ~ 0)) %>%
group_by(displayName) %>%
mutate(playerPlayCount = sum(playerPlayCount)) %>%
ungroup %>% group_by(playId , gameId)
# Task
# Import & Merge Weather Data
weatherData <- read.csv("Data/weather_for_plays.csv")
weatherData <- weatherData %>%
select(gameId , playId , Temperature , Conditions)
AnalysisData <- left_join(AnalysisData , weatherData)
# Task
# Incorporate Coverage ID
coverages <- readRDS('Data/allWeeksCoverageID')
coverages <- coverages %>%
select(gameId , playId , nflId , displayName , zone, Coverage , CoverageFamily)
AnalysisData <- left_join(AnalysisData , coverages)
# Task
# Determine Primary/Secondary Defender Coverage Assignment
# Look up the coverage-model `zone` value for the primary/secondary defender
# on the ball-snap frame and broadcast it to every row of the play.
AnalysisData <- AnalysisData %>%
  mutate(PrimDefZone = case_when(displayName == PrimaryDefName & event3 == "ball_snap" ~ 'yes' ,
                                 TRUE ~ 'no')) %>%
  # match() returns the index of the first 'yes' row within the play group;
  # `zone` at that row is the primary defender's assignment (NA if no match).
  mutate(PrimDefZone = zone[match('yes',PrimDefZone)]) %>%
  mutate(SecDefZone = case_when(displayName == SecondaryDefName & event3 == "ball_snap" ~ 'yes' ,
                                TRUE ~ 'no')) %>%
  mutate(SecDefZone = zone[match('yes',SecDefZone)])
# Task
# Merge features with General Play data (plays.csv) & Create Extracted Features
length(unique(AnalysisData$playId)) # Indicates number of different play IDs
# Collapse the frame-level data to one row per play by keeping only the
# backfield QB's ball_snap row (all broadcast play-level columns ride along).
ExtractedFeatures <- AnalysisData %>% # Extracts 1 observation per play
  group_by(gameId , playId) %>%
  filter(displayName == BackfieldQB & event3 == "ball_snap") %>%
  filter(is.na(targetName)==0) %>% # Removes players missing key variables
  filter(is.na(PrimaryDefName)==0)
length(unique(ExtractedFeatures$playId)) # Indicates number of different play IDs. Should be the same as previously.
# Merge in play-level metadata (plays.csv) and derive outcome indicators from
# passResult: C = complete, I = incomplete, IN = interception.
ExtractedFeatures <- left_join(ExtractedFeatures , plays)
ExtractedFeatures <- ExtractedFeatures %>%
  mutate(Complete = (passResult == "C")) %>%
  mutate(Incomplete = (passResult == "I")) %>%
  mutate(Interception = (passResult == "IN"))
# Task
# Simplifying Key Player Positions
# Collapse detailed roster positions into coarse groups: backs -> "RB",
# secondary -> "DB", linebackers -> "LB", linemen -> "DL". Unlisted defender
# positions become NA, exactly as in the original chained-OR version.
ExtractedFeatures <- ExtractedFeatures %>%
  mutate(targetPosBasic = case_when( targetPosition %in% c("RB", "HB", "FB") ~ "RB" ,
                                     TRUE ~ targetPosition )) %>%
  mutate(PrimaryDefPosBasic = case_when(
    PrimaryDefPosition %in% c("SS", "S", "FS", "CB", "DB") ~ "DB" ,
    PrimaryDefPosition %in% c("LB", "MLB", "OLB", "ILB") ~ "LB" ,
    PrimaryDefPosition == "DL" ~ "DL" )) %>%
  mutate(SecondaryDefPosBasic = case_when(
    SecondaryDefPosition %in% c("SS", "S", "FS", "CB", "DB") ~ "DB" ,
    SecondaryDefPosition %in% c("LB", "MLB", "OLB", "ILB") ~ "LB" ,
    SecondaryDefPosition == "DL" ~ "DL" ))
# Task
# Mutate Final Extracted Features
# Creating Delta Features
# Relative (target minus defender) speed/acceleration at the throw, plus
# angular differences in direction via the project helper OrientationDiff
# (presumably the wrapped 0-360 angular gap -- confirm against its definition).
ExtractedFeatures <- ExtractedFeatures %>%
  filter(is.na(TargetDirThrow) == 0 & is.na(PrimDefDirThrow) == 0 & is.na(SecDefDirThrow) == 0) %>%
  # Accounts for rare scenario where a player's tracker partly malfunctions
  filter(is.na(PrimDefDistArrive)==0) %>% # Removes plays missing key variables (few & far in between)
  mutate(PrimDeltaSpeedThrow = TargetSpeedThrow - PrimDefSpeedThrow) %>%
  mutate(PrimDeltaAccThrow = TargetAccThrow - PrimDefAccThrow) %>%
  mutate(PrimDeltaDirThrow = OrientationDiff(TargetDirThrow,PrimDefDirThrow)) %>%
  mutate(SecDeltaSpeedThrow = TargetSpeedThrow - SecDefSpeedThrow) %>%
  mutate(SecDeltaAccThrow = TargetAccThrow - SecDefAccThrow) %>%
  mutate(SecDeltaDirThrow = OrientationDiff(TargetDirThrow,SecDefDirThrow))
# Time Pass in Air (& Square)
# Flight time of the pass in seconds (frames are 10 Hz) and a squared
# distance term for quadratic model specifications.
ExtractedFeatures <- ExtractedFeatures %>%
  mutate(TimePassInAir = (ArrivedFrame - PassFrame)/10 ,
         P2RDistThrowSq = Passer2RecDistThrow^2)
# Pool rarely-seen players (fewer than 15 plays as target / primary defender)
# into an "AAA" bucket so fixed effects are only estimated for regulars.
ExtractedFeatures <- ExtractedFeatures %>%
  ungroup %>% group_by(targetName) %>%
  mutate(targetNameQ = if_else(n() < 15, "AAA", targetName)) %>%
  ungroup %>% group_by(PrimaryDefName) %>%
  mutate(PrimaryDefNameQ = if_else(n() < 15, "AAA", PrimaryDefName)) %>%
  ungroup %>% group_by(playId , gameId)
# Fixing Primary Defender Man/Zone
# Recode the numeric man/zone indicator into labels; anything else
# (including NA, i.e. no coverage assignment) falls into "AAA-Other".
ExtractedFeatures <- ExtractedFeatures %>%
  mutate(PrimDefZone = ifelse(!is.na(PrimDefZone) & PrimDefZone == 1, 'Zone',
                       ifelse(!is.na(PrimDefZone) & PrimDefZone == 0, 'Man',
                              'AAA-Other')))
print("Feature Engineering 1 Completed")
|
/Analysis/Functions/feature_eng1.r
|
no_license
|
rbernhardt12/Big-Data-Bowl-20-Public
|
R
| false
| false
| 26,821
|
r
|
### NFL Big Data Bowl 2020-21
# Robert Bernhardt, Andrew Rogan, Daniel Weiss
# December 2020
## Feature Engineering
# Objective: Further clean data & create features which can be used for analysis
# Task
# Make all plays go left to right (from Offense POV)
# Standardize every play to run left-to-right from the offense's point of
# view: mirror coordinates (field is 120 x 160/3 yards) and rotate heading
# (dir) / orientation (o) by 180 degrees for plays that went left.
# `mod` is the project's modulo helper, applied as in the original.
AnalysisData <- AnalysisData %>%
  mutate( x = case_when( playDirection == "left" ~ 120 - x ,
                         playDirection == "right" ~ x) ,
          y = case_when( playDirection == "left" ~ 160/3 - y ,
                         playDirection == "right" ~ y) ,
          dir = case_when( playDirection == "left" ~ mod(dir+180,360) ,
                           playDirection == "right" ~ dir) ,
          o = case_when( playDirection == "left" ~ mod(o+180,360) ,
                         playDirection == "right" ~ o))
# Task
# Create Event 2 (event2) featuring greater standardization
# Map raw tracking events onto five canonical phases (event2); any raw event
# not listed (or NA) becomes "None".
AnalysisData <- AnalysisData %>%
  group_by(gameId , playId) %>%
  mutate(event2 = case_when(
    event == "ball_snap" ~ "ball_snap" ,
    event %in% c("pass_forward", "pass_shovel") ~ "pass_thrown" ,
    event == "pass_arrived" ~ "pass_arrived" ,
    event %in% c("pass_outcome_caught", "pass_outcome_interception",
                 "pass_outcome_incomplete", "pass_outcome_touchdown") ~ "pass_outcome" ,
    event %in% c("tackle", "out_of_bounds", "touchdown", "touchback") ~ "later_final_outcome" ,
    TRUE ~ "None"))
# Task
# Create Event 3 (event3) featuring maximal standardization: ball_snap, pass_thrown, pass_arrived
# If no "pass_arrived" event, then "pass_outcome" indicates the arrival for event 3
AnalysisData <- AnalysisData %>%
  group_by(gameId , playId) %>%
  # no_arrived: TRUE for plays whose event stream never records "pass_arrived"
  mutate(no_arrived = sum(event2 == "pass_arrived") == 0) %>%
  # event3 keeps only the three anchor events; when a play lacks an explicit
  # arrival, its "pass_outcome" stands in as the arrival frame.
  mutate( event3 = case_when(event2 == "ball_snap" ~ "ball_snap" ,
                             event2 == "pass_thrown" ~ "pass_thrown" ,
                             event2 == "pass_arrived" ~ "pass_arrived" ,
                             event2 == "pass_outcome" & no_arrived == TRUE ~ "pass_arrived" ,
                             TRUE ~ "None")) %>%
  mutate(no_arrived = NULL) # %>%
# Task
# Removes Data without any pass or any arrival in the events
# Drop plays whose event stream never records a throw or an arrival; the
# downstream throw/arrival features are undefined without both anchor frames.
AnalysisData <- AnalysisData %>%
  filter(any(event3 == "pass_thrown"),
         any(event3 == "pass_arrived"))
# Task
# Identify relevant teams
# Per frame, the offense is whichever team fields the (first-listed) QB.
AnalysisData <- AnalysisData %>%
  group_by(gameId, playId, frameId) %>%
  mutate(QBTeam = team[match('QB', position)]) %>%
  ungroup
# Label each row Offense / Defense / Football, plus a logical convenience
# column (Offense) that is NA for the ball's own rows.
AnalysisData <- AnalysisData %>%
  group_by(playId , gameId) %>%
  mutate(OffDef = case_when( team == QBTeam ~ "Offense",
                             team != QBTeam ~ "Defense")) %>%
  mutate(OffDef = case_when( displayName != "Football" ~ OffDef,
                             displayName == "Football" ~ "Football")) %>%
  mutate(Offense = case_when( OffDef == "Offense" ~ TRUE ,
                              OffDef == "Defense" ~ FALSE ,
                              OffDef == "Football" ~ NA ))
# Task
# Adjust and Create Time Variables
# Creates Date variable R understands
# Parse the raw `time` string: first 10 chars are the date, chars 12-23 the
# clock time with fractional seconds. Timezone of as.POSIXct is the session
# default here -- assumed irrelevant since only frame differences are used.
AnalysisData$Date <- as.Date(substr(AnalysisData$time,1,10))
options("digits.secs"=6) # Allows for fractions of seconds.
AnalysisData <- AnalysisData %>% # Creates date-time variable R understands
  mutate(TimeClean = paste(substr(time,1,10) , substr(time,12,23))) %>%
  mutate(TimeClean = as.POSIXct(TimeClean))
# Anchor frames: the frameId of the snap, the throw, and the arrival, taken
# from the first row per play that carries each event3 value.
AnalysisData <- AnalysisData %>% # Identifies Critical Frames
  group_by(gameId, playId) %>%
  mutate(SnapFrame = frameId[match('ball_snap', event3)]) %>%
  mutate(PassFrame = frameId[match('pass_thrown',event3)]) %>%
  mutate(ArrivedFrame = frameId[match('pass_arrived',event3)])
# Relative frame counters (frames are 10 Hz; negative before the anchor).
AnalysisData <- AnalysisData %>%
  mutate(FramesSinceSnap = frameId - SnapFrame) %>%
  mutate(FramesSincePass = frameId - PassFrame) %>%
  mutate(FramesSinceArrive = frameId - ArrivedFrame)
# Task
# Identify Primary Quarterback (player who receives snap)
# The "backfield QB" is the QB who actually receives the snap: the QB whose
# lateral (y) distance to the football at the snap frame is under 3 yards.
AnalysisData <- AnalysisData %>%
  group_by(gameId , playId) %>%
  # y-coordinate of the football on the snap frame, broadcast to the play
  mutate(ball_snap_y = y[match('Football ball_snap' , paste(displayName, event3))]) %>%
  # 999 sentinel for every row that is not a snap-frame row
  mutate(dist2ball_snap = case_when( event == "ball_snap" ~ abs(y - ball_snap_y) ,
                                     TRUE ~ 999)) %>%
  mutate( BackfieldQB = case_when( event == "ball_snap" & position == "QB" & dist2ball_snap < 3 ~ 'yes' ,
                                   TRUE ~ 'no')) %>%
  # first matching QB's name, broadcast to every row of the play
  mutate( BackfieldQB = displayName[match('yes',BackfieldQB)]) %>%
  mutate(ball_snap_y = NULL) %>% mutate(dist2ball_snap = NULL)
# Task
# Identify Targeted Player
# Attach the targeted receiver's nflId (TargetedReceiver lookup, joined on
# its shared key columns) and broadcast his name and roster position to
# every row of the play. Plays with no recorded target are dropped.
AnalysisData <- left_join(AnalysisData , TargetedReceiver)
AnalysisData <- AnalysisData %>%
  mutate(targetName = case_when( nflId == targetNflId ~ "yes" ,
                                 TRUE ~ "no")) %>%
  # match('yes', ...) finds the target's first row within the play group
  mutate(targetName = displayName[match('yes' , targetName)]) %>%
  filter(!is.na(targetNflId)) %>% # Removes plays with no targeted player
  mutate(targetPosition = case_when( nflId == targetNflId ~ "yes" ,
                                     TRUE ~ "no")) %>%
  mutate(targetPosition = position[match('yes',targetPosition)])
# Task
# Identify Primary/Secondary Defenders & Their Distance to Target at Time of Throw
# For every defender, compute his Euclidean distance to the targeted receiver
# on the throw frame; the closest becomes the Primary defender and the
# second-closest the Secondary defender. Distances are capped at 12 yards.
# 999 is used throughout as a "not applicable" sentinel so min() picks only
# real defender distances.
AnalysisData <- AnalysisData %>%
  group_by(playId , gameId) %>%
  mutate(dist2target_x = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
                                    TRUE ~ 'no')) %>%
  mutate(dist2target_x = x[match('yes' , dist2target_x)]) %>%
  mutate(dist2target_x = case_when( event3 != "pass_thrown" ~ 999 ,
                                    TRUE ~ abs(x - dist2target_x))) %>% # x-distance found
  mutate(dist2target_y = case_when( displayName == targetName & event3 == "pass_thrown" ~ 'yes' ,
                                    TRUE ~ 'no')) %>%
  mutate(dist2target_y = y[match('yes' , dist2target_y)]) %>%
  mutate(dist2target_y = case_when( event3 != "pass_thrown" ~ 999 ,
                                    TRUE ~ abs(y - dist2target_y))) %>% # y-distance found
  mutate(dist2target = case_when( OffDef == "Defense" & event3 == "pass_thrown" ~
                                    round(sqrt( dist2target_x^2 + dist2target_y^2 ),3) ,
                                  TRUE ~ 999)) %>%
  # ^ Identify x, y, and Euclidean distance to targeted players for all defenders
  mutate(PrimDefDistThrow = min(dist2target)) %>%
  mutate(PrimaryDefName = case_when( PrimDefDistThrow == dist2target ~ 'yes' ,
                                     TRUE ~ 'no' )) %>%
  mutate(PrimaryDefName = displayName[match('yes' , PrimaryDefName)]) %>%
  mutate(PrimaryDefPosition = case_when( PrimDefDistThrow == dist2target ~ 'yes' ,
                                         TRUE ~ 'no' )) %>%
  mutate(PrimaryDefPosition = position[match('yes',PrimaryDefPosition)]) %>%
  mutate(PrimDefDistThrow = case_when( PrimDefDistThrow < 12 ~ PrimDefDistThrow , # Caps primary def distance at 12 yards
                                       TRUE ~ 12 )) %>%
  # ^ Identify Primary defender (min dist to target), his roster position, and his Euclidean distance
  # NOTE(review): PrimDefDistThrow is capped BEFORE the comparison below, so
  # when the closest defender is >12 yards away no row gets the +1000 offset
  # and the "secondary" defender can equal the primary. Also, an exact
  # distance tie offsets both tied defenders. Confirm whether intended.
  mutate(dist2target = case_when( PrimDefDistThrow == dist2target ~ dist2target + 1000 , # Allows ID of 2nd closest defender
                                  TRUE ~ dist2target )) %>%
  mutate(SecDefDistThrow = min(dist2target)) %>%
  mutate(SecondaryDefName = case_when( SecDefDistThrow == dist2target ~ 'yes' ,
                                       TRUE ~ 'no' )) %>%
  mutate(SecondaryDefName = displayName[match('yes' , SecondaryDefName)]) %>%
  mutate(SecondaryDefPosition = case_when( SecDefDistThrow == dist2target ~ 'yes' ,
                                           TRUE ~ 'no' )) %>%
  mutate(SecondaryDefPosition = position[match('yes',SecondaryDefPosition)]) %>%
  mutate(SecDefDistThrow = case_when( SecDefDistThrow < 12 ~ SecDefDistThrow ,
                                      TRUE ~ 12)) %>% # Caps secondary def dist at 12 yards
  # ^ Identify Secondary defender, his roster position, & his Euclidean distance
  mutate(dist2target = NULL) %>% mutate(dist2target_y = NULL) %>% mutate(dist2target_x = NULL)
# Task
# Extract Trajectory Attributes of Targeted Receiver, Primary/Secondary Defender (speed, acceleration, direction, orientation)
# At Time of throw
# Pull speed (s), acceleration (a), direction (dir) and orientation (o) for
# the targeted receiver and the primary/secondary defenders on the throw
# frame. Each value is a single scalar per play (first matching row),
# broadcast to every row -- identical to the original 'yes'/'no' + match()
# chain, just computed through one temporary row index per player.
AnalysisData <- AnalysisData %>%
  mutate(.idx_targ = match(TRUE, displayName == targetName & event3 == "pass_thrown") ,
         .idx_prim = match(TRUE, displayName == PrimaryDefName & event3 == "pass_thrown") ,
         .idx_sec  = match(TRUE, displayName == SecondaryDefName & event3 == "pass_thrown") ,
         TargetSpeedThrow  = s[.idx_targ] ,
         PrimDefSpeedThrow = s[.idx_prim] ,
         SecDefSpeedThrow  = s[.idx_sec] ,
         TargetAccThrow  = a[.idx_targ] ,
         PrimDefAccThrow = a[.idx_prim] ,
         SecDefAccThrow  = a[.idx_sec] ,
         TargetDirThrow  = dir[.idx_targ] ,
         PrimDefDirThrow = dir[.idx_prim] ,
         SecDefDirThrow  = dir[.idx_sec] ,
         TargetOThrow  = o[.idx_targ] ,
         PrimDefOThrow = o[.idx_prim] ,
         SecDefOThrow  = o[.idx_sec] ,
         .idx_targ = NULL , .idx_prim = NULL , .idx_sec = NULL)
# Task
# Identify More Advanced Features relating Targeted Player, Primary/Secondary Defender
# Generating variables indicating positions of ball, target, defenders at time of throw
# Positions of the ball, the targeted receiver, and both defenders at the
# moment the pass is thrown -- one scalar per play, broadcast to all rows.
# Same semantics as the original 'yes'/'no' + match() chain.
AnalysisData <- AnalysisData %>%
  ungroup %>% group_by(gameId , playId) %>%
  mutate(.idx_ball = match('Football pass_thrown', paste(displayName, event3)) ,
         .idx_targ = match(TRUE, displayName == targetName & event3 == "pass_thrown") ,
         .idx_prim = match(TRUE, displayName == PrimaryDefName & event3 == "pass_thrown") ,
         .idx_sec  = match(TRUE, displayName == SecondaryDefName & event3 == "pass_thrown") ,
         ball_throw_x = x[.idx_ball] , ball_throw_y = y[.idx_ball] ,
         targ_throw_x = x[.idx_targ] , targ_throw_y = y[.idx_targ] ,
         prim_throw_x = x[.idx_prim] , prim_throw_y = y[.idx_prim] ,
         sec_throw_x  = x[.idx_sec]  , sec_throw_y  = y[.idx_sec] ,
         .idx_ball = NULL , .idx_targ = NULL , .idx_prim = NULL , .idx_sec = NULL)
# Generating variables indicating positions of ball, target, defenders at pass arrival
# Positions of the ball, the targeted receiver, and both defenders when the
# pass arrives -- one scalar per play, broadcast to all rows. Same semantics
# as the original 'yes'/'no' + match() chain.
AnalysisData <- AnalysisData %>%
  ungroup %>% group_by(gameId , playId) %>%
  mutate(.idx_ball = match('Football pass_arrived', paste(displayName, event3)) ,
         .idx_targ = match(TRUE, displayName == targetName & event3 == "pass_arrived") ,
         .idx_prim = match(TRUE, displayName == PrimaryDefName & event3 == "pass_arrived") ,
         .idx_sec  = match(TRUE, displayName == SecondaryDefName & event3 == "pass_arrived") ,
         ball_arrive_x = x[.idx_ball] , ball_arrive_y = y[.idx_ball] ,
         targ_arrive_x = x[.idx_targ] , targ_arrive_y = y[.idx_targ] ,
         prim_arrive_x = x[.idx_prim] , prim_arrive_y = y[.idx_prim] ,
         sec_arrive_x  = x[.idx_sec]  , sec_arrive_y  = y[.idx_sec] ,
         .idx_ball = NULL , .idx_targ = NULL , .idx_prim = NULL , .idx_sec = NULL)
# Distance to eventual Ball Landing/Arrival Point at Time of Throw
# Each key player's distance (at the throw) to the spot where the ball
# eventually arrives.
AnalysisData <- AnalysisData %>%
  mutate(
    TargDist2BallLandThrow    = sqrt( (targ_throw_x - ball_arrive_x)^2 + (targ_throw_y - ball_arrive_y)^2 ) ,
    PrimDefDist2BallLandThrow = sqrt( (prim_throw_x - ball_arrive_x)^2 + (prim_throw_y - ball_arrive_y)^2 ) ,
    SecDefDist2BallLandThrow  = sqrt( (sec_throw_x - ball_arrive_x)^2 + (sec_throw_y - ball_arrive_y)^2 ))
# Angular Separation between Target & Primary/Secondary Defenders at Throw
# At time of throw, consider the triangle defined by the QB, WR, and each defender
# Triangle QB(ball) - target - defender at the throw; LawOfCosines (project
# helper) returns the angle at the QB vertex given the three side lengths.
# NOTE(review): c1 uses the capped (<=12 yd) defender distance while a1/b1
# are uncapped, so the three sides may not form a consistent triangle when
# the cap binds -- confirm whether intended.
AnalysisData <- AnalysisData %>%
  mutate(a1 = sqrt( (ball_throw_x - targ_throw_x)^2 + (ball_throw_y - targ_throw_y)^2) ) %>% # Target Player to Ball/QB
  mutate(b1 = sqrt( (ball_throw_x - prim_throw_x)^2 + (ball_throw_y - prim_throw_y)^2) ) %>% # Primary Def. to Ball/QB
  mutate(c1 = PrimDefDistThrow) %>% # Target Player to Primary Def.
  mutate(PrimDefAngThrow = LawOfCosines(a1,b1,c1)) %>% # Angle between Primary Defender & Target Player
  # a1 does not change
  mutate(b1 = sqrt( (ball_throw_x - sec_throw_x)^2 + (ball_throw_y - sec_throw_y)^2) ) %>% # Secondary def. to Ball/QB
  mutate(c1 = SecDefDistThrow) %>% # Target Played to Secondary Def.
  mutate(SecDefAngThrow = LawOfCosines(a1,b1,c1)) # Angle between Secondary Defender & Target Player
# Distance between Target & Primary/Secondary Defenders at Pass Arrival
# Separation between the target and each defender when the ball arrives.
AnalysisData <- AnalysisData %>%
  mutate(PrimDefDistArrive = sqrt( (targ_arrive_x - prim_arrive_x)^2 + (targ_arrive_y - prim_arrive_y)^2 ) ,
         SecDefDistArrive  = sqrt( (targ_arrive_x - sec_arrive_x)^2 + (targ_arrive_y - sec_arrive_y)^2 ))
# Each key player's distance to the ball's landing point at arrival.
AnalysisData <- AnalysisData %>%
  mutate(TargDist2BallLandArrive    = sqrt( (targ_arrive_x - ball_arrive_x)^2 + (targ_arrive_y - ball_arrive_y)^2 ) ,
         PrimDefDist2BallLandArrive = sqrt( (prim_arrive_x - ball_arrive_x)^2 + (prim_arrive_y - ball_arrive_y)^2 ) ,
         SecDefDist2BallLandArrive  = sqrt( (sec_arrive_x - ball_arrive_x)^2 + (sec_arrive_y - ball_arrive_y)^2 ))
# Angle between receiver at throw, defenders at throw, and ball landing spot
# Triangle (ball landing spot) - (target at throw) - (defender at throw):
# LawOfCosines gives the angle at the landing-spot vertex. a1/b1/c1 are
# scratch side lengths, removed at the end.
AnalysisData <- AnalysisData %>%
  mutate(a1 = TargDist2BallLandThrow) %>% # Ball (arrive) to Target Receiver (throw)
  mutate(b1 = PrimDefDist2BallLandThrow) %>% # Ball (arrive) to Primary Def. (throw)
  mutate(c1 = PrimDefDistThrow) %>% # Target Player to Primary Def. (throw)
  mutate(PrimBallRecAng = LawOfCosines(a1,b1,c1)) %>% # Primary Def.-Ball-Target Angle
  # a1 stays the same
  mutate(b1 = SecDefDist2BallLandThrow) %>% # Ball (arrive) to Secondary Def. (throw)
  mutate(c1 = SecDefDistThrow) %>% # Target Player to Secondary Def. (throw)
  mutate(SecBallRecAng = LawOfCosines(a1,b1,c1)) %>% # Secondary Def.-Ball-Target Angle
  mutate(a1 = NULL) %>% mutate(b1 = NULL) %>% mutate(c1 = NULL) # Removing unnecessary variables
# Task
# Target Depth/Throw Distance Variables
# Throw-distance / depth features, anchored on the football's position at the
# snap, the throw, and the arrival.
AnalysisData <- AnalysisData %>%
  group_by(gameId, playId) %>%
  mutate(.idx_snap = match('Football ball_snap' , paste(displayName, event3)) ,
         ball_snap_x = x[.idx_snap] ,
         ball_snap_y = y[.idx_snap] ,
         .idx_snap = NULL)
AnalysisData <- AnalysisData %>%
  mutate(ThrowDist = # air distance from release point to landing/contact point
           sqrt( (ball_throw_x-ball_arrive_x)^2 + (ball_throw_y-ball_arrive_y)^2 ) ,
         Passer2RecDistThrow = # QB-receiver distance at release (QB ~ ball position at throw)
           sqrt( (ball_throw_x - targ_throw_x)^2 + (ball_throw_y - targ_throw_y)^2 ) ,
         Passer2RecDistArrive = # QB-receiver distance at arrival (QB fixed at throw spot)
           sqrt( (ball_throw_x - targ_arrive_x)^2 + (ball_throw_y - targ_arrive_y)^2 ) ,
         TargDownfieldDistThrow = targ_throw_x - ball_snap_x , # yards downfield at throw
         TargDownfieldDistRec = targ_arrive_x - ball_snap_x)   # yards downfield at arrival/outcome
# Task
# Pursuit Angle Feature
# Project the targeted receiver's position forward over the estimated ball
# flight time, using his speed and direction at the throw.
AnalysisData <- AnalysisData %>%
  mutate(est_flight_time = 0.4 + 0.001 * Passer2RecDistThrow^2 + 0.012 * Passer2RecDistThrow) %>%
  # Approximates flight time based on various regression results
  # FIX: both components previously used cos(), so x and y shared the same
  # projection and avgDev was distorted. Tracking-data `dir` is measured
  # clockwise from the +y axis, so dx = s*sin(dir), dy = s*cos(dir)
  # (NFL Big Data Bowl tracking convention -- confirm against data dictionary).
  mutate(targ_future_x = targ_throw_x + est_flight_time * TargetSpeedThrow * sin(TargetDirThrow * pi/180)) %>%
  mutate(targ_future_y = targ_throw_y + est_flight_time * TargetSpeedThrow * cos(TargetDirThrow * pi/180)) %>%
  # With orthogonal components this reduces to est_flight_time * speed.
  mutate(avgDev = sqrt( (targ_throw_x-targ_future_x)^2 + (targ_throw_y-targ_future_y)^2 ))
# Task
# Removes Unnecessary Intermediate Variables
# Drop intermediate position columns that later steps no longer need.
# NOTE(review): sec_throw_x / sec_throw_y are kept, matching the original --
# confirm whether that retention is intentional.
AnalysisData <- AnalysisData %>%
  mutate(ball_snap_x = NULL , ball_snap_y = NULL ,
         ball_throw_x = NULL , ball_throw_y = NULL ,
         ball_arrive_x = NULL , ball_arrive_y = NULL ,
         targ_arrive_x = NULL , targ_arrive_y = NULL ,
         targ_throw_x = NULL , targ_throw_y = NULL ,
         prim_throw_x = NULL , prim_throw_y = NULL ,
         prim_arrive_x = NULL , prim_arrive_y = NULL ,
         sec_arrive_x = NULL , sec_arrive_y = NULL ,
         targ_future_x = NULL , targ_future_y = NULL)
# Task
# Identify Play-Specific Factors of Interest
# Flag play-level events of interest. Each flag is TRUE when the event string
# appears on at least one row of the play (rows are player-frames).
# FIX: the original used `sum(...) > 1`, which demands the event on at least
# TWO rows; "Any*" semantics call for `> 0`. `na.rm = TRUE` keeps the flag
# FALSE (instead of NA) if the raw `event` column contains NAs.
AnalysisData <- AnalysisData %>%
  group_by(gameId , playId) %>%
  mutate(AnyShift = (sum(event == "shift", na.rm = TRUE) > 0)) %>%
  mutate(AnyMotion = (sum(event == "man_in_motion", na.rm = TRUE) > 0)) %>%
  mutate(AnyPlayAction = (sum(event == "play_action", na.rm = TRUE) > 0)) %>%
  mutate(AnyTipped = (sum(event == "pass_tipped", na.rm = TRUE) > 0)) %>%
  mutate(AnyFlag = (sum(event == "penalty_flag", na.rm = TRUE) > 0))
# Task
# Identify Number of Times a Player is on the Field
# Count, for every player, how many snaps he appears on: each play contributes
# exactly one "ball_snap" row per player, so summing the indicator over all of
# a player's rows yields his play count.
AnalysisData <- AnalysisData %>%
  group_by(displayName) %>%
  mutate(playerPlayCount = sum(as.numeric(event3 == "ball_snap"))) %>%
  ungroup %>% group_by(playId , gameId)
# Task
# Import & Merge Weather Data
# Attach per-play weather (temperature / conditions) from a local CSV.
# left_join with no `by` uses all shared column names (gameId, playId here).
weatherData <- read.csv("Data/weather_for_plays.csv")
weatherData <- weatherData %>%
  select(gameId , playId , Temperature , Conditions)
AnalysisData <- left_join(AnalysisData , weatherData)
# Task
# Incorporate Coverage ID
# Per-player coverage assignments produced by the coverage-ID model; `zone`
# is the player's man/zone indicator, joined on the shared key columns.
coverages <- readRDS('Data/allWeeksCoverageID')
coverages <- coverages %>%
  select(gameId , playId , nflId , displayName , zone, Coverage , CoverageFamily)
AnalysisData <- left_join(AnalysisData , coverages)
# Task
# Determine Primary/Secondary Defender Coverage Assignment
# Look up the coverage-model `zone` value for the primary/secondary defender
# on the ball-snap frame and broadcast it to every row of the play.
AnalysisData <- AnalysisData %>%
  mutate(PrimDefZone = case_when(displayName == PrimaryDefName & event3 == "ball_snap" ~ 'yes' ,
                                 TRUE ~ 'no')) %>%
  # match() returns the index of the first 'yes' row within the play group;
  # `zone` at that row is the primary defender's assignment (NA if no match).
  mutate(PrimDefZone = zone[match('yes',PrimDefZone)]) %>%
  mutate(SecDefZone = case_when(displayName == SecondaryDefName & event3 == "ball_snap" ~ 'yes' ,
                                TRUE ~ 'no')) %>%
  mutate(SecDefZone = zone[match('yes',SecDefZone)])
# Task
# Merge features with General Play data (plays.csv) & Create Extracted Features
length(unique(AnalysisData$playId)) # Indicates number of different play IDs
# Collapse the frame-level data to one row per play by keeping only the
# backfield QB's ball_snap row (all broadcast play-level columns ride along).
ExtractedFeatures <- AnalysisData %>% # Extracts 1 observation per play
  group_by(gameId , playId) %>%
  filter(displayName == BackfieldQB & event3 == "ball_snap") %>%
  filter(is.na(targetName)==0) %>% # Removes players missing key variables
  filter(is.na(PrimaryDefName)==0)
length(unique(ExtractedFeatures$playId)) # Indicates number of different play IDs. Should be the same as previously.
# Merge in play-level metadata (plays.csv) and derive outcome indicators from
# passResult: C = complete, I = incomplete, IN = interception.
ExtractedFeatures <- left_join(ExtractedFeatures , plays)
ExtractedFeatures <- ExtractedFeatures %>%
  mutate(Complete = (passResult == "C")) %>%
  mutate(Incomplete = (passResult == "I")) %>%
  mutate(Interception = (passResult == "IN"))
# Task
# Simplifying Key Player Positions
# Collapse detailed roster positions into coarse groups: backs -> "RB",
# secondary -> "DB", linebackers -> "LB", linemen -> "DL". Unlisted defender
# positions become NA, exactly as in the original chained-OR version.
ExtractedFeatures <- ExtractedFeatures %>%
  mutate(targetPosBasic = case_when( targetPosition %in% c("RB", "HB", "FB") ~ "RB" ,
                                     TRUE ~ targetPosition )) %>%
  mutate(PrimaryDefPosBasic = case_when(
    PrimaryDefPosition %in% c("SS", "S", "FS", "CB", "DB") ~ "DB" ,
    PrimaryDefPosition %in% c("LB", "MLB", "OLB", "ILB") ~ "LB" ,
    PrimaryDefPosition == "DL" ~ "DL" )) %>%
  mutate(SecondaryDefPosBasic = case_when(
    SecondaryDefPosition %in% c("SS", "S", "FS", "CB", "DB") ~ "DB" ,
    SecondaryDefPosition %in% c("LB", "MLB", "OLB", "ILB") ~ "LB" ,
    SecondaryDefPosition == "DL" ~ "DL" ))
# Task
# Mutate Final Extracted Features
# Creating Delta Features
# Relative (target minus defender) speed/acceleration at the throw, plus
# angular differences in direction via the project helper OrientationDiff
# (presumably the wrapped 0-360 angular gap -- confirm against its definition).
ExtractedFeatures <- ExtractedFeatures %>%
  filter(is.na(TargetDirThrow) == 0 & is.na(PrimDefDirThrow) == 0 & is.na(SecDefDirThrow) == 0) %>%
  # Accounts for rare scenario where a player's tracker partly malfunctions
  filter(is.na(PrimDefDistArrive)==0) %>% # Removes plays missing key variables (few & far in between)
  mutate(PrimDeltaSpeedThrow = TargetSpeedThrow - PrimDefSpeedThrow) %>%
  mutate(PrimDeltaAccThrow = TargetAccThrow - PrimDefAccThrow) %>%
  mutate(PrimDeltaDirThrow = OrientationDiff(TargetDirThrow,PrimDefDirThrow)) %>%
  mutate(SecDeltaSpeedThrow = TargetSpeedThrow - SecDefSpeedThrow) %>%
  mutate(SecDeltaAccThrow = TargetAccThrow - SecDefAccThrow) %>%
  mutate(SecDeltaDirThrow = OrientationDiff(TargetDirThrow,SecDefDirThrow))
# Time Pass in Air (& Square)
# Flight time of the pass in seconds (frames are 10 Hz) and a squared
# distance term for quadratic model specifications.
ExtractedFeatures <- ExtractedFeatures %>%
  mutate(TimePassInAir = (ArrivedFrame - PassFrame)/10 ,
         P2RDistThrowSq = Passer2RecDistThrow^2)
# Pool rarely-seen players (fewer than 15 plays as target / primary defender)
# into an "AAA" bucket so fixed effects are only estimated for regulars.
ExtractedFeatures <- ExtractedFeatures %>%
  ungroup %>% group_by(targetName) %>%
  mutate(targetNameQ = if_else(n() < 15, "AAA", targetName)) %>%
  ungroup %>% group_by(PrimaryDefName) %>%
  mutate(PrimaryDefNameQ = if_else(n() < 15, "AAA", PrimaryDefName)) %>%
  ungroup %>% group_by(playId , gameId)
# Fixing Primary Defender Man/Zone
# Recode the numeric man/zone indicator into labels; anything else
# (including NA, i.e. no coverage assignment) falls into "AAA-Other".
ExtractedFeatures <- ExtractedFeatures %>%
  mutate(PrimDefZone = ifelse(!is.na(PrimDefZone) & PrimDefZone == 1, 'Zone',
                       ifelse(!is.na(PrimDefZone) & PrimDefZone == 0, 'Man',
                              'AAA-Other')))
print("Feature Engineering 1 Completed")
|
\name{is_initialized}
\alias{is_initialized}
\docType{package}
\title{
INDICATES IF THE BDD FACTORY HAS BEEN INITIALIZED
}
\description{
This instruction allows the user to know whether the BDD factory has been initialized.
}
\usage{
is_initialized(bdd_name)
}
\arguments{
\item{bdd_name}{(Optional) Name of the BDD.}
}
\value{
\item{is_initialized}{It is true if the factory is initialized and false if it is not.}
}
\examples{
is_initialized()
is_initialized("bdd_1")
}
|
/rbdd/man/is_initialized.Rd
|
no_license
|
braguti/rbdd
|
R
| false
| false
| 476
|
rd
|
\name{is_initialized}
\alias{is_initialized}
\docType{package}
\title{
INDICATES IF THE BDD FACTORY HAS BEEN INITIALIZED
}
\description{
This instruction allows the user to know whether the BDD factory has been initialized.
}
\usage{
is_initialized(bdd_name)
}
\arguments{
\item{bdd_name}{(Optional) Name of the BDD.}
}
\value{
\item{is_initialized}{It is true if the factory is initialized and false if it is not.}
}
\examples{
is_initialized()
is_initialized("bdd_1")
}
|
# Tobit Models
# Load dependencies with library() rather than require(): require() only
# warns and returns FALSE when a package is missing, letting the script fail
# obscurely later; library() errors immediately.
library(ggplot2)
library(GGally)
library(VGAM)
# UCLA IDRE example data: 200 students, apt is right-censored at 800.
dat <- read.csv("https://stats.idre.ucla.edu/stat/data/tobit.csv")
summary(dat)
# function that gives the density of normal distribution
# for given mean and sd, scaled to be on a count metric
# for the histogram: count = density * sample size * bin width
f <- function(x, var, bw = 15) {
  # Normal density at x, fitted to `var` (sample mean and sd), rescaled from
  # density to expected histogram counts: count = density * sample size * bin width.
  n <- length(var)
  dnorm(x, mean = mean(var), sd = sd(var)) * n * bw
}
# setup base plot
p <- ggplot(dat, aes(x = apt, fill=prog))
# histogram, coloured by proportion in different programs
# with a normal distribution overlayed
p + stat_bin(binwidth=15) +
  stat_function(fun = f, size = 1,
                args = list(var = dat$apt))
# bin width 1 makes the pile-up of censored scores at apt = 800 visible
p + stat_bin(binwidth = 1) +
  stat_function(fun = f, size = 1, args = list(var = dat$apt, bw = 1))
cor(dat[, c("read", "math", "apt")])
# plot matrix
ggpairs(dat[, c("read", "math", "apt")])
# Tobit model: apt right-censored at 800, fitted by VGAM::vglm
summary(m <- vglm(apt ~ read + math + prog, tobit(Upper = 800), data = dat))
# Wald-type two-sided p-values from the coefficient z statistics
ctable <- coef(summary(m))
pvals <- 2 * pt(abs(ctable[, "z value"]), df.residual(m), lower.tail = FALSE)
cbind(ctable, pvals)
# Likelihood-ratio test for the overall effect of prog (2 df: two dummies)
m2 <- vglm(apt ~ read + math, tobit(Upper = 800), data = dat)
# NOTE(review): this `p` overwrites the ggplot object created above --
# rename one of them if the plots are needed after this point.
(p <- pchisq(2 * (logLik(m) - logLik(m2)), df = 2, lower.tail = FALSE))
# 95% normal-approximation confidence intervals for the coefficients
b <- coef(m)
se <- sqrt(diag(vcov(m)))
cbind(LL = b - qnorm(0.975) * se, UL = b + qnorm(0.975) * se)
# Residual diagnostics: response and Pearson residuals vs fitted/actual
dat$yhat <- fitted(m)[,1]
dat$rr <- resid(m, type = "response")
dat$rp <- resid(m, type = "pearson")[,1]
par(mfcol = c(2, 3))
with(dat, {
  plot(yhat, rr, main = "Fitted vs Residuals")
  qqnorm(rr)
  plot(yhat, rp, main = "Fitted vs Pearson Residuals")
  qqnorm(rp)
  plot(apt, rp, main = "Actual vs Pearson Residuals")
  plot(apt, yhat, main = "Actual vs Fitted")
})
# correlation
(r <- with(dat, cor(yhat, apt)))
# variance accounted for
r^2
|
/example/4Tobit Regression.R
|
no_license
|
goal1234/R-zZzZzZ
|
R
| false
| false
| 1,768
|
r
|
# Tobit Models
# Load dependencies with library() rather than require(): require() only
# warns and returns FALSE when a package is missing, letting the script fail
# obscurely later; library() errors immediately.
library(ggplot2)
library(GGally)
library(VGAM)
# UCLA IDRE example data: 200 students, apt is right-censored at 800.
dat <- read.csv("https://stats.idre.ucla.edu/stat/data/tobit.csv")
summary(dat)
# function that gives the density of normal distribution
# for given mean and sd, scaled to be on a count metric
# for the histogram: count = density * sample size * bin width
f <- function(x, var, bw = 15) {
  # Normal density at x, fitted to `var` (sample mean and sd), rescaled from
  # density to expected histogram counts: count = density * sample size * bin width.
  n <- length(var)
  dnorm(x, mean = mean(var), sd = sd(var)) * n * bw
}
# setup base plot
p <- ggplot(dat, aes(x = apt, fill=prog))
# histogram, coloured by proportion in different programs
# with a normal distribution overlayed
p + stat_bin(binwidth=15) +
  stat_function(fun = f, size = 1,
                args = list(var = dat$apt))
# bin width 1 makes the pile-up of censored scores at apt = 800 visible
p + stat_bin(binwidth = 1) +
  stat_function(fun = f, size = 1, args = list(var = dat$apt, bw = 1))
cor(dat[, c("read", "math", "apt")])
# plot matrix
ggpairs(dat[, c("read", "math", "apt")])
# Tobit model: apt right-censored at 800, fitted by VGAM::vglm
summary(m <- vglm(apt ~ read + math + prog, tobit(Upper = 800), data = dat))
# Wald-type two-sided p-values from the coefficient z statistics
ctable <- coef(summary(m))
pvals <- 2 * pt(abs(ctable[, "z value"]), df.residual(m), lower.tail = FALSE)
cbind(ctable, pvals)
# Likelihood-ratio test for the overall effect of prog (2 df: two dummies)
m2 <- vglm(apt ~ read + math, tobit(Upper = 800), data = dat)
# NOTE(review): this `p` overwrites the ggplot object created above --
# rename one of them if the plots are needed after this point.
(p <- pchisq(2 * (logLik(m) - logLik(m2)), df = 2, lower.tail = FALSE))
# 95% normal-approximation confidence intervals for the coefficients
b <- coef(m)
se <- sqrt(diag(vcov(m)))
cbind(LL = b - qnorm(0.975) * se, UL = b + qnorm(0.975) * se)
# Residual diagnostics: response and Pearson residuals vs fitted/actual
dat$yhat <- fitted(m)[,1]
dat$rr <- resid(m, type = "response")
dat$rp <- resid(m, type = "pearson")[,1]
par(mfcol = c(2, 3))
with(dat, {
  plot(yhat, rr, main = "Fitted vs Residuals")
  qqnorm(rr)
  plot(yhat, rp, main = "Fitted vs Pearson Residuals")
  qqnorm(rp)
  plot(apt, rp, main = "Actual vs Pearson Residuals")
  plot(apt, yhat, main = "Actual vs Fitted")
})
# correlation
(r <- with(dat, cor(yhat, apt)))
# variance accounted for
r^2
|
\name{RobustGaSP-package}
\alias{RobustGaSP-package}
\alias{RobustGaSP}
\docType{package}
\title{
\packageTitle{RobustGaSP}
}
\description{
\packageDescription{RobustGaSP}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{RobustGaSP}
\packageIndices{RobustGaSP}
%%~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{RobustGaSP}
Maintainer: \packageMaintainer{RobustGaSP}
}
\references{
J.O. Berger, V. De Oliveira and B. Sanso (2001), \emph{Objective Bayesian analysis of spatially correlated data}, \emph{Journal of the American Statistical Association}, {96}, 1361-1374.
M. Gu. and J.O. Berger (2016). Parallel partial Gaussian process emulation for computer models with massive output. \emph{Annals of Applied Statistics}, 10(3), 1317-1347.
M. Gu. (2016). Robust uncertainty quantification and scalable computation for computer models with massive output. Ph.D. thesis. Duke University.
M. Gu, X. Wang and J.O. Berger (2018), \emph{Robust Gaussian stochastic process emulation}, \emph{Annals of Statistics}, 46(6A), 3038-3066.
M. Gu (2018), \emph{Jointly robust prior for Gaussian stochastic process in emulation, calibration and variable selection}, arXiv:1804.09329.
R. Paulo (2005), \emph{Default priors for Gaussian processes}, \emph{Annals of statistics}, 33(2), 556-582.
J. Sacks, W.J. Welch, T.J. Mitchell, and H.P. Wynn (1989), \emph{Design and analysis of computer experiments}, \emph{Statistical Science}, \bold{4}, 409-435.
}
%%~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation directory ~~
\keyword{package}
\keyword{computer model}
\keyword{emulation}
\keyword{simulation}
%\seealso{
%%~~ Optional links to other man pages, e.g. ~~
%%~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
%\code{\link[RobustGaSP]{RobustGaSP}}
%%\code{\link[RobustGaSP]{RobustGaSP-class}}
%}
\examples{
#------------------------
# a 3 dimensional example
#------------------------
# dimension of the inputs
dim_inputs <- 3
# number of observations (design points)
num_obs <- 30
# uniform samples of design
input <- matrix(runif(num_obs*dim_inputs), num_obs,dim_inputs)
# The following code uses a maximin Latin hypercube design, which is typically better than uniform sampling
# library(lhs)
# input <- maximinLHS(n=num_obs, k=dim_inputs) ##maximin lhd sample
####
# outputs from the 3 dim dettepepel.3.data function
output = matrix(0,num_obs,1)
for(i in 1:num_obs){
output[i]<-dettepepel.3.data(input[i,])
}
# use constant mean basis, with no constraint on optimization
m1<- rgasp(design = input, response = output, lower_bound=FALSE)
# the following use constraints on optimization
# m1<- rgasp(design = input, response = output, lower_bound=TRUE)
# the following use a single start on optimization
# m1<- rgasp(design = input, response = output, lower_bound=FALSE)
# number of points to be predicted
num_testing_input <- 5000
# generate points to be predicted
testing_input <- matrix(runif(num_testing_input*dim_inputs),num_testing_input,dim_inputs)
# Perform prediction
m1.predict<-predict(m1, testing_input, outasS3 = FALSE)
# Predictive mean
m1.predict@mean
# The following tests how good the prediction is
testing_output <- matrix(0,num_testing_input,1)
for(i in 1:num_testing_input){
testing_output[i]<-dettepepel.3.data(testing_input[i,])
}
# compute the MSE, average coverage and average length
# out of sample MSE
MSE_emulator <- sum((m1.predict@mean-testing_output)^2)/(num_testing_input)
# proportion covered by 95\% posterior predictive credible interval
prop_emulator <- length(which((m1.predict@lower95<=testing_output)
&(m1.predict@upper95>=testing_output)))/num_testing_input
# average length of posterior predictive credible interval
length_emulator <- sum(m1.predict@upper95-m1.predict@lower95)/num_testing_input
# output of prediction
MSE_emulator
prop_emulator
length_emulator
# normalized RMSE
sqrt(MSE_emulator/mean((testing_output-mean(output))^2 ))
}
|
/fuzzedpackages/RobustGaSP/man/RobustGaSP-package.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 4,143
|
rd
|
\name{RobustGaSP-package}
\alias{RobustGaSP-package}
\alias{RobustGaSP}
\docType{package}
\title{
\packageTitle{RobustGaSP}
}
\description{
\packageDescription{RobustGaSP}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{RobustGaSP}
\packageIndices{RobustGaSP}
%%~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{RobustGaSP}
Maintainer: \packageMaintainer{RobustGaSP}
}
\references{
J.O. Berger, V. De Oliveira and B. Sanso (2001), \emph{Objective Bayesian analysis of spatially correlated data}, \emph{Journal of the American Statistical Association}, {96}, 1361-1374.
M. Gu. and J.O. Berger (2016). Parallel partial Gaussian process emulation for computer models with massive output. \emph{Annals of Applied Statistics}, 10(3), 1317-1347.
M. Gu. (2016). Robust uncertainty quantification and scalable computation for computer models with massive output. Ph.D. thesis. Duke University.
M. Gu, X. Wang and J.O. Berger (2018), \emph{Robust Gaussian stochastic process emulation}, \emph{Annals of Statistics}, 46(6A), 3038-3066.
M. Gu (2018), \emph{Jointly robust prior for Gaussian stochastic process in emulation, calibration and variable selection}, arXiv:1804.09329.
R. Paulo (2005), \emph{Default priors for Gaussian processes}, \emph{Annals of statistics}, 33(2), 556-582.
J. Sacks, W.J. Welch, T.J. Mitchell, and H.P. Wynn (1989), \emph{Design and analysis of computer experiments}, \emph{Statistical Science}, \bold{4}, 409-435.
}
%%~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation directory ~~
\keyword{package}
\keyword{computer model}
\keyword{emulation}
\keyword{simulation}
%\seealso{
%%~~ Optional links to other man pages, e.g. ~~
%%~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
%\code{\link[RobustGaSP]{RobustGaSP}}
%%\code{\link[RobustGaSP]{RobustGaSP-class}}
%}
\examples{
#------------------------
# a 3 dimensional example
#------------------------
# dimension of the inputs
dim_inputs <- 3
# number of observations (design points)
num_obs <- 30
# uniform samples of design
input <- matrix(runif(num_obs*dim_inputs), num_obs,dim_inputs)
# The following code uses a maximin Latin hypercube design, which is typically better than uniform sampling
# library(lhs)
# input <- maximinLHS(n=num_obs, k=dim_inputs) ##maximin lhd sample
####
# outputs from the 3 dim dettepepel.3.data function
output = matrix(0,num_obs,1)
for(i in 1:num_obs){
output[i]<-dettepepel.3.data(input[i,])
}
# use constant mean basis, with no constraint on optimization
m1<- rgasp(design = input, response = output, lower_bound=FALSE)
# the following use constraints on optimization
# m1<- rgasp(design = input, response = output, lower_bound=TRUE)
# the following use a single start on optimization
# m1<- rgasp(design = input, response = output, lower_bound=FALSE)
# number of points to be predicted
num_testing_input <- 5000
# generate points to be predicted
testing_input <- matrix(runif(num_testing_input*dim_inputs),num_testing_input,dim_inputs)
# Perform prediction
m1.predict<-predict(m1, testing_input, outasS3 = FALSE)
# Predictive mean
m1.predict@mean
# The following tests how good the prediction is
testing_output <- matrix(0,num_testing_input,1)
for(i in 1:num_testing_input){
testing_output[i]<-dettepepel.3.data(testing_input[i,])
}
# compute the MSE, average coverage and average length
# out of sample MSE
MSE_emulator <- sum((m1.predict@mean-testing_output)^2)/(num_testing_input)
# proportion covered by 95\% posterior predictive credible interval
prop_emulator <- length(which((m1.predict@lower95<=testing_output)
&(m1.predict@upper95>=testing_output)))/num_testing_input
# average length of posterior predictive credible interval
length_emulator <- sum(m1.predict@upper95-m1.predict@lower95)/num_testing_input
# output of prediction
MSE_emulator
prop_emulator
length_emulator
# normalized RMSE
sqrt(MSE_emulator/mean((testing_output-mean(output))^2 ))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_data.R
\name{formatDfByCol}
\alias{formatDfByCol}
\title{Format dataframe, by column}
\usage{
formatDfByCol(dt, match, trendTeam, rowStat, colStats, statNames)
}
\arguments{
\item{dt}{A dataframe of stats, eg. a Sportscode matrix output.}
\item{match}{(optional) A string. Indicates the game of interest.}
\item{trendTeam}{(optional) A string. Indicates the team for which the user wants to pull all existing game stats.}
\item{rowStat}{A string. Indicates the row of interest.}
\item{colStats}{A vector of strings. Indicates the columns of interest.}
\item{statNames}{A vector of strings. Indicates the formatted version of the colStat names for the plot.}
}
\value{
If all parameters are valid for the input dataframe, then the output will be a formatted dataframe.
}
\description{
formatDfByCol returns a dataframe formatted to visualize distinct column stats
}
\details{
This function returns a formatted dataframe for visualizing data when the
column stats for analysis each need to be visualized individually.
}
|
/man/formatDfByCol.Rd
|
no_license
|
ludisanalytics/ludisviz
|
R
| false
| true
| 1,108
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_data.R
\name{formatDfByCol}
\alias{formatDfByCol}
\title{Format dataframe, by column}
\usage{
formatDfByCol(dt, match, trendTeam, rowStat, colStats, statNames)
}
\arguments{
\item{dt}{A dataframe of stats, eg. a Sportscode matrix output.}
\item{match}{(optional) A string. Indicates the game of interest.}
\item{trendTeam}{(optional) A string. Indicates the team for which the user wants to pull all existing game stats.}
\item{rowStat}{A string. Indicates the row of interest.}
\item{colStats}{A vector of strings. Indicates the columns of interest.}
\item{statNames}{A vector of strings. Indicates the formatted version of the colStat names for the plot.}
}
\value{
If all parameters are valid for the input dataframe, then the output will be a formatted dataframe.
}
\description{
formatDfByCol returns a dataframe formatted to visualize distinct column stats
}
\details{
This function returns a formatted dataframe for visualizing data when the
column stats for analysis each need to be visualized individually.
}
|
## Generalized-profiling fit of a time-varying, delayed SIR model to
## Ontario measles data, using B-spline collocation (CollocInfer and the
## source()d helpers below). The smoothing parameters lambda1/lambda2 are
## decoded from a single integer command-line argument, so the script can
## be run as an array job over a lambda grid.
source("./R/tv-delay.R")
source("./R/mDTVSIRfn.R")
source("./R/sparse.R")
source("./R/LS.sparse.R")
library(penalized)
library(CollocInfer)
library(limSolve)
## Real data: weekly measles case counts (mDf) and monthly birth counts
## (bDf) for Ontario, Canada.
mDf <- read.csv("./Data/meas_ca_on__1939-89_wk.csv", skip = 3)
bDf <- read.csv("./Data/bth_ca_on__1921-2002_mn.csv", skip = 5)
## plot(x = mDf$numdate[mDf$numdate > 1955 & mDf$numdate < 1960], y = mDf$cases[mDf$numdate > 1955 & mDf$numdate < 1960],type = "l")
## Restrict the analysis to the 1958-1963 window.
mTimes <- mDf$numdate[mDf$numdate > 1958 & mDf$numdate < 1963]
mI <- mDf$cases[mDf$numdate > 1958 & mDf$numdate < 1963]
tmpMonth <- mDf$month[mDf$numdate > 1958 & mDf$numdate < 1963]
mB <- rep(0, length(tmpMonth))
## Monthly births matched to each weekly observation, rescaled to an
## annualised rate. NOTE(review): the factors 0.384 and (671 + 772)/11410
## look like dataset-specific scalings -- confirm against the data docs.
for(i in 1:length(tmpMonth)){
    mB[i] <- bDf[which(bDf$year == floor(mTimes[i]) & bDf$month == tmpMonth[i]),3 ] * 12 * 0.384 * (671 + 772) / 11410
}
## Shift the time origin so t = 0 corresponds to 1958.
mTimes <- mTimes - 1958
rr = c(0,round(max(mTimes))) # the range of observation times
knots = seq(rr[1],rr[2],2/52) # knots every two weeks
norder = 3 # the order of the B-spline basis functions,
# in this case piece-wise quadratic
nbasis = length(knots)+norder-2 # the number of basis functions
# set up the basis object over the full observation window
bbasis0 <- create.bspline.basis(range=rr, norder=norder, nbasis=nbasis,breaks=knots)
times0 <- mTimes
## A second basis/time grid restricted to [1, rr[2]]: the delayed system is
## fit only from t = 1 on, presumably so that history is available for the
## delay terms -- TODO confirm against Profile.LS.tv.delay().
times.d <- mTimes[mTimes >= 1]
knots.d <- seq(1,rr[2],2/52)
nbasis.d = length(knots.d) + norder - 2
bbasis.d <- create.bspline.basis(range=c(1,rr[2]), norder=norder, nbasis=nbasis.d, breaks=knots.d)
## Generating Data: column S (susceptibles) is unobserved (NA),
## column I holds the weekly case counts.
mData <- matrix(NA, length(mI),2)
mData[,2] <- mI
colnames(mData) <- c("S" , "I")
mData.d <- mData[mTimes >= 1,]
# To get an initial estimate of the states we smooth the observed I component
# and set the S coefficients to a constant.
# smooth the observed I values
fdnames=list(NULL,c('S', 'I'),NULL)
DEfd0 <- smooth.basis(times0 ,(mData[,2]),fdPar(bbasis0,1,0.1))
DEfd.d <- smooth.basis(times.d, (mData[,2])[times0 >= 1],fdPar(bbasis.d,1,0.1))
## Initial coefficients: constant 80000 for S, smoothed fit for I.
coefs0 <- cbind(matrix(80000,bbasis0$nbasis,1), DEfd0$fd$coefs)
coefs.d <- cbind(matrix(80000,bbasis.d$nbasis,1), DEfd.d$fd$coefs)
colnames(coefs0) <- colnames(coefs.d) <- c("S", "I")
# set up the functional data objects for the state variables
# plot the smooth plus data
## plotfit.fd(mData[,2],times0,DEfd0$fd)
DEfd0 <- fd(coefs0,bbasis0, fdnames)
DEfd.d <- fd(coefs.d,bbasis.d, fdnames)
## Birth rate evaluated at the process (quadrature) times.
## NOTE(review): vector(, n) creates a *logical* vector (the default mode),
## which is coerced to numeric on first assignment below;
## numeric(length(procTimes)) would state the intent directly.
procTimes <- c(1, seq(1 + 1/52, 5 - 1/52, by = 2/52), 5)
procB <- vector(,length(procTimes))
for(i in 1:length(procTimes)){
    ## Convert the fractional year to a month index (0 is mapped to January).
    month <- round((procTimes[i] - floor(procTimes[i])) * 12)
    if(month == 0)
        month <- 1
    procB[i] <- bDf[which(bDf$year == (floor(procTimes[i])+1958) & bDf$month == month),3 ] * 0.384 * 12
}
# list object betamore is now passed into LS.setup, too, in order to make
# it available as a functional parameter defined by its coefficients
# run LS.setup
## Decode the 2-D lambda grid from one integer task id: the base-5 digits
## index powers of ten, each then divided by 1000.
args <- commandArgs(TRUE)
lambda1 <- 10^(as.numeric(args[1]) %/% 5) / 1000
lambda2 <- 10^(as.numeric(args[1]) %% 5) / 1000
## Starting values: recovery rate gamma, seasonal kappa coefficients,
## and the delay-weight vector beta (only its first two entries non-zero).
mPars <- 50
names(mPars) <- c("gamma")
mKappa <- rep(1e-4, 4)
names(mKappa) <- c("k1", "k2", "k3","k4")
initBeta <- rep(0, 7)
initBeta[1:2] <- 0.0005
## debug(Profile.LS.tv)
## Main profiled least-squares fit; Profile.LS.tv.delay(), mDTVSIRfn and
## `delay` come from the files source()d at the top.
tv.fit <- Profile.LS.tv.delay(mDTVSIRfn, mData.d, times.d, pars = mPars, kappa = mKappa, coefs = coefs.d, beta = initBeta, basisvals = bbasis.d, lambda = c(lambda1,lambda2), more = list(b = procB), in.meth='nlminb', control.out = list(method = "nnls", maxIter = 10, lambda.sparse = 0, echo = TRUE), delay = delay, basisvals0 = bbasis0, coefs0 = coefs0, nbeta = length(initBeta), ndelay = 2, tau = list(seq(0,6/52, 1/52)))
## Persist the fit, tagged by the lambda pair.
save(tv.fit, lambda1, lambda2, file = paste("mfit02-",lambda1,lambda2,".RData", sep=""))
|
/measles-scr02.R
|
no_license
|
VulpeculaZ/sparseDE
|
R
| false
| false
| 3,682
|
r
|
## Generalized-profiling fit of a time-varying, delayed SIR model to
## Ontario measles data, using B-spline collocation (CollocInfer and the
## source()d helpers below). The smoothing parameters lambda1/lambda2 are
## decoded from a single integer command-line argument, so the script can
## be run as an array job over a lambda grid.
source("./R/tv-delay.R")
source("./R/mDTVSIRfn.R")
source("./R/sparse.R")
source("./R/LS.sparse.R")
library(penalized)
library(CollocInfer)
library(limSolve)
## Real data: weekly measles case counts (mDf) and monthly birth counts
## (bDf) for Ontario, Canada.
mDf <- read.csv("./Data/meas_ca_on__1939-89_wk.csv", skip = 3)
bDf <- read.csv("./Data/bth_ca_on__1921-2002_mn.csv", skip = 5)
## plot(x = mDf$numdate[mDf$numdate > 1955 & mDf$numdate < 1960], y = mDf$cases[mDf$numdate > 1955 & mDf$numdate < 1960],type = "l")
## Restrict the analysis to the 1958-1963 window.
mTimes <- mDf$numdate[mDf$numdate > 1958 & mDf$numdate < 1963]
mI <- mDf$cases[mDf$numdate > 1958 & mDf$numdate < 1963]
tmpMonth <- mDf$month[mDf$numdate > 1958 & mDf$numdate < 1963]
mB <- rep(0, length(tmpMonth))
## Monthly births matched to each weekly observation, rescaled to an
## annualised rate. NOTE(review): the factors 0.384 and (671 + 772)/11410
## look like dataset-specific scalings -- confirm against the data docs.
for(i in 1:length(tmpMonth)){
    mB[i] <- bDf[which(bDf$year == floor(mTimes[i]) & bDf$month == tmpMonth[i]),3 ] * 12 * 0.384 * (671 + 772) / 11410
}
## Shift the time origin so t = 0 corresponds to 1958.
mTimes <- mTimes - 1958
rr = c(0,round(max(mTimes))) # the range of observation times
knots = seq(rr[1],rr[2],2/52) # knots every two weeks
norder = 3 # the order of the B-spline basis functions,
# in this case piece-wise quadratic
nbasis = length(knots)+norder-2 # the number of basis functions
# set up the basis object over the full observation window
bbasis0 <- create.bspline.basis(range=rr, norder=norder, nbasis=nbasis,breaks=knots)
times0 <- mTimes
## A second basis/time grid restricted to [1, rr[2]]: the delayed system is
## fit only from t = 1 on, presumably so that history is available for the
## delay terms -- TODO confirm against Profile.LS.tv.delay().
times.d <- mTimes[mTimes >= 1]
knots.d <- seq(1,rr[2],2/52)
nbasis.d = length(knots.d) + norder - 2
bbasis.d <- create.bspline.basis(range=c(1,rr[2]), norder=norder, nbasis=nbasis.d, breaks=knots.d)
## Generating Data: column S (susceptibles) is unobserved (NA),
## column I holds the weekly case counts.
mData <- matrix(NA, length(mI),2)
mData[,2] <- mI
colnames(mData) <- c("S" , "I")
mData.d <- mData[mTimes >= 1,]
# To get an initial estimate of the states we smooth the observed I component
# and set the S coefficients to a constant.
# smooth the observed I values
fdnames=list(NULL,c('S', 'I'),NULL)
DEfd0 <- smooth.basis(times0 ,(mData[,2]),fdPar(bbasis0,1,0.1))
DEfd.d <- smooth.basis(times.d, (mData[,2])[times0 >= 1],fdPar(bbasis.d,1,0.1))
## Initial coefficients: constant 80000 for S, smoothed fit for I.
coefs0 <- cbind(matrix(80000,bbasis0$nbasis,1), DEfd0$fd$coefs)
coefs.d <- cbind(matrix(80000,bbasis.d$nbasis,1), DEfd.d$fd$coefs)
colnames(coefs0) <- colnames(coefs.d) <- c("S", "I")
# set up the functional data objects for the state variables
# plot the smooth plus data
## plotfit.fd(mData[,2],times0,DEfd0$fd)
DEfd0 <- fd(coefs0,bbasis0, fdnames)
DEfd.d <- fd(coefs.d,bbasis.d, fdnames)
## Birth rate evaluated at the process (quadrature) times.
## NOTE(review): vector(, n) creates a *logical* vector (the default mode),
## which is coerced to numeric on first assignment below;
## numeric(length(procTimes)) would state the intent directly.
procTimes <- c(1, seq(1 + 1/52, 5 - 1/52, by = 2/52), 5)
procB <- vector(,length(procTimes))
for(i in 1:length(procTimes)){
    ## Convert the fractional year to a month index (0 is mapped to January).
    month <- round((procTimes[i] - floor(procTimes[i])) * 12)
    if(month == 0)
        month <- 1
    procB[i] <- bDf[which(bDf$year == (floor(procTimes[i])+1958) & bDf$month == month),3 ] * 0.384 * 12
}
# list object betamore is now passed into LS.setup, too, in order to make
# it available as a functional parameter defined by its coefficients
# run LS.setup
## Decode the 2-D lambda grid from one integer task id: the base-5 digits
## index powers of ten, each then divided by 1000.
args <- commandArgs(TRUE)
lambda1 <- 10^(as.numeric(args[1]) %/% 5) / 1000
lambda2 <- 10^(as.numeric(args[1]) %% 5) / 1000
## Starting values: recovery rate gamma, seasonal kappa coefficients,
## and the delay-weight vector beta (only its first two entries non-zero).
mPars <- 50
names(mPars) <- c("gamma")
mKappa <- rep(1e-4, 4)
names(mKappa) <- c("k1", "k2", "k3","k4")
initBeta <- rep(0, 7)
initBeta[1:2] <- 0.0005
## debug(Profile.LS.tv)
## Main profiled least-squares fit; Profile.LS.tv.delay(), mDTVSIRfn and
## `delay` come from the files source()d at the top.
tv.fit <- Profile.LS.tv.delay(mDTVSIRfn, mData.d, times.d, pars = mPars, kappa = mKappa, coefs = coefs.d, beta = initBeta, basisvals = bbasis.d, lambda = c(lambda1,lambda2), more = list(b = procB), in.meth='nlminb', control.out = list(method = "nnls", maxIter = 10, lambda.sparse = 0, echo = TRUE), delay = delay, basisvals0 = bbasis0, coefs0 = coefs0, nbeta = length(initBeta), ndelay = 2, tau = list(seq(0,6/52, 1/52)))
## Persist the fit, tagged by the lambda pair.
save(tv.fit, lambda1, lambda2, file = paste("mfit02-",lambda1,lambda2,".RData", sep=""))
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/iipmetric.r
\name{iipmetric}
\alias{iipmetric}
\title{Compute Intensity Inner Product Metrics}
\usage{
iipmetric(S1, S2, measure = "sim", tau = 1, M = NULL)
}
\arguments{
\item{S1}{marked point process data.}
\item{S2}{marked point process data.}
\item{measure}{\code{"sim"} for similarity and \code{"dist"} for distance. Default \code{"sim"}.}
\item{tau}{a parameter for filtering function.}
\item{M}{a precision matrix for filter of marks, i.e., exp( - r' M r) is used for filtering marks. It should be symmetric and positive semi-definite.}
}
\value{
Similarity or distance between the two input (marked) point processes S1 and S2.
}
\description{
For the analysis of point processes, the intensity function plays a central role. Paiva et al. (2009) proposed using the intensity function to define the inner product between point process realizations.
}
\details{
\code{iipmetric} computes intensity inner product metric. Intensity function for the point process realization is estimated by kernel density estimator. This function adopts Gaussian kernels for the sake of computational efficiency.
}
\examples{
##The aftershock data of 26th July 2003 earthquake of M6.2 at the northern Miyagi-Ken Japan.
data(Miyagi20030626)
## time longitude latitude depth magnitude
## split events by 7-hour
sMiyagi <- splitMPP(Miyagi20030626,h=60*60*7,scaleMarks=TRUE)$S
N <- 10
tau <- 0.1
sMat <- matrix(0,N,N)
cat("calculating intensity inner product...")
for(i in 1:(N)){
cat(i," ")
for(j in i:N){
S1 <- sMiyagi[[i]]$time;S2 <- sMiyagi[[j]]$time
sMat[i,j] <- iipmetric(S1,S2,tau=tau,M=diag(1,4))
}
}
sMat <- sMat+t(sMat)
tmpd <- diag(sMat) <- diag(sMat)/2
sMat <- sMat/sqrt(outer(tmpd,tmpd))
image(sMat)
}
\author{
Hideitsu Hino \email{hinohide@cs.tsukuba.ac.jp}, Ken Takano, Yuki Yoshikawa, and Noboru Murata
}
\references{
A.R.C. Paiva, I. Park, and J.C. Principe. A reproducing kernel Hilbert space framework for spike train signal processing, Neural Computation, Vol. 21(2), pp. 424-449, 2009.
}
|
/man/iipmetric.Rd
|
no_license
|
cran/mmpp
|
R
| false
| false
| 2,101
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/iipmetric.r
\name{iipmetric}
\alias{iipmetric}
\title{Compute Intensity Inner Product Metrics}
\usage{
iipmetric(S1, S2, measure = "sim", tau = 1, M = NULL)
}
\arguments{
\item{S1}{marked point process data.}
\item{S2}{marked point process data.}
\item{measure}{\code{"sim"} for similarity and \code{"dist"} for distance. Default \code{"sim"}.}
\item{tau}{a parameter for filtering function.}
\item{M}{a precision matrix for filter of marks, i.e., exp( - r' M r) is used for filtering marks. It should be symmetric and positive semi-definite.}
}
\value{
Similarity or distance between the two input (marked) point processes S1 and S2.
}
\description{
For the analysis of point processes, the intensity function plays a central role. Paiva et al. (2009) proposed using the intensity function to define the inner product between point process realizations.
}
\details{
\code{iipmetric} computes intensity inner product metric. Intensity function for the point process realization is estimated by kernel density estimator. This function adopts Gaussian kernels for the sake of computational efficiency.
}
\examples{
##The aftershock data of 26th July 2003 earthquake of M6.2 at the northern Miyagi-Ken Japan.
data(Miyagi20030626)
## time longitude latitude depth magnitude
## split events by 7-hour
sMiyagi <- splitMPP(Miyagi20030626,h=60*60*7,scaleMarks=TRUE)$S
N <- 10
tau <- 0.1
sMat <- matrix(0,N,N)
cat("calculating intensity inner product...")
for(i in 1:(N)){
cat(i," ")
for(j in i:N){
S1 <- sMiyagi[[i]]$time;S2 <- sMiyagi[[j]]$time
sMat[i,j] <- iipmetric(S1,S2,tau=tau,M=diag(1,4))
}
}
sMat <- sMat+t(sMat)
tmpd <- diag(sMat) <- diag(sMat)/2
sMat <- sMat/sqrt(outer(tmpd,tmpd))
image(sMat)
}
\author{
Hideitsu Hino \email{hinohide@cs.tsukuba.ac.jp}, Ken Takano, Yuki Yoshikawa, and Noboru Murata
}
\references{
A.R.C. Paiva, I. Park, and J.C. Principe. A reproducing kernel Hilbert space framework for spike train signal processing, Neural Computation, Vol. 21(2), pp. 424-449, 2009.
}
|
# Rotor properties - minor tidying of Masden ------------------------------
# get pitch and rotor values for turbine sampling -------------------------
# Truncated-normal sampling of wind speed (non-negativity constraint).
# S is larger than iter so enough draws survive the threshold filter below.
windSpeed <- rtnorm(S, windSpeedMean, windSpeedSD, 0)
# Resample to slightly more than iter values (greater than iter appears to
# be the goal, so that at least iter values remain after filtering).
wind.speed.m.s <- sample(windSpeed, iter + (iter/5), replace = TRUE)
# Read in the wind-speed / rotor-speed & pitch relationships (power curve)
# for the current turbine model.
windName <- paste("windpower", TurbineData$TurbineModel[t], sep = "_")
windD <- paste(windName, "csv", sep = ".")
windData <- read.csv(paste("data", windD, sep = "/"), header = TRUE)
# Remove anything below the cut-in wind speed of the power curve
# (the first wind speed at which the rotor turns).
windThreshold <- windData$Wind[min(which(windData$Rotor != 0))]
wind.speed.m.s <- wind.speed.m.s[wind.speed.m.s > windThreshold]
# Assign rotor speed and pitch to each sampled wind speed by locating the
# power-curve interval it falls in: findInterval() returns, for each speed,
# the largest z with windData$Wind[z] <= speed (windData$Wind is sorted
# ascending, as power curves are). This replaces the original nested loop,
# whose dangling "else" relied on a stray statement executing on every
# iteration, with an equivalent vectorised lookup.
curveIndex <- findInterval(wind.speed.m.s, windData$Wind)
rotorSpeed <- windData$Rotor[curveIndex]
rotorPitch <- windData$Pitch[curveIndex]
|
/scripts/get_rotor_plus_pitch_auto.r
|
no_license
|
dmpstats/stochCRM
|
R
| false
| false
| 1,631
|
r
|
# Rotor properties - minor tidying of Masden ------------------------------
# get pitch and rotor values for turbine sampling -------------------------
# Truncated-normal sampling of wind speed (non-negativity constraint).
# S is larger than iter so enough draws survive the threshold filter below.
windSpeed <- rtnorm(S, windSpeedMean, windSpeedSD, 0)
# Resample to slightly more than iter values (greater than iter appears to
# be the goal, so that at least iter values remain after filtering).
wind.speed.m.s <- sample(windSpeed, iter + (iter/5), replace = TRUE)
# Read in the wind-speed / rotor-speed & pitch relationships (power curve)
# for the current turbine model.
windName <- paste("windpower", TurbineData$TurbineModel[t], sep = "_")
windD <- paste(windName, "csv", sep = ".")
windData <- read.csv(paste("data", windD, sep = "/"), header = TRUE)
# Remove anything below the cut-in wind speed of the power curve
# (the first wind speed at which the rotor turns).
windThreshold <- windData$Wind[min(which(windData$Rotor != 0))]
wind.speed.m.s <- wind.speed.m.s[wind.speed.m.s > windThreshold]
# Assign rotor speed and pitch to each sampled wind speed by locating the
# power-curve interval it falls in: findInterval() returns, for each speed,
# the largest z with windData$Wind[z] <= speed (windData$Wind is sorted
# ascending, as power curves are). This replaces the original nested loop,
# whose dangling "else" relied on a stray statement executing on every
# iteration, with an equivalent vectorised lookup.
curveIndex <- findInterval(wind.speed.m.s, windData$Wind)
rotorSpeed <- windData$Rotor[curveIndex]
rotorPitch <- windData$Pitch[curveIndex]
|
library(DiceKriging)
### Name: update
### Title: Update of a kriging model
### Aliases: update update.km update,km-method
### ** Examples
set.seed(8)
N <- 9 # number of observations in the initial design
testfun <- branin # classic Branin test function
# a 9-point initial design: 3 x 3 grid over the unit square
axis_pts <- seq(0, 1, length = 3)
design <- expand.grid(x1 = axis_pts, x2 = axis_pts)
response <- testfun(design)
# fit a kriging model with a Matern(3/2) covariance;
# covariance parameters are estimated by maximum likelihood
model <- km(formula = ~., design = design,
            response = response, covtype = "matern3_2")
model@covariance
# one new observation to fold into the existing km object
newX <- matrix(c(0.4, 0.5), ncol = 2)
newy <- testfun(newX)
# update the model with the new point, re-estimating the covariance
newmodel <- update(object = model, newX = newX, newy = newy, cov.reestim = TRUE)
newmodel@covariance
|
/data/genthat_extracted_code/DiceKriging/examples/update.km.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 737
|
r
|
library(DiceKriging)
### Name: update
### Title: Update of a kriging model
### Aliases: update update.km update,km-method
### ** Examples
set.seed(8)
N <- 9 # number of observations in the initial design
testfun <- branin # classic Branin test function
# a 9-point initial design: 3 x 3 grid over the unit square
axis_pts <- seq(0, 1, length = 3)
design <- expand.grid(x1 = axis_pts, x2 = axis_pts)
response <- testfun(design)
# fit a kriging model with a Matern(3/2) covariance;
# covariance parameters are estimated by maximum likelihood
model <- km(formula = ~., design = design,
            response = response, covtype = "matern3_2")
model@covariance
# one new observation to fold into the existing km object
newX <- matrix(c(0.4, 0.5), ncol = 2)
newy <- testfun(newX)
# update the model with the new point, re-estimating the covariance
newmodel <- update(object = model, newX = newX, newy = newy, cov.reestim = TRUE)
newmodel@covariance
|
## Filter the records needed for the course assignment (1 and 2 Feb 2007)
## into a temporary file, then read that file in as a data frame.
## Streaming line by line keeps memory use low for the ~2M-row source file.
conx1 <- file("./household_power_consumption.txt", "r")
conx2 <- file("./household_power_consumption_filtered.txt", "w")
writeLines(readLines(conx1, n = 1), conx2) ## copy the header line through
tag <- "^[12]/2/2007" ## filtering condition: rows dated 1/2/2007 or 2/2/2007
repeat {
  reg <- readLines(conx1, n = 1)
  if (length(reg) == 0) break ## end of file
  ## BUG FIX: the original tested `regexec(tag, reg) < 1`, but regexec()
  ## returns a *list*, and comparing a list with `<` is a runtime error.
  ## grepl() returns a plain logical and expresses the intent directly.
  if (!grepl(tag, reg)) next ## skip rows for other dates
  writeLines(reg, conx2)
}
close(conx1); close(conx2)
## Read the filtered subset; "?" marks missing values in this dataset.
hpcData <- read.table("./household_power_consumption_filtered.txt", header = TRUE, sep = ";", na.strings = "?", as.is = TRUE)
hpcData$datetime <- strptime(paste(hpcData$Date, hpcData$Time), "%d/%m/%Y %H:%M:%S") ## Date/Time conversion
## Plot
Sys.setlocale(category = "LC_ALL", locale = "C") ## Set locale to English for axis labels
with(hpcData, {
  plot(datetime, Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
  lines(datetime, Sub_metering_2, type = "l", col = "red")
  lines(datetime, Sub_metering_3, type = "l", col = "blue")
})
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = 0.75)
## Output to PNG device (480 x 480 is the assignment's required size)
dev.copy(png, "./plot3.png", width = 480, height = 480)
dev.off()
|
/plot3.R
|
no_license
|
Nick-Lin/ExData_Plotting1
|
R
| false
| false
| 1,230
|
r
|
## Filter the records needed for the course assignment (1 and 2 Feb 2007)
## into a temporary file, then read that file in as a data frame.
## Streaming line by line keeps memory use low for the ~2M-row source file.
conx1 <- file("./household_power_consumption.txt", "r")
conx2 <- file("./household_power_consumption_filtered.txt", "w")
writeLines(readLines(conx1, n = 1), conx2) ## copy the header line through
tag <- "^[12]/2/2007" ## filtering condition: rows dated 1/2/2007 or 2/2/2007
repeat {
  reg <- readLines(conx1, n = 1)
  if (length(reg) == 0) break ## end of file
  ## BUG FIX: the original tested `regexec(tag, reg) < 1`, but regexec()
  ## returns a *list*, and comparing a list with `<` is a runtime error.
  ## grepl() returns a plain logical and expresses the intent directly.
  if (!grepl(tag, reg)) next ## skip rows for other dates
  writeLines(reg, conx2)
}
close(conx1); close(conx2)
## Read the filtered subset; "?" marks missing values in this dataset.
hpcData <- read.table("./household_power_consumption_filtered.txt", header = TRUE, sep = ";", na.strings = "?", as.is = TRUE)
hpcData$datetime <- strptime(paste(hpcData$Date, hpcData$Time), "%d/%m/%Y %H:%M:%S") ## Date/Time conversion
## Plot
Sys.setlocale(category = "LC_ALL", locale = "C") ## Set locale to English for axis labels
with(hpcData, {
  plot(datetime, Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
  lines(datetime, Sub_metering_2, type = "l", col = "red")
  lines(datetime, Sub_metering_3, type = "l", col = "blue")
})
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = 0.75)
## Output to PNG device (480 x 480 is the assignment's required size)
dev.copy(png, "./plot3.png", width = 480, height = 480)
dev.off()
|
## This part of the script downloads the original dataset in zip format,
## reads the txt file inside it, and creates the "powerdata" data frame
## (source data for all four plots), selecting two days of data from the
## original dataset.
## Set your working directory (adjust this path for your machine).
setwd("C:/COURSERA/ExploratoryDataAnalysis/Project1/")
## Check that the zip file exists in the working directory; if not, download it.
## NOTE: the original called setInternet2(TRUE), which was removed in
## R 3.5.0 (it errors there); download.file() handles https URLs directly.
filename <- "household_power_consumption.zip"
if (!file.exists(filename)) {
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  f <- file.path(getwd(), "household_power_consumption.zip")
  ## mode = "wb" prevents corruption of the binary zip on Windows
  download.file(url, f, mode = "wb")
}
## If the "powerdata" data frame does not already exist in the R session,
## read the data straight out of the zip, then prepare the data frame with
## the two target days.
if (!exists("powerdata")) {
  ## "?" marks missing values; first two columns are Date and Time strings
  raw <- read.csv(unz(filename, "household_power_consumption.txt"),
                  header = TRUE, sep = ";", stringsAsFactors = FALSE, na.strings = "?",
                  colClasses = c("character", "character", rep("numeric", 7)))
  ## Select Feb 1, 2007 and Feb 2, 2007 data.
  ## strptime() converts character vectors directly to POSIXlt.
  keep <- raw$Date == "1/2/2007" | raw$Date == "2/2/2007"
  powerdata <- raw[keep, ]
  dt_str <- paste(powerdata$Date, powerdata$Time)
  powerdata$DateTime <- strptime(dt_str, "%d/%m/%Y %H:%M:%S")
  powerdata$Date <- as.Date(powerdata$Date, format = "%d/%m/%Y")
  rownames(powerdata) <- 1:nrow(powerdata)
}
# end of download data
## Construction of plot 3
png(filename = "plot3.png", width = 480, height = 480, units = "px",
    bg = "transparent")
par(mfrow = c(1, 1)) # figures will be drawn in a 1x1 matrix
plot(powerdata$DateTime, powerdata$Sub_metering_1,
     type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(powerdata$DateTime, powerdata$Sub_metering_2, col = "red")
lines(powerdata$DateTime, powerdata$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 1)
dev.off()
|
/plot3.R
|
no_license
|
egrojgonzalez/ExData_Plotting1
|
R
| false
| false
| 2,208
|
r
|
## This part of the script downloads the original dataset in zip format,
## reads the txt file inside it, and creates the "powerdata" data frame
## (source data for all four plots), selecting two days of data from the
## original dataset.
## Set your working directory (adjust this path for your machine).
setwd("C:/COURSERA/ExploratoryDataAnalysis/Project1/")
## Check that the zip file exists in the working directory; if not, download it.
## NOTE: the original called setInternet2(TRUE), which was removed in
## R 3.5.0 (it errors there); download.file() handles https URLs directly.
filename <- "household_power_consumption.zip"
if (!file.exists(filename)) {
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  f <- file.path(getwd(), "household_power_consumption.zip")
  ## mode = "wb" prevents corruption of the binary zip on Windows
  download.file(url, f, mode = "wb")
}
## If the "powerdata" data frame does not already exist in the R session,
## read the data straight out of the zip, then prepare the data frame with
## the two target days.
if (!exists("powerdata")) {
  ## "?" marks missing values; first two columns are Date and Time strings
  raw <- read.csv(unz(filename, "household_power_consumption.txt"),
                  header = TRUE, sep = ";", stringsAsFactors = FALSE, na.strings = "?",
                  colClasses = c("character", "character", rep("numeric", 7)))
  ## Select Feb 1, 2007 and Feb 2, 2007 data.
  ## strptime() converts character vectors directly to POSIXlt.
  keep <- raw$Date == "1/2/2007" | raw$Date == "2/2/2007"
  powerdata <- raw[keep, ]
  dt_str <- paste(powerdata$Date, powerdata$Time)
  powerdata$DateTime <- strptime(dt_str, "%d/%m/%Y %H:%M:%S")
  powerdata$Date <- as.Date(powerdata$Date, format = "%d/%m/%Y")
  rownames(powerdata) <- 1:nrow(powerdata)
}
# end of download data
## Construction of plot 3
png(filename = "plot3.png", width = 480, height = 480, units = "px",
    bg = "transparent")
par(mfrow = c(1, 1)) # figures will be drawn in a 1x1 matrix
plot(powerdata$DateTime, powerdata$Sub_metering_1,
     type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(powerdata$DateTime, powerdata$Sub_metering_2, col = "red")
lines(powerdata$DateTime, powerdata$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 1)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_A.R
\docType{data}
\name{A}
\alias{A}
\title{Adjacency matrices of binary brain networks for 212 subjects}
\format{An object of class \code{array} of dimension 68 x 68 x 212.}
\source{
\href{https://github.com/wangronglu/CISE-algorithm}{Github}
}
\usage{
data(A)
}
\description{
Data from the Human Connectome Project (HCP). The brain is segmented into 68
regions. Each subject's binary network consists of a collection of
interconnections among those brain regions, with ones indicating a connection
between a pair of regions and zeros indicating no connection.
}
\references{
Wang et al. (2017)
(\href{https://arxiv.org/abs/1707.06360}{Arxiv})
}
\keyword{datasets}
|
/man/A.Rd
|
no_license
|
wangronglu/CISE
|
R
| false
| true
| 746
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_A.R
\docType{data}
\name{A}
\alias{A}
\title{Adjacency matrices of binary brain networks for 212 subjects}
\format{An object of class \code{array} of dimension 68 x 68 x 212.}
\source{
\href{https://github.com/wangronglu/CISE-algorithm}{Github}
}
\usage{
data(A)
}
\description{
Data from the Human Connectome Project (HCP). The brain is segmented into 68
regions. Each subject's binary network consists of a collection of
interconnections among those brain regions, with ones indicating a connection
between a pair of regions and zeros indicating no connection.
}
\references{
Wang et al. (2017)
(\href{https://arxiv.org/abs/1707.06360}{Arxiv})
}
\keyword{datasets}
|
# Roxygen documentation stubs for dataset objects bundled with the package.
# NOTE(review): the @title/@description fields are "XXX" placeholders --
# fill in real metadata before release. The "location:" lines record where
# the raw objects originally lived on the group's file server, and the
# commented code under `stratas_df` records how that data frame was derived
# from the attributes of STRATAS (provenance, not executable documentation).
#' @name Std.aggregation
#' @title XXX
#' @description XXX
#' location: /net/hafkaldi/export/u2/reikn/Splus5/SMB/GEOMETRY.NEW
#' @docType data
#' @author Hoskuldur Bjornsson
NULL
#' @name STRATAS
#' @title XXX
#' @description XXX
#' location: /net/hafkaldi/export/u2/reikn/Splus5/SMB/GEOMETRY.NEW
#' @docType data
#' @author Hoskuldur Bjornsson
NULL
#' @name stratas_df
#' @title XXX
#' @description XXX
#'
#' x <- attributes(STRATAS)
#' stratas_df <- data.frame(strata = as.integer(x$names),
#' area = x$area,
#' rall.area = x$rall.area,
#' lon = x$pos[,2],
#' lat = x$pos[,1])
#' use_data(stratas_df)
#'
#' @docType data
#' @author Einar Hjorleifsson
NULL
|
/R/data_GEOMETRYNEW.R
|
no_license
|
fishvice/husky
|
R
| false
| false
| 661
|
r
|
# Roxygen documentation stubs for dataset objects bundled with the package.
# NOTE(review): the @title/@description fields are "XXX" placeholders --
# fill in real metadata before release. The "location:" lines record where
# the raw objects originally lived on the group's file server, and the
# commented code under `stratas_df` records how that data frame was derived
# from the attributes of STRATAS (provenance, not executable documentation).
#' @name Std.aggregation
#' @title XXX
#' @description XXX
#' location: /net/hafkaldi/export/u2/reikn/Splus5/SMB/GEOMETRY.NEW
#' @docType data
#' @author Hoskuldur Bjornsson
NULL
#' @name STRATAS
#' @title XXX
#' @description XXX
#' location: /net/hafkaldi/export/u2/reikn/Splus5/SMB/GEOMETRY.NEW
#' @docType data
#' @author Hoskuldur Bjornsson
NULL
#' @name stratas_df
#' @title XXX
#' @description XXX
#'
#' x <- attributes(STRATAS)
#' stratas_df <- data.frame(strata = as.integer(x$names),
#' area = x$area,
#' rall.area = x$rall.area,
#' lon = x$pos[,2],
#' lat = x$pos[,1])
#' use_data(stratas_df)
#'
#' @docType data
#' @author Einar Hjorleifsson
NULL
|
# Fuzzer-derived regression input for metacoder's internal centroid():
# a single all-zero numeric vector supplied as argument `b`.
fuzz_args <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the unexported function and display the structure of whatever it
# returns (the fuzz harness only checks that this does not crash).
result <- do.call(metacoder:::centroid, fuzz_args)
str(result)
|
/metacoder/inst/testfiles/centroid/AFL_centroid/centroid_valgrind_files/1615766446-test.R
|
permissive
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 294
|
r
|
# Fuzzer-derived regression input for metacoder's internal centroid():
# a single all-zero numeric vector supplied as argument `b`.
fuzz_args <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the unexported function and display the structure of whatever it
# returns (the fuzz harness only checks that this does not crash).
result <- do.call(metacoder:::centroid, fuzz_args)
str(result)
|
# set up admixture simulation with tree
# the tree was estimated in a previous step, here we just set up the admixture
# matrix and save/copy the necessary data into a new directory for replicates
# to read from.
# a real-data/tree version of sim-00-sim-pop.R
# creates:
# - bnpsd.RData      (admixture proportions, subpop tree, fam copy, m_loci)
# - fst.txt          (FST implied by the tree, superpops weighted evenly)
# - kinship_mean.txt (mean kinship, used to undifferentiate the real MAF)
library(optparse)
library(bnpsd)
library(genio)
library(readr)
library(popkin)
# the name is for dir only, actual file is just "data"
name_in <- 'data'
############
### ARGV ###
############
# define options
option_list <- list(
    make_option("--bfile", type = "character", default = NA,
                help = "Base name for input plink files (.BED/BIM/FAM)", metavar = "character")
)
opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)
# get values
name <- opt$bfile
# stop if name is missing
if ( is.na(name) ) {
    stop('`--bfile` terminal option is required!')
}
# move to where the data is
setwd( '../data/' )
setwd( name )
# load FAM table, for subpopulation labels
fam <- read_fam( name_in )
# get number of loci too, for simulating the same number
m_loci <- count_lines( name_in, 'bim' )
# read annotations
subpop_info <- read_tsv( 'pops-annot.txt', comment = '#', show_col_types = FALSE )
# map subpopulations using sub-subpopulations
fam$superpop <- subpop_info$superpop[ match( fam$fam, subpop_info$pop ) ]
# load tree calculated last time
load( 'tree.RData' ) # loads: tree, coanc_est, kinship_pop
# save things into a new destination
name <- paste0( name, '_sim' )
# move to where the data will be
# let's not overwrite things, under the assumption that the simulations are very expensive to redo
# if the output directory exists, assume all the files we want are there too. Only a non-existent output directory will work
setwd( '..' )
if ( dir.exists( name ) ) {
    stop('Output exists, will not overwrite: ', name)
}
# else create directory and move into there
dir.create( name )
setwd( name )
# labels come from fam file
# subpops are desired order, match tree order
admix_proportions <- admix_prop_indep_subpops( fam$fam, subpops = tree$tip.label )
# copy names of individuals, to have correspondence to real data (though these are completely simulated)
rownames( admix_proportions ) <- fam$id
# edit tree a bit
# save under this name
tree_subpops <- tree
# also remove root edge (otherwise `bnpsd` complains about it, doesn't make sense to simulate using it)
tree_subpops$root.edge <- NULL
# recalculate tree coancestry after root edge is removed!
coanc_est <- coanc_tree( tree_subpops )
# calculate FST from tree, ignoring individuals/admixture (trivial here).
# do want to weigh superpops evenly
# easiest to map onto tree, where tip.label is subpops, to ensure agreement
tree_subpops$superpop <- subpop_info$superpop[ match( tree_subpops$tip.label, subpop_info$pop ) ]
# calculate weights that balance everything!
weights_tree <- weights_subpops( tree_subpops$superpop )
# use coanc_est to get inbreeding values out of
# NOTE: must pass as inbreeding (vector), rather than self-kinship (matrix)!
fst_tree <- fst( diag( coanc_est ), weights_tree )
# save FST for table
write_lines( fst_tree, 'fst.txt' )
# compared manually, agreed!
## # as a check, calculate FST the regular way, with individuals
## # coanc_est is ancestral coancestry according to tree (came from 'tree.RData')
## coanc_tree_all <- coanc_admix( admix_proportions, coanc_est )
## # these are weights for individuals
## weights_ind <- weights_subpops( fam$superpop, fam$fam )
## # NOTE: must pass as inbreeding (vector), rather than self-kinship (matrix)!
## fst_ind <- fst( diag( coanc_tree_all ), weights_ind )
# save bnpsd data to an RData file
# here add copy of real `fam` table, for checks
save( admix_proportions, tree_subpops, fam, m_loci, file = 'bnpsd.RData' )
## # try things with this info
## # compare to popkin
## # read existing popkin data
## data <- read_grm( 'popkin' )
## kinship <- data$kinship
##
## # subpopulations are scrambled, but there's mostly agreement considering that
## library(popkin)
## plot_popkin(
##     list(
##         inbr_diag( kinship ),
##         coanc_tree_all
##     )
## )
# lastly, also need mean kinship to undifferentiate real MAF
# NOTES:
# - must be "unweighted", because MAF weighed individuals uniformly
# - but at individual level (not subpops)
# - and it must be kinship, not coancestry
# - (these are probably all small differences, but might as well get it right)
kinship_mean <- mean( coanc_to_kinship( coanc_admix( admix_proportions, coanc_est ) ) )
write_lines( kinship_mean, 'kinship_mean.txt' )
|
/scripts/fit-03-sim-pop.R
|
no_license
|
OchoaLab/pca-assoc-paper
|
R
| false
| false
| 4,617
|
r
|
# set up admixture simulation with tree
# the tree was estimated in a previous step, here we just set up the admixture matrix and save/copy the necessary data into a new directory for replicates to read from
# a real-data/tree version of sim-00-sim-pop.R
# creates:
# - bnpsd.RData
# - fst.txt
# - kinship_mean.txt
library(optparse)
library(bnpsd)
library(genio)
library(readr)
library(popkin)
# the name is for dir only, actual file is just "data"
name_in <- 'data'
############
### ARGV ###
############
# define options
option_list = list(
make_option("--bfile", type = "character", default = NA,
help = "Base name for input plink files (.BED/BIM/FAM)", metavar = "character")
)
opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)
# get values
name <- opt$bfile
# stop if name is missing
if ( is.na(name) )
stop('`--bfile` terminal option is required!')
# move to where the data is
setwd( '../data/' )
setwd( name )
# load FAM table, for subpopulation labels
fam <- read_fam( name_in )
# get number of loci too, for simulating the same number
m_loci <- count_lines( name_in, 'bim' )
# read annotations
subpop_info <- read_tsv( 'pops-annot.txt', comment = '#', show_col_types = FALSE )
# map subpopulations using sub-subpopulations
fam$superpop <- subpop_info$superpop[ match( fam$fam, subpop_info$pop ) ]
# load tree calculated last time
load( 'tree.RData' ) # loads: tree, coanc_est, kinship_pop
# save things into a new destination
name <- paste0( name, '_sim' )
# move to where the data will be
# let's not overwrite things, under the assumption that the simulations are very expensive to redo
# if the output directory exists, assume all the files we want are there too. Only a non-existent output directory will work
setwd( '..' )
if ( dir.exists( name ) )
stop('Output exists, will not overwrite: ', name)
# else create directory and move into there
dir.create( name )
setwd( name )
# labels come from fam file
# subpops are desired order, match tree order
admix_proportions <- admix_prop_indep_subpops( fam$fam, subpops = tree$tip.label )
# copy names of individuals, to have correspondence to real data (though these are completely simulated)
rownames( admix_proportions ) <- fam$id
# edit tree a bit
# save under this name
tree_subpops <- tree
# also remove root edge (otherwise `bnpsd` complains about it, doesn't make sense to simulate using it)
tree_subpops$root.edge <- NULL
# recalculate tree coancestry after root edge is removed!
coanc_est <- coanc_tree( tree_subpops )
# calculate FST from tree, ignoring individuals/admixture (trivial here).
# do want to weigh superpops evenly
# easiest to map onto tree, where tip.label is subpops, to ensure agreement
tree_subpops$superpop <- subpop_info$superpop[ match( tree_subpops$tip.label, subpop_info$pop ) ]
# calculate weights that balance everything!
weights_tree <- weights_subpops( tree_subpops$superpop )
# use coanc_est to get inbreeding values out of
# NOTE: must pass as inbreeding (vector), rather than self-kinship (matrix)!
fst_tree <- fst( diag( coanc_est ), weights_tree )
# save FST for table
write_lines( fst_tree, 'fst.txt' )
# compared manually, agreed!
## # as a check, calculate FST the regular way, with individuals
## # coanc_est is ancestral coancestry according to tree (came from 'tree.RData')
## coanc_tree_all <- coanc_admix( admix_proportions, coanc_est )
## # these are weights for individuals
## weights_ind <- weights_subpops( fam$superpop, fam$fam )
## # NOTE: must pass as inbreeding (vector), rather than self-kinship (matrix)!
## fst_ind <- fst( diag( coanc_tree_all ), weights_ind )
# save bnpsd data to an RData file
# here add copy of real `fam` table, for checks
save( admix_proportions, tree_subpops, fam, m_loci, file = 'bnpsd.RData' )
## # try things with this info
## # compare to popkin
## # read existing popkin data
## data <- read_grm( 'popkin' )
## kinship <- data$kinship
##
## # subpopulations are scrambled, but there's mostly agreement considering that
## library(popkin)
## plot_popkin(
## list(
## inbr_diag( kinship ),
## coanc_tree_all
## )
## )
# lastly, also need mean kinship to undifferentiate real MAF
# NOTES:
# - must be "unweighted", because MAF weighed individuals uniformly
# - but at individual level (not subpops)
# - and it must be kinship, not coancestry
# - (these are probably all small differences, but might as well get it right)
kinship_mean <- mean( coanc_to_kinship( coanc_admix( admix_proportions, coanc_est ) ) )
write_lines( kinship_mean, 'kinship_mean.txt' )
|
## Adapted from https://github.com/leekgroup/derSupplement/blob/gh-pages/figure-expressed-regions/figure-expressed-regions.R
## Builds the expressed-regions figure panels (mean coverage, smoothed mean,
## per-sample coverage, gene annotation, transcript annotation) as PDFs.
## Usage:
# qrsh
# module load R/3.3
# mkdir -p logs
# Rscript figure-dbFinder-ER.R > logs/figure-dbFinder-ER_log.txt 2>&1
library('derfinder')
library('derfinderHelper')
library('GenomicRanges')
library('TxDb.Hsapiens.UCSC.hg19.knownGene')
library('RColorBrewer')
library('scales')
library('GenomeInfoDb')
library("GenomicFeatures")
library('bumphunter')
## Define paths
mainPath <- '/dcl01/lieber/ajaffe/derRuns/derChIP/epimap/'
covPath <- file.path(mainPath, 'CoverageInfo/')
resPath <- file.path(mainPath, 'derAnalysis/run1-v1.5.38-H3K4me3')
plotdir <- file.path(mainPath, 'regionMatrix', 'figure-ER')
dir.create(plotdir, showWarnings = FALSE)
## Load data
load(file.path(mainPath, 'regionMatrix', 'regionMat-H3K4me3-cut10-chr12.Rdata'))
load(file.path(resPath, 'groupInfo.Rdata'))
load(file.path(resPath, 'models.Rdata'))
load(file.path(resPath, 'colsubset.Rdata'))
## Define group labels: rewrite the "[23,52)" / "[52,65]" age-bin notation
## as "52-" / "52+" and use ":" instead of "_" as the group separator.
groupSimple <- groupInfo
ageCut <- 52
levels(groupSimple) <- gsub(paste0('\\[23,', ageCut, '\\)'), paste0(ageCut, '-'), levels(groupSimple))
levels(groupSimple) <- gsub(paste0('\\[', ageCut, ',65\\]'), paste0(ageCut, '+'), levels(groupSimple))
levels(groupSimple) <- gsub('_', ':', levels(groupSimple))
## Phenotype information
message(paste(Sys.time(), 'loading phenotype information'))
load('/dcl01/lieber/ajaffe/psychENCODE_Data/EpiMap/annotated_phenotype_EpiMap_ChIPseq.rda')
pd <- pd[colsubset, ]
stopifnot(all(pd$HistoneMark == 'H3K4me3'))
## Define files
files <- pd$bamFile
names(files) <- pd$Sample_ID
## Some options: `pad` widens the selected region on each side; `scalefac`
## is the pseudo-count added before every log2 transform.
pad <- 300
scalefac <- 1
## Selected region: the span of the expressed regions overlapping the
## chr12:11652500-11654500 window, plus padding.
selected <- range(regionMat$chr12$regions[subjectHits(findOverlaps(GRanges('chr12', IRanges(11652500, 11654500), '*'), regionMat$chr12$regions))])
selected <- resize(selected, width(selected) + 2 * pad, fix = 'center')
## Load coverage
chr <- as.character(seqnames(selected))
chr
cov <- loadCoverage(files = files, which = selected, chr = chr, protectWhich = 3e4, totalMapped = pd$totalMapped)
## Bases
pos <- start(selected):end(selected)
## Log2 transform coverage
cov.log <- cov$coverage[pos, ]
for (i in seq_len(ncol(cov.log))) {
    cov.log[[i]] <- log2(cov.log[[i]] + scalefac)
}
## Misc
covDat <- as.data.frame(cov$coverage[pos, ])
covDat.log <- as.data.frame(cov.log)
## Calculate overall mean
mean.ov <- log2(rowMeans(covDat) + scalefac)
y.axis <- c(0, 2^(1:9))
## Mean panel: raw mean coverage with regions above the cutoff shaded.
pdf(file.path(plotdir, "mean_panel.pdf"), h= 6,w=14)
plot(mean.ov ~ pos, type="l", xlab=chr, ylab="", cex.axis=1.4, cex.lab=1.8, yaxt="n", ylim = log2(c(4, max(y.axis) + scalefac)))
axis(2, at = log2(y.axis + scalefac), labels = y.axis, cex.axis = 1.2)
mean.cutoff <- log2(10 + scalefac + 1)
abline(h= mean.cutoff, lty=2)
mean.sl <- slice(mean.ov, lower = mean.cutoff)
pl <- rep(brewer.pal(5, 'Greys')[5], 2)
palette(pl)
for (i in seq_along(mean.sl)) {
    Ind <- start(mean.sl)[i]:end(mean.sl)[i]
    polygon(x = c(pos[Ind], rev(pos[Ind])),
        y = c(mean.ov[Ind], rep(mean.cutoff, length(Ind))),
        col = i, density = 60)
}
dev.off()
## Smooth mean panel: running-median smoothed mean coverage (k = 299).
mean.ov.smooth <- log2(runmedByCluster(rowMeans(covDat), k = 299, cluster = rep(1, nrow(covDat)), x = pos)$fitted[, 1] + scalefac)
y.axis <- c(0, 2^(1:8))
pdf(file.path(plotdir, "mean_smooth_panel.pdf"), h= 6,w=14)
plot(mean.ov.smooth ~ pos, type="l", xlab=chr, ylab="", cex.axis=1.4, cex.lab=1.8, yaxt="n", ylim = log2(c(4, max(y.axis) + scalefac)))
axis(2, at = log2(y.axis + scalefac), labels = y.axis, cex.axis = 1.2)
abline(h= mean.cutoff, lty=2)
mean.sl <- slice(mean.ov.smooth, lower = mean.cutoff)
pl <- rep(brewer.pal(3, 'Dark2')[1], 2)
palette(pl)
for (i in seq_along(mean.sl)) {
    Ind <- start(mean.sl)[i]:end(mean.sl)[i]
    polygon(x = c(pos[Ind], rev(pos[Ind])),
        y = c(mean.ov.smooth[Ind], rep(mean.cutoff, length(Ind))),
        col = i, density = 60)
}
dev.off()
## Coverage panel: all samples overlaid, colored by group, with the smoothed
## sliced regions hatched behind them.
y.axis.sample <- c(0, 2^(1:12))
group.pl <- brewer.pal(12, 'Paired')[5:12]
pdf(file.path(plotdir, "fullCov_panel.pdf"), h= 6,w=14)
sample.pl <- mapply(function(col, n) {
    alpha(col, 1)
}, group.pl, table(groupSimple))
palette(sample.pl)
matplot(pos, covDat.log, yaxt="n",
    col=as.numeric(groupSimple), lty=1, type="l",
    xlab=chr, ylab="", cex.axis=1.4, cex.lab=1.8)
axis(2, at = log2(y.axis.sample + scalefac), labels = y.axis.sample, cex.axis = 1.3)
#m <- max(covDat.log)
m <- log2(512 + scalefac)
for (i in seq_along(mean.sl)) {
    Ind <- start(mean.sl)[i]:end(mean.sl)[i]
    # NOTE: `ybot` was a partial match for rect()'s `ybottom`; spelled out.
    rect(xleft = min(pos[Ind]), xright = max(pos[Ind]),
        ybottom = log2(scalefac), ytop = m, col = brewer.pal(5, 'Greys')[3], density = 10)
}
palette(group.pl)
legend("topright", levels(groupSimple), col = seq_along(levels(groupSimple)), cex=1.4, pch=15, ncol = 4, bty = 'n')
dev.off()
## annotate
load("/home/epi/ajaffe/GenomicStates/GenomicState.Hsapiens.ensembl.GRCh37.p12.rda")
ensemblAnno <- annotateRegions(selected,
    GenomicState.Hsapiens.ensembl.GRCh37.p12$fullGenome)
ensemblCount <- ensemblAnno$countTable
### gene plot: exons/introns drawn above (+ strand) or below (- strand) axis.
a <- as.data.frame(ensemblAnno$annotationList)
Strand <- ifelse(a$strand == "+", 1, ifelse(a$strand == "-", -1, 0))
Col <- ifelse(a$theRegion == "exon", "blue", ifelse(a$theRegion == "intron", "lightblue", "white"))
Lwd <- ifelse(a$theRegion == "exon", 1, ifelse(a$theRegion == "intron", 0.5, 0))
pdf(file.path(plotdir, "gene_anno.pdf"), h=3,w=14)
plot(0,0, type="n", xlim=range(pos),ylim=c(-1.5,1.5),yaxt="n",ylab="",
    xlab=paste("Chromosome", mapSeqlevels(chr, 'NCBI')), cex.axis = 1.5, cex.lab =1.8)
axis(2,c(-1,1),c("-","+"),tick=FALSE,las=1,cex.axis = 3)
abline(h=0,lty=3)
for (j in seq_len(nrow(a))) {
    polygon(c(a$start[j], a$end[j], a$end[j], a$start[j]),
        Strand[j]/2 + c(-0.3, -0.3, 0.3, 0.3) * Lwd[j],
        col = Col[j])
}
e <- a[a$theRegion == "exon", ]
s2 <- Strand[a$theRegion == "exon"]
g <- unlist(e$symbol)
g[is.na(g)] <- ""
if (length(g) > 0) {
    text(x = e$start + e$width/2, y = s2 * 0.8, g,
        font = 2, pos = s2 + 2,
        cex = c(1.2, 0.01, 0.5, 0.5, 0.5, 0.01, 1.2, 1.2, 1.2, 0.01, 0.01))
}
dev.off()
#### extra tx info: draw each overlapping transcript on its own row,
#### positive-strand transcripts above the axis and negative below.
txdb <- loadDb("/home/epi/ajaffe/Lieber/Projects/RNAseq/Ribozero_Compare/TxDb.Hsapiens.BioMart.ensembl.GRCh37.p12/inst/extdata/TxDb.Hsapiens.BioMart.ensembl.GRCh37.p12.sqlite")
txdb <- keepSeqlevels(txdb, mapSeqlevels(chr, 'NCBI'))
seqlevelsStyle(txdb) <- 'UCSC'
tx <- exonsBy(txdb)
eList <- tx[subjectHits(findOverlaps(selected, tx))]
e.strand <- unlist(unique(strand(eList)))
e.n.neg <- sum(e.strand == '-')
e.n.pos <- sum(e.strand == '+')
ylim <- c(-1 * e.n.neg + ifelse(e.n.neg > 0, -0.5, 0.5), e.n.pos + 0.5)
pdf(file.path(plotdir, "trans_anno.pdf"), h=4.5,w=14)
plot(0,0, type="n", xlim=range(pos), ylim=ylim,
    yaxt="n",ylab="", xlab=paste("Chromosome", mapSeqlevels(chr, 'NCBI'), '(161.1 mb)'), xaxt='n', cex.lab = 1.8)
axis(1, at = c(161115000, 161120000, 161125000, 161130000), labels = c('+15k', '+20k', '+25k', '+30k'), cex.axis = 1.5)
axis(2, c(- ifelse(e.n.neg, median(seq_len(e.n.neg)), NA), ifelse(e.n.pos, median(seq_len(e.n.pos)), NA)), c(ifelse(e.n.neg, '-', NA), ifelse(e.n.pos, "+", NA)), tick=FALSE,las=1,cex.axis = 3)
abline(h=0,lty=3)
for (i in seq_along(eList)) {
    a <- as.data.frame(eList[[i]])
    # row index for this transcript: count of same-strand transcripts so far,
    # signed by strand so "+" rows stack upward and "-" rows downward
    i.strand <- sum(e.strand[seq_along(e.strand) <= i] == e.strand[i]) * ifelse(e.strand[i] == "+", 1, -1)
    for (j in seq_len(nrow(a))) {
        polygon(c(a$start[j], a$end[j], a$end[j], a$start[j]),
            c(i.strand - 0.25, i.strand - 0.25, i.strand + 0.25, i.strand + 0.25), col = "blue")
    }
    # introns: the gaps between this transcript's exons, trimmed to the
    # chromosome interior, drawn as thinner light-blue boxes
    int <- gaps(eList[[i]])
    int <- int[seqnames(int) == unique(seqnames(eList[[i]]))]
    int <- int[ end(int) < seqlengths(int) & start(int) > 1]
    end(int) <- end(int) + 1
    int <- as.data.frame(int[start(int) != 1])
    for (j in seq_len(nrow(int))) {
        polygon(c(int$start[j], int$end[j], int$end[j], int$start[j]),
            c(i.strand - 0.15, i.strand - 0.15, i.strand + 0.15, i.strand + 0.15), col = "lightblue")
    }
}
dev.off()
|
/epimap/regionMatrix/figure-dbFinder-ER.R
|
no_license
|
LieberInstitute/dbFinder
|
R
| false
| false
| 7,909
|
r
|
## Adapted from https://github.com/leekgroup/derSupplement/blob/gh-pages/figure-expressed-regions/figure-expressed-regions.R
## Usage:
# qrsh
# module load R/3.3
# mkdir -p logs
# Rscript figure-dbFinder-ER.R > logs/figure-dbFinder-ER_log.txt 2>&1
library('derfinder')
library('derfinderHelper')
library('GenomicRanges')
library('TxDb.Hsapiens.UCSC.hg19.knownGene')
library('RColorBrewer')
library('scales')
library('GenomeInfoDb')
library("GenomicFeatures")
library('bumphunter')
## Define paths
mainPath <- '/dcl01/lieber/ajaffe/derRuns/derChIP/epimap/'
covPath <- file.path(mainPath, 'CoverageInfo/')
resPath <- file.path(mainPath, 'derAnalysis/run1-v1.5.38-H3K4me3')
plotdir <- file.path(mainPath, 'regionMatrix', 'figure-ER')
dir.create(plotdir, showWarnings = FALSE)
## Load data
load(file.path(mainPath, 'regionMatrix', 'regionMat-H3K4me3-cut10-chr12.Rdata'))
load(file.path(resPath, 'groupInfo.Rdata'))
load(file.path(resPath, 'models.Rdata'))
load(file.path(resPath, 'colsubset.Rdata'))
## Define group labels
groupSimple <- groupInfo
ageCut <- 52
levels(groupSimple) <- gsub(paste0('\\[23,', ageCut, '\\)'), paste0(ageCut, '-'), levels(groupSimple))
levels(groupSimple) <- gsub(paste0('\\[', ageCut, ',65\\]'), paste0(ageCut, '+'), levels(groupSimple))
levels(groupSimple) <- gsub('_', ':', levels(groupSimple))
## Phenotype information
message(paste(Sys.time(), 'loading phenotype information'))
load('/dcl01/lieber/ajaffe/psychENCODE_Data/EpiMap/annotated_phenotype_EpiMap_ChIPseq.rda')
pd <- pd[colsubset, ]
stopifnot(all(pd$HistoneMark == 'H3K4me3'))
## Define files
files <- pd$bamFile
names(files) <- pd$Sample_ID
## Some options
pad <- 300
scalefac <- 1
## Selected region
selected <- range(regionMat$chr12$regions[subjectHits(findOverlaps(GRanges('chr12', IRanges(11652500, 11654500), '*'), regionMat$chr12$regions))])
selected <- resize(selected, width(selected) + 2 * pad, fix = 'center')
## Load coverage
chr <- as.character(seqnames(selected))
chr
cov <- loadCoverage(files = files, which = selected, chr = chr, protectWhich = 3e4, totalMapped = pd$totalMapped)
## Bases
pos <- start(selected):end(selected)
## Log2 transform coverage
cov.log <- cov$coverage[pos, ]
for(i in seq_len(ncol(cov.log))) {
cov.log[[i]] <- log2(cov.log[[i]] + scalefac)
}
## Misc
covDat <- as.data.frame(cov$coverage[pos, ])
covDat.log <- as.data.frame(cov.log)
## Calculate overall mean
mean.ov <- log2(rowMeans(covDat) + scalefac)
y.axis <- c(0, 2^(1:9))
## Mean panel
pdf(file.path(plotdir, "mean_panel.pdf"), h= 6,w=14)
plot(mean.ov ~ pos, type="l", xlab=chr, ylab="", cex.axis=1.4, cex.lab=1.8, yaxt="n", ylim = log2(c(4, max(y.axis) + scalefac)))
axis(2, at = log2(y.axis + scalefac), labels = y.axis, cex.axis = 1.2)
mean.cutoff <- log2(10 + scalefac + 1)
abline(h= mean.cutoff, lty=2)
mean.sl <- slice(mean.ov, lower = mean.cutoff)
pl <- rep(brewer.pal(5, 'Greys')[5], 2)
palette(pl)
for(i in seq(along = mean.sl)) {
Ind = start(mean.sl)[i]:end(mean.sl)[i]
polygon(x = c(pos[Ind], rev(pos[Ind])),
y = c(mean.ov[Ind], rep(mean.cutoff, length(Ind))),
col = i, density =60)
}
dev.off()
## Smooth mean panel
mean.ov.smooth <- log2(runmedByCluster(rowMeans(covDat), k = 299, cluster = rep(1, nrow(covDat)), x = pos)$fitted[, 1] + scalefac)
y.axis <- c(0, 2^(1:8))
pdf(file.path(plotdir, "mean_smooth_panel.pdf"), h= 6,w=14)
plot(mean.ov.smooth ~ pos, type="l", xlab=chr, ylab="", cex.axis=1.4, cex.lab=1.8, yaxt="n", ylim = log2(c(4, max(y.axis) + scalefac)))
axis(2, at = log2(y.axis + scalefac), labels = y.axis, cex.axis = 1.2)
abline(h= mean.cutoff, lty=2)
mean.sl <- slice(mean.ov.smooth, lower = mean.cutoff)
pl <- rep(brewer.pal(3, 'Dark2')[1], 2)
palette(pl)
for(i in seq(along = mean.sl)) {
Ind = start(mean.sl)[i]:end(mean.sl)[i]
polygon(x = c(pos[Ind], rev(pos[Ind])),
y = c(mean.ov.smooth[Ind], rep(mean.cutoff, length(Ind))),
col = i, density =60)
}
dev.off()
## coverage panel
y.axis.sample <- c(0, 2^(1:12))
group.pl <- brewer.pal(12, 'Paired')[5:12]
pdf(file.path(plotdir, "fullCov_panel.pdf"), h= 6,w=14)
sample.pl <- mapply(function(col, n) {
alpha(col, 1)
}, group.pl, table(groupSimple))
palette(sample.pl)
matplot(pos, covDat.log, yaxt="n",
col=as.numeric(groupSimple), lty=1, type="l",
xlab=chr, ylab="", cex.axis=1.4, cex.lab=1.8)
axis(2, at = log2(y.axis.sample + scalefac), labels = y.axis.sample, cex.axis = 1.3)
#m = max(covDat.log)
m <- log2(512 + scalefac)
for(i in seq(along=mean.sl)) {
Ind = start(mean.sl)[i]:end(mean.sl)[i]
rect(xleft=min(pos[Ind]), xright = max(pos[Ind]),
ybot = log2(scalefac), ytop = m, col=brewer.pal(5, 'Greys')[3], density=10)
}
palette(group.pl)
legend("topright", levels(groupSimple), col=seq_len(length(levels(groupSimple))), cex=1.4,pch=15, ncol = 4, bty = 'n')
dev.off()
## annotate
load("/home/epi/ajaffe/GenomicStates/GenomicState.Hsapiens.ensembl.GRCh37.p12.rda")
ensemblAnno <- annotateRegions(selected,
GenomicState.Hsapiens.ensembl.GRCh37.p12$fullGenome)
ensemblCount <- ensemblAnno$countTable
### gene plot
a = as.data.frame(ensemblAnno$annotationList)
Strand = ifelse(a$strand == "+", 1, ifelse(a$strand=="-", -1, 0))
Col = ifelse(a$theRegion == "exon", "blue", ifelse(a$theRegion == "intron", "lightblue","white"))
Lwd = ifelse(a$theRegion == "exon", 1, ifelse(a$theRegion == "intron",0.5,0))
pdf(file.path(plotdir, "gene_anno.pdf"), h=3,w=14)
plot(0,0, type="n", xlim=range(pos),ylim=c(-1.5,1.5),yaxt="n",ylab="",
xlab=paste("Chromosome", mapSeqlevels(chr, 'NCBI')), cex.axis = 1.5, cex.lab =1.8)
axis(2,c(-1,1),c("-","+"),tick=FALSE,las=1,cex.axis = 3)
abline(h=0,lty=3)
for (j in seq_len(nrow(a))) {
polygon(c(a$start[j], a$end[j], a$end[j], a$start[j]),
Strand[j]/2 + c(-0.3, -0.3, 0.3, 0.3) * Lwd[j],
col = Col[j])
}
e <- a[a$theRegion == "exon", ]
s2 <- Strand[a$theRegion == "exon"]
g = unlist(e$symbol)
g[is.na(g)] = ""
if (length(g) > 0) {
text(x = e$start + e$width/2, y = s2 * 0.8, g,
font = 2, pos = s2 + 2,
cex = c(1.2, 0.01, 0.5, 0.5, 0.5, 0.01, 1.2, 1.2, 1.2, 0.01, 0.01))
}
dev.off()
#### extra tx info
txdb <- loadDb("/home/epi/ajaffe/Lieber/Projects/RNAseq/Ribozero_Compare/TxDb.Hsapiens.BioMart.ensembl.GRCh37.p12/inst/extdata/TxDb.Hsapiens.BioMart.ensembl.GRCh37.p12.sqlite")
txdb <- keepSeqlevels(txdb, mapSeqlevels(chr, 'NCBI'))
seqlevelsStyle(txdb) <- 'UCSC'
tx=exonsBy(txdb)
eList = tx[subjectHits(findOverlaps(selected, tx) )]
e.strand <- unlist(unique(strand(eList)))
e.n.neg <- sum(e.strand == '-')
e.n.pos <- sum(e.strand == '+')
ylim <- c(-1 * e.n.neg + ifelse(e.n.neg > 0, -0.5, 0.5), e.n.pos + 0.5)
pdf(file.path(plotdir, "trans_anno.pdf"), h=4.5,w=14)
plot(0,0, type="n", xlim=range(pos), ylim=ylim,
yaxt="n",ylab="", xlab=paste("Chromosome", mapSeqlevels(chr, 'NCBI'), '(161.1 mb)'), xaxt='n', cex.lab = 1.8)
axis(1, at = c(161115000, 161120000, 161125000, 161130000), labels = c('+15k', '+20k', '+25k', '+30k'), cex.axis = 1.5)
axis(2, c(- ifelse(e.n.neg, median(seq_len(e.n.neg)), NA), ifelse(e.n.pos, median(seq_len(e.n.pos)), NA)), c(ifelse(e.n.neg, '-', NA), ifelse(e.n.pos, "+", NA)), tick=FALSE,las=1,cex.axis = 3)
abline(h=0,lty=3)
for(i in seq(along=eList)) {
a = as.data.frame(eList[[i]])
i.strand <- sum(e.strand[ seq_len(length(e.strand)) <= i] == e.strand[i]) * ifelse(e.strand[i] == "+", 1, -1)
for (j in seq_len(nrow(a))) {
polygon(c(a$start[j], a$end[j], a$end[j], a$start[j]),
c(i.strand - 0.25, i.strand -0.25, i.strand +0.25, i.strand +0.25), col="blue")
}
int = gaps(eList[[i]])
int = int[seqnames(int) == unique(seqnames(eList[[i]]))]
int <- int[ end(int) < seqlengths(int) & start(int) > 1]
end(int) = end(int)+1
int = as.data.frame(int[start(int) != 1])
for (j in seq_len(nrow(int))) {
polygon(c(int$start[j], int$end[j], int$end[j], int$start[j]),
c(i.strand - 0.15, i.strand -0.15, i.strand + 0.15, i.strand +0.15), col="lightblue")
}
}
dev.off()
|
# Homework 4: stock-network minimum spanning trees, colored by GICS sector.
# NOTE(review): rm(list=ls()) in a script wipes the caller's workspace;
# kept here because the original homework script relies on a clean session.
rm(list=ls())
library(igraph)
##################################################################################################
# Problem 3
##################################################################################################
# Load the stock edgelist and the symbol -> sector annotation table.
stock_file <- file("/Users/Yusi/Documents/EE232E/HW_4/StockNetworkFile.txt")
sector_file <- read.csv("/Users/Yusi/Documents/EE232E/HW_4/finance_data/Name_sector.csv", header = TRUE, sep = ",", quote = "\"")
# create stock graph
g_stocks <- read_graph(stock_file, format = "ncol", directed = FALSE)
n_stocks <- length(V(g_stocks))
n_sectors <- 11
g_mst <- mst(g_stocks)
plot(g_mst, vertex.label = NA, vertex.size = 5)
# Map each stock symbol to its sector name.
sector_list <- setNames(object = sector_file$Sector, sector_file$Symbol)
# Sector order determines which rainbow() hue each sector receives
# (same order as the original per-sector assignments).
sector_names <- c("Health Care", "Industrials", "Consumer Discretionary",
                  "Information Technology", "Consumer Staples", "Utilities",
                  "Financials", "Materials", "Energy",
                  "Telecommunication Services", "Real Estate")
colors_s <- rainbow(n_sectors)
colors_s[3] <- "#FFFF00"
# Vectorized lookup replaces the original 11 copy-pasted which()-assignments.
sector_color <- colors_s[match(sector_list, sector_names)]
# Symbols with an unrecognized sector keep the "0" placeholder, matching the
# original rep(0, n_stocks) initialization after character coercion.
sector_color[is.na(sector_color)] <- "0"
sector_color_list <- setNames(object = sector_color, sector_file$Symbol)
# Look up vertex colors by vertex name (replaces the element-wise loop).
sector_sorted_names <- V(g_mst)$name
sector_sorted_colors <- unname(sector_color_list[sector_sorted_names])
plot(g_mst, vertex.label = NA, vertex.color = sector_sorted_colors, vertex.size = 5, main = "Daily MST")
barplot(rep(1, 11), col = colors_s, main = "Colors Used for Vertices")
##################################################################################################
# Problem 7: Modifying Correlation
##################################################################################################
# Load the stock edgelist built from the modified correlation measure.
stock_file_mod <- file("/Users/Yusi/Documents/EE232E/HW_4/StockNetworkFileMod.txt")
# create stock graph
g_stocks_mod <- read_graph(stock_file_mod, format = "ncol", directed = FALSE)
g_mst_mod <- mst(g_stocks_mod)
plot(g_mst_mod, vertex.label = NA, vertex.size = 5)
sector_sorted_names_mod <- V(g_mst_mod)$name
sector_sorted_colors_mod <- unname(sector_color_list[sector_sorted_names_mod])
plot(g_mst_mod, vertex.label = NA, vertex.color = sector_sorted_colors_mod, vertex.size = 5, main = "Daily MST (Modified)")
|
/Yusi_workspace/HW4_p37.R
|
no_license
|
gkhadge/EE232E
|
R
| false
| false
| 2,961
|
r
|
# Homework 4
rm(list=ls())
library(igraph)
##################################################################################################
# Problem 3
##################################################################################################
# Load the stock edgelist
stock_file <- file("/Users/Yusi/Documents/EE232E/HW_4/StockNetworkFile.txt")
sector_file <- read.csv("/Users/Yusi/Documents/EE232E/HW_4/finance_data/Name_sector.csv", header = TRUE, sep = ",", quote = "\"")
# create stock graph
g_stocks <- read_graph(stock_file,format="ncol",directed=FALSE)
n_stocks <- length(V(g_stocks))
n_sectors <- 11
g_mst <- mst(g_stocks)
plot(g_mst, vertex.label = NA, vertex.size = 5)
sector_list <- setNames(object = sector_file$Sector, sector_file$Symbol)
sector_color <- rep(0, n_stocks)
colors_s <- rainbow(n_sectors)
colors_s[3] = "#FFFF00"
sector_color[which(sector_list == "Health Care")] <- colors_s[1]
sector_color[which(sector_list == "Industrials")] <- colors_s[2]
sector_color[which(sector_list == "Consumer Discretionary")] <- colors_s[3]
sector_color[which(sector_list == "Information Technology")] <- colors_s[4]
sector_color[which(sector_list == "Consumer Staples")] <- colors_s[5]
sector_color[which(sector_list == "Utilities")] <- colors_s[6]
sector_color[which(sector_list == "Financials")] <- colors_s[7]
sector_color[which(sector_list == "Materials")] <- colors_s[8]
sector_color[which(sector_list == "Energy")] <- colors_s[9]
sector_color[which(sector_list == "Telecommunication Services")] <- colors_s[10]
sector_color[which(sector_list == "Real Estate")] <- colors_s[11]
sector_color_list <- setNames(object = sector_color, sector_file$Symbol)
sector_sorted_names <- rep(0, n_stocks)
sector_sorted_colors <- rep(0, n_stocks)
for (i in 1:n_stocks){
sector_sorted_names[i] <- V(g_mst)[i]$name
sector_sorted_colors[i] <- sector_color_list[V(g_mst)[i]$name]
}
plot(g_mst, vertex.label = NA, vertex.color=sector_sorted_colors, vertex.size = 5, main = "Daily MST")
barplot(rep(1,11),col=colors_s, main = "Colors Used for Vertices")
##################################################################################################
# Problem 7: Modifying Correlation
##################################################################################################
# Load the stock edgelist
stock_file_mod <- file("/Users/Yusi/Documents/EE232E/HW_4/StockNetworkFileMod.txt")
# create stock graph
g_stocks_mod <- read_graph(stock_file_mod,format="ncol",directed=FALSE)
g_mst_mod <- mst(g_stocks_mod)
plot(g_mst_mod, vertex.label = NA, vertex.size = 5)
sector_sorted_names_mod <- rep(0, n_stocks)
sector_sorted_colors_mod <- rep(0, n_stocks)
for (i in 1:n_stocks){
sector_sorted_names_mod[i] <- V(g_mst_mod)[i]$name
sector_sorted_colors_mod[i] <- sector_color_list[V(g_mst_mod)[i]$name]
}
plot(g_mst_mod, vertex.label = NA, vertex.color=sector_sorted_colors_mod, vertex.size = 5, main = "Daily MST (Modified)")
|
library(spdep)
### Name: lextrB
### Title: Find extreme eigenvalues of binary symmetric spatial weights
### Aliases: lextrB lextrW lextrS l_max
### Keywords: spatial
### ** Examples
# Binary ("B") contiguity weights for the Boston tracts: compare the exact
# eigenvalue range from eigenw() with lextrB()'s extreme-eigenvalue estimate.
data(boston, package="spData")
ab.listb <- nb2listw(boston.soi, style="B")
er <- range(eigenw(ab.listb))
er
res_1 <- lextrB(ab.listb)
c(res_1)
# igraph/ARPACK cross-check kept commented out: it failed with the Fortran
# runtime errors recorded inline below.
#if (require(igraph)) {
#  B <- as(ab.listb, "symmetricMatrix")
#  n <- length(boston.soi)
#  f2 <- function(x, extra=NULL) {as.vector(B %*% x)}
#  ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
#   which="LA", maxiter=200))
#  print(ar1$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
#  arn <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
#   which="SA", maxiter=200))
#  print(arn$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
#  ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=2, ncv=8,
#   which="BE", maxiter=300))
# "BE" gives: At line 558 of file dsaup2.f: Fortran runtime error:
# Index '9' of dimension 1 of array 'bounds' above upper bound of 8
# "BE"
#  print(ar1$values)
#}
# k-nearest-neighbour (k = 5) weights: l_max should agree with the largest
# real eigenvalue for both binary ("B") and globally standardised ("C") styles.
k5 <- knn2nb(knearneigh(boston.utm, k=5))
c(l_max(nb2listw(k5, style="B")))
max(Re(eigenw(nb2listw(k5, style="B"))))
c(l_max(nb2listw(k5, style="C")))
max(Re(eigenw(nb2listw(k5, style="C"))))
# Row-standardised ("W") weights: lextrW operates on the similar symmetric form.
ab.listw <- nb2listw(boston.soi, style="W")
er <- range(eigenw(similar.listw(ab.listw)))
er
res_1 <- lextrW(ab.listw)
c(res_1)
#if (require(igraph)) {
#  B <- as(similar.listw(ab.listw), "symmetricMatrix")
#  ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
#   which="LA", maxiter=400))
#  print(ar1$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
#  arn <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
#   which="SA", maxiter=400))
#  print(arn$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
#  ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=2, ncv=8,
#   which="BE", maxiter=300))
# "BE" gives: At line 558 of file dsaup2.f: Fortran runtime error:
# Index '9' of dimension 1 of array 'bounds' above upper bound of 8
#  print(ar1$values)
#}
# Variance-stabilising ("S") weights: lextrS analogue of the checks above.
## No test:
ab.listw <- nb2listw(boston.soi, style="S")
er <- range(eigenw(similar.listw(ab.listw)))
er
res_1 <- lextrS(ab.listw)
c(res_1)
## End(No test)
#if (require(igraph)) {
#  B <- as(similar.listw(ab.listw), "symmetricMatrix")
#  ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
#   which="LA", maxiter=300))
#  print(ar1$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
#  arn <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
#   which="SA", maxiter=300))
#  print(arn$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
#  ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=2, ncv=8,
#   which="BE", maxiter=300))
# "BE" gives: At line 558 of file dsaup2.f: Fortran runtime error:
# Index '9' of dimension 1 of array 'bounds' above upper bound of 8
#  print(ar1$values)
#}
|
/data/genthat_extracted_code/spdep/examples/lextrB.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 3,434
|
r
|
library(spdep)
### Name: lextrB
### Title: Find extreme eigenvalues of binary symmetric spatial weights
### Aliases: lextrB lextrW lextrS l_max
### Keywords: spatial
### ** Examples
data(boston, package="spData")
ab.listb <- nb2listw(boston.soi, style="B")
er <- range(eigenw(ab.listb))
er
res_1 <- lextrB(ab.listb)
c(res_1)
#if (require(igraph)) {
# B <- as(ab.listb, "symmetricMatrix")
# n <- length(boston.soi)
# f2 <- function(x, extra=NULL) {as.vector(B %*% x)}
# ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
# which="LA", maxiter=200))
# print(ar1$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
# arn <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
# which="SA", maxiter=200))
# print(arn$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
# ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=2, ncv=8,
# which="BE", maxiter=300))
# "BE" gives: At line 558 of file dsaup2.f: Fortran runtime error:
# Index '9' of dimension 1 of array 'bounds' above upper bound of 8
# "BE"
# print(ar1$values)
#}
k5 <- knn2nb(knearneigh(boston.utm, k=5))
c(l_max(nb2listw(k5, style="B")))
max(Re(eigenw(nb2listw(k5, style="B"))))
c(l_max(nb2listw(k5, style="C")))
max(Re(eigenw(nb2listw(k5, style="C"))))
ab.listw <- nb2listw(boston.soi, style="W")
er <- range(eigenw(similar.listw(ab.listw)))
er
res_1 <- lextrW(ab.listw)
c(res_1)
#if (require(igraph)) {
# B <- as(similar.listw(ab.listw), "symmetricMatrix")
# ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
# which="LA", maxiter=400))
# print(ar1$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
# arn <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
# which="SA", maxiter=400))
# print(arn$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
# ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=2, ncv=8,
# which="BE", maxiter=300))
# "BE" gives: At line 558 of file dsaup2.f: Fortran runtime error:
# Index '9' of dimension 1 of array 'bounds' above upper bound of 8
# print(ar1$values)
#}
## No test:
ab.listw <- nb2listw(boston.soi, style="S")
er <- range(eigenw(similar.listw(ab.listw)))
er
res_1 <- lextrS(ab.listw)
c(res_1)
## End(No test)
#if (require(igraph)) {
# B <- as(similar.listw(ab.listw), "symmetricMatrix")
# ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
# which="LA", maxiter=300))
# print(ar1$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
# arn <- arpack(f2, sym=TRUE, options=list(n=n, nev=1, ncv=8,
# which="SA", maxiter=300))
# print(arn$values)
# At line 409 of file dsaupd.f: Fortran runtime error: Actual string
# length is shorter than the declared one for dummy argument 'which' (0/2)
# ar1 <- arpack(f2, sym=TRUE, options=list(n=n, nev=2, ncv=8,
# which="BE", maxiter=300))
# "BE" gives: At line 558 of file dsaup2.f: Fortran runtime error:
# Index '9' of dimension 1 of array 'bounds' above upper bound of 8
# print(ar1$values)
#}
|
dsaff
asd
sfa
a=1
|
/Tarea1.R
|
no_license
|
Pentagrama5/CursoR
|
R
| false
| false
| 18
|
r
|
dsaff
asd
sfa
a=1
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lambda_operations.R
\name{invoke}
\alias{invoke}
\title{Invokes a Lambda function}
\usage{
invoke(FunctionName, InvocationType = NULL, LogType = NULL,
ClientContext = NULL, Payload = NULL, Qualifier = NULL)
}
\arguments{
\item{FunctionName}{[required] The name of the Lambda function, version, or alias.
\strong{Name formats}
\itemize{
\item \strong{Function name} - \code{my-function} (name-only), \code{my-function:v1} (with alias).
\item \strong{Function ARN} - \code{arn:aws:lambda:us-west-2:123456789012:function:my-function}.
\item \strong{Partial ARN} - \code{123456789012:function:my-function}.
}
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.}
\item{InvocationType}{Choose from the following options.
\itemize{
\item \code{RequestResponse} (default) - Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API response includes the function response and additional data.
\item \code{Event} - Invoke the function asynchronously. Send events that fail multiple times to the function's dead-letter queue (if configured). The API response only includes a status code.
\item \code{DryRun} - Validate parameter values and verify that the user or role has permission to invoke the function.
}}
\item{LogType}{Set to \code{Tail} to include the execution log in the response.}
\item{ClientContext}{Up to 3583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.}
\item{Payload}{JSON that you want to provide to your Lambda function as input.}
\item{Qualifier}{Specify a version or alias to invoke a published version of the function.}
}
\description{
Invokes a Lambda function. You can invoke a function synchronously and wait for the response, or asynchronously. To invoke a function asynchronously, set \code{InvocationType} to \code{Event}.
}
\details{
For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the \href{http://docs.aws.amazon.com/lambda/latest/dg/monitoring-functions.html}{execution log} and \href{http://docs.aws.amazon.com/lambda/latest/dg/dlq.html}{trace}. To record function errors for asynchronous invocations, configure your function with a \href{http://docs.aws.amazon.com/lambda/latest/dg/dlq.html}{dead letter queue}.
The status code in the API response does not reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, \href{http://docs.aws.amazon.com/lambda/latest/dg/limits.html}{limit errors}, or issues with your function's code and configuration. For example, Lambda returns \code{TooManyRequestsException} if executing the function would cause you to exceed a concurrency limit at either the account level (\code{ConcurrentInvocationLimitExceeded}) or function level (\code{ReservedFunctionConcurrentInvocationLimitExceeded}).
For functions with a long timeout, your client may be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.
This operation requires permission for the \code{lambda:InvokeFunction} action.
}
\section{Accepted Parameters}{
\preformatted{invoke(
FunctionName = "string",
InvocationType = "Event"|"RequestResponse"|"DryRun",
LogType = "None"|"Tail",
ClientContext = "string",
Payload = raw,
Qualifier = "string"
)
}
}
\examples{
# This operation invokes a Lambda function
\donttest{invoke(
ClientContext = "MyApp",
FunctionName = "MyFunction",
InvocationType = "Event",
LogType = "Tail",
Payload = "fileb://file-path/input.json",
Qualifier = "1"
)}
}
|
/service/paws.lambda/man/invoke.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 4,059
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lambda_operations.R
\name{invoke}
\alias{invoke}
\title{Invokes a Lambda function}
\usage{
invoke(FunctionName, InvocationType = NULL, LogType = NULL,
ClientContext = NULL, Payload = NULL, Qualifier = NULL)
}
\arguments{
\item{FunctionName}{[required] The name of the Lambda function, version, or alias.
\strong{Name formats}
\itemize{
\item \strong{Function name} - \code{my-function} (name-only), \code{my-function:v1} (with alias).
\item \strong{Function ARN} - \code{arn:aws:lambda:us-west-2:123456789012:function:my-function}.
\item \strong{Partial ARN} - \code{123456789012:function:my-function}.
}
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.}
\item{InvocationType}{Choose from the following options.
\itemize{
\item \code{RequestResponse} (default) - Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API response includes the function response and additional data.
\item \code{Event} - Invoke the function asynchronously. Send events that fail multiple times to the function's dead-letter queue (if configured). The API response only includes a status code.
\item \code{DryRun} - Validate parameter values and verify that the user or role has permission to invoke the function.
}}
\item{LogType}{Set to \code{Tail} to include the execution log in the response.}
\item{ClientContext}{Up to 3583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.}
\item{Payload}{JSON that you want to provide to your Lambda function as input.}
\item{Qualifier}{Specify a version or alias to invoke a published version of the function.}
}
\description{
Invokes a Lambda function. You can invoke a function synchronously and wait for the response, or asynchronously. To invoke a function asynchronously, set \code{InvocationType} to \code{Event}.
}
\details{
For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the \href{http://docs.aws.amazon.com/lambda/latest/dg/monitoring-functions.html}{execution log} and \href{http://docs.aws.amazon.com/lambda/latest/dg/dlq.html}{trace}. To record function errors for asynchronous invocations, configure your function with a \href{http://docs.aws.amazon.com/lambda/latest/dg/dlq.html}{dead letter queue}.
The status code in the API response does not reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, \href{http://docs.aws.amazon.com/lambda/latest/dg/limits.html}{limit errors}, or issues with your function's code and configuration. For example, Lambda returns \code{TooManyRequestsException} if executing the function would cause you to exceed a concurrency limit at either the account level (\code{ConcurrentInvocationLimitExceeded}) or function level (\code{ReservedFunctionConcurrentInvocationLimitExceeded}).
For functions with a long timeout, your client may be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.
This operation requires permission for the \code{lambda:InvokeFunction} action.
}
\section{Accepted Parameters}{
\preformatted{invoke(
FunctionName = "string",
InvocationType = "Event"|"RequestResponse"|"DryRun",
LogType = "None"|"Tail",
ClientContext = "string",
Payload = raw,
Qualifier = "string"
)
}
}
\examples{
# This operation invokes a Lambda function
\donttest{invoke(
ClientContext = "MyApp",
FunctionName = "MyFunction",
InvocationType = "Event",
LogType = "Tail",
Payload = "fileb://file-path/input.json",
Qualifier = "1"
)}
}
|
#' ---
#' title: "Bayesian data analysis demo 10.3"
#' author: "Aki Vehtari, Markus Paasiniemi"
#' date: "`r format(Sys.Date())`"
#' output:
#' html_document:
#' theme: readable
#' code_download: true
#' ---
#' ## Importance sampling with Normal distribution as a proposal for Bioassay model
#'
#' ggplot2, grid, and gridExtra are used for plotting, tidyr for
#' manipulating data frames
#+ setup, message=FALSE, error=FALSE, warning=FALSE
library(ggplot2)
theme_set(theme_minimal())
library(gridExtra)
library(grid)
library(tidyr)
library(MASS)
library(loo)
#' set seed to match the numbers with slides
set.seed(5710)
#' Bioassay data, (BDA3 page 86)
df1 <- data.frame(
x = c(-0.86, -0.30, -0.05, 0.73),
n = c(5, 5, 5, 5),
y = c(0, 1, 3, 5)
)
#' ### Grid sampling for Bioassay model.
#' Compute the posterior density in a grid
#'
#' - usually should be computed in logarithms!
#' - with alternative prior, check that range and spacing of A and B
#' are sensible
A = seq(-1.5, 7, length.out = 100)
B = seq(-5, 35, length.out = 100)
# make vectors that contain all pairwise combinations of A and B
cA <- rep(A, each = length(B))
cB <- rep(B, length(A))
#' Make a helper function to calculate the log likelihood
#' given a dataframe with x, y, and n and evaluation
#' points a and b. For the likelihood see BDA3 p. 75
logl <- function(df, a, b) {
  # Bioassay log likelihood of one observation (df carries x, n, y) at
  # intercept a and slope b; a and b may be vectors (grid evaluation).
  eta <- a + b * df['x']
  df['y'] * eta - df['n'] * log1p(exp(eta))
}
# calculate likelihoods: apply logl function for each observation
# ie. each row of data frame of x, n and y
p <- apply(df1, 1, logl, cA, cB) %>%
  # sum the log likelihoods of observations
  # and exponentiate to get the joint likelihood
  rowSums() %>% exp()
#' Sample from the grid (with replacement)
nsamp <- 1000
samp_indices <- sample(length(p), size = nsamp,
                       replace = T, prob = p/sum(p))
samp_A <- cA[samp_indices[1:nsamp]]
samp_B <- cB[samp_indices[1:nsamp]]
# add random jitter, see BDA3 p. 76
# (spreads draws uniformly over each grid cell; A[2]-A[1] is the cell width)
samp_A <- samp_A + runif(nsamp, (A[1] - A[2])/2, (A[2] - A[1])/2)
samp_B <- samp_B + runif(nsamp, (B[1] - B[2])/2, (B[2] - B[1])/2)
#' Compute LD50 for all draws
samp_ld50 <- -samp_A/samp_B
#' Create a plot of the posterior density
# limits for the plots
xl <- c(-2, 7)
yl <- c(-2, 35)
pos <- ggplot(data = data.frame(cA ,cB, p), aes(x = cA, y = cB)) +
  geom_raster(aes(fill = p, alpha = p), interpolate = T) +
  geom_contour(aes(z = p), colour = 'black', size = 0.2) +
  coord_cartesian(xlim = xl, ylim = yl) +
  labs(x = 'alpha', y = 'beta') +
  scale_fill_gradient(low = 'yellow', high = 'red', guide = "none") +
  scale_alpha(range = c(0, 1), guide = "none")
pos
#' Plot of the samples
sam <- ggplot(data = data.frame(samp_A, samp_B)) +
  geom_point(aes(samp_A, samp_B), color = 'blue', size = 0.3) +
  coord_cartesian(xlim = xl, ylim = yl) +
  labs(x = 'alpha', y = 'beta')
sam
#' Plot of the histogram of LD50
his <- ggplot() +
  geom_histogram(aes(samp_ld50), binwidth = 0.05,
                 fill = 'steelblue', color = 'black') +
  coord_cartesian(xlim = c(-0.8, 0.8)) +
  labs(x = 'LD50 = -alpha/beta')
his
#' ### Normal approximation for Bioassay model.
#' Define the function to be optimized
bioassayfun <- function(w, df) {
  # Negative Bioassay log likelihood at alpha = w[1], beta = w[2];
  # negated so that optim() (a minimiser) finds the posterior mode.
  linpred <- w[1] + w[2] * df$x
  -sum(df$y * linpred - df$n * log1p(exp(linpred)))
}
#' Optimize and compute the Hessian at the mode
w0 <- c(0,0)
optim_res <- optim(w0, bioassayfun, gr = NULL, df1, hessian = T)
# w = posterior mode (alpha, beta); S = inverse Hessian, the covariance of
# the normal approximation around the mode
w <- optim_res$par
S <- solve(optim_res$hessian)
#' Multivariate normal probability density function
dmvnorm <- function(x, mu, sig) {
  # Multivariate normal density at x; quadratic form computed with
  # solve(sig, .) instead of an explicit matrix inverse.
  quad <- (x - mu) %*% solve(sig, x - mu)
  exp(-0.5 * (length(x) * log(2 * pi) + log(det(sig)) + quad))
}
#' Evaluate likelihood at points (cA,cB)
#' this is just for illustration and would not be needed otherwise
p <- apply(cbind(cA, cB), 1, dmvnorm, w, S)
#' Sample from the multivariate normal
samp_norm <- mvrnorm(nsamp, w, S)
#' Samples of LD50 conditional beta > 0:
#' Normal approximation does not take into account that the posterior
#' is not symmetric and that there is very low density for negative
#' beta values. Based on the draws from the normal approximation
#' it is estimated that there is about 5% probability that beta is negative!
bpi <- samp_norm[,2] > 0
samp_norm_ld50 <- -samp_norm[bpi,1]/samp_norm[bpi,2]
#' Create a plot of the normal distribution approximation
pos_norm <- ggplot(data = data.frame(cA ,cB, p), aes(x = cA, y = cB)) +
  geom_raster(aes(fill = p, alpha = p), interpolate = T) +
  geom_contour(aes(z = p), colour = 'black', size = 0.2) +
  coord_cartesian(xlim = xl, ylim = yl) +
  labs(x = 'alpha', y = 'beta') +
  scale_fill_gradient(low = 'yellow', high = 'red', guide = "none") +
  scale_alpha(range = c(0, 1), guide = "none")
pos_norm
#' Plot of the samples
sam_norm <- ggplot(data = data.frame(samp_A=samp_norm[,1], samp_B=samp_norm[,2])) +
  geom_point(aes(samp_A, samp_B), color = 'blue', size = 0.3) +
  coord_cartesian(xlim = xl, ylim = yl) +
  labs(x = 'alpha', y = 'beta')
sam_norm
#' Plot of the histogram of LD50
his_norm <- ggplot() +
  geom_histogram(aes(samp_norm_ld50), binwidth = 0.05,
                 fill = 'steelblue', color = 'black') +
  coord_cartesian(xlim = c(-0.8, 0.8)) +
  labs(x = 'LD50 = -alpha/beta, beta > 0')
his_norm
#' ### Importance sampling for Bioassay model.
#' Multivariate normal log probability density function
ldmvnorm <- function(x, mu, sig) {
  # Log multivariate normal density (computed directly in log scale,
  # which is numerically safer than log(dmvnorm(...))).
  -0.5 * (length(x) * log(2 * pi) + log(det(sig)) + (x - mu) %*% solve(sig, x - mu))
}
#' Log importance ratios (working in log scale is numerically more stable)
lg <- apply(samp_norm, 1, ldmvnorm, w, S)
lp <- apply(df1, 1, logl, samp_norm[,1], samp_norm[,2]) %>% rowSums()
lw <- lp-lg
#' Pareto smoothed importance sampling
#' [(Vehtari et al, 2017)](https://arxiv.org/abs/1507.02646)
# r_eff = 1 because the proposal draws are independent (no MCMC autocorrelation)
psislw <- psis(lw, r_eff = 1)
#' Pareto diagnostics. k<0.7 is ok.
#' [(Vehtari et al, 2017)](https://arxiv.org/abs/1507.02646)
print(psislw$diagnostics$pareto_k, digits=2)
#' Effective sample size estimate
#' [(Vehtari et al, 2017)](https://arxiv.org/abs/1507.02646)
print(psislw$diagnostics$n_eff, digits=2)
#' Pareto smoothed weights
psisw <- exp(psislw$log_weights)
#' Importance sampling weights could be used to weight different
#' expectations directly, but for visualisation and easy computation
#' of LD50 histogram, we use resampling importance sampling.
samp_indices <- sample(length(psisw), size = nsamp,
                       replace = T, prob = psisw)
rissamp_A <- samp_norm[samp_indices,1]
rissamp_B <- samp_norm[samp_indices,2]
# add random jitter, see BDA3 p. 76
rissamp_A <- rissamp_A + runif(nsamp, (A[1] - A[2])/2, (A[2] - A[1])/2)
rissamp_B <- rissamp_B + runif(nsamp, (B[1] - B[2])/2, (B[2] - B[1])/2)
# samples of LD50
rissamp_ld50 <- -rissamp_A/rissamp_B
#' Plot of the samples
sam_ris <- ggplot(data = data.frame(rissamp_A, rissamp_B)) +
  geom_point(aes(rissamp_A, rissamp_B), color = 'blue', size = 0.3) +
  coord_cartesian(xlim = xl, ylim = yl) +
  labs(x = 'alpha', y = 'beta')
sam_ris
#' Plot of the histogram of LD50
his_ris <- ggplot() +
  geom_histogram(aes(rissamp_ld50), binwidth = 0.05,
                 fill = 'steelblue', color = 'black') +
  coord_cartesian(xlim = c(-0.8, 0.8)) +
  labs(x = 'LD50 = -alpha/beta')
his_ris
#' Combine the plots. Top: grid sampling, middle: normal
#' approximation, bottom: importance sampling.
#+ blank, fig.show='hide'
blank <- grid.rect(gp=gpar(col="white"))
#+ combined
grid.arrange(pos, sam, his, pos_norm, sam_norm, his_norm, blank, sam_ris, his_ris, ncol=3)
|
/demos_ch10/demo10_3.R
|
permissive
|
avehtari/BDA_R_demos
|
R
| false
| false
| 7,451
|
r
|
#' ---
#' title: "Bayesian data analysis demo 10.3"
#' author: "Aki Vehtari, Markus Paasiniemi"
#' date: "`r format(Sys.Date())`"
#' output:
#' html_document:
#' theme: readable
#' code_download: true
#' ---
#' ## Importance sampling with Normal distribution as a proposal for Bioassay model
#'
#' ggplot2, grid, and gridExtra are used for plotting, tidyr for
#' manipulating data frames
#+ setup, message=FALSE, error=FALSE, warning=FALSE
library(ggplot2)
theme_set(theme_minimal())
library(gridExtra)
library(grid)
library(tidyr)
library(MASS)
library(loo)
#' set seed to match the numbers with slides
set.seed(5710)
#' Bioassay data, (BDA3 page 86)
df1 <- data.frame(
x = c(-0.86, -0.30, -0.05, 0.73),
n = c(5, 5, 5, 5),
y = c(0, 1, 3, 5)
)
#' ### Grid sampling for Bioassay model.
#' Compute the posterior density in a grid
#'
#' - usually should be computed in logarithms!
#' - with alternative prior, check that range and spacing of A and B
#' are sensible
A = seq(-1.5, 7, length.out = 100)
B = seq(-5, 35, length.out = 100)
# make vectors that contain all pairwise combinations of A and B
cA <- rep(A, each = length(B))
cB <- rep(B, length(A))
#' Make a helper function to calculate the log likelihood
#' given a dataframe with x, y, and n and evaluation
#' points a and b. For the likelihood see BDA3 p. 75
logl <- function(df, a, b)
df['y']*(a + b*df['x']) - df['n']*log1p(exp(a + b*df['x']))
# calculate likelihoods: apply logl function for each observation
# ie. each row of data frame of x, n and y
p <- apply(df1, 1, logl, cA, cB) %>%
# sum the log likelihoods of observations
# and exponentiate to get the joint likelihood
rowSums() %>% exp()
#' Sample from the grid (with replacement)
nsamp <- 1000
samp_indices <- sample(length(p), size = nsamp,
replace = T, prob = p/sum(p))
samp_A <- cA[samp_indices[1:nsamp]]
samp_B <- cB[samp_indices[1:nsamp]]
# add random jitter, see BDA3 p. 76
samp_A <- samp_A + runif(nsamp, (A[1] - A[2])/2, (A[2] - A[1])/2)
samp_B <- samp_B + runif(nsamp, (B[1] - B[2])/2, (B[2] - B[1])/2)
#' Compute LD50 for all draws
samp_ld50 <- -samp_A/samp_B
#' Create a plot of the posterior density
# limits for the plots
xl <- c(-2, 7)
yl <- c(-2, 35)
pos <- ggplot(data = data.frame(cA ,cB, p), aes(x = cA, y = cB)) +
geom_raster(aes(fill = p, alpha = p), interpolate = T) +
geom_contour(aes(z = p), colour = 'black', size = 0.2) +
coord_cartesian(xlim = xl, ylim = yl) +
labs(x = 'alpha', y = 'beta') +
scale_fill_gradient(low = 'yellow', high = 'red', guide = "none") +
scale_alpha(range = c(0, 1), guide = "none")
pos
#' Plot of the samples
sam <- ggplot(data = data.frame(samp_A, samp_B)) +
geom_point(aes(samp_A, samp_B), color = 'blue', size = 0.3) +
coord_cartesian(xlim = xl, ylim = yl) +
labs(x = 'alpha', y = 'beta')
sam
#' Plot of the histogram of LD50
his <- ggplot() +
geom_histogram(aes(samp_ld50), binwidth = 0.05,
fill = 'steelblue', color = 'black') +
coord_cartesian(xlim = c(-0.8, 0.8)) +
labs(x = 'LD50 = -alpha/beta')
his
#' ### Normal approximation for Bioassay model.
#' Define the function to be optimized
bioassayfun <- function(w, df) {
z <- w[1] + w[2]*df$x
-sum(df$y*(z) - df$n*log1p(exp(z)))
}
#' Optimize and compute the Hessian at the mode
w0 <- c(0,0)
optim_res <- optim(w0, bioassayfun, gr = NULL, df1, hessian = T)
w <- optim_res$par
S <- solve(optim_res$hessian)
#' Multivariate normal probability density function
dmvnorm <- function(x, mu, sig)
exp(-0.5*(length(x)*log(2*pi) + log(det(sig)) + (x-mu)%*%solve(sig, x-mu)))
#' Evaluate likelihood at points (cA,cB)
#' this is just for illustration and would not be needed otherwise
p <- apply(cbind(cA, cB), 1, dmvnorm, w, S)
#' Sample from the multivariate normal
samp_norm <- mvrnorm(nsamp, w, S)
#' Samples of LD50 conditional beta > 0:
#' Normal approximation does not take into account that the posterior
#' is not symmetric and that there is very low density for negative
#' beta values. Based on the draws from the normal approximation
#' is is estimated that there is about 5% probability that beta is negative!
bpi <- samp_norm[,2] > 0
samp_norm_ld50 <- -samp_norm[bpi,1]/samp_norm[bpi,2]
#' Create a plot of the normal distribution approximation
pos_norm <- ggplot(data = data.frame(cA ,cB, p), aes(x = cA, y = cB)) +
geom_raster(aes(fill = p, alpha = p), interpolate = T) +
geom_contour(aes(z = p), colour = 'black', size = 0.2) +
coord_cartesian(xlim = xl, ylim = yl) +
labs(x = 'alpha', y = 'beta') +
scale_fill_gradient(low = 'yellow', high = 'red', guide = "none") +
scale_alpha(range = c(0, 1), guide = "none")
pos_norm
#' Plot of the samples
sam_norm <- ggplot(data = data.frame(samp_A=samp_norm[,1], samp_B=samp_norm[,2])) +
geom_point(aes(samp_A, samp_B), color = 'blue', size = 0.3) +
coord_cartesian(xlim = xl, ylim = yl) +
labs(x = 'alpha', y = 'beta')
sam_norm
#' Plot of the histogram of LD50
his_norm <- ggplot() +
geom_histogram(aes(samp_norm_ld50), binwidth = 0.05,
fill = 'steelblue', color = 'black') +
coord_cartesian(xlim = c(-0.8, 0.8)) +
labs(x = 'LD50 = -alpha/beta, beta > 0')
his_norm
#' ### Importance sampling for Bioassay model.
#' Multivariate normal log probability density function
ldmvnorm <- function(x, mu, sig)
(-0.5*(length(x)*log(2*pi) + log(det(sig)) + (x-mu)%*%solve(sig, x-mu)))
#' Log importance ratios (working in log scale is numerically more stable)
lg <- apply(samp_norm, 1, ldmvnorm, w, S)
lp <- apply(df1, 1, logl, samp_norm[,1], samp_norm[,2]) %>% rowSums()
lw <- lp-lg
#' Pareto smoothed importance sampling
#' [(Vehtari et al, 2017)](https://arxiv.org/abs/1507.02646)
psislw <- psis(lw, r_eff = 1)
#' Pareto diagnostics. k<0.7 is ok.
#' [(Vehtari et al, 2017)](https://arxiv.org/abs/1507.02646)
print(psislw$diagnostics$pareto_k, digits=2)
#' Effective sample size estimate
#' [(Vehtari et al, 2017)](https://arxiv.org/abs/1507.02646)
print(psislw$diagnostics$n_eff, digits=2)
#' Pareto smoothed weights
psisw <- exp(psislw$log_weights)
#' Importance sampling weights could be used to weight different
#' expectations directly, but for visualisation and easy computation
#' of LD50 histogram, we use resampling importance sampling.
samp_indices <- sample(length(psisw), size = nsamp,
replace = T, prob = psisw)
rissamp_A <- samp_norm[samp_indices,1]
rissamp_B <- samp_norm[samp_indices,2]
# add random jitter, see BDA3 p. 76
rissamp_A <- rissamp_A + runif(nsamp, (A[1] - A[2])/2, (A[2] - A[1])/2)
rissamp_B <- rissamp_B + runif(nsamp, (B[1] - B[2])/2, (B[2] - B[1])/2)
# samples of LD50
rissamp_ld50 <- -rissamp_A/rissamp_B
#' Plot of the samples
sam_ris <- ggplot(data = data.frame(rissamp_A, rissamp_B)) +
geom_point(aes(rissamp_A, rissamp_B), color = 'blue', size = 0.3) +
coord_cartesian(xlim = xl, ylim = yl) +
labs(x = 'alpha', y = 'beta')
sam_ris
#' Plot of the histogram of LD50
his_ris <- ggplot() +
geom_histogram(aes(rissamp_ld50), binwidth = 0.05,
fill = 'steelblue', color = 'black') +
coord_cartesian(xlim = c(-0.8, 0.8)) +
labs(x = 'LD50 = -alpha/beta')
his_ris
#' Combine the plots. Top: grid sampling, middle: normal
#' approximation, bottom: importance sampling.
#+ blank, fig.show='hide'
blank <- grid.rect(gp=gpar(col="white"))
#+ combined
grid.arrange(pos, sam, his, pos_norm, sam_norm, his_norm, blank, sam_ris, his_ris, ncol=3)
|
# Dependency bootstrap: install any packages that are missing, then attach them.
# BUG FIX: the original assigned `pkg_lis` but read `pkg_list` on the next
# line, which raised "object 'pkg_list' not found" on a fresh session; the
# name is now consistent (and a duplicated "shinycssloaders" entry dropped).
pkg_list = c("plyr", "dplyr", "tidyr", "stringr", "ggplot2", "tidytext", "readtext",
             "syuzhet", "purrrlyr", "topicmodels", "ggwordcloud", "shinycssloaders")
mia_pkgs = pkg_list[!(pkg_list %in% installed.packages()[, "Package"])]
if (length(mia_pkgs) > 0) install.packages(mia_pkgs)
loaded_pkgs = lapply(pkg_list, require, character.only = TRUE)
# Explicit attaches for the packages whose functions are called by name below.
library(plyr)
library(dplyr)
library(tidyr)
library(stringr)
library(ggplot2)
library(tidytext)
library(readtext)
library(syuzhet)
library(purrrlyr)
library(topicmodels)
library(ggwordcloud)
# Language Model Variables
# Pre-trained n-gram count tables, one per language (en/it/es) and order
# (bigram/trigram/quadgram), serialized with saveRDS. Paths are relative to
# the app directory -- TODO confirm they resolve when launched from elsewhere.
en_bigram = readRDS("../data/en_bigram.rds")
it_bigram = readRDS("../data/it_bigram.rds")
es_bigram = readRDS("../data/es_bigram.rds")
en_trigram = readRDS("../data/en_trigram.rds")
it_trigram = readRDS("../data/it_trigram.rds")
es_trigram = readRDS("../data/es_trigram.rds")
en_qgram = readRDS("../data/en_qgram.rds")
it_qgram = readRDS("../data/it_qgram.rds")
es_qgram = readRDS("../data/es_qgram.rds")
# Era Model Variables
en_era = readRDS("../data/en_era3.rds")
es_era = readRDS("../data/es_era3.rds")
it_era = readRDS("../data/it_era3.rds")
# Chained-validation operator borrowed from shiny's unexported `%OR%`.
# NOTE(review): ::: reaches into shiny internals and can break across
# shiny versions -- consider vendoring the two-line implementation instead.
`%then%` <- shiny:::`%OR%`
# Validate the uploaded file name for shiny's validate()/need() machinery:
# NULL means "ok, proceed"; otherwise a user-facing message string is returned.
is_there_data = function(input) {
  if (!is.null(input) && str_detect(input, pattern = "txt")) {
    NULL
  } else {
    "Please select a text file to analyze"
  }
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Language Analysis Section ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# find a better way to tokenize, exclude number?
# Code derived from www.tidytextmining.com/ngrams.html
# Build an n-gram count table from a corpus.
#   books:  data frame with a `text` column (e.g. readtext() output).
#   number: n-gram order.
# Returns a data frame with columns word1..word<number> plus freq
# (one row per distinct n-gram, counted by plyr::count).
create_ngram_model = function(books, number = 2) {
  colnames = paste0("word", 1:number)
  ngram = books %>%
    unnest_tokens(output = ngram, input = text, token = "ngrams", n = number) %>%
    separate(ngram, colnames, sep = " ") %>%
    plyr::count(colnames)
  return (ngram)
}
# Laplace-smoothed (add-one) log probability contribution of one test n-gram,
# weighted by its observed frequency in the test text.
#
# Args:
#   target: one-row data frame with columns word1..word<number> and freq.
#   model:  training n-gram counts with the same word1../freq layout.
#   V:      vocabulary size (distinct first words in the model).
#   number: n-gram order.
# Returns: log P(word<number> | context) * target$freq.
row_wise_laplace = function(target, model, V, number = 2) {
  # Narrow the model to rows whose context words match the target's.
  targetdf = model[model$word1 == target$word1,]
  i = 2
  # FIX: test row count with nrow() -- length() on a data frame counts
  # columns and is always truthy -- and use scalar && in the condition
  # (matches the test used in row_wise_gt).
  while (nrow(targetdf) > 0 && (i <= (number - 1))) {
    targetdf = targetdf[targetdf[[i]] == target[[i]],]
    i = i + 1
  }
  # Unseen context: add-one smoothing reduces to 1 / V.
  numer = 1
  num_value = 0
  denom = V
  if (nrow(targetdf) > 0) {
    targetdf_n = targetdf[targetdf[[number]] == target[[number]],]
    if (nrow(targetdf_n) > 0) {
      num_value = targetdf_n$freq
    }
    numer = 1 + num_value
    denom = V + sum(targetdf$freq)
  }
  return (log(numer/denom) * target$freq)
}
# this is laplace smoothing
# Total Laplace-smoothed log probability of all n-grams in `test_ngram`
# under `model`: applies row_wise_laplace to each row and sums the results.
# returns log(probability), e^return_value to get the probability
# (removed a dead `prob = 0` local that was never read)
ngram_evaluator_laplace = function(test_ngram, model, number = 2) {
  # Vocabulary size = distinct first words in the training model.
  V = length(unique(model[["word1"]]))
  comb = test_ngram %>% by_row(row_wise_laplace, model = model, V = V, number = number, .collate = "rows")
  return (sum(comb$.out))
}
# if x is large enough just consider itself
# Katz gave me 5
# Good-Turing adjusted count c* for an observed frequency `freq`.
# freq_array[k] holds the number of n-grams seen exactly k - 1 times
# (freq_array[1] = unseen count), the same layout built in ngram_evaluator_gt.
# FIX: the original indexed freq_array[freq] / freq_array[freq - 1], which
# reads index 0 (numeric(0)) for freq == 1 and disagrees with the +2/+1
# convention used by row_wise_gt; indices corrected to match.
get_const = function(freq, freq_array) {
  if (freq < 5) {
    # c* = (c + 1) * N_{c+1} / N_c; the +2/+1 offsets map counts to the
    # 1-based freq_array slots.
    return ((freq + 1) * freq_array[freq + 2] / freq_array[freq + 1])
  } else {
    return (freq)
  }
}
# Good-Turing-smoothed log probability contribution of one test n-gram row,
# weighted by its observed test frequency. Same target/model layout and
# freq_arr convention as described in get_const.
row_wise_gt = function(target, model, V, freq_arr, number = 2) {
  matches = model[model$word1 == target$word1, ]
  seen_pairs = nrow(model)
  # Narrow to rows matching the first (number - 1) context words.
  col = 2
  while (nrow(matches) > 0 && col <= (number - 1)) {
    matches = matches[matches[[col]] == target[[col]], ]
    col = col + 1
  }
  count = 0
  denom = seen_pairs
  if (nrow(matches) > 0) {
    denom = sum(matches$freq)
    exact = matches[matches[[number]] == target[[number]], ]
    if (nrow(exact) > 0) {
      count = exact$freq
    }
  }
  # Good-Turing adjusted count; raw counts >= 5 are considered reliable.
  if (count < 5) {
    numer = (count + 1) * freq_arr[count + 2] / freq_arr[count + 1]
  } else {
    numer = count
  }
  return (log(numer / denom) * target$freq)
}
# this is good turing smoothing
# returns log(probability), e^return_value to get the probability
ngram_evaluator_gt = function(test_ngram, model, number = 2) {
  V = length(unique(model[["word1"]]))
  possible_pairs = V^number
  unseen_pairs = possible_pairs - nrow(model)
  # freq_array[k] = number of n-grams seen exactly k - 1 times
  # (slot 1 holds the count of unseen n-grams).
  freq_array = c(unseen_pairs,
                 vapply(1:5, function(k) nrow(subset(model, freq == k)), integer(1)))
  comb = test_ngram %>% by_row(row_wise_gt, model = model, V = V,
                               freq_arr = freq_array, number = number, .collate = "rows")
  return (sum(comb$.out))
}
# Log probability of the text in `filename` under `model`, scored by the
# evaluator `func` (ngram_evaluator_laplace or ngram_evaluator_gt).
# NOTE(review): reads the file from disk via readtext() on every call.
ngram_prob = function(filename, model, func, number = 2) {
  testfile = readtext(filename)
  test_ngram = create_ngram_model(testfile, number)
  func(test_ngram, model, number)
}
#Language Vizualization
# Language identification: score `filename` against the English/Italian/Spanish
# n-gram models of the requested order and return a tibble with one row per
# language.
#
# filename:     path to the text file to classify
# ngram:        model-order label; must contain "Bi", "Tri" or "Q"
# methodNumber: 1 = Laplace smoothing, 2 = Good-Turing smoothing
#
# Returns a tibble with columns Prob (log-probability), Language, and Minimum
# (TRUE for the best-fitting language, i.e. the smallest negative
# log-probability). Returns NULL for an unrecognized methodNumber, matching
# the original fall-through behavior.
#
# (The original computed BOTH smoothing methods for all three languages and
# discarded half the work; it also used the T/F shorthand and a redundant
# ifelse(cond, T, F).)
language_df = function(filename, ngram, methodNumber) {
  print("loaded in the file...")
  # pick the per-language models matching the requested n-gram order
  if (str_detect(ngram, pattern = "Bi")) {
    models = list(en_bigram, it_bigram, es_bigram)
    number = 2
  } else if (str_detect(ngram, pattern = "Tri")) {
    models = list(en_trigram, it_trigram, es_trigram)
    number = 3
  } else if (str_detect(ngram, pattern = "Q")) {
    models = list(en_qgram, it_qgram, es_qgram)
    number = 4
  } else {
    # previously this fell through to an "object not found" error downstream
    stop("Unknown ngram type: ", ngram, call. = FALSE)
  }
  if (methodNumber == 1) {
    evaluator = ngram_evaluator_laplace
  } else if (methodNumber == 2) {
    evaluator = ngram_evaluator_gt
  } else {
    return(NULL)
  }
  probs = vapply(models,
                 function(m) ngram_prob(filename, m, evaluator, number = number),
                 numeric(1))
  print("made all the probs!")
  tibble("Prob" = probs,
         "Language" = c("English", "Italian", "Spanish"),
         "Minimum" = Prob == max(Prob))
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sentiment Section ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Sentiment Modeling
#type = positivity or emotion
# Tokenize a text file and score it against the NRC sentiment lexicon for
# `language_name`. type = "emotion" keeps only the eight emotion categories;
# any other value (default "positivity") keeps only the positive/negative
# polarity entries. Returns the joined word/sentiment table from sent_analysis.
sent_modeling = function(filename, language_name, type = "positivity") {
  tokens = readtext(filename) %>%
    unnest_tokens(output = word, input = text, token = "words")
  # fetch the lexicon once, then restrict it to the requested entry kind
  lexicon = get_sentiment_dictionary("nrc", language_name)
  if (type == "emotion") {
    lexicon = lexicon %>%
      filter(sentiment != "positive" & sentiment != "negative")
  } else {
    lexicon = lexicon %>%
      filter(sentiment == "positive" | sentiment == "negative")
  }
  sent_analysis(tokens, lexicon)
}
# bag of words approach
# Bag-of-words sentiment scoring: tally each token in `test_sent` and keep
# only the words present in the sentiment `model` (inner join on "word").
sent_analysis = function(test_sent, model) {
  word_counts = count(test_sent, word, sort = TRUE)
  inner_join(word_counts, model, by = "word")
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Era Analysis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Score a text against the per-era topic-model word weights for `language`.
# Returns a data.frame with one row per literary era (`document`) and the
# topic weight summed over the words the text shares with the era model.
# The era tables (en_era / it_era / es_era) are assumed to have words in
# column 1 and six per-era weight columns in positions 3:8 -- confirm against
# the serialized *_era3.rds files.
era_analysis = function(text, language) {
  print("Working on era")
  if (language == "English") {
    work = en_era
    topic = c("Middle", "Renaissance", "Victorian", "Neoclassical", "Modern", "Romantic")
  } else if (language == "Italian") {
    work = it_era
    topic = c("Baroque", "Contemporary", "Romanticism", "Medieval", "Classicism", "Renaissance")
  } else if (language == "Spanish") {
    work = es_era
    topic = c("Modernism", "Realism", "Renaissance", "Baroque", "Enlightenment", "Romanticism")
  } else {
    # previously an unknown language fell through to an "object not found" error
    stop("Unsupported language: ", language, call. = FALSE)
  }
  text_df = readtext(text)
  # note: `text` inside unnest_tokens() refers to the data column, not the
  # `text` argument of this function
  wordcount = text_df %>%
    unnest_tokens(word, text) %>%
    count(word)
  print("scoring...")
  # keep only words the text shares with the era model
  topicscores = wordcount %>% inner_join(work, by = "word")
  # columns 3:8 hold the six per-era weights; sum each down the shared words
  # (replaces six copy-pasted sum(topicscores[, k]) calls)
  scores = vapply(3:8,
                  function(j) sum(as.numeric(topicscores[[j]])),
                  numeric(1))
  final = data.frame(document = topic, score = scores)
  return(final)
}
|
/TextAnalyzer/global.R
|
no_license
|
wanttomakeyousmile/nlp-book-project
|
R
| false
| false
| 9,020
|
r
|
# Install any missing dependencies, then load them all.
# Bug fix: the vector was originally assigned to `pkg_lis` while every line
# below read `pkg_list`, so this block failed at startup; the vector also
# listed "shinycssloaders" twice.
pkg_list <- c("plyr", "dplyr", "tidyr", "stringr", "ggplot2", "tidytext",
              "readtext", "syuzhet", "purrrlyr", "topicmodels", "ggwordcloud",
              "shinycssloaders")
mia_pkgs <- pkg_list[!(pkg_list %in% installed.packages()[, "Package"])]
if (length(mia_pkgs) > 0) install.packages(mia_pkgs)
loaded_pkgs <- lapply(pkg_list, require, character.only = TRUE)
library(plyr)
library(dplyr)
library(tidyr)
library(stringr)
library(ggplot2)
library(tidytext)
library(readtext)
library(syuzhet)
library(purrrlyr)
library(topicmodels)
library(ggwordcloud)
# Language Model Variables
# Pre-trained n-gram frequency tables (one per language and order), built
# offline and serialized with saveRDS. Paths are relative to the app
# directory, so the working directory must be the app root when sourcing.
en_bigram = readRDS("../data/en_bigram.rds")
it_bigram = readRDS("../data/it_bigram.rds")
es_bigram = readRDS("../data/es_bigram.rds")
en_trigram = readRDS("../data/en_trigram.rds")
it_trigram = readRDS("../data/it_trigram.rds")
es_trigram = readRDS("../data/es_trigram.rds")
en_qgram = readRDS("../data/en_qgram.rds")
it_qgram = readRDS("../data/it_qgram.rds")
es_qgram = readRDS("../data/es_qgram.rds")
# Era Model Variables
# Per-era topic weights consumed by era_analysis(); presumably topic-model
# output with one weight column per era -- confirm against the build scripts.
en_era = readRDS("../data/en_era3.rds")
es_era = readRDS("../data/es_era3.rds")
it_era = readRDS("../data/it_era3.rds")
`%then%` <- shiny:::`%OR%`
# Shiny validation helper: returns NULL (valid) when `input` is a non-NULL
# path containing "txt", otherwise returns the message to display.
is_there_data = function(input) {
  looks_like_text_file = !is.null(input) && grepl("txt", input, fixed = TRUE)
  if (looks_like_text_file) {
    NULL
  } else {
    "Please select a text file to analyze"
  }
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Language Analysis Section ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# find a better way to tokenize, exclude number?
# Code derived from www.tidytextmining.com/ngrams.html
# Tokenize `books` (a readtext result / data.frame with a `text` column) into
# n-grams of order `number` and tally the frequency of each distinct n-gram.
# Returns a data.frame with columns word1..wordN plus `freq`.
create_ngram_model = function(books, number = 2) {
  # one output column per position in the n-gram: word1, word2, ...
  colnames = paste0("word", 1:number)
  ngram = books %>%
    unnest_tokens(output = ngram, input = text, token = "ngrams", n = number) %>%
    separate(ngram, colnames, sep = " ") %>%
    plyr::count(colnames)
  return (ngram)
}
# Per-row worker for ngram_evaluator_laplace (called via purrrlyr::by_row).
# `target` is a one-row data.frame holding one test n-gram (word1..wordN, freq);
# `model` is the training n-gram table; `V` is the vocabulary size.
# Returns the add-one smoothed log-probability of the n-gram, weighted by how
# often it occurred in the test text.
row_wise_laplace = function(target, model, V, number = 2) {
  # narrow the model to rows whose first word matches the target's
  targetdf = model[model$word1 == target$word1,]
  i = 2
  # Keep narrowing on each context word (positions 2 .. number-1).
  # Fix: the original condition was `length(targetdf) & ...`; length() of a
  # data.frame is its COLUMN count and is never zero, so it could not detect
  # an empty match. Use nrow() (as row_wise_gt does) with a scalar `&&`.
  while (nrow(targetdf) > 0 && (i <= (number - 1))) {
    targetdf = targetdf[targetdf[[i]] == target[[i]],]
    i = i + 1
  }
  # defaults for an unseen context: count 0 smoothed to 1 / V
  numer = 1
  num_value = 0
  denom = V
  if (nrow(targetdf) > 0) {
    # rows whose final word also matches: frequency of the full n-gram
    targetdf_n = targetdf[targetdf[[number]] == target[[number]],]
    if (nrow(targetdf_n) > 0) {
      num_value = targetdf_n$freq
    }
    # add-one smoothing: (count + 1) / (context count + V)
    numer = 1 + num_value
    denom = V + sum(targetdf$freq)
  }
  return (log(numer/denom) * target$freq)
}
# this is laplace smoothing
# returns log(probability), e^return_value to get the probability
ngram_evaluator_laplace = function(test_ngram, model, number = 2) {
V = length(unique(model[["word1"]]))
prob = 0
comb = test_ngram %>% by_row(row_wise_laplace, model = model, V = V, number = number, .collate = "rows")
return (sum(comb$.out))
}
# if x is large enough just consider itself
# Katz gave me 5
# Good-Turing adjusted count c* for an observed frequency.
# `freq_array[k + 1]` holds N_k, the number of n-grams seen exactly k times
# (freq_array[1] = N_0 = count of unseen n-grams) -- the same layout used by
# the inline Good-Turing formula in row_wise_gt.
# For small counts (< 5, per Katz) apply the discount c* = (c+1) * N_{c+1} / N_c;
# larger counts are treated as reliable and returned unchanged.
get_const = function(freq, freq_array) {
  if (freq < 5) {
    # N_{freq+1} lives at index freq + 2 and N_freq at index freq + 1.
    # (The original `freq_array[freq] / freq_array[freq - 1]` indexed out of
    # bounds for freq == 1 and disagreed with the formula in row_wise_gt.)
    return ((freq + 1) * freq_array[freq + 2] / freq_array[freq + 1])
  } else {
    return (freq)
  }
}
row_wise_gt = function(target, model, V, freq_arr, number = 2) {
targetdf = model[model$word1 == target$word1,]
seen_pairs = nrow(model)
i = 2
while (nrow(targetdf) & (i <= (number - 1))) {
targetdf = targetdf[targetdf[[i]] == target[[i]],]
i = i + 1
}
denom = seen_pairs
num_value = 0
numer = 0
if (nrow(targetdf) > 0) {
targetdf_n = targetdf[targetdf[[number]] == target[[number]],]
denom = sum(targetdf$freq)
if (nrow(targetdf_n) > 0) {
num_value = targetdf_n$freq
}
}
if (num_value < 5) {
numer = (num_value + 1) * freq_arr[num_value + 2] / freq_arr[num_value + 1]
} else {
numer = num_value
}
return (log(numer/denom) * target$freq)
}
# this is good turing smoothing
# returns log(probability), e^return_value to get the probability
ngram_evaluator_gt = function(test_ngram, model, number = 2) {
V = length(unique(model[["word1"]]))
possible_pairs = V^number
unseen_pairs = possible_pairs - nrow(model)
freq_array = c(unseen_pairs,
nrow(subset(model, freq == 1)),
nrow(subset(model, freq == 2)),
nrow(subset(model, freq == 3)),
nrow(subset(model, freq == 4)),
nrow(subset(model, freq == 5)))
comb = test_ngram %>% by_row(row_wise_gt, model = model, V = V, freq_arr = freq_array, number = number, .collate = "rows")
return (sum(comb$.out))
}
ngram_prob = function(filename, model, func, number = 2) {
testfile = readtext(filename)
test_ngram = create_ngram_model(testfile, number)
func(test_ngram, model, number)
}
#Language Vizualization
language_df = function(filename, ngram, methodNumber) {
print("loaded in the file...")
#probable a better way to do this, but this'll do for now
if (str_detect(ngram, pattern = "Bi")){
lap_english_prob = ngram_prob(filename, en_bigram, ngram_evaluator_laplace)
lap_italian_prob = ngram_prob(filename, it_bigram, ngram_evaluator_laplace)
lap_spanish_prob = ngram_prob(filename, es_bigram, ngram_evaluator_laplace)
gt_english_prob = ngram_prob(filename, en_bigram, ngram_evaluator_gt)
gt_italian_prob = ngram_prob(filename, it_bigram, ngram_evaluator_gt)
gt_spanish_prob = ngram_prob(filename, es_bigram, ngram_evaluator_gt)
}
if (str_detect(ngram, pattern = "Tri")){
lap_english_prob = ngram_prob(filename, en_trigram, ngram_evaluator_laplace, number = 3)
lap_italian_prob = ngram_prob(filename, it_trigram, ngram_evaluator_laplace, number = 3)
lap_spanish_prob = ngram_prob(filename, es_trigram, ngram_evaluator_laplace, number = 3)
gt_english_prob = ngram_prob(filename, en_trigram, ngram_evaluator_gt, number = 3)
gt_italian_prob = ngram_prob(filename, it_trigram, ngram_evaluator_gt, number = 3)
gt_spanish_prob = ngram_prob(filename, es_trigram, ngram_evaluator_gt, number = 3)
}
if (str_detect(ngram, pattern = "Q")) {
lap_english_prob = ngram_prob(filename, en_qgram, ngram_evaluator_laplace, number = 4)
lap_italian_prob = ngram_prob(filename, it_qgram, ngram_evaluator_laplace, number = 4)
lap_spanish_prob = ngram_prob(filename, es_qgram, ngram_evaluator_laplace, number = 4)
gt_english_prob = ngram_prob(filename, en_qgram, ngram_evaluator_gt, number = 4)
gt_italian_prob = ngram_prob(filename, it_qgram, ngram_evaluator_gt, number = 4)
gt_spanish_prob = ngram_prob(filename, es_qgram, ngram_evaluator_gt, number = 4)
}
print("made all the probs!")
if (methodNumber == 1) {
lap_df = tibble("Prob" = c(lap_english_prob, lap_italian_prob, lap_spanish_prob),
"Language" = c("English", "Italian", "Spanish"),
"Minimum" = ifelse(min(-Prob) == -Prob, T, F))
return(lap_df)
}
if (methodNumber == 2) {
gt_df = tibble("Prob" = c(gt_english_prob, gt_italian_prob, gt_spanish_prob),
"Language" = c("English", "Italian", "Spanish"),
"Minimum" = ifelse(min(-Prob) == -Prob, T, F))
return(gt_df)
}
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sentiment Section ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Sentiment Modeling
#type = positivity or emotion
sent_modeling = function(filename, language_name, type = "positivity") {
testfile = readtext(filename)
test_sent = testfile %>%
unnest_tokens(output = word, input = text, token = "words")
if (type == "emotion") {
table = get_sentiment_dictionary("nrc", language_name) %>%
filter(sentiment != "positive" & sentiment != "negative")
} else {
table = get_sentiment_dictionary("nrc", language_name) %>%
filter(sentiment == "positive" | sentiment == "negative")
}
sent_analysis(test_sent, table)
}
# bag of words approach
sent_analysis = function(test_sent, model) {
assigned_v = test_sent %>%
count(word, sort = TRUE) %>%
inner_join(model, by = "word")
return (assigned_v)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Era Analysis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
era_analysis = function(text, language) {
print("Working on era")
if(language == "English"){
work = en_era
topic = c("Middle", "Renaissance", "Victorian", "Neoclassical", "Modern", "Romantic")
}
if(language == "Italian"){
work = it_era
topic = c("Baroque", "Contemporary", "Romanticism", "Medieval", "Classicism", "Renaissance")
}
if(language == "Spanish"){
work = es_era
topic = c("Modernism", "Realism", "Renaissance", "Baroque", "Enlightenment", "Romanticism")
}
text_df = readtext(text)
wordcount = text_df %>%
unnest_tokens(word, text) %>%
count(word)
print("scoring...")
# same words
topicscores = wordcount %>% inner_join(work, by = "word")
scores = c(sum(topicscores[,3]),sum(topicscores[,4]),sum(topicscores[,5]),sum(topicscores[,6]),sum(topicscores[,7]),sum(topicscores[,8]))
final = data.frame(document = topic, score = scores)
return(final)
}
|
library(dplyr)
library(magrittr)
library(ggplot2)
library(ggthemes)
library(car)
library(corrplot)
library(caret)
library(makedummies)
|
/titanic/init.R
|
no_license
|
ducksfrogs/masanariR
|
R
| false
| false
| 135
|
r
|
library(dplyr)
library(magrittr)
library(ggplot2)
library(ggthemes)
library(car)
library(corrplot)
library(caret)
library(makedummies)
|
\name{Prings05}
\docType{data}
\alias{Prings05}
\title{Multilevel data set of P. pinaster
}
\description{Radial increments of Pinus pinaster from
two sample plots located on Northern and Southern portions of Ebro
river basin, Spain.
}
\usage{data(Prings05)}
\format{
A data frame with the following 5 variables.
\describe{
\item{\code{x}}{A numeric vector with the radial increments in mm
year-1}
\item{\code{year}}{A numeric vector with the recorded year}
\item{\code{sample}}{A factor indicating the sample replicate}
\item{\code{tree}}{A factor indicating the tree number}
\item{\code{plot}}{A factor indicating the plot code}
}
}
\details{This data set
contains eight series of tree-ring widths of maritime
pine (Pinus pinaster), with recorded years spanning from 1810
to 2005. The cores were sampled
from dominant trees of two sites, with sample plots being located on
both: northern Spain (plot code: P44005) and center-east portion of
the same country (plot code: P16106). Two trees were selected by plot,
and two core samples were extracted by tree. Consequently, the sample
design defined three levels: sample in tree on plot (plot level),
sample in tree (tree level), and sample level.
}
\references{Bogino, S., and Bravo, F. (2008). Growth response of Pinus pinaster Ait. to climatic variables in central Spanish forests. Ann. For. Sci. 65, 1-13.
}
\examples{
str(Prings05)
}
\keyword{datasets}
|
/man/Prings05.Rd
|
no_license
|
cran/BIOdry
|
R
| false
| false
| 1,455
|
rd
|
\name{Prings05}
\docType{data}
\alias{Prings05}
\title{Multilevel data set of P. pinaster
}
\description{Radial increments of Pinus pinaster from
two sample plots located on Northern and Southern portions of Ebro
river basin, Spain.
}
\usage{data(Prings05)}
\format{
A data frame with the following 5 variables.
\describe{
\item{\code{x}}{A numeric vector with the radial increments in mm
year-1}
\item{\code{year}}{A numeric vector with the recorded year}
\item{\code{sample}}{A factor indicating the sample replicate}
\item{\code{tree}}{A factor indicating the tree number}
\item{\code{plot}}{A factor indicating the plot code}
}
}
\details{This data set
contains eight series of tree-ring widths of maritime
pine (Pinus pinaster), with recorded years spanning from 1810
to 2005. The cores were sampled
from dominant trees of two sites, with sample plots being located on
both: northern Spain (plot code: P44005) and center-east portion of
the same country (plot code: P16106). Two trees were selected by plot,
and two core samples were extracted by tree. Consequently, the sample
design defined three levels: sample in tree on plot (plot level),
sample in tree (tree level), and sample level.
}
\references{Bogino, S., and Bravo, F. (2008). Growth response of Pinus pinaster Ait. to climatic variables in central Spanish forests. Ann. For. Sci. 65, 1-13.
}
\examples{
str(Prings05)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/define_contact_matrix.R
\name{define_contact_matrix}
\alias{define_contact_matrix}
\title{define partnering rates between demographic groups}
\usage{
define_contact_matrix(contact_df, demo_indices)
}
\arguments{
\item{contact_df}{data.frame with partnering rates between demographic groups}
\item{demo_indices}{data.frame with the mapping of demographic groups to unique identifiers}
}
\description{
define partnering rates between demographic groups
}
|
/man/define_contact_matrix.Rd
|
no_license
|
caleb-easterly/msid
|
R
| false
| true
| 527
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/define_contact_matrix.R
\name{define_contact_matrix}
\alias{define_contact_matrix}
\title{define partnering rates between demographic groups}
\usage{
define_contact_matrix(contact_df, demo_indices)
}
\arguments{
\item{contact_df}{data.frame with partnering rates between demographic groups}
\item{demo_indices}{data.frame with the mapping of demographic groups to unique identifiers}
}
\description{
define partnering rates between demographic groups
}
|
\name{mlvl}
\alias{mlvl}
\title{
Construct multilevel networks
}
\description{
Function to construct multilevel networks from multimodal structures.
}
\usage{
mlvl(x = NULL, y = NULL, type = c("bpn", "cn", "cn2", "list"),
symCdm, diag, lbs)
}
\arguments{
\item{x}{
domain data
}
\item{y}{
codomain data
}
\item{type}{
type of multilevel system:
- \code{bpn} for binomial projection
- \code{cn} for common membership network
- \code{cn2} for co-affiliation of network members
- \code{list} for the multimodal structures as a list
}
\item{symCdm}{
(optional and logical, only for \code{bpn}) whether or not symmetrize the codomain structure
}
\item{diag}{
(optional and logical) whether or not include the entries in the diagonal matrices
}
\item{lbs}{
(optional, only for \code{cn2}) tie labels
}
}
\details{
The default multilevel system is a binomial projection \code{bpn} that requires data for the two domains, as with
\code{cn2} as well.
Option \code{cn} does not need the domain in \code{x} since returns the co-affiliation of network members from the codomain structure.
Since these are different components in the multilevel system for co-affiliation of network members,
it is possible to specify the domain and codomain labels in \code{lbs} as a list object.
Making symmetric the codomain structure with \code{symCdm} is many times convenient for visualization purposes.
}
\value{
An object of `\code{Multilevel}' class of chosen type.
\item{mlnet}{
the multilevel network
}
\item{lbs}{
(list) domain and codomain labels
}
\item{modes}{
a vector indicating the domain of the data in \code{mlnet} where \code{1} is for the domain and \code{2} is for the codomain.
}
%%% ...
}
%\references{
%%% ~put references to the literature/web site here ~
%}
\author{
Antonio Rivero Ostoic
}
%\note{
%
%}
%
%%% ~Make other sections like Warning with \section{Warning }{....} ~
%
\seealso{
\code{\link[multigraph:mlgraph]{mlgraph}}, \code{\link[multigraph:multigraph]{multigraph}}
}
\examples{
# array for the domain
arr1 <- round( replace( array(runif(18), c(3,3,2)), array(runif(18), c(3,3,2))>.9, 3 ) )
# rectangle array for the co-domain
arr2 <- round( replace( array(runif(12), c(3,2,2)), array(runif(12), c(3,2,2))>.9, 3 ) )
# multilevel system with default type
mlvl(arr1, arr2)
}
%
\keyword{models}
\keyword{data}
|
/man/mlvl.Rd
|
no_license
|
mplex/multiplex
|
R
| false
| false
| 2,488
|
rd
|
\name{mlvl}
\alias{mlvl}
\title{
Construct multilevel networks
}
\description{
Function to construct multilevel networks from multimodal structures.
}
\usage{
mlvl(x = NULL, y = NULL, type = c("bpn", "cn", "cn2", "list"),
symCdm, diag, lbs)
}
\arguments{
\item{x}{
domain data
}
\item{y}{
codomain data
}
\item{type}{
type of multilevel system:
- \code{bpn} for binomial projection
- \code{cn} for common membership network
- \code{cn2} for co-affiliation of network members
- \code{list} for the multimodal structures as a list
}
\item{symCdm}{
(optional and logical, only for \code{bpn}) whether or not symmetrize the codomain structure
}
\item{diag}{
(optional and logical) whether or not include the entries in the diagonal matrices
}
\item{lbs}{
(optional, only for \code{cn2}) tie labels
}
}
\details{
The default multilevel system is a binomial projection \code{bpn} that requires data for the two domains, as with
\code{cn2} as well.
Option \code{cn} does not need the domain in \code{x} since returns the co-affiliation of network members from the codomain structure.
Since these are different components in the multilevel system for co-affiliation of network members,
it is possible to specify the domain and codomain labels in \code{lbs} as a list object.
Making symmetric the codomain structure with \code{symCdm} is many times convenient for visualization purposes.
}
\value{
An object of `\code{Multilevel}' class of chosen type.
\item{mlnet}{
the multilevel network
}
\item{lbs}{
(list) domain and codomain labels
}
\item{modes}{
a vector indicating the domain of the data in \code{mlnet} where \code{1} is for the domain and \code{2} is for the codomain.
}
%%% ...
}
%\references{
%%% ~put references to the literature/web site here ~
%}
\author{
Antonio Rivero Ostoic
}
%\note{
%
%}
%
%%% ~Make other sections like Warning with \section{Warning }{....} ~
%
\seealso{
\code{\link[multigraph:mlgraph]{mlgraph}}, \code{\link[multigraph:multigraph]{multigraph}}
}
\examples{
# array for the domain
arr1 <- round( replace( array(runif(18), c(3,3,2)), array(runif(18), c(3,3,2))>.9, 3 ) )
# rectangle array for the co-domain
arr2 <- round( replace( array(runif(12), c(3,2,2)), array(runif(12), c(3,2,2))>.9, 3 ) )
# multilevel system with default type
mlvl(arr1, arr2)
}
%
\keyword{models}
\keyword{data}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mycode.R
\name{add_strings}
\alias{add_strings}
\title{add strings}
\usage{
add_strings(strings)
}
\arguments{
\item{strings}{a vector of strings to concatenate}
}
\value{
a single string
}
\description{
Add strings by concatenating them together
}
\examples{
add_strings(c("abc", "def"))
}
|
/man/add_strings.Rd
|
permissive
|
bheavner/demo
|
R
| false
| true
| 370
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mycode.R
\name{add_strings}
\alias{add_strings}
\title{add strings}
\usage{
add_strings(strings)
}
\arguments{
\item{strings}{a vector of strings to concatinate}
}
\value{
a single string
}
\description{
Add strings by concatinating them together
}
\examples{
add_strings(c("abc", "def"))
}
|
#' Example Observed Rts
#'
#' An example data frame of observed Reproduction numbers
#' @format A data frame containing Rts estimated for each date.
"example_obs_rts"
|
/R/example_obs_rts.R
|
permissive
|
medewitt/EpiSoon
|
R
| false
| false
| 167
|
r
|
#' Example Observed Rts
#'
#' An example data frame of observed Reproduction numbers
#' @format A data frame containing Rts estimated for each date.
"example_obs_rts"
|
library(ggplot2)
library(dplyr)
library(data.table)
library(survival)
library(DiagrammeR)
library(regmedint)
## How to solve the phantom direct effect
## 0. Do nothing, nice to have compare
## 1. Have multiple measurement before enrollment
## 2. Have long follow-up, multiple measurement post-randomization
## 3. Both
## 4. Impute from other variable
## 5. Bayesian Stuff
## 6.
#Set Up
# Global simulation constants (assigned with `<<-` so they land in the global
# environment even if this script is sourced from inside a function; the
# downstream simulation functions appear to read them from there -- confirm
# against simulation.function.v4 before changing to `<-`).
p<<-1.4
#outcome regression AFT
# The Weibull outcome below uses shape = 1/p and scale = exp(linear predictor
# built from theta0..theta4).
theta0<<-7.5
theta1<<-0.0 #direct effect, positive is protective
theta2<<--0.015 #effect * ldlc_change; negative is protective
theta3<<-0 # ldlc_change * drug assume no effect
theta4<<--0.02 # cov
# theta0_ni<<-7.7
# theta1_ni<<-0.59
# theta2_ni<<-0.0032 #should be negative
# Baseline LDL-C eligibility cutoffs; 1000 presumably disables the cutoff
# entirely -- TODO confirm against simulation.function.v4
cutoff<-c(90,110,130,150,1000)
data<-simulation.function.v4(20000,cutoff = cutoff[5])
data$ldlc_change<-data$ldlc12 - data$ldlcb
data$ldlc_change_m<-data$ldlc12m - data$ldlcm
#fit<-lm(data,formula = ldlc_change_m~ldlc_change+ldlcb+drug)
#plot(fit$residuals)
#data$ldlc_change_m_pred<-predict(fit,data)
#plot(data$ldlc_change_m,data$ldlc_change_m_pred)#mean prediction works
#using predicted average as mediator
data$cen<-rep(1,20000)
data$y1<-rweibull(20000,shape = 1/p,scale = exp(theta0+theta1*data$drug+
theta2*data$ldlc_change_m+
theta3*data$ldlc_change_m*data$drug+
theta4*data$ldlcb))
#explore
ggplot(data) + geom_point((aes(x = ldlcb,y = ldlc_change,col = drug)))#negative
ggplot(data) + geom_point((aes(x = ldlcb,y = ldlc_change_m,col = drug)))#no relation
ldlcb.to.outcome<-survreg(Surv(y1,cen)~ldlcb,data = data,dist = "weibull")
summary(ldlcb.to.outcome)
block.all<-survreg(Surv(y1,cen)~drug+ldlc_change+ldlcb,data = data,dist = "weibull")
summary(block.all)
fit1<-regmedint(data = data,
## Variables
yvar = "y1",
avar = "drug",
mvar = "ldlc_change",
cvar = c("ldlcb"),
eventvar = "cen",
## Values at which effects are evaluated
a0 = 0,
a1 = 1,
m_cde = 0,
c_cond = 120,
## Model types
mreg = "linear",
#yreg = "survCox",
yreg = "survAFT_weibull",
## Additional specification
interaction = TRUE,
casecontrol = FALSE)
summ<-summary(fit1)
summ
#invastigate if the adjustment works or not using simulation
n<-1000
# Simulation:
# Method:
rd11<-NULL
set.seed(1)
for(j in 1:n){
tryCatch({
print(paste("1",j))
e1.te2<-NULL
e1.te<-NULL
e1.pnde<-NULL
e1.tnde<-NULL
e1.pnie<-NULL
e1.tnie<-NULL
e1.pm<-NULL
e1.beta0<-NULL
e1.beta1<-NULL
e1.beta2<-NULL
e1.sigma<-NULL
e1.theta1<-NULL
e1.theta2<-NULL
e1.theta3<-NULL
e1.theta4<-NULL
e2.te2<-NULL
e2.te<-NULL
e2.pnde<-NULL
e2.tnde<-NULL
e2.pnie<-NULL
e2.tnie<-NULL
e2.pm<-NULL
e2.beta0<-NULL
e2.beta1<-NULL
e2.beta2<-NULL
e2.sigma<-NULL
e2.theta1<-NULL
e2.theta2<-NULL
e2.theta3<-NULL
e2.theta4<-NULL
e3.te2<-NULL
e3.te<-NULL
e3.pnde<-NULL
e3.tnde<-NULL
e3.pnie<-NULL
e3.tnie<-NULL
e3.pm<-NULL
e3.beta0<-NULL
e3.beta1<-NULL
e3.beta2<-NULL
e3.sigma<-NULL
e3.theta1<-NULL
e3.theta2<-NULL
e3.theta3<-NULL
e3.theta4<-NULL
cutoff<-c(90,110,130,150,1000)
for(i in 1:5){
data<-simulation.function.v4(20000,cutoff = cutoff[i])
data$ldlc_change<-data$ldlc12 - data$ldlcb
data$ldlc_change_m<-data$ldlc12m - data$ldlcm
#fit<-lm(data,formula = ldlc_change_m~ldlc_change+ldlcb+drug)
# data$ldlc_change_m_pred<-predict(fit,data)
data$cen<-rep(1,20000)
#data$d<-rep(0,20000)
#data$logldlcb<-log(data$ldlcb)
data$y1<-rweibull(20000,shape = 1/p,scale = exp(theta0+theta1*data$drug+
theta2*data$ldlc_change_m+
theta3*data$ldlc_change_m*data$drug+
theta4*data$ldlcb))
c_median<-summary(data$ldlcb)[3]
c_mean<-summary(data$ldlcb)[4]
fit1<-regmedint(data = data,
## Variables
yvar = "y1",
avar = "drug",
mvar = "ldlc_change",
cvar = c("ldlcb"),
eventvar = "cen",
## Values at which effects are evaluated
a0 = 0,
a1 = 1,
m_cde = 0,
c_cond = 120,
## Model types
mreg = "linear",
#yreg = "survCox",
yreg = "survAFT_weibull",
## Additional specification
interaction = TRUE,
casecontrol = FALSE)
summ1<-summary(fit1)
te.fit<-survreg(Surv(y1,cen)~drug+ldlcb,data = data,dist = "weibull")
e1.te2[i]<-te.fit$coefficients[2]
e1.te[i]<-summ1$summary_myreg[6]
e1.pnde[i]<-summ1$summary_myreg[2]
e1.tnie[i]<-summ1$summary_myreg[3]
e1.tnde[i]<-summ1$summary_myreg[4]
e1.pnie[i]<-summ1$summary_myreg[5]
e1.pm[i]<-summ1$summary_myreg[7]
e1.beta0[i]<-fit1$mreg_fit$coefficients[1]
e1.beta1[i]<-fit1$mreg_fit$coefficients[2]
e1.beta2[i]<-fit1$mreg_fit$coefficients[3]
e1.sigma[i]<-summ1$summary_mreg_fit$sigma
e1.theta1[i]<-fit1$yreg_fit$coefficients[2]
e1.theta2[i]<-fit1$yreg_fit$coefficients[3]
e1.theta3[i]<-fit1$yreg_fit$coefficients[5]
e1.theta4[i]<-fit1$yreg_fit$coefficients[4]
fit2<-regmedint(data = data,
## Variables
yvar = "y1",
avar = "drug",
mvar = "ldlc_change",
cvar = c("ldlcb"),
eventvar = "cen",
## Values at which effects are evaluated
a0 = 0,
a1 = 1,
m_cde = 0,
c_cond = 120,
## Model types
mreg = "linear",
#yreg = "survCox",
yreg = "survAFT_weibull",
## Additional specification
interaction = FALSE,
casecontrol = FALSE)
summ2<-summary(fit2)
e2.te2[i]<-te.fit$coefficients[2]
e2.te[i]<-summ2$summary_myreg[6]
e2.pnde[i]<-summ2$summary_myreg[2]
e2.tnie[i]<-summ2$summary_myreg[3]
e2.tnde[i]<-summ2$summary_myreg[4]
e2.pnie[i]<-summ2$summary_myreg[5]
e2.pm[i]<-summ2$summary_myreg[7]
e2.beta0[i]<-fit2$mreg_fit$coefficients[1]
e2.beta1[i]<-fit2$mreg_fit$coefficients[2]
e2.beta2[i]<-fit2$mreg_fit$coefficients[3]
e2.sigma[i]<-summ2$summary_mreg_fit$sigma
e2.theta1[i]<-fit2$yreg_fit$coefficients[2]
e2.theta2[i]<-fit2$yreg_fit$coefficients[3]
e2.theta3[i]<-fit2$yreg_fit$coefficients[5]
e2.theta4[i]<-fit2$yreg_fit$coefficients[4]
m.fit<-lm(data = data,ldlc_change~ldlcb+drug)
data$ldlc_change_adj<-predict(m.fit,newdata = data)
fit3<-regmedint(data = data,
## Variables
yvar = "y1",
avar = "drug",
mvar = "ldlc_change_adj",
cvar = c("ldlcb"),
eventvar = "cen",
## Values at which effects are evaluated
a0 = 0,
a1 = 1,
m_cde = 0,
c_cond = 120,
## Model types
mreg = "linear",
#yreg = "survCox",
yreg = "survAFT_weibull",
## Additional specification
interaction = TRUE,
casecontrol = FALSE)
summ3<-summary(fit3)
#te.fit<-survreg(Surv(y1,cen)~drug+ldlcb,data = data,dist = "weibull")
# out.fit<-survreg(Surv(y1,cen)~drug+ldlc_change.m+drug*ldlc_change.m, dist="weibull",data = data)
# med.fit<-lm(data = data,ldlc_change ~ drug)
e3.te2[i]<-te.fit$coefficients[2]
e3.te[i]<-summ3$summary_myreg[6]
e3.pnde[i]<-summ3$summary_myreg[2]
e3.tnie[i]<-summ3$summary_myreg[3]
e3.tnde[i]<-summ3$summary_myreg[4]
e3.pnie[i]<-summ3$summary_myreg[5]
e3.pm[i]<-summ3$summary_myreg[7]
e3.beta0[i]<-fit3$mreg_fit$coefficients[1]
e3.beta1[i]<-fit3$mreg_fit$coefficients[2]
e3.beta2[i]<-fit3$mreg_fit$coefficients[3]
e3.sigma[i]<-summ3$summary_mreg_fit$sigma
e3.theta1[i]<-fit3$yreg_fit$coefficients[2]
e3.theta2[i]<-fit3$yreg_fit$coefficients[3]
e3.theta3[i]<-fit3$yreg_fit$coefficients[5]
e3.theta4[i]<-fit3$yreg_fit$coefficients[4]
}
results<-data.frame(cutoff,
e1.te2,e1.te,e1.pnde,e1.tnie,e1.tnde,e1.pnie,e1.pm,
e1.beta0,e1.beta1,e1.beta2,e1.sigma,
e1.theta1,e1.theta2,e1.theta3,e1.theta4,
e2.te2,e2.te,e2.pnde,e2.tnie,e2.tnde,e2.pnie,e2.pm,
e2.beta0,e2.beta1,e2.beta2,e2.sigma,
e2.theta1,e2.theta2,e2.theta3,e2.theta4,
e3.te2,e3.te,e3.pnde,e3.tnie,e3.tnde,e3.pnie,e3.pm,
e3.beta0,e3.beta1,e3.beta2,e3.sigma,
e3.theta1,e3.theta2,e3.theta3,e3.theta4,
interaction = 1)
results$iter<-j
rd11<-rbind(rd11,results)
}, error=function(e){} )
}
rd11$method<-"1_0_adj"#
#e1 Interaction
#e2 No interaction
#e3 adjusted with interaction
setwd("/Users/sh/Documents/GitHub/Mediation-RGTM/")
fwrite(rd11,file = "v2-3.csv")
#v2-0: messed up adj
#v2-1: updated adj, correctly, positive theta2(harmful)
#v2-2: CATE beta1(negative theta2)
#v2-3: f4,
#v2-4: f42, CATE beta1
|
/multiple_measure.R
|
no_license
|
SichengH/Mediation-RGTM
|
R
| false
| false
| 10,637
|
r
|
library(ggplot2)
library(dplyr)
library(data.table)
library(survival)
library(DiagrammeR)
library(regmedint)
## How to solve the phantom direct effect
## 0. Do nothing, nice to have compare
## 1. Have multiple measurement before enrollment
## 2. Have long follow-up, multiple measurement post-randomization
## 3. Both
## 4. Impute from other variable
## 5. Bayesian Stuff
## 6.
#Set Up
# Global simulation parameters. `<<-` exports them to the global env so the
# externally sourced helper simulation.function.v4() can read them.
# p: Weibull AFT dispersion; the outcome uses shape = 1/p below.
p<<-1.4
#outcome regression AFT
theta0<<-7.5
theta1<<-0.0 #direct effect, positive is protective
theta2<<--0.015 #effect * ldlc_change; negative is protective
theta3<<-0 # ldlc_change * drug assume no effect
theta4<<--0.02 # cov
# theta0_ni<<-7.7
# theta1_ni<<-0.59
# theta2_ni<<-0.0032 #should be negative
# LDL-C enrollment cutoffs; 1000 is effectively "no cutoff".
cutoff<-c(90,110,130,150,1000)
#
# One example cohort of 20000 with the loosest cutoff, for exploration.
data<-simulation.function.v4(20000,cutoff = cutoff[5])
# Observed mediator: 12-month change in measured LDL-C.
data$ldlc_change<-data$ldlc12 - data$ldlcb
# Latent mediator: change in the underlying (measurement-error-free) LDL-C.
data$ldlc_change_m<-data$ldlc12m - data$ldlcm
#fit<-lm(data,formula = ldlc_change_m~ldlc_change+ldlcb+drug)
#plot(fit$residuals)
#data$ldlc_change_m_pred<-predict(fit,data)
#plot(data$ldlc_change_m,data$ldlc_change_m_pred)#mean prediction works
#using predicted average as mediator
# No censoring: every subject has an observed event.
data$cen<-rep(1,20000)
# Survival outcome generated from a Weibull AFT on the LATENT mediator.
data$y1<-rweibull(20000,shape = 1/p,scale = exp(theta0+theta1*data$drug+
                                                theta2*data$ldlc_change_m+
                                                theta3*data$ldlc_change_m*data$drug+
                                                theta4*data$ldlcb))
#explore
ggplot(data) + geom_point((aes(x = ldlcb,y = ldlc_change,col = drug)))#negative
ggplot(data) + geom_point((aes(x = ldlcb,y = ldlc_change_m,col = drug)))#no relation
# Marginal association of baseline LDL-C with the outcome.
ldlcb.to.outcome<-survreg(Surv(y1,cen)~ldlcb,data = data,dist = "weibull")
summary(ldlcb.to.outcome)
# Outcome model with drug, observed mediator, and baseline together.
block.all<-survreg(Surv(y1,cen)~drug+ldlc_change+ldlcb,data = data,dist = "weibull")
summary(block.all)
# Example mediation decomposition using the OBSERVED mediator.
fit1<-regmedint(data = data,
                ## Variables
                yvar = "y1",
                avar = "drug",
                mvar = "ldlc_change",
                cvar = c("ldlcb"),
                eventvar = "cen",
                ## Values at which effects are evaluated
                a0 = 0,
                a1 = 1,
                m_cde = 0,
                c_cond = 120,
                ## Model types
                mreg = "linear",
                #yreg = "survCox",
                yreg = "survAFT_weibull",
                ## Additional specification
                interaction = TRUE,
                casecontrol = FALSE)
summ<-summary(fit1)
summ
# Investigate whether the regression adjustment works, by simulation.
# For each replicate and each LDL-C cutoff, three mediation estimators are
# compared on the same simulated cohort:
#   e1: observed mediator (ldlc_change), WITH exposure-mediator interaction
#   e2: observed mediator, WITHOUT interaction
#   e3: regression-adjusted mediator (prediction from ldlcb + drug), WITH
#       interaction
n<-1000  # NOTE(review): defined but unused -- the loop below runs 50
         # replicates, exactly as the original code did.

# Fit one regmedint mediation model on `dat` using mediator column `mvar`.
# All other settings are shared across the three estimators.
fit_mediation <- function(dat, mvar, interaction) {
  regmedint(data = dat,
            ## Variables
            yvar = "y1",
            avar = "drug",
            mvar = mvar,
            cvar = c("ldlcb"),
            eventvar = "cen",
            ## Values at which effects are evaluated
            a0 = 0,
            a1 = 1,
            m_cde = 0,
            c_cond = 120,
            ## Model types
            mreg = "linear",
            yreg = "survAFT_weibull",
            ## Additional specification
            interaction = interaction,
            casecontrol = FALSE)
}

# Extract the effect estimates and model coefficients from one fitted
# regmedint object as a named vector with names "<prefix>.<quantity>".
# `te_fit` is the shared survreg "total effect" model fitted per cutoff.
# NOTE(review): theta3 is read from yreg coefficient 5 even for the
# no-interaction fit (where that entry is NA); this reproduces the original
# extraction exactly.
extract_estimates <- function(fit, te_fit, prefix) {
  summ <- summary(fit)
  est <- unname(c(te_fit$coefficients[2],
                  summ$summary_myreg[6],   # te
                  summ$summary_myreg[2],   # pnde
                  summ$summary_myreg[3],   # tnie
                  summ$summary_myreg[4],   # tnde
                  summ$summary_myreg[5],   # pnie
                  summ$summary_myreg[7],   # pm
                  fit$mreg_fit$coefficients[1],
                  fit$mreg_fit$coefficients[2],
                  fit$mreg_fit$coefficients[3],
                  summ$summary_mreg_fit$sigma,
                  fit$yreg_fit$coefficients[2],
                  fit$yreg_fit$coefficients[3],
                  fit$yreg_fit$coefficients[5],
                  fit$yreg_fit$coefficients[4]))
  names(est) <- paste(prefix,
                      c("te2", "te", "pnde", "tnie", "tnde", "pnie", "pm",
                        "beta0", "beta1", "beta2", "sigma",
                        "theta1", "theta2", "theta3", "theta4"),
                      sep = ".")
  est
}

rd11 <- NULL
set.seed(1)
cutoff <- c(90, 110, 130, 150, 1000)
for (j in 1:50) {
  tryCatch({
    print(paste("1", j))
    rows <- vector("list", length(cutoff))
    for (i in seq_along(cutoff)) {
      # Simulate one cohort and derive mediator / outcome variables,
      # exactly as in the exploratory section above.
      data <- simulation.function.v4(20000, cutoff = cutoff[i])
      data$ldlc_change <- data$ldlc12 - data$ldlcb
      data$ldlc_change_m <- data$ldlc12m - data$ldlcm
      data$cen <- rep(1, 20000)  # no censoring
      data$y1 <- rweibull(20000, shape = 1 / p,
                          scale = exp(theta0 + theta1 * data$drug +
                                        theta2 * data$ldlc_change_m +
                                        theta3 * data$ldlc_change_m * data$drug +
                                        theta4 * data$ldlcb))
      # "Total effect" reference model, shared by e1/e2/e3.
      te.fit <- survreg(Surv(y1, cen) ~ drug + ldlcb, data = data, dist = "weibull")
      # e1 / e2: observed mediator, with and without interaction.
      fit1 <- fit_mediation(data, "ldlc_change", TRUE)
      fit2 <- fit_mediation(data, "ldlc_change", FALSE)
      # e3: mediator replaced by its regression-adjusted prediction.
      m.fit <- lm(data = data, ldlc_change ~ ldlcb + drug)
      data$ldlc_change_adj <- predict(m.fit, newdata = data)
      fit3 <- fit_mediation(data, "ldlc_change_adj", TRUE)
      rows[[i]] <- data.frame(cutoff = cutoff[i],
                              t(c(extract_estimates(fit1, te.fit, "e1"),
                                  extract_estimates(fit2, te.fit, "e2"),
                                  extract_estimates(fit3, te.fit, "e3"))),
                              interaction = 1)
    }
    results <- do.call(rbind, rows)
    results$iter <- j
    rd11 <- rbind(rd11, results)
  }, error = function(e) {})
}
# Label this run's estimation scheme and persist the stacked results.
rd11$method<-"1_0_adj"#
#e1 Interaction
#e2 No interaction
#e3 adjusted with interaction
# NOTE(review): hard-coded absolute path -- works only on the author's
# machine; consider a relative path or here::here().
setwd("/Users/sh/Documents/GitHub/Mediation-RGTM/")
fwrite(rd11,file = "v2-3.csv")
# Changelog of output versions:
#v2-0: messed up adj
#v2-1: updated adj, correctly, positive theta2(harmful)
#v2-2: CATE beta1(negative theta2)
#v2-3: f4,
#v2-4: f42, CATE beta1
|
## compute betweenness centrality scores on dyadic independent igraphs
## (header previously said "eigenvector centrality", but the script uses
## influenceR::betweenness throughout)
## libraries and data
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- avoid in
# scripts meant to be sourced.
rm(list=ls())
library(igraph)
library(influenceR)
#load("dyadic-ind-mod-base-13-igraphs.RData")
# Ten imputed networks, one igraph object per imputation.
igraph_list <- readRDS("ten_imputed_igraphs.RDS")
## compute betweenness centrality
btwn_dyadic_ind_mod_base_13 <- lapply(igraph_list,
                                      function(x)
                                        influenceR::betweenness(x))
# Vertex indices ordered from highest to lowest betweenness, per network.
ordered_btwn_dyadic_ind_mod_base_13 <- lapply(btwn_dyadic_ind_mod_base_13,
                                              function (x)
                                                order(x, decreasing=TRUE)
                                              )
# Keep only the top 300 vertices of each network.
top300_btwn_dyadic_ind_mod_base_13 <- lapply(ordered_btwn_dyadic_ind_mod_base_13,
                                             function (x)
                                               x[1:300]
                                             )
# How often each vertex appears in a top-300 list across the ten networks,
# sorted by frequency.
freq_top300_btwn_dyadic_ind_mod_base_13 <- table(unlist(top300_btwn_dyadic_ind_mod_base_13))
sort_top300_btwn_dyadic_ind_mod_base_13 <- sort(freq_top300_btwn_dyadic_ind_mod_base_13,
                                                decreasing=TRUE)
length(sort_top300_btwn_dyadic_ind_mod_base_13)
# Count high-index vertices (> 298) among the frequent top-300 members --
# presumably these are the imputed ("missing-tie") nodes; confirm against
# the network construction.
names.in.num <- as.numeric(names(sort_top300_btwn_dyadic_ind_mod_base_13))
length(which(names.in.num > 298))
## save
save.image(file="sort_top300_btwn_dyadic_ind_mod_base_13.RData")
saveRDS(sort_top300_btwn_dyadic_ind_mod_base_13, file="top300_btwn_dyad_ind.RDS")
|
/empirical/wave1/dyad-ind-model/compute-influence-metrics/compute-btwn-on-dyad-ind-igraphs.R
|
no_license
|
khanna7/uconnect-missing-ties
|
R
| false
| false
| 1,617
|
r
|
## compute eigenvector centrality scores on dyadic independent igraphs
## libraries and data
rm(list=ls())
library(igraph)
library(influenceR)
#load("dyadic-ind-mod-base-13-igraphs.RData")
igraph_list <- readRDS("ten_imputed_igraphs.RDS")
## compute betweenness centrality
btwn_dyadic_ind_mod_base_13 <- lapply(igraph_list,
function(x)
influenceR::betweenness(x))
ordered_btwn_dyadic_ind_mod_base_13 <- lapply(btwn_dyadic_ind_mod_base_13,
function (x)
order(x, decreasing=TRUE)
)
top300_btwn_dyadic_ind_mod_base_13 <- lapply(ordered_btwn_dyadic_ind_mod_base_13,
function (x)
x[1:300]
)
freq_top300_btwn_dyadic_ind_mod_base_13 <- table(unlist(top300_btwn_dyadic_ind_mod_base_13))
sort_top300_btwn_dyadic_ind_mod_base_13 <- sort(freq_top300_btwn_dyadic_ind_mod_base_13,
decreasing=TRUE)
length(sort_top300_btwn_dyadic_ind_mod_base_13)
names.in.num <- as.numeric(names(sort_top300_btwn_dyadic_ind_mod_base_13))
length(which(names.in.num > 298))
## save
save.image(file="sort_top300_btwn_dyadic_ind_mod_base_13.RData")
saveRDS(sort_top300_btwn_dyadic_ind_mod_base_13, file="top300_btwn_dyad_ind.RDS")
|
# Fuzz-regression input for the internal multivariance:::match_rows helper:
# A is a 1x2 matrix of extreme-magnitude doubles, B a 1x1 zero matrix.
fuzz_args <- list(
  A = structure(c(6.46645194387231e-200, 5.15454813856261e-88), .Dim = 1:2),
  B = structure(0, .Dim = c(1L, 1L))
)
result <- multivariance:::match_rows(A = fuzz_args$A, B = fuzz_args$B)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613103180-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 195
|
r
|
testlist <- list(A = structure(c(6.46645194387231e-200, 5.15454813856261e-88 ), .Dim = 1:2), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
# Run statcheck over a user-chosen plain-text manuscript.
# Install the dependencies only when missing -- the original called
# install.packages() unconditionally on every run.
if (!requireNamespace("statcheck", quietly = TRUE)) {
  install.packages("statcheck")
}
library("statcheck")
if (!requireNamespace("readr", quietly = TRUE)) {
  install.packages("readr")
}
library(readr)
#the file needs to be in .txt format for this to work
txt <- read_file(file.choose())
statcheck(txt)
|
/StatsCheck.R
|
no_license
|
TarenRohovit/ArchitectExpertise
|
R
| false
| false
| 194
|
r
|
install.packages("statcheck")
library("statcheck")
install.packages("readr")
library(readr)
#the file needs to be in .txt format for this to work
txt <- read_file(file.choose())
statcheck(txt)
|
# Build the Colley-rating-difference feature for every matchup ID appearing
# in either the training targets or the stage-2 submission sample.
library(tidyverse)
tmp <- read_csv("data/processed/colley.csv")
target <- read_csv("data/processed/target.csv")
sample <- read_csv("data/WSampleSubmissionStage2.csv")
# Matchup IDs are fixed-width: "SSSS_TTTT_UUUU" (season, team1, team2).
matchups <- distinct(bind_rows(target, sample), ID)
matchups <- mutate(matchups,
                   Season = as.integer(str_sub(ID, 1, 4)),
                   team1 = as.integer(str_sub(ID, 6, 9)),
                   team2 = as.integer(str_sub(ID, 11, 14)))
# Attach each team's Colley rating (suffixed .x / .y by the joins), then
# keep only the ID and the rating difference.
matchups <- left_join(matchups, tmp, by = c("Season", "team1" = "TeamID"))
matchups <- left_join(matchups, tmp, by = c("Season", "team2" = "TeamID"))
matchups <- select(matchups, -Season, -team1, -team2)
fe <- transmute(matchups,
                ID,
                colley_r_diff = colley_r.x - colley_r.y)
write_csv(fe, "data/features/colley_fe.csv")
rm(tmp, fe, matchups);gc()
|
/名称未設定フォルダ/src/features/colley_fe.R
|
no_license
|
kur0cky/NCAA2019w
|
R
| false
| false
| 675
|
r
|
library(tidyverse)
tmp <- read_csv("data/processed/colley.csv")
target <- read_csv("data/processed/target.csv")
sample <- read_csv("data/WSampleSubmissionStage2.csv")
fe <- target %>%
bind_rows(sample) %>%
distinct(ID) %>%
mutate(Season = as.integer(str_sub(ID, 1, 4)),
team1 = as.integer(str_sub(ID, 6, 9)),
team2 = as.integer(str_sub(ID, 11, 14))) %>%
left_join(tmp, by = c("Season", "team1" = "TeamID")) %>%
left_join(tmp, by = c("Season", "team2" = "TeamID")) %>%
select(-Season, -team1, -team2) %>%
transmute(ID,
colley_r_diff = colley_r.x - colley_r.y)
write_csv(fe, "data/features/colley_fe.csv")
rm(tmp, fe);gc()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/privacy_params.R
\docType{class}
\name{DPParamsEps-class}
\alias{DPParamsEps-class}
\alias{DPParamsEps}
\alias{show,DPParamsEps-method}
\alias{getEpsilon,DPParamsEps-method}
\alias{setEpsilon<-,DPParamsEps-method}
\alias{toGamma,DPParamsEps,numeric-method}
\title{An S4 class for basic differential privacy parameters.}
\usage{
\S4method{show}{DPParamsEps}(object)
\S4method{getEpsilon}{DPParamsEps}(object)
\S4method{setEpsilon}{DPParamsEps}(object) <- value
\S4method{toGamma}{DPParamsEps,numeric}(object, gamma)
}
\arguments{
\item{object}{an object of class \code{\link{DPParamsEps}}.}
\item{value}{a scalar numeric \eqn{\epsilon}.}
\item{gamma}{a scalar numeric \eqn{\gamma}.}
}
\description{
An S4 base class representing the basic privacy parameter \eqn{\epsilon} in
differential privacy.
}
\section{Methods (by generic)}{
\itemize{
\item \code{show}: automatically prints the object.
\item \code{getEpsilon}: getter for slot \code{epsilon}.
\item \code{setEpsilon<-}: setter for slot \code{epsilon}.
\item \code{toGamma}: returns object to corresponding instance of subclass
\code{\link{DPParamsGam}}.
}}
\section{Slots}{
\describe{
\item{\code{epsilon}}{positive scalar numeric privacy level.}
}}
\seealso{
\code{\link{DPParamsDel}} subclass for \eqn{(\epsilon,\delta)}
relaxation, \code{\link{DPParamsGam}} subclass for random relaxation.
}
|
/man/DPParamsEps-class.Rd
|
permissive
|
cran/diffpriv
|
R
| false
| true
| 1,495
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/privacy_params.R
\docType{class}
\name{DPParamsEps-class}
\alias{DPParamsEps-class}
\alias{DPParamsEps}
\alias{show,DPParamsEps-method}
\alias{getEpsilon,DPParamsEps-method}
\alias{setEpsilon<-,DPParamsEps-method}
\alias{toGamma,DPParamsEps,numeric-method}
\title{An S4 class for basic differential privacy parameters.}
\usage{
\S4method{show}{DPParamsEps}(object)
\S4method{getEpsilon}{DPParamsEps}(object)
\S4method{setEpsilon}{DPParamsEps}(object) <- value
\S4method{toGamma}{DPParamsEps,numeric}(object, gamma)
}
\arguments{
\item{object}{an object of class \code{\link{DPParamsEps}}.}
\item{value}{a scalar numeric \eqn{\epsilon}.}
\item{gamma}{a scalar numeric \eqn{\gamma}.}
}
\description{
An S4 base class representing the basic privacy parameter \eqn{\epsilon} in
differential privacy.
}
\section{Methods (by generic)}{
\itemize{
\item \code{show}: automatically prints the object.
\item \code{getEpsilon}: getter for slot \code{epsilon}.
\item \code{setEpsilon<-}: setter for slot \code{epsilon}.
\item \code{toGamma}: returns object to corresponding instance of subclass
\code{\link{DPParamsGam}}.
}}
\section{Slots}{
\describe{
\item{\code{epsilon}}{positive scalar numeric privacy level.}
}}
\seealso{
\code{\link{DPParamsDel}} subclass for \eqn{(\epsilon,\delta)}
relaxation, \code{\link{DPParamsGam}} subclass for random relaxation.
}
|
# Generic "bind": concatenate several audio objects into one, in argument
# order, with `object` first.
setGeneric("bind",
    function(object, ...) standardGeneric("bind"))
# Wave method: append the left-channel (and, for stereo objects, the
# right-channel) samples of all arguments after those of `object`.
# equalWave() (defined elsewhere in the package) presumably validates that
# each argument is compatible with `object` -- confirm in its definition.
setMethod("bind", signature(object = "Wave"),
function(object, ...){
    allobjects <- as.list(list(...))
    lapply(allobjects, equalWave, object)
    allobjects <- c(list(object), allobjects)
    object@left <- unlist(lapply(allobjects, slot, "left"))
    if(object@stereo)
        object@right <- unlist(lapply(allobjects, slot, "right"))
    return(object)
}
)
# WaveMC method: row-bind the objects' .Data slots, stacking the samples of
# each object after those of `object`.
setMethod("bind", signature(object = "WaveMC"),
function(object, ...){
    allobjects <- as.list(list(...))
    lapply(allobjects, equalWave, object)
    allobjects <- c(list(object), allobjects)
    object@.Data <- do.call(rbind, allobjects)
    return(object)
}
)
|
/R/bind.R
|
no_license
|
cran/tuneR
|
R
| false
| false
| 725
|
r
|
setGeneric("bind",
function(object, ...) standardGeneric("bind"))
setMethod("bind", signature(object = "Wave"),
function(object, ...){
allobjects <- as.list(list(...))
lapply(allobjects, equalWave, object)
allobjects <- c(list(object), allobjects)
object@left <- unlist(lapply(allobjects, slot, "left"))
if(object@stereo)
object@right <- unlist(lapply(allobjects, slot, "right"))
return(object)
}
)
setMethod("bind", signature(object = "WaveMC"),
function(object, ...){
allobjects <- as.list(list(...))
lapply(allobjects, equalWave, object)
allobjects <- c(list(object), allobjects)
object@.Data <- do.call(rbind, allobjects)
return(object)
}
)
|
# plot4.R -- draw the 2x2 panel of household power-consumption plots for
# 2007-02-01 and 2007-02-02 and save it to plot4.png.
# Loading data ("?" marks missing values in this dataset)
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   stringsAsFactors = FALSE, dec = ".", na.strings = "?")
summary(data)
#Subsetting the data from 2007-02-01 and 2007-02-02 (dates are d/m/yyyy)
subsetdata <- data[data$Date %in% c("1/2/2007", "2/2/2007"),]
# Build the date-time axis -- the original script plotted against a
# `datetime` variable that was never defined.
datetime <- strptime(paste(subsetdata$Date, subsetdata$Time),
                     format = "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subsetdata$Global_active_power)
globalReactivePower <- as.numeric(subsetdata$Global_reactive_power)
voltage <- as.numeric(subsetdata$Voltage)
subMetering1 <- as.numeric(subsetdata$Sub_metering_1)
subMetering2 <- as.numeric(subsetdata$Sub_metering_2)
subMetering3 <- as.numeric(subsetdata$Sub_metering_3)
# PLOT 4 -- open the PNG device BEFORE drawing; the original opened it
# after plotting, which produced an empty file.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type = "l", xlab = "", ylab = "Global Active Power")
plot(datetime, voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(datetime, subMetering1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(datetime, subMetering2, type = "l", col = "red")
lines(datetime, subMetering3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
plot(datetime, globalReactivePower, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
ssanjel1/ExData_Plotting1
|
R
| false
| false
| 1,367
|
r
|
# Loading data
data <- read.table("household_power_consumption.txt", header = TRUE, sep =";", stringsAsFactors = FALSE, dec =".")
summary(data)
#Subsetting the data from 2007-02-01 and 2007-02-02
subsetdata <- data[data$Date %in% c("1/2/2007", "2/2/2007"),]
globalActivePower <- as.numeric(subsetdata$Global_active_power)
globalReactivePower <- as.numeric(subsetdata$Global_reactive_power)
globalReactivePower <- as.numeric(subsetdata$Global_reactive_power)
voltage <- as.numeric(subsetdata$Voltage)
subMetering1 <- as.numeric(subsetdata$Sub_metering_1)
subMetering2 <- as.numeric(subsetdata$Sub_metering_2)
subMetering3 <- as.numeric(subsetdata$Sub_metering_3)
# PLOT 4
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type ="l", xlab ="", ylab = "Clobal Active Power")
plot(datetime, voltage, type ="l", xlab = "datetime", ylab = "Voltage")
plot(datetime, subMetering1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(datetime, subMetering2, type = "l", col = "red")
lines(datetime, subMetering3, type ="l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
plot(datetime, globalReactivePower, type ="l", xlab = "datetime", ylab ="Global_reactive_power")
png("plot4.png", width = 480, height = 480)
dev.off()
|
# ELS calculator
# Sofia Airola 2016
# sofia.airola@hespartto.fi
# COUNTTOTMASS: computes the total biomass for each observation day
# dataTable = table with the relevant variables and dates; returns a table of the same shape
countTotMass <- function(dataTable) {
  # if there is no data, do not compute anything -- return an empty frame
  # with the expected columns (station, date, day, month, year, biomass)
  if (nrow(dataTable) == 0) {
    newTable = data.frame(asema = numeric(), pvm = numeric(), paiva = numeric(), kk = numeric(), vuosi = numeric(), biomassa = numeric())
    return (newTable)
  }
  # stations (asema) as a vector
  stations = 0
  stations <- unique(dataTable$asema)
  newTable = numeric()
  for (i in 1:length(stations)) {
    tempTable <- subset(dataTable, asema == stations[i])
    # dates (pvm) that occur more than once, as a sorted vector
    dups <- sort(unique(tempTable[duplicated(tempTable$pvm),]$pvm))
    # combine the different observations of the same day into a sum
    # NOTE(review): rows are overwritten in place while later rows of the
    # same date are still summed, so later duplicates get corrupted sums;
    # only the FIRST row per date (computed before any same-date overwrite)
    # survives the de-duplication below, so the kept values are correct --
    # but this ordering is fragile.
    for (j in 1:nrow(tempTable)) {
      if (tempTable$pvm[j] %in% dups) {
        temp = sum(subset(tempTable, (pvm == tempTable$pvm[j]))$biomassa, na.rm = TRUE)
        # conversion to milligrams
        temp = temp/1000
        if (!is.nan(temp)) {
          tempTable$biomassa[j] = temp
        } else tempTable$biomassa[j] = tempTable$biomassa[j]/1000
      } else tempTable$biomassa[j] = tempTable$biomassa[j]/1000
    }
    # remove the observations with duplicated dates; only the first
    # (now holding the sum) remains
    tempTable <- tempTable[!duplicated(tempTable$pvm),]
    newTable <- rbind(newTable, tempTable)
  }
  return (newTable)
}
|
/Skriptit/biomassa.r
|
no_license
|
ficusvirens/ELS-laskuri
|
R
| false
| false
| 1,534
|
r
|
# ELS-laskuri
# Sofia Airola 2016
# sofia.airola@hespartto.fi
# COUNTTOTMASS: laskee kullekin havaintopäivälle kokonaisbiomassan
# dataTable = taulukko, jossa oleelliset muuttujat ja päivämäärät; palauttaa samanmuotoisen taulukon
countTotMass <- function(dataTable) {
# jos dataa ei ole, ei lasketa mitään
if (nrow(dataTable) == 0) {
newTable = data.frame(asema = numeric(), pvm = numeric(), paiva = numeric(), kk = numeric(), vuosi = numeric(), biomassa = numeric())
return (newTable)
}
# tästä asemat vektorina
stations = 0
stations <- unique(dataTable$asema)
newTable = numeric()
for (i in 1:length(stations)) {
tempTable <- subset(dataTable, asema == stations[i])
# tästä duplikaattipäivämäärät vektorina
dups <- sort(unique(tempTable[duplicated(tempTable$pvm),]$pvm))
# yhdistetään saman päivän eri havainnot summaksi
for (j in 1:nrow(tempTable)) {
if (tempTable$pvm[j] %in% dups) {
temp = sum(subset(tempTable, (pvm == tempTable$pvm[j]))$biomassa, na.rm = TRUE)
# muunto milligrammoiksi
temp = temp/1000
if (!is.nan(temp)) {
tempTable$biomassa[j] = temp
} else tempTable$biomassa[j] = tempTable$biomassa[j]/1000
} else tempTable$biomassa[j] = tempTable$biomassa[j]/1000
}
# poistetaan duplikaattipäivämäärälliset havainnot, vain ensimmäinen (nyt summa) jää
tempTable <- tempTable[!duplicated(tempTable$pvm),]
newTable <- rbind(newTable, tempTable)
}
return (newTable)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nndup.R
\name{nndup}
\alias{nndup}
\title{Determine duplicates in nearest neighbor list}
\usage{
nndup(nn, N = max(unlist(nn)))
}
\arguments{
\item{nn}{A list of nearest neighbors.}
\item{N}{The largest value in \code{nn}.}
}
\value{
A logical vector indicating duplicate indices.
}
\description{
\code{nndup} determines the indices of duplicated
elements for a nearest neighbors list created by a
function such as \code{\link{nnpop}} or
\code{\link{knn}}. The indices are related to the list
returned by \code{\link{nn2zones}}.
}
\examples{
nn = list(1:3, c(2:1, 4))
nndup(nn, 4)
}
|
/smerc/man/nndup.Rd
|
no_license
|
akhikolla/InformationHouse
|
R
| false
| true
| 693
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nndup.R
\name{nndup}
\alias{nndup}
\title{Determine duplicates in nearest neighbor list}
\usage{
nndup(nn, N = max(unlist(nn)))
}
\arguments{
\item{nn}{A list of nearest neighbors.}
\item{N}{The largest value in \code{nn}.}
}
\value{
A logical vector indicating duplicate indices.
}
\description{
\code{nndup} determines the indices of duplicated
elements for a nearest neighbors list created by a
function such as \code{\link{nnpop}} or
\code{\link{knn}}. The indices are related to the list
returned by \code{\link{nn2zones}}.
}
\examples{
nn = list(1:3, c(2:1, 4))
nndup(nn, 4)
}
|
##1 One-sample t-test: battery working time (hours)
time <- c(35.34, 36.26, 30.54, 38.2, 37.59, 39.18, 33.16, 34.23, 27.9, 36.33,
          32.39, 34.89, 35.7, 31.99, 34.03)
(t.test(time, mu=35, conf.level=0.95))
# H0: mu = 35, H1: mu != 35
# p > alpha, so there is no basis to reject the null hypothesis:
# the mean battery working time does not differ from 35 hours.
##2 One-sided one-sample t-test
con <- c(101.1, 105.7, 102.6, 113.4, 98.1)
t.test(con, mu = 127.7, conf.level = 0.99, alternative ="less")
# p < alpha -> reject H0
##3 Two-sample t-test: class I vs class II achievements
# FIX: the original had "4,05" (decimal comma typo), which silently split
# the single value 4.05 into the two values 4 and 5, giving k1 sixteen
# elements instead of the intended fifteen.
k1 <- c(3.71, 4.28, 2.95, 3.2, 3.38, 4.05, 4.07, 4.98, 3.2, 3.43, 3.09, 4.5,
        3.12, 3.68, 3.9)
k2 <- c(3.1, 3.38, 4.06, 3.6, 3.81, 4.5, 4, 3.25, 4.11, 4.85, 2.8, 4)
var.test(k1,k2)  # check equality of variances before pooling
t.test(k1, k2, var.equal=TRUE)
# H1 (mu1 != mu2): the achievements of class I and class II differ
# and cannot be considered the same.
##4 Two-sample t-test on the paired-design measurements
r1 <- c(220, 185, 270, 285, 200, 295, 255, 190, 225, 230)
r2 <- c(190, 175, 215, 260, 215, 195, 260, 150, 155, 175)
t.test(r1, r2, var.equal=TRUE)
#qqnorm(con)
#shapiro.test(con)
|
/LAB8/LAB.R
|
no_license
|
PWilkosz99/StatisticalDataAnalysisPW
|
R
| false
| false
| 1,027
|
r
|
##1
time <- c(35.34, 36.26, 30.54, 38.2, 37.59, 39.18, 33.16, 34.23, 27.9, 36.33, 32.39, 34.89, 35.7, 31.99, 34.03)
(t.test(time, mu=35, conf.level=0.95))
#H0: μ = 35 , H1: μ =! 35
#p > a więc nie ma podstaw do odrzucenia hipotezy zerowej,
#co oznacza, że przeciętny czas pracy w godzinach
#baterjyki nie różni się od 35
##2
con <- c(101.1, 105.7, 102.6, 113.4, 98.1)
t.test(con, mu = 127.7, conf.level = 0.99, alternative ="less")
#p<a -> odrzucamy H0
##3
k1 <- c(3.71, 4.28, 2.95, 3.2, 3.38, 4,05, 4.07, 4.98, 3.2, 3.43, 3.09, 4.5, 3.12, 3.68, 3.9)
k2 <- c(3.1, 3.38, 4.06, 3.6, 3.81, 4.5, 4, 3.25, 4.11, 4.85, 2.8, 4.)
var.test(k1,k2)
t.test(k1, k2, var.equal=TRUE)
#H1: (𝝁𝟏≠𝝁𝟐 😞osiągnięcia klasy I i klasy II różnią się i nie można ich uznać za takie same
##4
r1 <- c(220, 185, 270, 285, 200, 295, 255, 190, 225, 230)
r2 <- c(190, 175, 215, 260, 215, 195, 260, 150, 155, 175)
t.test(r1, r2, var.equal=TRUE)
#qqnorm(con)
#shapiro.test(con)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Assert that `array_exp` is an Arrow datum of boolean type whose
# materialized R contents are identical to the plain-R result `r_exp`.
expect_bool_function_equal <- function(array_exp, r_exp) {
  # Assert that the Array operation returns a boolean array
  # and that its contents are equal to expected
  expect_r6_class(array_exp, "ArrowDatum")
  expect_type_equal(array_exp, bool())
  expect_identical(as.vector(array_exp), r_exp)
}
# Exercise all six comparison operators on the Arrow datum `x` against
# `compared_to`, checking each result against the same comparison applied
# to the materialized R vectors.
expect_array_compares <- function(x, compared_to) {
  lhs <- as.vector(x)
  rhs <- as.vector(compared_to)
  comparison_ops <- list(`==`, `!=`, `>`, `>=`, `<`, `<=`)
  for (op in comparison_ops) {
    expect_bool_function_equal(op(x, compared_to), op(lhs, rhs))
  }
}
# Comparisons against scalars, R values (implicit cast), and other Arrays;
# NA inputs exercise null propagation.
test_that("compare ops with Array", {
  a <- Array$create(1:5)
  expect_array_compares(a, 4L)
  expect_array_compares(a, 4) # implicit casting
  expect_array_compares(a, Scalar$create(4))
  expect_array_compares(Array$create(c(NA, 1:5)), 4)
  expect_array_compares(Array$create(as.numeric(c(NA, 1:5))), 4)
  expect_array_compares(Array$create(c(NA, 1:5)), Array$create(rev(c(NA, 1:5))))
  expect_array_compares(Array$create(c(NA, 1:5)), Array$create(rev(c(NA, 1:5)), type = double()))
})
# Same comparisons where one or both sides are chunked, including mixed
# Array/ChunkedArray operands and differing chunk layouts.
test_that("compare ops with ChunkedArray", {
  expect_array_compares(ChunkedArray$create(1:3, 4:5), 4L)
  expect_array_compares(ChunkedArray$create(1:3, 4:5), 4) # implicit casting
  expect_array_compares(ChunkedArray$create(1:3, 4:5), Scalar$create(4))
  expect_array_compares(ChunkedArray$create(c(NA, 1:3), 4:5), 4)
  expect_array_compares(
    ChunkedArray$create(c(NA, 1:3), 4:5),
    ChunkedArray$create(4:5, c(NA, 1:3))
  )
  expect_array_compares(
    ChunkedArray$create(c(NA, 1:3), 4:5),
    Array$create(c(NA, 1:5))
  )
  expect_array_compares(
    Array$create(c(NA, 1:5)),
    ChunkedArray$create(c(NA, 1:3), 4:5)
  )
})
# Boolean operators over the full TRUE/FALSE/NA truth table.
test_that("logic ops with Array", {
  truth <- expand.grid(left = c(TRUE, FALSE, NA), right = c(TRUE, FALSE, NA))
  a_left <- Array$create(truth$left)
  a_right <- Array$create(truth$right)
  expect_bool_function_equal(a_left & a_right, truth$left & truth$right)
  expect_bool_function_equal(a_left | a_right, truth$left | truth$right)
  expect_bool_function_equal(a_left == a_right, truth$left == truth$right)
  expect_bool_function_equal(a_left != a_right, truth$left != truth$right)
  expect_bool_function_equal(!a_left, !truth$left)
  # More complexity
  isEqualTo <- function(x, y) x == y & !is.na(x)
  expect_bool_function_equal(
    isEqualTo(a_left, a_right),
    isEqualTo(truth$left, truth$right)
  )
})
# Same truth table for chunked inputs.
test_that("logic ops with ChunkedArray", {
  truth <- expand.grid(left = c(TRUE, FALSE, NA), right = c(TRUE, FALSE, NA))
  a_left <- ChunkedArray$create(truth$left)
  a_right <- ChunkedArray$create(truth$right)
  expect_bool_function_equal(a_left & a_right, truth$left & truth$right)
  expect_bool_function_equal(a_left | a_right, truth$left | truth$right)
  expect_bool_function_equal(a_left == a_right, truth$left == truth$right)
  expect_bool_function_equal(a_left != a_right, truth$left != truth$right)
  expect_bool_function_equal(!a_left, !truth$left)
  # More complexity
  isEqualTo <- function(x, y) x == y & !is.na(x)
  expect_bool_function_equal(
    isEqualTo(a_left, a_right),
    isEqualTo(truth$left, truth$right)
  )
})
# Argument validation: wrong types, mismatched lengths, and arity errors
# must raise informative messages; expect_error(..., NA) asserts NO error.
test_that("call_function validation", {
  expect_error(
    call_function("filter", 4),
    'Argument 1 is of class numeric but it must be one of "Array", "ChunkedArray", "RecordBatch", "Table", or "Scalar"'
  )
  expect_error(
    call_function("filter", Array$create(1:4), 3),
    "Argument 2 is of class numeric"
  )
  expect_error(
    call_function("filter",
      Array$create(1:4),
      Array$create(c(TRUE, FALSE, TRUE)),
      options = list(keep_na = TRUE)
    ),
    "arguments must all be the same length"
  )
  expect_error(
    call_function("filter",
      record_batch(a = 1:3),
      Array$create(c(TRUE, FALSE, TRUE)),
      options = list(keep_na = TRUE)
    ),
    NA
  )
  expect_error(
    call_function("filter", options = list(keep_na = TRUE)),
    "accepts 2 arguments"
  )
})
|
/r/tests/testthat/test-compute-vector.R
|
permissive
|
wesm/arrow
|
R
| false
| false
| 5,072
|
r
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
expect_bool_function_equal <- function(array_exp, r_exp) {
# Assert that the Array operation returns a boolean array
# and that its contents are equal to expected
expect_r6_class(array_exp, "ArrowDatum")
expect_type_equal(array_exp, bool())
expect_identical(as.vector(array_exp), r_exp)
}
expect_array_compares <- function(x, compared_to) {
r_values <- as.vector(x)
r_compared_to <- as.vector(compared_to)
# Iterate over all comparison functions
expect_bool_function_equal(x == compared_to, r_values == r_compared_to)
expect_bool_function_equal(x != compared_to, r_values != r_compared_to)
expect_bool_function_equal(x > compared_to, r_values > r_compared_to)
expect_bool_function_equal(x >= compared_to, r_values >= r_compared_to)
expect_bool_function_equal(x < compared_to, r_values < r_compared_to)
expect_bool_function_equal(x <= compared_to, r_values <= r_compared_to)
}
test_that("compare ops with Array", {
a <- Array$create(1:5)
expect_array_compares(a, 4L)
expect_array_compares(a, 4) # implicit casting
expect_array_compares(a, Scalar$create(4))
expect_array_compares(Array$create(c(NA, 1:5)), 4)
expect_array_compares(Array$create(as.numeric(c(NA, 1:5))), 4)
expect_array_compares(Array$create(c(NA, 1:5)), Array$create(rev(c(NA, 1:5))))
expect_array_compares(Array$create(c(NA, 1:5)), Array$create(rev(c(NA, 1:5)), type = double()))
})
test_that("compare ops with ChunkedArray", {
expect_array_compares(ChunkedArray$create(1:3, 4:5), 4L)
expect_array_compares(ChunkedArray$create(1:3, 4:5), 4) # implicit casting
expect_array_compares(ChunkedArray$create(1:3, 4:5), Scalar$create(4))
expect_array_compares(ChunkedArray$create(c(NA, 1:3), 4:5), 4)
expect_array_compares(
ChunkedArray$create(c(NA, 1:3), 4:5),
ChunkedArray$create(4:5, c(NA, 1:3))
)
expect_array_compares(
ChunkedArray$create(c(NA, 1:3), 4:5),
Array$create(c(NA, 1:5))
)
expect_array_compares(
Array$create(c(NA, 1:5)),
ChunkedArray$create(c(NA, 1:3), 4:5)
)
})
test_that("logic ops with Array", {
truth <- expand.grid(left = c(TRUE, FALSE, NA), right = c(TRUE, FALSE, NA))
a_left <- Array$create(truth$left)
a_right <- Array$create(truth$right)
expect_bool_function_equal(a_left & a_right, truth$left & truth$right)
expect_bool_function_equal(a_left | a_right, truth$left | truth$right)
expect_bool_function_equal(a_left == a_right, truth$left == truth$right)
expect_bool_function_equal(a_left != a_right, truth$left != truth$right)
expect_bool_function_equal(!a_left, !truth$left)
# More complexity
isEqualTo <- function(x, y) x == y & !is.na(x)
expect_bool_function_equal(
isEqualTo(a_left, a_right),
isEqualTo(truth$left, truth$right)
)
})
test_that("logic ops with ChunkedArray", {
truth <- expand.grid(left = c(TRUE, FALSE, NA), right = c(TRUE, FALSE, NA))
a_left <- ChunkedArray$create(truth$left)
a_right <- ChunkedArray$create(truth$right)
expect_bool_function_equal(a_left & a_right, truth$left & truth$right)
expect_bool_function_equal(a_left | a_right, truth$left | truth$right)
expect_bool_function_equal(a_left == a_right, truth$left == truth$right)
expect_bool_function_equal(a_left != a_right, truth$left != truth$right)
expect_bool_function_equal(!a_left, !truth$left)
# More complexity
isEqualTo <- function(x, y) x == y & !is.na(x)
expect_bool_function_equal(
isEqualTo(a_left, a_right),
isEqualTo(truth$left, truth$right)
)
})
test_that("call_function validation", {
expect_error(
call_function("filter", 4),
'Argument 1 is of class numeric but it must be one of "Array", "ChunkedArray", "RecordBatch", "Table", or "Scalar"'
)
expect_error(
call_function("filter", Array$create(1:4), 3),
"Argument 2 is of class numeric"
)
expect_error(
call_function("filter",
Array$create(1:4),
Array$create(c(TRUE, FALSE, TRUE)),
options = list(keep_na = TRUE)
),
"arguments must all be the same length"
)
expect_error(
call_function("filter",
record_batch(a = 1:3),
Array$create(c(TRUE, FALSE, TRUE)),
options = list(keep_na = TRUE)
),
NA
)
expect_error(
call_function("filter", options = list(keep_na = TRUE)),
"accepts 2 arguments"
)
})
|
# Simulation driver: for each weighting scheme (wtinput in {0, 1}), run 50
# replicates over 8 heterogeneity levels and write one result table per level.
set.seed(1)
peddir="./data/ped.infor"
progdir="./progfinal/";
chrsinput="./data/CHR1.RData";
# Simulation2() and related helpers are defined in this sourced file.
source(paste(progdir,"FunctionSeqFam.r",sep=""))
nlocsave=3
nloctrue=1
effect=2
ndrop=1
#lprop=132
sd.err=1
pedscheme=2
Noff1=4
Noff2=0
pfamily=2/3
binary=FALSE
scheme="Equal"
out_type="C"
drop.effect=0.01
# Heterogeneity proportions: 1, 1/2, 1/4, ..., 1/128.
HeterAll=1/c(1,2,4,8,16,32,64,128)
#heter.prop=1
HeterMax=max(1/HeterAll);
wtinputs=c(0,1)
for(i in 1:length(wtinputs))
{
Result=list();
wtinput=wtinputs[i];
cat(i,'\n')
for(j in 1:50)
{
cat(j,'\n')
for(k in 1:length(HeterAll))
{
heter.prop=HeterAll[k];lprop=HeterMax*heter.prop;
# First heterogeneity level: run Simulation2 without GenoGenerate and cache
# its index/model in DataPast for reuse at later levels. lprop is overridden
# to 32 here (and to 16 for k == 2) -- presumably deliberate tuning of the
# default HeterMax*heter.prop value; confirm against the study design.
if(k==1)
{
GenoGenerate=FALSE;
lprop=32;
re=Simulation2(chrsinput=chrsinput,nlocsave=nlocsave,nloctrue=nloctrue,effect=effect,peddir=peddir,ndrop=ndrop,drop.effect=drop.effect,lprop=lprop,sd.err=sd.err,wtinput=wtinput,pedscheme=pedscheme,Noff1=Noff1,Noff2=Noff2,pfamily=pfamily,binary=binary,scheme=scheme,X=NULL,heter.prop=heter.prop)
DataPast=list();
DataPast$index=re$index;
DataPast$model=re$model;
if(j==1) Result[[k]]=re$result;
if(j!=1) Result[[k]]=rbind(Result[[k]],re$result);
}
# Later levels: pass GenoGenerate=TRUE plus the cached DataPast -- this
# looks like it reuses the k == 1 genotype data; verify in FunctionSeqFam.r.
if(k!=1)
{
GenoGenerate=TRUE;
if(k==2) lprop=16;
re=Simulation2(chrsinput=chrsinput,nlocsave=nlocsave,nloctrue=nloctrue,effect=effect,peddir=peddir,ndrop=ndrop,drop.effect=drop.effect,lprop=lprop,sd.err=sd.err,wtinput=wtinput,pedscheme=pedscheme,Noff1=Noff1,Noff2=Noff2,pfamily=pfamily,binary=binary,scheme=scheme,X=NULL,heter.prop=heter.prop,GenoGenerate=GenoGenerate,DataPast=DataPast)
if(j==1) Result[[k]]=re$result;
if(j!=1) Result[[k]]=rbind(Result[[k]],re$result);
}
}
}
# One output file per heterogeneity level, e.g. "He4Drop0.01Wt0Ped2.txt".
for(k in 1:length(HeterAll))
{
write.table(Result[[k]],paste("He",1/HeterAll[k],"Drop",drop.effect,"Wt",wtinput,"Ped2",".txt",sep=""),quote=F,row.names=F)
}
}
|
/SampleCodes/Sim2Demo.r
|
no_license
|
YaluWen/FRF
|
R
| false
| false
| 1,809
|
r
|
## Simulation study driver (pedigree scheme 2): for each weighting scheme
## (wtinput) and each heterogeneity level, run 50 replicates of Simulation2()
## from FunctionSeqFam.r, accumulate per-replicate result rows, and write one
## output table per heterogeneity level.
set.seed(1)

## Input locations
peddir <- "./data/ped.infor"      # pedigree information file
progdir <- "./progfinal/"         # directory holding FunctionSeqFam.r
chrsinput <- "./data/CHR1.RData"  # chromosome data used by Simulation2
source(paste0(progdir, "FunctionSeqFam.r"))

## Parameters forwarded to Simulation2
nlocsave <- 3
nloctrue <- 1
effect <- 2
ndrop <- 1
#lprop=132
sd.err <- 1
pedscheme <- 2
Noff1 <- 4
Noff2 <- 0
pfamily <- 2/3
binary <- FALSE
scheme <- "Equal"
out_type <- "C"
drop.effect <- 0.01

## Heterogeneity proportions to explore: 1, 1/2, ..., 1/128
HeterAll <- 1 / c(1, 2, 4, 8, 16, 32, 64, 128)
#heter.prop=1
HeterMax <- max(1 / HeterAll)
wtinputs <- c(0, 1)

for (i in seq_along(wtinputs)) {
  Result <- list()
  wtinput <- wtinputs[i]
  cat(i, '\n')
  for (j in 1:50) {  # 50 replicates per setting
    cat(j, '\n')
    for (k in seq_along(HeterAll)) {
      heter.prop <- HeterAll[k]
      lprop <- HeterMax * heter.prop
      if (k == 1) {
        ## First level: generate genotypes from scratch and keep the fitted
        ## index/model (DataPast) for reuse at the remaining levels.
        GenoGenerate <- FALSE
        lprop <- 32  # level-specific override of the value computed above
        re <- Simulation2(chrsinput = chrsinput, nlocsave = nlocsave,
                          nloctrue = nloctrue, effect = effect,
                          peddir = peddir, ndrop = ndrop,
                          drop.effect = drop.effect, lprop = lprop,
                          sd.err = sd.err, wtinput = wtinput,
                          pedscheme = pedscheme, Noff1 = Noff1,
                          Noff2 = Noff2, pfamily = pfamily,
                          binary = binary, scheme = scheme, X = NULL,
                          heter.prop = heter.prop)
        DataPast <- list()
        DataPast$index <- re$index
        DataPast$model <- re$model
        if (j == 1) Result[[k]] <- re$result
        if (j != 1) Result[[k]] <- rbind(Result[[k]], re$result)
      }
      if (k != 1) {
        ## Subsequent levels reuse the genotypes generated at k == 1.
        GenoGenerate <- TRUE
        if (k == 2) lprop <- 16  # special-cased proportion for level 2
        re <- Simulation2(chrsinput = chrsinput, nlocsave = nlocsave,
                          nloctrue = nloctrue, effect = effect,
                          peddir = peddir, ndrop = ndrop,
                          drop.effect = drop.effect, lprop = lprop,
                          sd.err = sd.err, wtinput = wtinput,
                          pedscheme = pedscheme, Noff1 = Noff1,
                          Noff2 = Noff2, pfamily = pfamily,
                          binary = binary, scheme = scheme, X = NULL,
                          heter.prop = heter.prop,
                          GenoGenerate = GenoGenerate, DataPast = DataPast)
        if (j == 1) Result[[k]] <- re$result
        if (j != 1) Result[[k]] <- rbind(Result[[k]], re$result)
      }
    }
  }
  ## One output file per heterogeneity level, e.g. "He2Drop0.01Wt0Ped2.txt".
  for (k in seq_along(HeterAll)) {
    write.table(Result[[k]],
                paste0("He", 1 / HeterAll[k], "Drop", drop.effect,
                       "Wt", wtinput, "Ped2", ".txt"),
                quote = FALSE, row.names = FALSE)
  }
}
|
library(QCApro)
### Name: randomDGS
### Title: Build a Random Data-Generating Structure
### Aliases: randomDGS
### ** Examples
# randomly generate three data-generating structures on the basis of four
# exogenous factors
# (use `dgs` rather than `str` so that base R's utils::str() is not masked)
dgs <- randomDGS(n.DGS = 3, exo.facs = LETTERS[1:4], seed.1 = 1375, seed.2 = 3917)
dgs$DGS
# all correctness-preserving submodels of DGS 2, bd + abC, can then be found with the
# 'submodels' function
submodels(dgs$DGS[2])$submodels
|
/data/genthat_extracted_code/QCApro/examples/randomDGS.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 462
|
r
|
library(QCApro)
### Name: randomDGS
### Title: Build a Random Data-Generating Structure
### Aliases: randomDGS
### ** Examples
# randomly generate three data-generating structures on the basis of four
# exogenous factors
# (use `dgs` rather than `str` so that base R's utils::str() is not masked)
dgs <- randomDGS(n.DGS = 3, exo.facs = LETTERS[1:4], seed.1 = 1375, seed.2 = 3917)
dgs$DGS
# all correctness-preserving submodels of DGS 2, bd + abC, can then be found with the
# 'submodels' function
submodels(dgs$DGS[2])$submodels
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clear_selection.R
\name{clear_selection}
\alias{clear_selection}
\title{Clear an active selection of nodes or edges}
\usage{
clear_selection(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a graph object of class
\code{dgr_graph}.
}
\description{
Clear the selection of
nodes or edges within a graph object.
}
\examples{
# Create a graph with
# a single path
graph <-
create_graph() \%>\%
add_path(n = 5)
# Select nodes with IDs `1`
# and `3`
graph <-
graph \%>\%
select_nodes(
nodes = c(1, 3))
# Verify that a node selection
# has been made
graph \%>\%
get_selection()
# Clear the selection with
# `clear_selection()`
graph <-
graph \%>\%
clear_selection()
# Verify that the node
# selection has been cleared
graph \%>\%
get_selection()
}
|
/man/clear_selection.Rd
|
permissive
|
akkalbist55/DiagrammeR
|
R
| false
| true
| 881
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clear_selection.R
\name{clear_selection}
\alias{clear_selection}
\title{Clear an active selection of nodes or edges}
\usage{
clear_selection(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a graph object of class
\code{dgr_graph}.
}
\description{
Clear the selection of
nodes or edges within a graph object.
}
\examples{
# Create a graph with
# a single path
graph <-
create_graph() \%>\%
add_path(n = 5)
# Select nodes with IDs `1`
# and `3`
graph <-
graph \%>\%
select_nodes(
nodes = c(1, 3))
# Verify that a node selection
# has been made
graph \%>\%
get_selection()
# Clear the selection with
# `clear_selection()`
graph <-
graph \%>\%
clear_selection()
# Verify that the node
# selection has been cleared
graph \%>\%
get_selection()
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1574
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1573
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1573
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/wmiforward/stmt24_292_293.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 516
c no.of clauses 1574
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1573
c
c QBFLIB/Basler/wmiforward/stmt24_292_293.qdimacs 516 1574 E1 [1] 0 43 472 1573 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/wmiforward/stmt24_292_293/stmt24_292_293.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 709
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1574
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1573
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1573
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/wmiforward/stmt24_292_293.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 516
c no.of clauses 1574
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1573
c
c QBFLIB/Basler/wmiforward/stmt24_292_293.qdimacs 516 1574 E1 [1] 0 43 472 1573 RED
|
## @knitr setup, echo=FALSE
options(width = 75)
options(useFancyQuotes=FALSE)
## @knitr hook-printfun, echo=FALSE
library(knitr)
library(formatR)
# Chunk hook: after a chunk tagged printfun='name' runs, dump that function's
# source (reformatted when tidy=TRUE) and render it as a fenced ```r block.
knit_hooks$set(printfun = function(before, options, envir) {
  if (before) return()
  txt = capture.output(dump(options$printfun, '', envir = envir))
  ## reformat if tidy=TRUE
  if (options$tidy)
    txt = tidy.source(text=txt, output=FALSE,
                      width.cutoff=30L, keep.comment=TRUE,
                      keep.blank.line=FALSE)$text.tidy
  paste(c('\n```r\n', txt, '\n```\n'), collapse="\n")
})
## @knitr hook-printmanpage, echo=FALSE
# Chunk hook: emit the Rd man page named in printmanpage=, tab-indented.
# Fix: build the result with one vectorized paste0() instead of growing the
# string line-by-line in a loop (the original was O(n^2) in the number of
# man-page lines); the produced string is byte-identical.
knit_hooks$set(printmanpage = function(before, options, envir) {
  if (before) return()
  manpage <- file.path("..", "man",
                       sprintf("%s.Rd", options$printmanpage))
  lines <- readLines(manpage)
  paste0("\n", paste0("\t", lines, "\n", collapse = ""))
})
## @knitr library_RGalaxy_fake, eval=FALSE
## library(RGalaxy)
## @knitr library_RGalaxy_real, echo=FALSE, results="hide"
suppressPackageStartupMessages(library(RGalaxy))
## @knitr addTwoNumbers, printfun='addTwoNumbers', echo=FALSE, tidy=FALSE
#source code goes here
## @knitr run_addTwoNumbers
t <- tempfile()
addTwoNumbers(2, 2, t)
readLines(t, warn=FALSE)
## @knitr addTwoNumbers_man, printmanpage='addTwoNumbers', echo=FALSE, tidy=FALSE
#source code goes here
## @knitr galaxyHomeSetup, echo=FALSE, results="hide"
if (!exists("galaxyHome"))
  galaxyHome <- getwd()
toolDir <- "RGalaxy_test_tool"
funcName <- "functionToGalaxify"
# Install the stock tool_conf.xml only if one is not already present.
file.copy(system.file("galaxy", "tool_conf.xml", package="RGalaxy"),
          file.path(galaxyHome, "tool_conf.xml"), overwrite=FALSE)
if(!file.exists("test-data")) dir.create("test-data", )
## @knitr run_galaxy, tidy=FALSE
galaxy("addTwoNumbers",
       galaxyConfig=
         GalaxyConfig(galaxyHome, "mytool", "Test Section",
                      "testSectionId")
)
## @knitr addTwoNumbersWithTest, printfun='addTwoNumbersWithTest', echo=FALSE, tidy=FALSE
#source code goes here
## @knitr runFunctionalTest
runFunctionalTest(addTwoNumbersWithTest)
## @knitr withRserve, tidy=FALSE, eval=FALSE
## galaxy("addTwoNumbersWithTest",
##   galaxyConfig=
##     GalaxyConfig(galaxyHome, "mytool", "Test Section",
##       "testSectionId"),
##   RserveConnection=RserveConnection()
##   )
## @knitr install_RSclient, eval=FALSE
## source("http://bioconductor.org/biocLite.R")
## biocLite("RSclient", siteRepos="http://www.rforge.net")
## @knitr RserveConnection
RserveConnection(host="mymachine", port=2012L)
## @knitr probeLookup, printfun='probeLookup', echo=FALSE, tidy=FALSE
#source code goes here
## @knitr test_probeLookup
runFunctionalTest(probeLookup)
|
/RGalaxy-vignette.R
|
no_license
|
psnehal/LrpathExecutor
|
R
| false
| false
| 2,683
|
r
|
## @knitr setup, echo=FALSE
options(width = 75)
options(useFancyQuotes=FALSE)
## @knitr hook-printfun, echo=FALSE
library(knitr)
library(formatR)
# Chunk hook: after a chunk tagged printfun='name' runs, dump that function's
# source (reformatted when tidy=TRUE) and render it as a fenced ```r block.
knit_hooks$set(printfun = function(before, options, envir) {
  if (before) return()
  txt = capture.output(dump(options$printfun, '', envir = envir))
  ## reformat if tidy=TRUE
  if (options$tidy)
    txt = tidy.source(text=txt, output=FALSE,
                      width.cutoff=30L, keep.comment=TRUE,
                      keep.blank.line=FALSE)$text.tidy
  paste(c('\n```r\n', txt, '\n```\n'), collapse="\n")
})
## @knitr hook-printmanpage, echo=FALSE
# Chunk hook: emit the Rd man page named in printmanpage=, tab-indented.
# Fix: build the result with one vectorized paste0() instead of growing the
# string line-by-line in a loop (the original was O(n^2) in the number of
# man-page lines); the produced string is byte-identical.
knit_hooks$set(printmanpage = function(before, options, envir) {
  if (before) return()
  manpage <- file.path("..", "man",
                       sprintf("%s.Rd", options$printmanpage))
  lines <- readLines(manpage)
  paste0("\n", paste0("\t", lines, "\n", collapse = ""))
})
## @knitr library_RGalaxy_fake, eval=FALSE
## library(RGalaxy)
## @knitr library_RGalaxy_real, echo=FALSE, results="hide"
suppressPackageStartupMessages(library(RGalaxy))
## @knitr addTwoNumbers, printfun='addTwoNumbers', echo=FALSE, tidy=FALSE
#source code goes here
## @knitr run_addTwoNumbers
t <- tempfile()
addTwoNumbers(2, 2, t)
readLines(t, warn=FALSE)
## @knitr addTwoNumbers_man, printmanpage='addTwoNumbers', echo=FALSE, tidy=FALSE
#source code goes here
## @knitr galaxyHomeSetup, echo=FALSE, results="hide"
if (!exists("galaxyHome"))
  galaxyHome <- getwd()
toolDir <- "RGalaxy_test_tool"
funcName <- "functionToGalaxify"
# Install the stock tool_conf.xml only if one is not already present.
file.copy(system.file("galaxy", "tool_conf.xml", package="RGalaxy"),
          file.path(galaxyHome, "tool_conf.xml"), overwrite=FALSE)
if(!file.exists("test-data")) dir.create("test-data", )
## @knitr run_galaxy, tidy=FALSE
galaxy("addTwoNumbers",
       galaxyConfig=
         GalaxyConfig(galaxyHome, "mytool", "Test Section",
                      "testSectionId")
)
## @knitr addTwoNumbersWithTest, printfun='addTwoNumbersWithTest', echo=FALSE, tidy=FALSE
#source code goes here
## @knitr runFunctionalTest
runFunctionalTest(addTwoNumbersWithTest)
## @knitr withRserve, tidy=FALSE, eval=FALSE
## galaxy("addTwoNumbersWithTest",
##   galaxyConfig=
##     GalaxyConfig(galaxyHome, "mytool", "Test Section",
##       "testSectionId"),
##   RserveConnection=RserveConnection()
##   )
## @knitr install_RSclient, eval=FALSE
## source("http://bioconductor.org/biocLite.R")
## biocLite("RSclient", siteRepos="http://www.rforge.net")
## @knitr RserveConnection
RserveConnection(host="mymachine", port=2012L)
## @knitr probeLookup, printfun='probeLookup', echo=FALSE, tidy=FALSE
#source code goes here
## @knitr test_probeLookup
runFunctionalTest(probeLookup)
|
## makeCacheMatrix: build a special "matrix" object that can cache its
## inverse. The returned value is a list of accessor closures sharing the
## matrix `x` and the cached inverse; calling set() with a new matrix
## invalidates the cache so cacheSolve() will recompute it.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # new data -> previously cached inverse is stale
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: compute (or retrieve) the inverse of the special "matrix"
## created by makeCacheMatrix. If the inverse has already been calculated
## and the matrix is unchanged, the cached value is returned; otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
##
## x   : object returned by makeCacheMatrix
## ... : additional arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  invs <- x$getinverse()
  if(!is.null(invs)) {
    message("Retrieve from cache.")
    return(invs)
  }
  data <- x$get()
  # Bug fix: forward `...` to solve(); the original accepted `...` in the
  # signature but silently dropped it when computing the inverse.
  invs <- solve(data, ...)
  x$setinverse(invs)
  invs
}
|
/cachematrix.R
|
no_license
|
RajuMudunuri/ProgrammingAssignment2
|
R
| false
| false
| 937
|
r
|
## makeCacheMatrix: build a special "matrix" object that can cache its
## inverse. The returned value is a list of accessor closures sharing the
## matrix `x` and the cached inverse; calling set() with a new matrix
## invalidates the cache so cacheSolve() will recompute it.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # new data -> previously cached inverse is stale
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: compute (or retrieve) the inverse of the special "matrix"
## created by makeCacheMatrix. If the inverse has already been calculated
## and the matrix is unchanged, the cached value is returned; otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
##
## x   : object returned by makeCacheMatrix
## ... : additional arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  invs <- x$getinverse()
  if(!is.null(invs)) {
    message("Retrieve from cache.")
    return(invs)
  }
  data <- x$get()
  # Bug fix: forward `...` to solve(); the original accepted `...` in the
  # signature but silently dropped it when computing the inverse.
  invs <- solve(data, ...)
  x$setinverse(invs)
  invs
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.