# ===== /run_simulations.R (ejvandenoord/robust-association, no_license) =====
rm(list=ls())
drive = "G:\\My Drive\\"
work_dir = paste0(drive,"SOP\\methylation\\robustMWAS\\github_scripts\\")
source(paste0(work_dir,"functions_run_simulations.R"))
library(MASS)
sim_type="H0_split_half_replication"
set.seed(1)
n_sample = 250
preplic = c(1e-3)
preplicH0 = 0.05
# Type I error
n_sims = 1e2
nfolds_vector = c(2,10,50)
r_vector = c(0)
poutlier_vector = c(0.005,0.02)
toutlier_vector = c("dep","indepboth")
min_error2 = 3
max_error2 = 5
pdisc_vector = 1.5 # values larger than 1 will not select
directional = F # if pdisc < 1, should directional replication tests be used?
### power
#n_sims = 1e4
#nfolds_vector = c(2,10,50)
#r_vector = c(0.3)
#poutlier_vector = c(0,0.02)
#toutlier_vector = c("indepboth")
#pdisc_vector = 0.05 # values larger than 1 will not select
#directional = F # if pdisc < 1, should directional replication tests be used?
#min_error2=3
#max_error2=5
#pdisc_vector = 1.5 # values larger than 1 will not select
### split half
#n_sims = 1e5
#nfolds_vector = c(2)
#r_vector = c(0)
#poutlier_vector = c(0.005,0.02)
#toutlier_vector = c("dep","indepboth")
#min_error2 = 3
#max_error2 = 5
#pdisc_vector = 0.05 # values larger than 1 will not select
#directional = F # if pdisc < 1, should directional replication tests be used?
# all sims will have the same random components (pred_data, error, foldid - same seed used) to increase comparability
start_seed = sample(1:(.Machine$integer.max-(n_sims+1)), 1)
print(start_seed)
dir.create(paste0(work_dir,"\\results\\"), showWarnings=F, recursive=T)
n_conditions = length(r_vector)*length(nfolds_vector)*length(toutlier_vector)*length(poutlier_vector)*length(pdisc_vector)
conditions = matrix(NA,n_conditions ,5)
analysis_labels = rep(NA,n_conditions)
colnames(conditions) = c("r","nfolds","t_outlier","p_outlier","pdisc")
x=0
for (i in seq_along(r_vector) )
for (j in seq_along(nfolds_vector) )
for (k in seq_along(toutlier_vector) )
for (l in seq_along(poutlier_vector) )
for (m in seq_along(pdisc_vector) ) { # i=j=k=l=m=1
x=x+1
conditions[x,1] = r = r_vector[i]
conditions[x,2] = nfolds = nfolds_vector[j]
conditions[x,3] = toutlier = toutlier_vector[k]
conditions[x,4] = poutlier = poutlier_vector[l]
conditions[x,5] = pdisc = pdisc_vector[m]
analysis_labels[x] = paste0("r",r,"_nfolds",nfolds,
"_poutl.",toutlier,"_poutl",poutlier,
"_pdisc",pdisc )
}
rownames(conditions) = analysis_labels
ncol=8
results = matrix(NA,n_conditions ,ncol)
colnames(results) = c("mean_cor_train","mean_cor","mean_p","mean_t",
"nfolds<p.repl","p_fold1","p_meta","p_all")
x=0
for (i in seq_along(r_vector) )
for (j in seq_along(nfolds_vector) )
for (k in seq_along(toutlier_vector) )
for (l in seq_along(poutlier_vector) )
for (m in seq_along(pdisc_vector) ) { # i=j=k=l=m=1
x=x+1
# if ( r_vector[i] == 0 ) n_sims = n_sims * 50
if ( r_vector[i] == 0 ) preplic = preplicH0
print( analysis_labels[x] )
results = matrix(NA,n_sims,ncol)
for (iter in 1:n_sims ) { # iter=1
if(iter %% 1000==0) cat(paste0("Simulation: ", iter, "\n"))
set.seed(iter+start_seed)
pred_data = rnorm(n_sample)
error = rnorm(n_sample)
r = r_vector[i]
outcome = r*pred_data + sqrt(1-r^2)*error
n_outlier = round( poutlier_vector[l] * n_sample )
if (n_outlier>0) {
error2 = runif(n_outlier,min=min_error2,max=max_error2)
if (toutlier_vector[k] == "dep") {
pred_data[1:n_outlier] = abs(pred_data[1:n_outlier]) + error2
outcome[1:n_outlier] = abs(outcome[1:n_outlier] + error2) }
if (toutlier_vector[k] == "indepboth") {
dir = sign( outcome[1:n_outlier] )
outcome[1:n_outlier] = outcome[1:n_outlier] + dir*error2
      dir = sign( pred_data[(n_outlier+1):(2*n_outlier)] )
      pred_data[(n_outlier+1):(2*n_outlier)] = pred_data[(n_outlier+1):(2*n_outlier)] + dir*error2
}
}
results[iter,] = run_lm_sim(n_sample,pred_data,outcome,nfolds_vector[j],pdisc_vector[m])
}
temp = calc_summary_results(results,preplic)
if (x==1) summary_results = temp else
summary_results = rbind(summary_results,temp)
print (temp )
write.csv(results,paste0(work_dir,"\\results\\",analysis_labels[x],".csv"),row.names=F,quote=F)
}
rownames(summary_results) = analysis_labels
temp = data.frame(cbind(conditions,summary_results) )
temp
write.csv(temp,paste0(work_dir,sim_type,"_summary_results.csv"),row.names=T,quote=F)
# ===== /data/genthat_extracted_code/mazeGen/examples/gridSevenRight.Rd.R (surayaaramli/typeRrh, no_license) =====
library(mazeGen)
### Name: gridSevenRight
### Title: Grid Seven Right
### Aliases: gridSevenRight
### Keywords: datasets
### ** Examples
## Not run:
##D
##D # Returns a Grid with rank = 7
##D data(gridSevenRight)
##D coordinates <- gridSevenRight
##D
## End(Not run)
# ===== /chapter-07-exercises/exercise-1/exercise.R (natm94/book-exercises, permissive) =====
# Exercise 1: creating and operating on vectors
# Create a vector `names` that contains your name and the names of 2 people
# next to you. Print the vector.
names <- c("Natalia", "Julia", "Ghost")
print(names)
# Use the colon operator : to create a vector `n` of numbers from 10:49
n <- 10:49
# Use the `length()` function to get the number of elements in `n`
length(n)
# Add 1 to each element in `n` and print the result
print(n+1)
# Create a vector `m` that contains the numbers 10 to 1 (in that order).
# Hint: use the `seq()` function
m <- seq(10, 1)
# Subtract `m` FROM `n`. Note the recycling!
n - m
# Use the `seq()` function to produce a range of numbers from -5 to 10 in `0.1`
# increments. Store it in a variable `x_range`
x_range <- seq(-5, 10, 0.1)
# Create a vector `sin_wave` by calling the `sin()` function on each element
# in `x_range`.
sin_wave <- sin(x_range)
# Create a vector `cos_wave` by calling the `cos()` function on each element
# in `x_range`.
cos_wave <- cos(x_range)
# Create a vector `wave` by multiplying `sin_wave` and `cos_wave` together,
# then adding `sin_wave` to the product
wave <- sin_wave * cos_wave + sin_wave
# Use the `plot()` function to plot your `wave`!
plot(wave)
# ===== /R/AvgPlm.R (MimoriK/aroma.affymetrix, no_license) =====
###########################################################################/**
# @RdocClass AvgPlm
#
# @title "The AvgPlm class"
#
# \description{
# @classhierarchy
#
# This class represents a PLM where the probe intensities are averaged
# assuming identical probe affinities.
# For instance, one may assume that replicated probes with identical
# sequences have the same probe affinities, cf. the GenomeWideSNP\_6
# chip type.
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "ProbeLevelModel".}
# \item{flavor}{A @character string specifying what model fitting algorithm
# to be used. This makes it possible to get identical estimates as other
# packages.}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# \section{Model}{
# For a single unit group, the averaging PLM of K probes is:
#
# \deqn{y_{ik} = \theta_i + \varepsilon_{ik}}
#
# where \eqn{\theta_i} are the chip effects for arrays \eqn{i=1,...,I}.
# The \eqn{\varepsilon_{ik}} are zero-mean noise with equal variance.
# }
#
# \section{Different flavors of model fitting}{
# The above model can be fitted in two ways, either robustly or
# non-robustly.
# Use argument \code{flavor="mean"} to fit the model non-robustly, i.e.
#
# \deqn{\hat{\theta}_{i} = 1/K \sum_k y_{ik}}.
#
# Use argument \code{flavor="median"} to fit the model robustly, i.e.
#
# \deqn{\hat{\theta}_{i} = median_k y_{ik}}.
#
# Missing values are always excluded.
# }
#
# @author "HB"
#*/###########################################################################
setConstructorS3("AvgPlm", function(..., flavor=c("median", "mean")) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'flavor':
flavor <- match.arg(flavor)
this <- extend(ProbeLevelModel(...), "AvgPlm",
.flavor = flavor
)
validate(this)
this
})
setMethodS3("validate", "AvgPlm", function(this, ...) {
ds <- getDataSet(this)
if (is.null(ds))
return(invisible(TRUE))
if (length(ds) < 1) {
throw("This ", class(this)[1], " requires at least 1 array: ",
length(ds))
}
invisible(TRUE)
}, protected=TRUE)
setMethodS3("getAsteriskTags", "AvgPlm", function(this, collapse=NULL, ...) {
# Returns 'PLM[,<shift>]'
tags <- NextMethod("getAsteriskTags", collapse=NULL)
tags[1] <- "AVG"
# Add class specific parameter tags
if (this$.flavor != "median")
tags <- paste(tags, this$.flavor, sep=",")
# Collapse
tags <- paste(tags, collapse=collapse)
tags
}, protected=TRUE)
setMethodS3("getParameters", "AvgPlm", function(this, ...) {
params <- NextMethod("getParameters")
params$flavor <- this$.flavor
params
}, protected=TRUE)
setMethodS3("getProbeAffinityFile", "AvgPlm", function(this, ...) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get the probe affinities (and create files etc)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
paf <- NextMethod("getProbeAffinityFile")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Update the encode and decode functions
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
setEncodeFunction(paf, function(groupData, ...) {
phi <- .subset2(groupData, "phi")
stdvs <- .subset2(groupData, "sdPhi")
outliers <- .subset2(groupData, "phiOutliers")
# Encode outliers as the sign of 'pixels'; -1 = TRUE, +1 = FALSE
pixels <- sign(0.5 - as.integer(outliers))
list(intensities=phi, stdvs=stdvs, pixels=pixels)
})
setEncodeFunction(paf, function(groupData, ...) {
list(
intensities = .subset2(groupData, "phi"),
stdvs = .subset2(groupData, "sdPhi"),
# Encode outliers as the sign of 'pixels'; -1 = TRUE, +1 = FALSE
pixels = ifelse(.subset2(groupData, "phiOutliers"), -1, +1)
)
})
setDecodeFunction(paf, function(groupData, ...) {
intensities <- .subset2(groupData, "intensities")
stdvs <- .subset2(groupData, "stdvs")
pixels <- .subset2(groupData, "pixels")
# Outliers are encoded by the sign of 'pixels'.
outliers <- as.logical(1-sign(pixels))
list(
phi=intensities,
sdPhi=stdvs,
phiOutliers=outliers
)
})
paf
}, private=TRUE)
###########################################################################/**
# @RdocMethod getFitUnitGroupFunction
#
# @title "Gets the low-level function that fits the PLM"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @function.
# }
#
# \seealso{
# @seeclass
# }
#*/###########################################################################
setMethodS3("getFitUnitGroupFunction", "AvgPlm", function(this, ...) {
# Float precision
# .Machine$float.eps <- sqrt(.Machine$double.eps)
floatEps <- sqrt(.Machine$double.eps)
# Shift signals?
shift <- this$shift
if (is.null(shift))
shift <- 0
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# avgModel()
# Author: Henrik Bengtsson, UC Berkeley.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
avgPlmModel <- function(y, ...){
# Assert right dimensions of 'y'.
# If input data are dimensionless, return NAs.
if (is.null(dim(y))) {
nbrOfArrays <- length(getDataSet(this))
return(list(theta=rep(NA, nbrOfArrays),
sdTheta=rep(NA, nbrOfArrays),
thetaOutliers=rep(NA, nbrOfArrays),
phi=c(),
sdPhi=c(),
phiOutliers=c()
)
)
}
if (length(dim(y)) != 2) {
str(y)
stop("Argument 'y' must have two dimensions: ",
paste(dim(y), collapse="x"))
}
# Add shift
y <- y + shift
I <- ncol(y); # Number of arrays
K <- nrow(y); # Number of probes
# Fit model
if (K == 1) {
theta <- y
sdTheta <- rep(0,I); # floatEps
} else {
y <- t(y)
if (flavor == "median") {
theta <- rowMedians(y, na.rm=TRUE)
sdTheta <- rowMads(y, center=theta, na.rm=TRUE)
} else if (flavor == "mean") {
theta <- .rowMeans(y, na.rm=TRUE)
sdTheta <- rowSds(y, mean=theta, na.rm=TRUE)
}
}
# Should we store std deviations or std errors?!? /HB 2007-09-08
sdTheta <- sdTheta/sqrt(K)
# Hmm..., when searching for "units todo", we use
# (sdTheta <= 0).
sdTheta <- sdTheta + floatEps
# Probe affinities are all identical (==ones)
phi <- rep(1, K)
    sdPhi <- rep(1, K); # Default, since not estimated (should we store NAs?!?)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# A fit function must return: theta, sdTheta, thetaOutliers,
# phi, sdPhi, phiOutliers.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
thetaOutliers <- rep(FALSE, I)
phiOutliers <- rep(FALSE, K)
# Return data on the intensity scale
list(theta=theta, sdTheta=sdTheta, thetaOutliers=thetaOutliers,
phi=phi, sdPhi=sdPhi, phiOutliers=phiOutliers)
} # avgPlmModel()
attr(avgPlmModel, "name") <- "avgPlmModel"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get the flavor of fitting algorithm for the averaging PLM
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
flavor <- this$.flavor
if (flavor == "median") {
fitFcn <- avgPlmModel
} else if (flavor == "mean") {
fitFcn <- avgPlmModel
} else {
throw("Cannot get fit function for AvgPlm. Unknown flavor: ", flavor)
}
# Test that it works and is available.
ok <- FALSE
tryCatch({
fitFcn(matrix(1:6+0.1, ncol=3))
ok <- TRUE
}, error = function(ex) {
print(ex)
})
if (!ok) {
throw("The fit function for requested AvgPlm flavor failed: ", flavor)
}
fitFcn
}, private=TRUE)
setMethodS3("getCalculateResidualsFunction", "AvgPlm", function(static, ...) {
function(y, yhat) {
y - yhat
}
}, static=TRUE, protected=TRUE)
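# A worked sketch of the two fitting flavors documented above (illustrative
# only; the toy matrix and variable names below are assumptions, not part of
# the package API). Kept in comments so that sourcing this file stays
# side-effect free:
#   library(matrixStats)
#   y <- matrix(rnorm(12), nrow=3, ncol=4)      # K=3 probes, I=4 arrays
#   yt <- t(y)                                  # I x K, as in avgPlmModel()
#   thetaMedian <- rowMedians(yt, na.rm=TRUE)   # flavor="median"
#   thetaMean <- rowMeans(yt, na.rm=TRUE)       # flavor="mean"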
# ===== /R/check.R (laasousa/epinowcast, permissive) =====
#' Check Quantiles Required are Present
#'
#' @param posterior A dataframe containing quantiles identified using
#' the `q5` naming scheme. Default: No default.
#'
#' @param req_probs A numeric vector of required probabilities. Default:
#' c(0.5, 0.95, 0.2, 0.8).
#'
#' @return NULL
#'
#' @family check
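#'
#' @examples
#' # A minimal sketch with made-up data (this one-row posterior is an
#' # assumption for illustration, not real epinowcast output); the data
#' # frame must carry the q5, q20, q80 and q95 quantile columns checked
#' # for below.
#' posterior <- data.frame(q5 = 1, q20 = 2, q80 = 3, q95 = 4)
#' check_quantiles(posterior)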
check_quantiles <- function(posterior, req_probs = c(0.5, 0.95, 0.2, 0.8)) {
cols <- colnames(posterior)
if (sum(cols %in% c("q5", "q95", "q20", "q80")) != 4) {
stop(
"Following quantiles must be present (set with probs): ",
paste(req_probs, collapse = ", ")
)
}
return(invisible(NULL))
}
% ===== /service/paws.kms/man/get_key_policy.Rd (CR-Mercado/paws, permissive) =====
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.kms_operations.R
\name{get_key_policy}
\alias{get_key_policy}
\title{Gets a key policy attached to the specified customer master key (CMK)}
\usage{
get_key_policy(KeyId, PolicyName)
}
\arguments{
\item{KeyId}{[required] A unique identifier for the customer master key (CMK).
Specify the key ID or the Amazon Resource Name (ARN) of the CMK.
For example:
\itemize{
\item Key ID: \code{1234abcd-12ab-34cd-56ef-1234567890ab}
\item Key ARN: \code{arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab}
}
To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey.}
\item{PolicyName}{[required] Specifies the name of the key policy. The only valid name is \code{default}. To get the names of key policies, use ListKeyPolicies.}
}
\description{
Gets a key policy attached to the specified customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.
}
\section{Accepted Parameters}{
\preformatted{get_key_policy(
KeyId = "string",
PolicyName = "string"
)
}
}
\examples{
# The following example retrieves the key policy for the specified
# customer master key (CMK).
\donttest{get_key_policy(
KeyId = "1234abcd-12ab-34cd-56ef-1234567890ab",
PolicyName = "default"
)}
}
# ===== /R/optim_NLM.R (Allisterh/nlr, no_license) =====
#******************************************************************************
#* +------------------------------------------------------------------------+ *
#* |   Function optim.NLM, optimize a function by Levenberg-Marquardt.      | *
#* |                                                                        | *
#* | Note: be careful using this function when there are no outliers; it    | *
#* |    may not work without outliers, in which case nlmest is preferable.  | *
#* |   Arguments:                                                           | *
#* |     objfnc: any objective function for minimizing; it must accept      | *
#* |              formula, data and start; extras will be defined in (...)  | *
#* |              the output of objfnc must contain:                        | *
#* |                 $value(attr,gradient,hessian), $angmat, $angvec        | *
#* |     data: data, contains dependents and independents;                  | *
#* |            it can be a data.frame, list or named matrix.               | *
#* |     theta: starting values; it must contain tau elements               | *
#* |                                                                        | *
#* |     ...: extra entries passed to objfnc                                | *
#* |                                                                        | *
#* +------------------------------------------------------------------------+ *
#******************************************************************************
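# Illustrative sketch (not part of the package) of an objective function that
# obeys the contract above; every name below (myobj, the toy data, parameter
# 'a') is hypothetical. Kept in comments so sourcing this file has no side
# effects:
#   myobj <- function(data, start, ...) {
#     th <- unlist(start)
#     res <- data$y - th["a"] * data$x                 # residuals, length n
#     v <- sum(res^2)                                  # scalar objective value
#     attr(v, "gradient") <- matrix(-2*sum(res*data$x), 1, 1)  # 1 x p
#     attr(v, "hessian")  <- matrix( 2*sum(data$x^2),   1, 1)  # p x p
#     list(value=v, angvec=res,                        # residual vector and
#          angmat=matrix(-data$x, ncol=1))             # n x p jacobian used in
#   }                                                  # the convergence-angle test
#   fit <- optim.NLM(myobj, data=list(x=1:10, y=2*(1:10)), start=list(a=1))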
optim.NLM<-function(objfnc,data,start=getInitial(objfnc,data),
control=nlr.control(tolerance=0.001, minlanda=1 / 2 ^ 10, maxiter=25 * length(start)),...)
{
tolerance <- control$tolerance
maxiter <- control$maxiter
minlanda <- control$minlanda
trace <- control$trace
loc.start <- start
Fault2 <- Fault() ###### no error, automatically will be created by the Fault object
th <- loc.start
theta <- unlist(loc.start)
theta1 <- theta
p <- length(theta)
n <- length(data[[1]])
eol <- F
iterate <- 0
iterhist <- NULL
.datalist <- NULL
.datalist[names(loc.start)] <- loc.start
.datalist[names(data)]<-data
ht <- objfnc(data=data,start=th,...)
if(is.Faultwarn(ht)) return(ht)
lambda <- 1
landa <-1
fact <- 2
#///*************************************************
while (!eol) #///********* Start Iteration ****************
{
iterate <- iterate + 1
grh <- attr(ht$value,"gradient") ## g(theta) grad
hsh <- attr(ht$value,"hessian") ## H(theta) hess
hshinv <- eiginv(hsh,stp=F,symmetric=T)
ilev <-0
if(is.Fault(hshinv)) ##################################### LM modified
{
###################################################################
### LM part
###################################################################
if(any(is.infinite(hsh)) || any(is.na(hsh))) return(Fault(FN=18))
heig <-eigen(hsh,symmetric=T) ## eig(f+land I) = (eig(F) +lnd)
lambda <- abs(min(heig$values))*2 ## add the smallest minus eig to
## to make all possitive
repeat{
ilev<- ilev + 1
I <- diag(dim(hsh) [2])
zeta <- hsh + lambda * diag(abs(diag(hsh)))
zetainv <- eiginv(zeta,stp=F,symmetric=T)
if(is.Fault(zetainv)) return(nl.fitt.rob(Fault=zetainv))
#zetainv <- svdinv(hsh)
zqr <-qr(zeta)
delta2 <- grh %*% zetainv
theta2 <- theta1 - delta2
th2 <- as.list(theta2)
names(theta2) <- names(theta1)
names(th2) <- names(theta1)
ht2 <- objfnc(data=data,start=th2,...)
if(is.Faultwarn(ht2)){
return(ht2)}
cnv <- sum((theta1-theta2)^2)
diference <- abs(as.numeric(ht2$value)-as.numeric(ht$value))
if(as.numeric(ht2$value) <= as.numeric(ht$value) || cnv < tolerance/10^10)
{
theta1 <- theta2
ht <- ht2
lambda <- lambda / 10
break
if(cnv < tolerance/10^10) break
}
else {
lambda <- lambda * 10
if( lambda > 1e12) lambda <- lambda / 20#return(Fault(FN=14,FF="optim.NLM"))
}
}
}
##################### end of singular case ####################
###################################################################################
else { ##################### Possitive definit case ###################################
delta1 <- grh %*% hshinv #%*% t(grh)
delta1 <- as.numeric(delta1)
theta2 <- theta1 - landa*delta1
th2 <- as.list(theta2)
names(theta2) <- names(theta1)
names(th2) <- names(theta1)
ht2 <- objfnc(data=data,start=th2,...)
if(is.Faultwarn(ht2)) {
return(ht2)
}
cnvg <- cnvgnew <- F
diference <- abs(as.numeric(ht2$value)-as.numeric(ht$value))
# cat("positive definit case",diference ,tolerance/1000,"\n")
# cat("theta 1...theta2==",theta1,theta2,"\n","values 2..1=",as.numeric(ht2$value), as.numeric(ht$value))
# cat("\n landa... delta",landa,delta1,landa*delta1,"\n \n")
.temp1<-as.numeric(ht2$value)
.temp2<-as.numeric(ht$value)
      if(is.missing(.temp1) || is.nan(.temp1) || is.inf(.temp1) ||
         is.missing(.temp2) || is.nan(.temp2) || is.inf(.temp2)) return(nl.fitt.rob(Fault=Fault(FN=18,FF="optim.NLM")))
if(as.numeric(ht2$value) > as.numeric(ht$value)){
ht.prev <- as.numeric(ht2$value)
###############################
while(landa >= minlanda){ ####### iteration landa ###
landa <- landa / fact
theta2 <- theta1 - landa * delta1
th2 <- as.list(theta2)
names(theta2) <- names(theta1)
names(th2) <- names(theta1)
ht2 <- objfnc(data=data,start=th2,...)
if(is.Faultwarn(ht2)) return(ht2)
ht.new <- as.numeric(ht2$value)
diference <- abs(as.numeric(ht2$value)-as.numeric(ht$value))
if(as.numeric(ht2$value) <= as.numeric(ht$value))
{
theta1 <- theta2
ht <- ht2
cnvg <- T
break
}
if(ht.new < ht.prev) {
ht2.new <- ht2
theta2.new <- theta2
ht.prev <- ht.new
cnvgnew <- T
}
} ####### iteration landa ###
###############################
landa <- min(1,fact*landa)
#landa <- 1
if(! cnvg)
if(cnvgnew){
theta1 <- theta2.new
ht <- ht2.new
}
}
else{
theta1 <- theta2
ht <- ht2
#landa <- landa * fact
cnvg <- T
}
} ####################### end positive ############
####################### from above theta1 & ht must be returned back ############
##########################################################################################
g2 <- as.matrix(ht$angvec)
htg <- ht$angmat
.expr1 <- t(htg) %*% (htg) ## H' H p*p
.expr2 <- eiginv(.expr1,symmetric=T,stp=F) ## (H' H)-1
if(is.Fault(.expr2)){
heig <-eigen(.expr1,symmetric=T)
lambda <- abs(max(heig$values))
I <- diag(dim(.expr1) [2])
.expr1 <- .expr1 + lambda * I
.expr2 <- eiginv(.expr1,symmetric=T,stp=F)
}
#.expr2 <- zetainv
if(is.Fault(.expr2)){
    return(.expr2)   ## Note: eiginv should only return back a Fault
}
.expr3 <- htg %*% .expr2 ## H (H' H)^-1 n*p
.expr4 <- .expr3 %*% t(htg) ## H (H' H)^-1 H' n*n
.expr5 <- .expr4 %*% g2 ## VT = H (H' H)^-1 H' V n*1
angle <- t(g2) %*% (.expr5) ## V' * VT 1*1
angle <- angle / sqrt( sum(g2^2) * sum(.expr5^2) ) ## cos(V,VT)
if(is.missing(angle)|| is.nan(angle) || is.inf(angle)) return(Fault(FN=16,FF="optim.NLM"))
th <-as.list(theta1) ## without sigma
names(th) <- names(theta1)
iterhist <- rbind(iterhist,c(iteration = iterate,objfnc = as.numeric(ht$value),
unlist(th),converge = angle ,ilev=ilev))
if(angle < tolerance || diference < (tolerance/10)*abs(as.numeric(ht$value))) eol <- T
else if(iterate > maxiter) {
eol <- T
Fault2 <- Fault(FN=1,FF = "optim.NLM")
}
else {
ht <- objfnc(data=data,start=th,...)
if(is.Faultwarn(ht)){
return(ht)
}
}
} #\\\********* End Iteration ****************
#\\\*************************************************
result=list(parameters = th, objfnc=ht, history=iterhist)
return(result)
}
#****************************************************
#***** output: *****
#***** Structure of minus of pseudo likelihood *****
#** **
#** (very important) the output is different **
#** from standard **
#** inheritance: nl.fitt: **
#** parameters = tau **
#****************************************************
#******************************************************************************
#* +------------------------------------------------------------------------+ *
#* | End of 'optim.NLM' | *
#* | | *
#* | Hossein Riazoshams, UPM, INSPEM | *
#* | | *
#* | modified from 'nlmest' | *
#* | | *
#* | 11 Jan 2010 | *
#* | | *
#* +------------------------------------------------------------------------+ *
#******************************************************************************
# ===== /진진행1.R (krousk/r-working-space, no_license) =====
# The order of steps, like a recipe for cooking ramen
# You have to define the problem yourself. Draw conclusions if you can get that
# far; otherwise go as far as you can, within the scope you have in mind.
# Problem definition: using the classification of survivors of the Titanic, a major
# maritime disaster, find the optimal locations for installing additional rescue equipment.
# Count survivors per cabin class and find where they are fewest
# Find the ratio of accompanying passengers.
# In a passenger-ship accident, depending on the circumstances, it may not be
# possible to make use of all the rescue equipment on board.
setwd ("C:\\Rworkspace\\new-R-working-space")
ti <- read.csv(file="titanic_train.csv",
header = T)
library(dplyr)
library(ggplot2)
ls(ti)
class(ti)
summary(ti)
str(ti)
head(ti)
tail(ti)
# Total passengers aboard (*done)
length(ti$name)
# *The total number of passengers is 916.*
# Sex of all passengers (*done). Not particularly important; just for display.
ggplot(ti, aes(x=sex))+
geom_bar()+
  labs(x="Sex",y="Count")+
  ggtitle("Sex distribution of all passengers")
pie(table(ti$sex))
# Males: 581, females: 335.
# Roughly twice as many males as females.
# Ages of all passengers
ggplot(ti,aes(age))+
geom_histogram(breaks=seq(0,75,by=1),
fill="blue")+
  ggtitle("Passenger age distribution")
# Mostly distributed between ages 20 and 30.
# All passengers: died vs. survived
td <- ti$survived
td1 <- table(td)
pie(td1)
# Died: 563, survived: 353
# Details on deaths.
# Sex of the deceased
ti.dead <- ti%>%
group_by(survived)%>%
filter(survived==0)
ggplot(ti.dead,aes(x=sex))+
geom_bar()
# More than four times as many male deaths as female.
# Cabin class of the deceased
ti.d.c <- ti.dead%>%
group_by(pclass)%>%
filter(pclass==1)
summary(ti.dead)
# Ages of the deceased
ggplot(ti.dead,aes(age))+
  geom_histogram(breaks=seq(0,75,by=3),
                 fill="black")+
  ggtitle("Deaths by age")
# Most deaths occurred between ages 20 and 40
# Deceased fare: median 12 / mean 23,
# age: median 28, mean 30, with 128 missing values.
# The number of male deaths is more than four times that of females.
# Survivor information
ti.alive <- ti%>%
group_by(survived)%>%
filter(survived==1)
ggplot(ti.alive,aes(x=sex))+
geom_bar()
#
summary(ti.alive)
# Ages of survivors
ggplot(ti.alive,aes(age))+
  geom_histogram(breaks=seq(0,75,by=3),
                 fill="green")+
  ggtitle("Survivors by age")
# Survivor fare: median 26, mean 46
# age: median 29, mean 29.9
# Among survivors, females outnumber males by more than two to one.
# Passengers per class (*done)
ti.pclass <- ti%>%
group_by(pclass)
ti.pclass
ggplot(ti.pclass, aes(x=pclass))+
geom_bar()+
  labs(x="Cabin class", y="Passenger count")+
  ggtitle("Passengers per cabin class")
# Died/survived by class
# Family passengers: died/survived
# Passenger ages (*group by age band with if)
# Male and female passengers by class
class1 <- subset(ti, ti$pclass==1)
class1
ggplot(class1, aes(x=sex))+
geom_bar()
# How best to display the counts...
class2 <- subset(ti, ti$pclass==2)
class2
ggplot(class2, aes(x=sex))+
geom_bar()
class3 <- subset(ti, ti$pclass==3)
class3
ggplot(class3, aes(x=sex))+
geom_bar()
# Male and female passenger deaths by class
class1.d <- subset(ti, ti$pclass==1)%>%
  subset(survived==0)
# ===== /analysis/histograms/cdf_reviews.r (jcarreira/amazon-study, no_license) =====
reviews <- scan("reviews_per_user", sep="\n")
#reviews <- read.table("reviews_per_user")
pdf("cdf_reviews.pdf")
#reviews
#data = hist(reviews$score, plot=F)
#data
#data_counts = log10(data$counts)
#data
#replace(data_counts, data_counts<=0.0, 1)
#plot(1:26, data_counts, ylab='log10(Frequency)')
cdf_data <-ecdf(reviews)
#summary(cdf_data)
plot(cdf_data, log="x", xlim=c(1, 1e3), main="CDF of # Reviews per User", xlab="# Reviews", ylab="CDF", cex=0.3, cex.lab=1.3, cex.axis=1.3, cex.main=1.3, cex.sub=1.3)
#plot.ecdf(reviews, main="CDF of Reviews per User", xlab="# Reviews", ylab="CDF", cex=0.3, cex.lab=1.3, cex.axis=1.3, cex.main=1.3, cex.sub=1.3)#
#hist(reviews, main="Histogram of reviews per user", xlab="# Reviews")
#data <- hist(reviews, plot=FALSE)
#print (data)
#plot(data$count, log="y")
dev.off()
# =============================================================================
library(tidyverse)
library(sp) #Transforming latitude and longitude
library("iNEXT")
library(openxlsx)
#library(readxl)
library(parzer) #parse coordinates
dir_ini <- getwd()
##########################
#Data: 84_RuanVeldtmanApple_2011
##########################
data_raw <- read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/84_RuanVeldtmanApple_2011/SA_Apple_FieldTrip2010_JFColville_FOR_RUAN_July2012_EDIT.xlsx",
sheet = "Scan Sampling")
data_raw <- as_tibble(data_raw) %>% select(-X1)
# Transforms date to proper units
data_raw$date <- openxlsx::convertToDate(data_raw$date)
data_raw$month_of_study <- as.numeric(format(as.Date(data_raw$date, format="%Y/%m/%d"),"%m"))
# There should be 10 sites
data_raw %>% group_by(site_id) %>% count()
##############
# Data site
##############
data.site <- data_raw %>%
select(site_id,latitude,longitude) %>%
group_by(site_id,latitude,longitude) %>% count() %>% select(-n)
# We add data site ID
data.site$study_id <- "Ruan_Veldtman_Malus_domestica_South_Africa_2011"
data.site$crop <- "Malus domestica"
data.site$variety <- "Royal Gala"
data.site$management <- "IPM"
data.site$country <- "South Africa"
data.site$X_UTM <- NA
data.site$Y_UTM <- NA
data.site$zone_UTM <- NA
data.site$sampling_year <- 2011
data.site$field_size <- NA
data.site$yield <- NA
data.site$yield_units <- NA
data.site$yield2 <- NA
data.site$yield2_units <- NA
data.site$yield_treatments_no_pollinators <- NA
data.site$yield_treatments_pollen_supplement <- NA
data.site$yield_treatments_no_pollinators2 <- NA
data.site$yield_treatments_pollen_supplement2 <- NA
data.site$fruits_per_plant <- NA
data.site$fruit_weight <- NA
data.site$plant_density <- NA
data.site$seeds_per_fruit <- NA
data.site$seeds_per_plant <- NA
data.site$seed_weight <- NA
data.site$Publication <- "10.1038/ncomms8414"
data.site$Credit <- "J.F. Colville, R. Veldtman"
data.site$Email_contact <- "veldtman@sun.ac.za"
# Add sampling months
data.site$sampling_start_month <- NA
data.site$sampling_end_month <- NA
sites <- unique(data.site$site_id)
for (i in sites){
data.site$sampling_start_month[data.site$site_id==i] <-
data_raw %>% filter(site_id==i) %>%
select(month_of_study) %>% min()
data.site$sampling_end_month[data.site$site_id==i] <-
data_raw %>% filter(site_id==i) %>%
select(month_of_study) %>% max()
}
###########################
# SAMPLING DATA
###########################
data_raw_obs <- data_raw %>% select(site_id,Xylocopa.sp.,Honeybees) %>%
gather("Organism_ID","abundance",c(Xylocopa.sp.,Honeybees)) %>% filter(abundance>0)
#Add guild via guild list
gild_list_raw <- read_csv("Processing_files/Thesaurus_Pollinators/Table_organism_guild_META.csv")
gild_list <- gild_list_raw %>% select(-Family) %>% unique()
list_organisms <- select(data_raw_obs,Organism_ID) %>% unique() %>% filter(!is.na(Organism_ID))
list_organisms_guild <- list_organisms %>% left_join(gild_list,by=c("Organism_ID"))
#Check NA's in guild
list_organisms_guild %>% filter(is.na(Guild)) %>% group_by(Organism_ID) %>% count()
list_organisms_guild$Guild[list_organisms_guild$Organism_ID=="Honeybees"] <- "honeybees"
list_organisms_guild$Guild[is.na(list_organisms_guild$Guild)] <- "other_wild_bees"
#Sanity Checks
list_organisms_guild %>% filter(is.na(Guild)) %>% group_by(Organism_ID) %>% count()
#Add guild to observations
data_obs_guild <- data_raw_obs %>% left_join(list_organisms_guild, by = "Organism_ID")
#######################
# INSECT SAMPLING
#######################
# Remove entries with zero abundance
data_obs_guild <- data_obs_guild %>% filter(abundance>0)
insect_sampling <- tibble(
study_id = "Ruan_Veldtman_Malus_domestica_South_Africa_2011",
site_id = data_obs_guild$site_id,
pollinator = data_obs_guild$Organism_ID,
guild = data_obs_guild$Guild,
sampling_method = "observation",
abundance = data_obs_guild$abundance,
total_sampled_area = 2*(8-1)*5,
total_sampled_time = 2*8*5, #netting+observations
total_sampled_flowers = 2*8*758.5*0.5, #only one side of each tree is scanned
Description = "On each survey, one side of eight trees along a transect (trees spaced 5 m apart) were scanned for five minutes each and bee species were recorded and voucher specimens collected (average of 758.5 +/- 265.1 [1SD] open flowers per apple tree)."
)
#setwd("C:/Users/USUARIO/Desktop/OBservData/Datasets_storage")
write_csv(insect_sampling, "Processing_files/Datasets_storage/insect_sampling_Ruan_Veldtman_Malus_domestica_South_Africa_2011.csv")
#setwd(dir_ini)
#######################################
# ABUNDANCE
#######################################
# Add site observations
data_obs_guild_2 <- data_obs_guild %>%
group_by(site_id,Organism_ID,Guild) %>% summarise_all(sum,na.rm=TRUE)
abundance_aux <- data_obs_guild_2 %>%
group_by(site_id,Guild) %>% count(wt=abundance) %>%
spread(key=Guild, value=n)
names(abundance_aux)
# There are "honeybees" "other_wild_bees"
# GUILDS:honeybees, bumblebees, other wild bees, syrphids, humbleflies,
# other flies, beetles, non-bee hymenoptera, lepidoptera, and other
abundance_aux <- abundance_aux %>% mutate(lepidoptera=0,beetles=0,bumblebees=0,
syrphids=0,other=0,humbleflies=0,
non_bee_hymenoptera=0,other_flies=0,
total=0)
abundance_aux[is.na(abundance_aux)] <- 0
abundance_aux$total <- rowSums(abundance_aux[,c(2:ncol(abundance_aux))])
data.site <- data.site %>% left_join(abundance_aux, by = "site_id")
######################################################
# ESTIMATING CHAO INDEX
######################################################
abundace_field <- data_obs_guild %>%
select(site_id,Organism_ID,abundance)%>%
group_by(site_id,Organism_ID) %>% count(wt=abundance)
abundace_field <- abundace_field %>% spread(key=Organism_ID,value=n)
abundace_field[is.na(abundace_field)] <- 0
abundace_field$r_obser <- 0
abundace_field$r_chao <- 0
for (i in 1:nrow(abundace_field)) {
x <- as.numeric(abundace_field[i,2:(ncol(abundace_field)-2)])
chao <- ChaoRichness(x, datatype = "abundance", conf = 0.95)
abundace_field$r_obser[i] <- chao$Observed
abundace_field$r_chao[i] <- chao$Estimator
}
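# Chao1 estimates total richness as roughly S_obs + f1^2/(2*f2), with f1 and f2 the
# numbers of singletons and doubletons (iNEXT's ChaoRichness applies a bias-corrected form)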
# Load our estimation for taxonomic resolution
percentage_species_morphos <- 0.9
richness_aux <- abundace_field %>% select(site_id,r_obser,r_chao)
richness_aux <- richness_aux %>% rename(observed_pollinator_richness=r_obser,
other_pollinator_richness=r_chao) %>%
mutate(other_richness_estimator_method="Chao1",richness_restriction="only bees")
if (percentage_species_morphos < 0.8){
richness_aux[,2:ncol(richness_aux)] <- NA
}
data.site <- data.site %>% left_join(richness_aux, by = "site_id")
###############################
# FIELD LEVEL DATA
###############################
field_level_data <- tibble(
study_id = data.site$study_id,
site_id = data.site$site_id,
crop = data.site$crop,
variety = data.site$variety,
management = data.site$management,
country = data.site$country,
latitude = data.site$latitude,
longitude = data.site$longitude,
X_UTM=data.site$X_UTM,
Y_UTM=data.site$Y_UTM,
zone_UTM=data.site$zone_UTM,
sampling_start_month = data.site$sampling_start_month,
sampling_end_month = data.site$sampling_end_month,
sampling_year = data.site$sampling_year,
field_size = data.site$field_size,
yield=data.site$yield,
yield_units=data.site$yield_units,
yield2=data.site$yield2,
yield2_units=data.site$yield2_units,
yield_treatments_no_pollinators=data.site$yield_treatments_no_pollinators,
  yield_treatments_pollen_supplement=data.site$yield_treatments_pollen_supplement,
yield_treatments_no_pollinators2=data.site$yield_treatments_no_pollinators2,
yield_treatments_pollen_supplement2=data.site$yield_treatments_pollen_supplement2,
fruits_per_plant=data.site$fruits_per_plant,
fruit_weight= data.site$fruit_weight,
plant_density=data.site$plant_density,
seeds_per_fruit=data.site$seeds_per_fruit,
seeds_per_plant=data.site$seeds_per_plant,
seed_weight=data.site$seed_weight,
observed_pollinator_richness=data.site$observed_pollinator_richness,
other_pollinator_richness=data.site$other_pollinator_richness,
other_richness_estimator_method=data.site$other_richness_estimator_method,
richness_restriction = data.site$richness_restriction,
abundance = data.site$total,
ab_honeybee = data.site$honeybees,
ab_bombus = data.site$bumblebees,
ab_wildbees = data.site$other_wild_bees,
ab_syrphids = data.site$syrphids,
ab_humbleflies= data.site$humbleflies,
ab_other_flies= data.site$other_flies,
ab_beetles=data.site$beetles,
ab_lepidoptera=data.site$lepidoptera,
ab_nonbee_hymenoptera=data.site$non_bee_hymenoptera,
ab_others = data.site$other,
total_sampled_area = 70,
total_sampled_time = 80,
visitation_rate_units = "visits per 100 flowers and hour",
visitation_rate = data.site$total*60*100/80/(2*8*758.5*0.5),
visit_honeybee = data.site$honeybees*60*100/80/(2*8*758.5*0.5),
visit_bombus = data.site$bumblebees*60*100/80/(2*8*758.5*0.5),
visit_wildbees = data.site$other_wild_bees*60*100/80/(2*8*758.5*0.5),
visit_syrphids = data.site$syrphids*60*100/80/(2*8*758.5*0.5),
visit_humbleflies = data.site$humbleflies*60*100/80/(2*8*758.5*0.5),
visit_other_flies = data.site$other_flies*60*100/80/(2*8*758.5*0.5),
visit_beetles = data.site$beetles*60*100/80/(2*8*758.5*0.5),
visit_lepidoptera = data.site$lepidoptera*60*100/80/(2*8*758.5*0.5),
visit_nonbee_hymenoptera = data.site$non_bee_hymenoptera*60*100/80/(2*8*758.5*0.5),
visit_others = data.site$other*60*100/80/(2*8*758.5*0.5),
Publication = data.site$Publication,
Credit = data.site$Credit,
Email_contact = data.site$Email_contact
)
#setwd("C:/Users/USUARIO/Desktop/OBservData/Datasets_storage")
write_csv(field_level_data, "Processing_files/Datasets_storage/field_level_data_Ruan_Veldtman_Malus_domestica_South_Africa_2011.csv")
#setwd(dir_ini)
| /Processing_files/Datasets_Processing/KLEIJN 2015 DATABASE/Ruan_Veldtman_Malus_domestica_South_Africa_2011_NEW.R | permissive | ibartomeus/OBservData | R | false | false | 10,068 | r |
####### NAIVE METHOD FOR ESTIMATING VOTES OF PEOPLE IN CHARITY 2 DATABASE ######
### set working directory to hw3
###### LOADING LIBRARY ######
library(ggplot2)
library(reshape2)
###### LOADING DATA ######
## Load contacts data with zipcode and code_geo
contacts <- read.csv('data/charity2_contacts.csv',sep=';')
# Remove useless columns of contacts
contacts$id <- NULL
contacts$prefix_id <- NULL
contacts$birth_date <- NULL
contacts$age <- NULL
contacts$first_name_clean <- NULL
## Load 2012 first round french presidential election data, for each city in France, the distribution of votes for
## the different candidates
Election_2012_with_INSEE <- read.csv("data/Election_2012_with_INSEE_Round1.csv",sep=',')
## Build what we need : for each city, the distribution of the votes for the candidates
Election <- Election_2012_with_INSEE[,c("Code.INSEE","Code.du.département","Libellé.du.département",
"Code.de.la.commune","Libellé.de.la.commune", "Inscrits",
"Abstentions", "Votants", "Blancs.et.nuls", "Exprimés")]
Election["JOLY"] <- Election_2012_with_INSEE[,22]
Election["LE PEN"] <- Election_2012_with_INSEE[,28]
Election["SARKOZY"] <- Election_2012_with_INSEE[,34]
Election["MELENCHON"] <- Election_2012_with_INSEE[,40]
Election["POUTOU"] <- Election_2012_with_INSEE[,46]
Election["ARTHAUD"] <- Election_2012_with_INSEE[,52]
Election["CHEMINADE"] <- Election_2012_with_INSEE[,58]
Election["BAYROU"] <- Election_2012_with_INSEE[,64]
Election["DUPONT-AIGNAN"] <- Election_2012_with_INSEE[,70]
Election["HOLLANDE"] <- Election_2012_with_INSEE[,76]
rm(Election_2012_with_INSEE)
# Add 0 to code insee in Election table when the initial insee code has only 4 digits, for merge purpose
add_0toinsee <- function(code)
{
  if (nchar(code) == 4){
    return(paste('0',code,sep=''))
  } else {
    return(code)
  }
}
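# Vectorised alternative (illustrative; would require the stringr package):
# transformed_insee <- stringr::str_pad(as.character(Election$Code.INSEE), width = 5, pad = "0")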
insee <- as.character(Election$Code.INSEE)
transformed_insee <- as.vector(sapply(insee,add_0toinsee))
# change the insee column of Election
Election$Code.INSEE <- transformed_insee
###### NAIVE COUNT ######
# Merge contacts and its city votes during first round, we are merging on code_geo and Code.INSEE
contacts_votes <- merge(contacts, Election[,c('Code.INSEE','Inscrits',
'Abstentions','Blancs.et.nuls',
'JOLY','LE PEN','SARKOZY','MELENCHON','POUTOU','ARTHAUD','CHEMINADE',
'BAYROU','DUPONT-AIGNAN','HOLLANDE')],by.x='code_geo',by.y='Code.INSEE')
# After merging, 74 302 people remain; the others either did not live in mainland France
# or their zipcodes are not present in the election results table (e.g. DOM-TOM)
# Then for each person, assign the candidate who had the most votes in the corresponding city
votes = colnames(contacts_votes[,c(4:15)])[apply(contacts_votes[,c(4:15)],1,which.max)]
final_charity_votes = cbind(contacts_votes[,c(1:3)],votes)
# Distribution of the votes for our charity database
nb_abs = as.numeric(table(final_charity_votes$votes)[1])
proportion_with_abs = table(final_charity_votes$votes)/dim(final_charity_votes)[1]
proportion_without_abs = (table(final_charity_votes$votes)/(dim(final_charity_votes)[1]-nb_abs))[-1]
# On our target population, we had 13.36 % abstentions
# For the expressed votes, Hollande got 52.96 % of the votes, Sarkozy 40.73 %, Le Pen 5.94 %, Melenchon 0.24 %
# Dupont Aignan : 0.075 %, Bayrou : 0.038 %, Joly : 0.0031 %, Blank votes : 0.0016 % and nothing for the other candidates
# To compare with the true distribution on the whole french population :
# Hollande : 28.63 %, Sarkozy : 27.18 %, Le Pen : 17.9 %, Melenchon : 11.1 %, Dupont Aignan : 1.79 %
# Plot true vs estimated with our target population
data = data.frame(rbind(c('True',28.63,27.18,17.9,11.1,9.13,2.31,1.79,1.52,20.52),
c('Estimated',52.96,40.73,5.94,0.24,0.075,0.039,0.0031,0.0016,13.36)))
colnames(data) = c('model','Hollande','Sarkozy','Le Pen','Melenchon','Bayrou','Joly','Dupont-Aignan','Blank','Abs')
melt_data <-melt(data,id.vars="model")
melt_data$value <- as.numeric(melt_data$value)
bar = ggplot(melt_data,aes(x=variable,y=value,fill=model))+
geom_bar(stat="identity",position="dodge")+
geom_text(aes(label=value), position=position_dodge(width=0.9), vjust=-0.25)+
xlab("Candidates")+ylab("Percentage of votes") + labs(title='Percentages of votes for candidates Targeted Population with Naive Count')
plot(bar)
## CONCLUSION
# We don't know if our target population in the charity database significantly represents the whole french population
# However, with the naive method, we do not find results consistent with the reference population, so our database is surely not
# representative of the whole french population.
| /code/Naive_method.R | no_license | MKTA-LOJX/hw3 | R | false | false | 4,929 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/orgs.R
\name{delete.member.from.organization}
\alias{delete.member.from.organization}
\title{delete user from an organization}
\usage{
delete.member.from.organization(org, user, ctx = get.github.context())
}
\arguments{
\item{org}{the organization name}
\item{user}{the user name}
\item{ctx}{the github context object}
}
\value{
none
}
\description{
delete user from an organization
}
| /man/delete.member.from.organization.Rd | permissive | att/rgithub | R | false | true | 465 | rd |
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# stats
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#' show information about a data frame or other object
#' @param ... a data frame (or similar object) passed on to the underlying summary functions
#' @return called mainly for its side effects (printed summaries and a variable view)
#' @examples
#' \dontrun{z.show(iris)}
#' @export
z.show = function(...){
# flush the console after each block, otherwise large output may not be printed
# show(...)
print(Hmisc::describe(...))
flush.console()
cat('------------------------------\n')
print(summary(...))
flush.console()
cat('------------------------------\n')
str(...)
flush.console()
cat('------------------------------\n')
z.view(...)
}
#' show information about a data frame or other object (alias of \code{z.show})
#' @param ... passed on to \code{z.show}
#' @return see \code{z.show}
#' @export
z.info = z.show
#' show information about a data frame or similar object (like spss variable view)
#' @description wrapper of \code{\link[sjPlot]{view_df}}; the html can be made bigger by opening it in an internet browser
#' @param x a data frame or similar object
#' @param showFreq,showPerc,sortByName passed on to \code{sjPlot::view_df}
#' @param ... further arguments passed to \code{sjPlot::view_df}
#' @return the HTML variable view produced by \code{sjPlot::view_df}
#' @export
z.view = function(x, showFreq = T, showPerc = T, sortByName = F, ...){
sjPlot::view_df(x, showFreq = showFreq, showPerc = showPerc, sortByName = sortByName, ...)
}
| /R/stats.R | permissive | hmorzaria/zmisc | R | false | false | 1,191 | r |
rm( list=ls() )
source('~/coordination_fresh/functions.R')
source('setup.R')
eo_res <- read.csv( '~/coordination_fresh/figs/table_s3.csv', head=T )
opt_pvs <- as.character( eo_res[,3] )
names(opt_pvs) <- paste0( as.character( eo_res[,1] ), '_', as.character( eo_res[,2] ) )
hitphens <- as.character( eo_res[ eo_res[,'FDR'] < .1, 1 ] )
hitphens <- phens[ nicephens[phens] %in% hitphens ]
opt <- '_nondiag'
output <- matrix( NA, 0, 8 )
colnames(output) <- c( 'Phenotype', 'Tissue', 'PRS Type', 'TPRS %VE', 'p-value', 'Signif Level', 'p-va-exact', 'fdr' )
for( type in c( 'int', 'bin', 'ext' ) ){
if( type == 'ext' ){
phens <- extphens
} else if( type == 'bin' ){
phens <- binphens
} else {
phens <- qphens
}
phens <- intersect( phens, hitphens )
output_loc <- NULL
for( phen in phens )
for( tiss in tissues )
{
if( type == 'ext' & phen %in% extphens1 ){
pv <- '1.0'
} else {
pv <- opt_pvs[paste0( nicephens[phen], '_', nicetypes[type] )]
}
if( pv == 1 ) pv <- '1.0'
if( pv == 1e-4 ) pv <- '0.0001'
pv <- as.character(pv)
load( paste0('Rdata/allpairs_', pv, '_', phen, '_', type, '_', tiss, '_', 0, opt, '.Rdata') )
load( paste0('Rdata/hom_', pv, '_', phen, '_', type, '_', tiss, '.Rdata') )
pve <- round( fit_summ$coef['All_tiss',1]^2 * 100, 3 )
output_loc <- rbind( output_loc, c(
nicephens[phen],
nicetiss[tiss],
nicetypes[type],
paste0( pve, '%' ),
tidy( pval_inter ),
paste0( rep( '*', ( pval_inter < .05 ) + ( pval_inter < .05/ntiss ) ), collapse='' ),
pval_inter
))
rm( nicepv, fit_summ, pve, pval_inter )
}
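# Benjamini-Hochberg FDR adjustment, applied within each phenotype across tissues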
fdrs <- sapply( phens, function(phen){
phensub <- which( output_loc[,1] == nicephens[phen] )
p.adjust(output_loc[phensub,7],'fdr')
})
fdrs <- sapply( c( fdrs ), function(x) ifelse( x > .01, round( x, 2 ), format( x, digit=2, scientific=T ) ) )
output_loc<- cbind( output_loc, fdrs )
output_loc<- output_loc[ sort.list( output_loc[,1], dec=F ), ]
output <- rbind( output, output_loc )
rm( output_loc )
}
write.table( output[ , c(1:6,8)], file='~/coordination_fresh/figs/table_s4.csv', row.names=FALSE, quote=FALSE, sep=',' )
write.table( output[ as.numeric(output[,5]) < .05/ntiss , 1:5 ], file='~/coordination_fresh/figs/table3.csv' , row.names=FALSE, quote=FALSE, sep=',' )
| /test_CE/tiss/table3.R | no_license | nadavrap/CoordinatedInteractions | R | false | false | 2,317 | r |
#' @export
#'
r50x.plots.combo.lineplot_ty <- function( EXPdataList
, title = "RICE50x ty lineplot"
, group_variable
, y_label = "Value"
, x_label = "Year"
, legend_columns = NULL
, legend = "Legend"
, no_legend = FALSE
, LaTeX_text = FALSE
, categories = NULL
, colors_per_category=NULL
, columns = NULL
, show_confidence = FALSE
, y_lim = NULL
, x_lim = NULL
){
  if(LaTeX_text){ mytitle = latex2exp::TeX(title) } else { mytitle = title }
## Prepare single plots
plotlist = list()
for(p in c(1:length(EXPdataList))){
message( paste0("preparing plot < ",names(EXPdataList)[p]," > ...") )
EXPdata = r50x.utils.listify_EXPdataframe( EXPdataframe = EXPdataList[[p]]
, column_to_list = group_variable )
plottigat = r50x.plots.lineplot_ty( EXPdata = EXPdata
, title = names(EXPdataList)[p]
, y_label = y_label
, x_label = x_label
, legend_columns = legend_columns
, legend = legend
, LaTeX_text = LaTeX_text
, categories = categories
, colors_per_category = colors_per_category
, show_confidence = show_confidence
)
# if(!is.null(y_lim)) plottigat = plottigat + ylim(y_lim[1], y_lim[2])
# if(!is.null(x_lim)) plottigat = plottigat + xlim(x_lim[1], x_lim[2])
plotlist[[p]] <- local(print(plottigat + theme(legend.position="none") ))
}
## Combine plots
message( paste0("putting all together..") )
if(!is.null(columns)){ nCol = columns
nRow = ceiling(length(EXPdataList)/nCol)
  } else { nCol = ceiling(length(EXPdataList)/2)
nRow = ceiling(length(EXPdataList)/nCol) }
plottigat = ggpubr::annotate_figure( do.call("ggarrange", c(plotlist, ncol=nCol, nrow=nRow, common.legend = TRUE, legend="right") )
                                       , top = ggpubr::text_grob( mytitle, face = "bold", size = 16)
)
return(plottigat)
}
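# Illustrative usage (hypothetical objects; list names and group column are assumptions):
# combo <- r50x.plots.combo.lineplot_ty( EXPdataList = list(BAU = df_bau, NDC = df_ndc),
#                                        group_variable = "region", columns = 2 )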
| /R/73_lineplot_combo.R | permissive | gappix/rice50xplots | R | false | false | 2,784 | r |
# Exercise 2: Data Frame Practice with `dplyr`.
# Use a different approach to accomplish the same tasks as exercise-1
# Install devtools package: allows installations from GitHub
install.packages('devtools')
install.packages('dplyr')
library(dplyr)
# Install "fueleconomy" package from GitHub
devtools::install_github("hadley/fueleconomy")
# Require/library the fueleconomy package
library(fueleconomy)
# You should have have access to the `vehicles` data.frame
vehicles <- vehicles
# Create a data.frame of vehicles from 1997
vehicles.1997 <- filter(vehicles, year == 1997)  # year is numeric, so compare to a number
# Use the `unique` function to verify that there is only 1 value in the `year` column of your new data.frame
unique(vehicles.1997$year)
# Create a data.frame of 2-Wheel Drive vehicles that get more than 20 miles/gallon in the city
two.wheels <- filter(vehicles, drive == "2-Wheel Drive", cty > 20 )
# Of those vehicles, what is the vehicle ID of the vehicle with the worst hwy mpg?
worst.hwy <- filter(two.wheels, hwy == min(hwy))
# Write a function that takes a `year` and a `make` as parameters, and returns
# The vehicle that gets the most hwy miles/gallon of vehicles of that make in that year
best.hwy <- function(year, make){
  # note: inside dplyr::filter, a bare `year` would refer to the data column and
  # mask the function argument, so subset with base R first, then pick the max hwy
  candidates <- vehicles[vehicles$year == year & vehicles$make == make, ]
  filter(candidates, hwy == max(hwy))
}
# What was the most efficient honda model of 1995?
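# e.g. (assuming makes are capitalised as "Honda" in fueleconomy::vehicles):
# best.hwy(1995, "Honda")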
| /exercise-2/exercise.R | permissive | clalissayi/m9-dplyr | R | false | false | 1,333 | r |
#' Accumulated Local Effects Profiles aka ALEPlots
#'
#' Accumulated Local Effects Profiles accumulate local changes in Ceteris Paribus Profiles.
#' Function \code{\link{accumulated_dependency}} calls \code{\link{ceteris_paribus}} and then \code{\link{aggregate_profiles}}.
#'
#' Find more details in the \href{https://pbiecek.github.io/PM_VEE/accumulatedLocalProfiles.html}{Accumulated Local Dependency Chapter}.
#'
#' @param x an explainer created with function \code{DALEX::explain()}, an object of the class \code{ceteris_paribus_explainer}
#' or a model to be explained.
#' @param data validation dataset. Will be extracted from \code{x} if it's an explainer.
#' NOTE: It is best when target variable is not present in the \code{data}
#' @param predict_function predict function. Will be extracted from \code{x} if it's an explainer
#' @param variables names of variables for which profiles shall be calculated.
#' Will be passed to \code{\link{calculate_variable_split}}.
#' If \code{NULL} then all variables from the validation data will be used.
#' @param N number of observations used for calculation of accumulated dependency profiles.
#' By default, 500 observations will be chosen randomly.
#' @param ... other parameters
#' @param variable_splits named list of splits for variables, in most cases created with \code{\link{calculate_variable_split}}.
#' If \code{NULL} then it will be calculated based on validation data available in the \code{explainer}.
#' @param grid_points number of points for profile. Will be passed to \code{\link{calculate_variable_split}}.
#' @param label name of the model. By default it's extracted from the \code{class} attribute of the model
#' @param variable_type a character. If "numerical" then only numerical variables will be calculated.
#' If "categorical" then only categorical variables will be calculated.
#'
#' @references ALEPlot: Accumulated Local Effects (ALE) Plots and Partial Dependence (PD) Plots \url{https://cran.r-project.org/package=ALEPlot},
#' Predictive Models: Visual Exploration, Explanation and Debugging \url{https://pbiecek.github.io/PM_VEE}
#'
#' @return an object of the class \code{aggregated_profiles_explainer}
#'
#' @examples
#' library("DALEX")
#'
#' model_titanic_glm <- glm(survived ~ gender + age + fare,
#' data = titanic_imputed, family = "binomial")
#'
#' explain_titanic_glm <- explain(model_titanic_glm,
#' data = titanic_imputed[,-8],
#' y = titanic_imputed[,8],
#' verbose = FALSE)
#'
#' adp_glm <- accumulated_dependency(explain_titanic_glm,
#' N = 150, variables = c("age", "fare"))
#' head(adp_glm)
#' plot(adp_glm)
#'
#' \donttest{
#' library("randomForest")
#'
#' model_titanic_rf <- randomForest(survived ~., data = titanic_imputed)
#'
#' explain_titanic_rf <- explain(model_titanic_rf,
#' data = titanic_imputed[,-8],
#' y = titanic_imputed[,8],
#' verbose = FALSE)
#'
#' adp_rf <- accumulated_dependency(explain_titanic_rf, N = 200, variable_type = "numerical")
#' plot(adp_rf)
#'
#' adp_rf <- accumulated_dependency(explain_titanic_rf, N = 200, variable_type = "categorical")
#' plotD3(adp_rf, label_margin = 80, scale_plot = TRUE)
#' }
#'
#' @export
#' @rdname accumulated_dependency
accumulated_dependency <- function(x, ...)
UseMethod("accumulated_dependency")
#' @export
#' @rdname accumulated_dependency
accumulated_dependency.explainer <- function(x,
variables = NULL,
N = 500,
variable_splits = NULL,
grid_points = 101,
...,
variable_type = "numerical") {
# extracts model, data and predict function from the explainer
model <- x$model
data <- x$data
predict_function <- x$predict_function
label <- x$label
accumulated_dependency.default(x = model,
data = data,
predict_function = predict_function,
label = label,
variables = variables,
grid_points = grid_points,
variable_splits = variable_splits,
N = N,
..., variable_type = variable_type)
}
#' @export
#' @rdname accumulated_dependency
accumulated_dependency.default <- function(x,
data,
predict_function = predict,
label = class(x)[1],
variables = NULL,
N = 500,
variable_splits = NULL,
grid_points = 101,
...,
variable_type = "numerical") {
if (N < nrow(data)) {
# sample N points
ndata <- data[sample(1:nrow(data), N),]
} else {
ndata <- data
}
cp <- ceteris_paribus.default(x,
data,
predict_function = predict_function,
new_observation = ndata,
variables = variables,
grid_points = grid_points,
variable_splits = variable_splits,
label = label, ...)
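# ALE step: aggregate the local (ceteris paribus) changes with type = "accumulated"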
aggregate_profiles(cp, variables = variables, type = "accumulated", variable_type = variable_type, ...)
}
#' @export
#' @rdname accumulated_dependency
accumulated_dependency.ceteris_paribus_explainer <- function(x, ...,
variables = NULL) {
aggregate_profiles(x, ..., type = "accumulated", variables = variables)
}
| /R/accumulated_dependency.R | no_license | WojciechKretowicz/ingredients | R | false | false | 6,217 | r |
Var <-
function(S.PLFN){
    # mean squared deviation (divisor n) of a set of piecewise linear fuzzy numbers,
    # stored as an array of alpha-cut values with the n numbers along the third dimension
    n <- dim(S.PLFN)[3]
    knot.n <- dim(S.PLFN)[1] - 2
    M = Mean(S.PLFN)
    # accumulate squared deviations of each PLFN from the fuzzy mean
    Sq = 0
    for(k in 1:n) {
        Sq <- Sq + ( cuts.to.PLFN(S.PLFN[,,k]) - M )^2
    }
    v <- (1/n) * Sq
    return(v)
}
| /R/Var.R | no_license | cran/Sim.PLFN | R | false | false | 210 | r |
Relocation section '\.rela\.dyn' at offset .* contains .* entries:
Offset Info Type Sym\.Value Sym\. Name \+ Addend
00080c0c .*05 R_MIPS_HI16 00000000 __GOTT_BASE__ \+ 0
00080c10 .*06 R_MIPS_LO16 00000000 __GOTT_BASE__ \+ 0
00080c14 .*01 R_MIPS_16 00000000 __GOTT_INDEX__ \+ 0
0008141c 00000002 R_MIPS_32 80c5c
00081800 00000002 R_MIPS_32 80c5c
00081804 00000002 R_MIPS_32 81800
00081808 .*02 R_MIPS_32 00081808 dglobal \+ 0
0008180c .*02 R_MIPS_32 00000000 dexternal \+ 0
00081420 .*02 R_MIPS_32 00081c00 x \+ 0
Relocation section '\.rela\.plt' at offset .* contains 2 entries:
Offset Info Type Sym\.Value Sym\. Name \+ Addend
00081400 .*7f R_MIPS_JUMP_SLOT 00000000 sexternal \+ 0
00081404 .*7f R_MIPS_JUMP_SLOT 00080c64 sglobal \+ 0
| /external/binutils-2.38/ld/testsuite/ld-mips-elf/vxworks1-lib.rd | permissive | zhmu/ananas | R | false | false | 908 | rd |
#' @title Modified Mann-Kendall Test For Serially Correlated Data Using Hamed and Rao (1998) Variance Correction Approach Considering Only First Three Significant Lags.
#'
#' @description Time series data is often influenced by serial correlation. When data is not random and is influenced by autocorrelation, modified Mann-Kendall tests are to be used in trend detection. Hamed and Rao (1998) proposed a variance correction approach to address the issue of serial correlation in trend analysis. Trend is removed from the series and the effective sample size is calculated using significant serial correlation coefficients.
#'
#' @importFrom stats acf median pnorm qnorm
#'
#' @usage mmkh3lag(x,ci=0.95)
#'
#' @param x - Time series data vector
#'
#' @param ci - Confidence Interval
#'
#' @return Corrected Zc - Z-Statistic after variance Correction
#'
#' new P.value - P-Value after variance correction
#'
#' N/N* - Effective sample size
#'
#' Original Z - Original Mann-Kendall Z-Statistic
#'
#' Old P-value - Original Mann-Kendall P-Value
#'
#' Tau - Mann-Kendall's Tau
#'
#' Sen's Slope - Sen's slope
#'
#' old.variance - Old variance before variance Correction
#'
#' new.variance - Variance after correction
#'
#' @references Mann, H. B. (1945). Nonparametric Tests Against Trend. Econometrica, 13(3), 245–259. http://doi.org/10.1017/CBO9781107415324.004
#'
#' @references Kendall, M. (1975). Multivariate analysis. Charles Griffin. Londres. 0-85264-234-2.
#'
#' @references Sen, P. K. (1968). Estimates of the Regression Coefficient Based on Kendall’s Tau. Journal of the American Statistical Association, 63(324), 1379. http://doi.org/10.2307/2285891
#'
#' @references Hamed, K. H., & Ramachandra Rao, A. (1998). A modified Mann-Kendall trend test for autocorrelated data. Journal of Hydrology, 204(1–4), 182–196. http://doi.org/10.1016/S0022-1694(97)00125-X
#'
#' @references Rao, A. R., Hamed, K. H., & Chen, H.-L. (2003). Nonstationarities in hydrologic and environmental time series. http://doi.org/10.1007/978-94-010-0117-5
#'
#' @references Salas, J.D., 1980. Applied modeling of hydrologic times series. Water Resources Publication.
#'
#' @details The trend-free time series is constructed by calculating Sen's slope and the lag-1 autocorrelation coefficient AR(1). The variance correction approach proposed by Hamed and Rao (1998) uses only the significant values among the available autocorrelation coefficients. As suggested by Rao, A. R., Hamed, K. H., & Chen, H.-L. (2003), only the first three autocorrelation coefficients are used in this function.
#'
#' @examples x<-c(Nile)
#' mmkh3lag(x)
#'
#' @export
mmkh3lag <-function(x, ci=0.95) {
# Initialize the test Parameters
# Time-Series Vector
x = x
# Modified Z-Statistic after Variance Correction by Hamed&Rao(1998) method
z = NULL
# Original Z-Statistic for Mann-Kendall test before variance correction
z0 = NULL
# Modified P-value after variance correction by the Hamed & Rao (1998) method
pval = NULL
# Original P-Value for Mann-Kendall test before variance correction
pval0 = NULL
# Initialize Mann-Kendall 'S'- Statistic
S = 0
# Initialize Mann-Kendall Tau
Tau = NULL
# Correction factor n/n* value
essf = NULL
# Confidence interval
ci = ci
# To test whether the data is in vector format
if (is.vector(x) == FALSE) {
stop("Input data must be a vector")
}
# To test whether the data values are finite numbers and attempting to eliminate non-finite numbers
if (any(is.finite(x) == FALSE)) {
x[-c(which(is.finite(x) == FALSE))] -> x
warning("The input vector contains non-finite numbers. An attempt was made to remove them")
}
# Calculating Sen's slope
n <- length(x)
rep(NA, n * (n - 1)/2) -> V
k = 0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
k = k+1
V[k] = (x[j]-x[i])/(j-i)
}
}
median(V,na.rm=TRUE)->slp
# Calculating Trend-Free Series
t=1:length(x)
xn<-(x[1:n])-((slp)*(t))
# Calculating Mann-Kendall 'S'- Statistic
n <- length(x)
for (i in 1:(n-1)) {
for (j in (i+1):n) {
S = S + sign(x[j]-x[i])
}
}
# Calculating auto-correlation function of the ranks of observations (ro)
acf(rank(xn), lag.max=3, plot=FALSE)$acf[-1] -> ro
# Calculating significant auto-correlation at the given confidence interval (rof)
qnorm((1+ci)/2)/sqrt(n) -> sig
rep(NA,length(ro)) -> rof
for (i in 1:(length(ro))) {
if(ro[i] > sig || ro[i] < -sig) {
rof[i] <- ro[i]
} else {
rof[i] = 0
}
}
# Calculating 2/(n*(n-1)*(n-2))
2 / (n*(n-1)*(n-2)) -> cte
# Calculating sum((n-i)*(n-i-1)*(n-i-2)*rof[i]) over the first three lags
ess=0
for (i in 1:3) {
ess = ess + (n-i)*(n-i-1)*(n-i-2)*rof[i]
}
# Calculating variance correction factor (n/n*) as per Hamed and Rao (1998)
essf = 1 + ess*cte
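# i.e. the Hamed & Rao (1998) correction factor
#   n/n* = 1 + 2/(n*(n-1)*(n-2)) * sum_{i=1..3} (n-i)*(n-i-1)*(n-i-2)*rof[i],
# where rof[i] are the significant rank autocorrelations at lags 1 to 3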
# Calculating Mann-Kendall Variance before correction (Var(s))
var.S = n*(n-1)*(2*n+5)*(1/18)
if(length(unique(x)) < n) {
unique(x) -> aux
for (i in 1:length(aux)) {
length(which(x == aux[i])) -> tie
if (tie > 1) {
var.S = var.S - tie*(tie-1)*(2*tie+5)*(1/18)
}
}
}
# Calculating new variance Var(s)*=(Var(s))*(n/n*) as per Hamed and Rao (1998)
VS = var.S * essf
# Calculating Z-Statistic values before and after variance correction
# (else-if ensures the S == 0 case is not overwritten by the S < 0 branch)
if (S == 0) {
z = 0
z0 = 0
} else if (S > 0) {
z = (S-1)/sqrt(VS)
z0 = (S-1)/sqrt(var.S)
} else {
z = (S+1)/sqrt(VS)
z0 = (S+1)/sqrt(var.S)
}
# Calculating P-value before and after variance correction
pval = 2*pnorm(-abs(z))
pval0 = 2*pnorm(-abs(z0))
# Calculating kendall's Tau
Tau = S/(.5*n*(n-1))
# Sen's slope (slp) was already computed above from the same pairwise slopes, so it is reused here
return(c("Corrected Zc" = z,
"new P-value" = pval,
"N/N*" = essf,
"Original Z" = z0,
"old P.value" = pval0,
"Tau" = Tau,
"Sen's slope" = slp,
"old.variance"=var.S,
"new.variance"= VS))
}
| /R/mmkh3.R | no_license | nobrienUW/modifiedmk | R | false | false | 6,243 | r |
\name{demangle}
\alias{demangle}
\alias{Mangler-class}
\title{Mangle and Demangle C++ Symbols}
\description{
Transforms mangled C++ symbol names into human-readable, demangled strings.
}
\usage{
demangle(str, status = FALSE)
}
\arguments{
\item{str}{a character vector to demangle}
\item{status}{a logical value. If \code{TRUE}, the return value
is an integer vector giving the status code of each demangle operation.
This can be useful for determining why a string was not demangled,
i.e. why \code{NA} would be returned for that element when
\code{status} is \code{FALSE}.
}
}
\details{
}
\value{
If \code{status} is \code{FALSE}
a character vector with elements corresponding
to those in the input \code{str}.
These are the human-readable demangled strings.
If there was an error demangling an element of \code{str},
the corresponding element in the result will be \code{NA}.
If \code{status} is \code{TRUE}, an integer vector.
}
\references{
}
\author{
DTL
}
\seealso{
}
\examples{
demangle("_ZN9wikipedia7article6formatE")
demangle(c("_ZN9wikipedia7article6formatE", "_ZN9Something6Inside6Deeper10deepMethodEv"))
demangle(c("_ZN9wikipedia7article6formatE", "_ZN9Something6Inside6Deeper10deepMethodEv", "bob"), status = TRUE)
}
\keyword{programming}
| /man/demangle.Rd | no_license | duncantl/Rllvm | R | false | false | 1,203 | rd |
# Read source file
power_con <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings="?", stringsAsFactors=FALSE) # "?" marks missing values; keep strings as character so as.numeric() below converts values, not factor codes
# Coerce data classes
power_con$Date <- as.Date(power_con$Date,format="%d/%m/%Y")
power_con$Time <- format(strptime(power_con$Time, "%H:%M:%S"),"%H:%M:%S")
power_con[,3:9] <- lapply(power_con[,3:9],as.numeric)
# Create datetime Column
power_con$datetime <- as.POSIXlt(paste(power_con$Date, power_con$Time))
# Subset to 2007-02-01 and 2007-02-02
power_con <- subset(power_con, Date=="2007-02-01" | Date=="2007-02-02")
# Filter non-na values and relevant plotting variables
gap <- subset(power_con,!is.na(Global_active_power))$Global_active_power
# Generate Plot 1
hist(gap,col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)",
ylab = "Frequency",
ylim=c(0,1200))
# Copy plot to png graphics driver
dev.copy(png, file="plot1.png",width=480,height=480,units="px")
dev.off() | /Module_4-Exploratory_Data_Analysis/Week 1 - Assignment/plot1.R | no_license | markwanys/Data_Science_Specialization_JHU | R | false | false | 920 | r |
library(Hmisc)
### Name: Ecdf
### Title: Empirical Cumulative Distribution Plot
### Aliases: Ecdf Ecdf.default Ecdf.data.frame Ecdf.formula panel.Ecdf
### prepanel.Ecdf
### Keywords: nonparametric hplot methods distribution
### ** Examples
set.seed(1)
ch <- rnorm(1000, 200, 40)
Ecdf(ch, xlab="Serum Cholesterol")
scat1d(ch) # add rug plot
histSpike(ch, add=TRUE, frac=.15) # add spike histogram
# Better: add a data density display automatically:
Ecdf(ch, datadensity='density')
label(ch) <- "Serum Cholesterol"
Ecdf(ch)
other.ch <- rnorm(500, 220, 20)
Ecdf(other.ch,add=TRUE,lty=2)
sex <- factor(sample(c('female','male'), 1000, TRUE))
Ecdf(ch, q=c(.25,.5,.75)) # show quartiles
Ecdf(ch, group=sex,
label.curves=list(method='arrow'))
# Example showing how to draw multiple ECDFs from paired data
pre.test <- rnorm(100,50,10)
post.test <- rnorm(100,55,10)
x <- c(pre.test, post.test)
g <- c(rep('Pre',length(pre.test)),rep('Post',length(post.test)))
Ecdf(x, group=g, xlab='Test Results', label.curves=list(keys=1:2))
# keys=1:2 causes symbols to be drawn periodically on top of curves
# Draw a matrix of ECDFs for a data frame
m <- data.frame(pre.test, post.test,
sex=sample(c('male','female'),100,TRUE))
Ecdf(m, group=m$sex, datadensity='rug')
freqs <- sample(1:10, 1000, TRUE)
Ecdf(ch, weights=freqs) # weighted estimates
# Trellis/Lattice examples:
region <- factor(sample(c('Europe','USA','Australia'),100,TRUE))
year <- factor(sample(2001:2002,1000,TRUE))
Ecdf(~ch | region*year, groups=sex)
Key() # draw a key for sex at the default location
# Key(locator(1)) # user-specified positioning of key
age <- rnorm(1000, 50, 10)
Ecdf(~ch | equal.count(age), groups=sex) # use overlapping shingles
Ecdf(~ch | sex, datadensity='hist', side=3) # add spike histogram at top
| /data/genthat_extracted_code/Hmisc/examples/Ecdf.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,854 | r |
###########################################
# Functions to read plots data ------------
###########################################
library(gdata)
# read plots coordinates TODO UPDATE BY MARC DONE
read_data_plot <- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
data_p <- read.xls(file.path(path_samba, 'données_autrestables',
'metadonnees_placette_2015.xlsx'),
sheet = "placettes",
stringsAsFactors = FALSE)
names(data_p) <- c('plot_id', 'paper_yn', 'owner_id', 'management', 'year_first_mes', 'N_census',
'area', "x_min", "x_max", "y_min", "y_max",
'elevation', 'GPS_loc',
'x_lamb93', 'y_lamb93')
return(data_p)
}
## path_samba <- "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"
#### functions read data
read_data_sites <- function(site, prefix, path = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
test_xls <- drop(file.exists(file.path(path, "données_carto&mesures", site,
paste0(prefix,
tolower(site), '.xls'))))
test_xlsx <- drop(file.exists(file.path(path, "données_carto&mesures", site,
paste0(prefix,
tolower(site), '.xlsx'))))
if(test_xls){
print('xls')
data_t <- read.xls(file.path(path, "données_carto&mesures", site,
paste0(prefix, site, '.xls')),
stringsAsFactors = FALSE)
names(data_t) <- tolower(names(data_t))
return(data_t)
}else{
if(test_xlsx){
print('xlsx')
data_t <- read.xls(file.path(path, "données_carto&mesures", site,
paste0(prefix, site, '.xlsx')),
stringsAsFactors = FALSE)
names(data_t) <- tolower(names(data_t))
return(data_t)
}else{
return(NA)
}
}
}
read_mesures_all <- function(path_s = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
sites <- list.dirs(path = file.path(path_s, "données_carto&mesures"),
full.names = FALSE,
recursive = FALSE)
list_mesures <- lapply(sites, read_data_sites, prefix = 'mesures_')
list_test_names <- lapply(list_mesures,
function(x, names_vec) all(names(x) == names_vec),
names_vec = names(list_mesures[[1]]))
if (!all(unlist(list_test_names))) stop('error not sames names in data')
data_mesures <- do.call(rbind, list_mesures)
return(data_mesures)
#done
}
read_carto_all <- function(path_s = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
sites <- list.dirs(path = file.path(path_s, "données_carto&mesures"),
full.names = FALSE,
recursive = FALSE)
list_carto <- lapply(sites, read_data_sites, prefix = 'Carto_')
list_test_names <- lapply(list_carto,
function(x, names_vec) all(names(x) == names_vec),
names_vec = names(list_carto[[1]]))
if (!all(unlist(list_test_names))) stop('error not sames names in data')
data_carto <- do.call(rbind, list_carto)
return(data_carto)
# done
}
# rename data
rename_data_c <- function(df){
names(df) <- c('map_year', 'map_id', 'plot_id', 'stem_id',
'quadrat_id', 'code_species',
'x', 'y', 'z', 'year_birth')
df$stem_id <- df$map_id
df$map_year<- NULL
df$map_id <- NULL
return(df)
}
rename_data_m <- function(df){
names(df) <- c('measure_id', 'plot_id', 'year', 'stem_id',
'code_status', 'code_diam', 'dbh', 'h_tot',
'crown_h1', 'crown_h2', 'crown_h3', 'crown_h4',
'crown_r1', 'crown_r2', 'crown_r3', 'crown_r4',
'base_crown_h', 'strate')
df$stem_id <- paste0(df$stem_id, df$plot_id)
return(df)
}
## FUNCTIONS TO CLEAN DATA
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
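# e.g. trim("  ABAL ") returns "ABAL"; used below to clean raw species codes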
fix_species_code <- function(df){
old_name <- c('SOR', '', 'CHE', 'SA spp', 'ALI', 'BOU', 'ALIB', 'SO',
'AL spp', 'QU spp', 'IF', 'MER', 'SAP', 'ERP',
'FRE', 'BETU', 'CHA', 'ORM', 'SOAC')
new_name <- c('SOAU', 'ND', 'QUSP', 'SASP', 'SOAR', 'BESP', 'SOAR',
'SOAU', 'ALSP', 'QUSP', 'TABA', 'PRAV', 'ABAL',
'ACPL', 'FREX', 'BESP', 'CABE', 'ULSP', 'SOAU')
df$code_species <- trim(df$code_species)
df$code_species <- factor(df$code_species)
names_species <- levels(df$code_species)
names_species[match(old_name[old_name %in% names_species],
names_species)] <- new_name[old_name %in% names_species]
levels(df$code_species) <- names_species
df$code_species <- as.character(df$code_species)
return(df)
}
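# e.g. a legacy 'SAP' entry is recoded to 'ABAL', and 'SOR' to 'SOAU', per the mapping above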
fix_code_status <- function(df){
df$code_status[df$code_status == '0000-'] <- '0000'
df$code_status[df$code_status == '0'] <- '0000'
df$code_status[df$code_status == '9992'] <- '9990'
return(df)
}
get_id_data_dbh_min<- function(data_m){
data_d_min<- tapply(data_m$dbh, INDEX = data_m$plot_id, min, na.rm = TRUE)
data_dbh_max_tree<- tapply(data_m$dbh,
INDEX = data_m$stem_id,
max, na.rm = TRUE)
data_dbh_max_tree[data_dbh_max_tree == -Inf] <- NA
stem_id_remove <- names(data_dbh_max_tree)[data_dbh_max_tree <7.5 &
!is.na(data_dbh_max_tree)]
return(stem_id_remove)
}
remove_tree_below_dbh_map <- function(data_c, data_m){
vec <- get_id_data_dbh_min(data_m)
return(data_c[!(data_c$stem_id %in% vec), ])
}
remove_tree_below_dbh <- function(data_m){
return(data_m[data_m$dbh>=7.5 & !is.na(data_m$dbh), ])
}
check_data_missing_map<- function(data_c, data_m){
data_missing_carto <- data_m[!data_m$stem_id %in% data_c$stem_id, ]
if(nrow(data_missing_carto)>0){
write.csv(data_missing_carto, file.path("output", "data_missing_carto.csv"), row.names = FALSE)
stop("error: trees with measurements but no record in carto; check output/data_missing_carto.csv")
}
}
rm_data_missing_measure<- function(data_c, data_m){
print(paste0('N trees removed for having no measure: ',
sum(!(data_c$stem_id %in% data_m$stem_id))))
return(data_c[data_c$stem_id %in% data_m$stem_id, ])
}
remove_site_m <- function(df_m,df_p){
sites_to_remove <- df_p$plot_id[df_p$paper_yn == 'no']
return(df_m[!(df_m$plot_id %in% sites_to_remove), ])
}
remove_site_c <- function(df_c, df_p){
sites_to_remove <- df_p$plot_id[df_p$paper_yn == 'no']
return(df_c[!(df_c$plot_id %in% sites_to_remove), ])
}
fix_all_c <- function(df_c, df_m){
df_c <- fix_species_code(df_c)
check_data_missing_map(df_c, df_m)
df_c <- rm_data_missing_measure(df_c, df_m)
df_c$x <- as.numeric(df_c$x)
df_c$y <- as.numeric(df_c$y)
df_c$z <- as.numeric(df_c$z)
df_c$year_birth <- as.integer(df_c$year_birth)
return(df_c)
}
fix_all_m <- function(df_c, df_m){
df_m <- fix_code_status(df_m)
df_m$year <- as.numeric(df_m$year)
df_m$dbh <- as.numeric(df_m$dbh)
df_m$base_crown_m <- NULL
if(sum(duplicated(df_m$measure_id))>0)
stop('duplicated measure_id')
return(df_m)
}
add_var_p <- function(df_p){
require(maptools)
require(sp)
require(rgdal)
df_p <- df_p[!is.na(df_p$x_lamb93), ]
coordinates(df_p) <- c('x_lamb93', 'y_lamb93')
proj4string(df_p) <- CRS("+init=epsg:2154")
df_p2 <- spTransform(df_p, CRS("+init=epsg:4326"))
df_p$lat <- df_p2$y_lamb93
df_p$long <- df_p2$x_lamb93
df_p<- GetTopo(df_p)
list_res <- GetClimate(df_p)
df_p$MAT <- list_res$MAT
df_p$MAP <- list_res$MAP
geol <- GetGeol(df_p)
df_p$geol <- geol
df <- as.data.frame(df_p)
nn <- names(df)
names(df)[nn == "x"] <- 'x_lamb93'
names(df)[nn == "y"] <- 'y_lamb93'
df <- df[, c(3:ncol(df), 1:2)]
return(as.data.frame(df))
}
# TEST if all site in meta
check_all_sites_in_c_m <- function(d_c, d_m, d_p){
if(sum(!unique(d_c$plot_id) %in% d_p$plot_id)) stop('missing site of c in p')
if(sum(!unique(d_m$plot_id) %in% d_p$plot_id)) stop('missing site of m in p')
}
## CHECK WRONG XY COORDINATES
plot_xy_map <- function(site, data, d_p){
df_t <- data[data$plot_id == site, ]
if(sum(!is.na(df_t$x))>0 & sum(!is.na(df_t$y))>0){
min_x <- d_p[d_p$plot_id == site, 'x_min']
min_y <- d_p[d_p$plot_id == site, 'y_min']
max_x <- d_p[d_p$plot_id == site, 'x_max']
max_y <- d_p[d_p$plot_id == site, 'y_max']
par(pty = 's')
plot(as.numeric(df_t$x), as.numeric(df_t$y),
main = site,
xlab = 'x', ylab = 'y',
xlim = range(c(df_t$x, min_x, max_x,
df_t$y, min_y, max_y),
na.rm = TRUE),
ylim = range(c(df_t$y, min_y, max_y,
df_t$x, min_x, max_x),
na.rm = TRUE))
polygon(c(min_x, max_x, max_x, min_x),
c(min_y, min_y, max_y, max_y),
border = 'red')
points_pb <- df_t$x > max_x | df_t$x < min_x| df_t$y > max_y | df_t$y < min_y
points(df_t$x[points_pb], df_t$y[points_pb], col = 'red', pch = 16)
}
}
## Delete tree outside mapping area
get_wrong_xy_stem_id<- function(site, data, d_p){
df_t <- data[data$plot_id == site, ]
if(sum(!is.na(df_t$x))>0 & sum(!is.na(df_t$y))>0){
min_x <- d_p[d_p$plot_id == site, 'x_min']
min_y <- d_p[d_p$plot_id == site, 'y_min']
max_x <- d_p[d_p$plot_id == site, 'x_max']
max_y <- d_p[d_p$plot_id == site, 'y_max']
points_pb <- (df_t$x > max_x | df_t$x < min_x |
df_t$y > max_y | df_t$y < min_y) &
!is.na(df_t$x) & !is.na(df_t$y)
stem_id_pb <- df_t$stem_id[points_pb]
}else{
stem_id_pb <- c()
}
return(stem_id_pb)
}
remove_wrong_xy_tree_m <- function(d_m, d_c, d_p){
vec_wrong_xy_stem_id <- unlist(lapply(unique(d_c$plot_id),
get_wrong_xy_stem_id,
data = d_c, d_p = d_p))
return(d_m[!d_m$stem_id %in% vec_wrong_xy_stem_id, ])
}
remove_wrong_xy_tree_c <- function(d_m, d_c, d_p){
vec_wrong_xy_stem_id <- unlist(lapply(unique(d_c$plot_id),
get_wrong_xy_stem_id,
data = d_c, d_p = d_p))
return(d_c[!d_c$stem_id %in% vec_wrong_xy_stem_id, ])
}
read_all_data_and_clean <- function(){
#### TEST
data_p <- read_data_plot()
data_m <- read_mesures_all()
data_c <- read_carto_all()
data_m <- rename_data_m(data_m)
data_c <- rename_data_c(data_c)
data_c <- fix_all_c(data_c, data_m)
data_m <- fix_all_m(data_c, data_m)
data_c <- remove_tree_below_dbh_map(data_c, data_m)
data_m <- remove_tree_below_dbh(data_m)
data_c <- remove_site_c(data_c, data_p)
data_m <- remove_site_m(data_m, data_p)
data_p <- data_p[data_p$paper_yn == "yes", ]
check_all_sites_in_c_m(data_c, data_m, data_p)
pdf(file.path('figures', 'map_site_error.pdf'))
lapply(unique(data_c$plot_id), plot_xy_map, data = data_c, d_p = data_p)
dev.off()
data_m <- remove_wrong_xy_tree_m(data_m, data_c, data_p)
data_c<- remove_wrong_xy_tree_c(data_m, data_c, data_p)
data_p <- add_var_p(data_p)
print('done')
saveRDS(list(c = data_c, m = data_m, p = data_p), file.path('output', 'list_data.rds'))
}
## list_data <- read_all_data_and_clean(path_samba, path_samba_r)
save_data_c <- function(){
list_d <- readRDS(file.path('output', 'list_data.rds'))
write.csv(list_d$c, file.path('output', 'data_c.csv'),
row.names = FALSE)
}
save_data_m <- function(){
list_d <- readRDS(file.path('output', 'list_data.rds'))
write.csv(list_d$m, file.path('output', 'data_m.csv'),
row.names = FALSE)
}
save_data_p <- function(){
list_d <- readRDS(file.path('output', 'list_data.rds'))
write.csv(list_d$p, file.path('output', 'data_p.csv'),
row.names = FALSE)
}
get_data_c <- function() read.csv(file.path('output', 'data_c.csv'),
stringsAsFactors = FALSE)
get_data_m <- function() read.csv(file.path('output', 'data_m.csv'),
stringsAsFactors = FALSE)
get_data_p <- function() read.csv(file.path('output', 'data_p.csv'),
stringsAsFactors = FALSE)
## GENERATE EMPTY METADATA
generate_metadata_and_save<- function(data, name_data){
metadata <- data.frame(variables = names(data),
type = unlist(lapply(data, class)),
unit = NA, definition = NA)
write.csv(metadata, file.path('output',
paste0('metadata_',
name_data,
'.csv')), row.names = FALSE)
write.csv(data, file.path('output',
paste0(name_data,
'.csv')), row.names = FALSE)
}
generate_metadata_c_e<- function(data, name_data = 'data_c') generate_metadata_and_save(data, name_data )
generate_metadata_m_e<- function(data, name_data = 'data_m') generate_metadata_and_save(data, name_data )
generate_metadata_p_e<- function(data, name_data = 'data_p') generate_metadata_and_save(data, name_data )
data_sp_code<- function(data_c,
path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
data_code <- read.xls(file.path(path_samba, 'données_autrestables',
'Code_species.xlsx'),
stringsAsFactors = FALSE)
sp_vec <- unique(data_c$code_species)
if(sum(!sp_vec %in% data_code$Code_species)>0)stop("missing species in Code_species.xlsx")
d <- data_code[data_code$Code_species %in% sp_vec, ]
names(d) <- c("code_species", "latin_name")
write.csv(d, file.path('output', 'data_sp_code.csv'), row.names = FALSE)
}
## Check Table
data_census <- function(data_m){
data_census <- table(data_m$plot_id, data_m$year)
write.csv(data_census, file.path('output', 'data_census.csv'), row.names = FALSE)
}
data_sp_site<- function(data_c){
data_sp <- table(data_c$code_species, data_c$plot_id)
write.csv(data_sp, file.path('output', 'data_sp_site.csv'), row.names = FALSE)
}
## print_table_control(data_m, data_c) TODO ADD TO REMAKE
### QUALITY CHECK
### Growth error
growth_dead_tree <- function(i, data, yy, j){
dbh1 <- data[data$stem_id== j &
data$year == yy[i], 'dbh']
dbh2 <- data[data$stem_id== j &
data$year == yy[i+1], 'dbh']
df <- data.frame(stem_id = j,
year1 = yy[i], year2 = yy[i+1],
dbh1 = dbh1, dbh2 = dbh2,
code_diam1 = data[data$stem_id== j &
data$year == yy[i], 'code_diam'],
code_diam2 = data[data$stem_id== j &
data$year == yy[i+1], 'code_diam'],
code_status1 = data[data$stem_id== j &
data$year == yy[i], 'code_status'],
code_status2 = data[data$stem_id== j &
data$year == yy[i+1], 'code_status']
)
return(df)
}
growth_tree_all <- function(j, df){
years <- sort(df[df$stem_id== j, ]$year)
list_t <- vector('list')
list_t <- lapply(seq_len(length(years) -1), growth_dead_tree, df, years, j)
res <- do.call(rbind, list_t)
return(res)
}
save_data_growth <- function(df){
require(parallel)
cl <- makeCluster(12, type="FORK")
trees_ids<- unique(df$stem_id)
list_all <- parLapply(cl, trees_ids, growth_tree_all, df)
stopCluster(cl)
res <- do.call(rbind, list_all)
res$G <- (res$dbh2-res$dbh1)/(res$year2-res$year1)
res$same_code_diam <- res$code_diam1 == res$code_diam2
write.csv(res, file.path('output', 'df_growth.csv'), row.names = FALSE)
}
get_data_growth <- function(){
read.csv(file.path('output', 'df_growth.csv'))
}
## df_growth <- data_growth(data_m)
cook_outlier_detec <- function(df, x, y){
require(MASS)
df <- df[complete.cases(df[, c(x, y)]), ]
fr <- as.formula(paste(y, " ~ ", x))
ols <- lm(fr, df)
d1 <- cooks.distance(ols)
r <- stdres(ols)
a <- cbind(df, d1, r)
a_out <- a[d1 > 6*mean(d1), ]
points(a_out[[x]], a_out[[y]], pch = 4)
return(a_out$stem_id)
}
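## Usage sketch (assumes an open plot, e.g. from plot_growth_error below):
## out_ids <- cook_outlier_detec(get_data_growth(), 'dbh1', 'G')
## The 6*mean(d1) cutoff is a rule-of-thumb threshold on Cook's distance.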
plot_quant_reg <- function(df, x, y,
probs_vec = c(0.005, 0.995),
smooth_dg = 3){
require(quantreg)
require(splines)
df <- df[complete.cases(df[, c(x, y)]), ]
x_seq_pred <- seq(from = min(df[[x]], na.rm = TRUE),
to = max(df[[x]], na.rm = TRUE),
length.out = 100)
fr <- as.formula(paste(y, " ~ ", paste("bs(", x, ", df = smooth_dg)")))
df_pred <- data.frame( 0, x_seq_pred)
names(df_pred) <- c(y, x)
X <- model.matrix(fr, df_pred)
for(tau in probs_vec){
fit <- rq(fr, tau, data=df)
accel.fit <- X %*% fit$coef
lines(x_seq_pred,accel.fit, col = 'black')
if(tau == probs_vec[1]){
vec_pb <- df[[y]] < predict(fit)
}else{
vec_pb <- df[[y]] > predict(fit)
}
points(df[vec_pb, x], df[vec_pb, y], pch = 16, col = 'red')
df[[paste0('tau',tau)]] <- vec_pb
}
return(df$stem_id[apply(df[, paste0('tau', probs_vec)], MARGIN = 1, sum)>0])
}
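## Usage sketch: flags stems outside the default 0.5%/99.5% quantile-spline
## envelope (requires an open plot):
## ids <- plot_quant_reg(get_data_growth(), 'dbh1', 'G')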
plot_growth_error <- function(df){
plot(df$dbh1, df$G, cex = 0.2,
xlab = 'Initial DBH (cm)', ylab = 'Diameter Growth (cm/yr.)')
quant_id <- plot_quant_reg(df, 'dbh1', 'G')
cook_id <- cook_outlier_detec(df, 'dbh1', 'G')
}
save_growth_error <- function(df){
plot(df$dbh1, df$G, cex = 0.2,
col = c('green', 'black')[unclass(factor(df$same_code_diam))],
xlab = 'Initial DBH (cm)', ylab = 'Diameter Growth (cm/yr.)')
abline(h = quantile(df$G, probs = c(0.0025, 0.9975), na.rm = TRUE),
col = 'gray')
quant_id <- plot_quant_reg(df, 'dbh1', 'G')
cook_id <- cook_outlier_detec(df, 'dbh1', 'G')
all_id <- c(as.character(quant_id), as.character(cook_id))
write.csv(data.frame(stem_id = df[df$stem_id %in% all_id[duplicated(all_id)], ]),
file = file.path('output', 'tree_wrong_growth.csv'),
row.names = FALSE)
}
## check if dead trees are recorded alive again TODO
save_stem_id_resurrected <- function(df){
d <- df[df$code_status1 %in% c('9990', '9991') &
df$code_status2 == '0000',]
print(dim(d))
write.csv(d, file.path('output', 'data_resurrected_tree.csv'), row.names = FALSE)
}
## save_stem_id_resurrected(df_growth)
## no trees
# check allometry TODO REMOVE base_crown_h
## test H tot and H crown
save_stem_id_wrong_crown_h<- function(df_m){
vec_pb <- df_m$h_tot/apply(df_m[ , paste0('crown_h', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)<1
d <- df_m$stem_id[ vec_pb & !is.na(vec_pb)]
print(length(d))
write.csv(d, file.path('output', 'data_wrong_crown_h_tree.csv'), row.names = FALSE)
}
#save_stem_id_wrong_crown_h(data_m)
# plots
plot_allo_error <- function(data){
par(mfrow = c(2, 2))
plot(data$dbh, data$h_tot, xlab = 'dbh', ylab = 'h', cex = 0.3)
plot_quant_reg(data, 'dbh', 'h_tot')
cook_outlier_detec(data, 'dbh', 'h_tot')
data$crown_r <- apply(data[ , paste0('crown_r', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)
plot(data$dbh, data$crown_r,
xlab = 'dbh', ylab= 'crown radius',
cex = 0.3)
plot_quant_reg(data, 'dbh', 'crown_r')
cook_outlier_detec(data, 'dbh', 'crown_r')
data$crown_h <- apply(data[ , paste0('crown_h', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)
vec_pb <- data$h_tot/data$crown_h<1
plot(data$h_tot, data$crown_h,
xlab = 'h', ylab= 'crown height',
col = unclass(factor(vec_pb & !is.na(vec_pb))),
cex = 0.3)
plot_quant_reg(data, 'h_tot', 'crown_h')
cook_outlier_detec(data, 'h_tot', 'crown_h')
lines(0:100, 0:100, col = 'red')
}
save_allo_error <- function(data){
plot(data$dbh, data$h_tot, xlab = 'dbh', ylab = 'h', cex = 0.3,
col = unclass(factor(data$plot_id)))
abline(h=50)
quant_id_1<- plot_quant_reg(data, 'dbh', 'h_tot')
cook_outlier_detec(data, 'dbh', 'h_tot')
data$crown_r <- apply(data[ , paste0('crown_r', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)
plot(data$dbh, data$crown_r,
xlab = 'dbh', ylab= 'crown radius',
cex = 0.3)
quant_id_2 <- plot_quant_reg(data, 'dbh', 'crown_r')
cook_outlier_detec(data, 'dbh', 'crown_r')
vec_pb <- data$h_tot/apply(data[ , paste0('crown_h', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)<1
outlier_3 <- data$stem_id[vec_pb & !is.na(vec_pb)]
write.csv(data.frame(stem_id = unique(c(quant_id_1, quant_id_2, outlier_3))),
file = file.path('output', 'tree_wrong_allo.csv'),
row.names = FALSE)
vec_pb <- (data$h_tot>50 & !is.na(data$h_tot)) | (data$crown_r>7 & !is.na(data$crown_r))
d <- data$stem_id[ vec_pb & !is.na(vec_pb)]
print(length(d))
write.csv(d, file.path('output', 'data_wrong_allo2.csv'), row.names = FALSE)
}
## Tables for data paper
# TABLE 1 PLOT DESCRIPTION plot_id (check diff with plot_id) area, elevation, lat, long,
table_plot <- function(df_p){
table_p <- df_p[, c('plot_id', 'area', 'elevation', 'slope', 'aspect', 'lat',
'long', 'MAT', 'MAP', 'geol')]
write.csv(table_p, file.path('output', 'table_plot.csv'), row.names = FALSE)
}
# TABLE 2 plot_id year_first_meas N census, main species, N initial G initial
table_stand_descrip<- function(df_p, df_m, df_c, treshold_sp= 0.1){
require(dplyr)
table_p2 <- df_p[, c("plot_id", "area")]
df <- dplyr::left_join(df_m, df_c[, c('stem_id', 'code_species')], by = 'stem_id')
table_p3 <- df %>% dplyr::group_by(plot_id) %>%
dplyr::summarise(first_year = min(year),
n_census = n_distinct(year))
df <- df %>% dplyr::filter(code_status %in% c('0', '8881', '8882')) %>%
dplyr::arrange(year) %>% dplyr::distinct(stem_id)
main_sp <- tapply(df$code_species,
df$plot_id,
function(x) paste(names(table(x))[table(x)/
length(x)>treshold_sp],
collapse = ' and '))
n_init<- tapply(df$code_species,
df$plot_id,
length)
ba_init<- tapply(pi*df$dbh^2/4,
df$plot_id,
sum)
table_p4 <- data.frame(plot_id = names(main_sp),
main_sp = main_sp,
n_init = n_init, ba_init = ba_init,
stringsAsFactors = FALSE)
tab <- dplyr::left_join(table_p2, table_p3, by = 'plot_id')
tab <- dplyr::left_join(tab, table_p4, by = 'plot_id')
tab$ba_init <- tab$ba_init/(tab$area * 10000)
tab$n_init <- tab$n_init/(tab$area)
write.csv(tab, file.path('output', 'table_stand_descrip.csv'), row.names = FALSE)
}
# TABLE 3 diam min, n of height measures, n of crown radius measures, n of crown height measures, dead and stump at establishment Y/N, loc xy or quadrat
table_stand_allo<- function(df_p, df_m, df_c){
require(dplyr)
df_m <- df_m %>% dplyr::rowwise() %>%
dplyr::mutate(crown_h = mean(c(crown_h1, crown_h2,
crown_h3, crown_h4),
na.rm = TRUE),
crown_r = mean(c(crown_r1, crown_r2,
crown_r3, crown_r4),
na.rm = TRUE))
tab1 <- df_m %>% dplyr::group_by( plot_id) %>%
dplyr::summarise(n_h = sum(!is.na(h_tot)),
n_crown_h = sum(!is.na(crown_h)),
n_crown_r = sum(!is.na(crown_r)))
df <- df_m %>%
dplyr::arrange(year) %>% dplyr::distinct(stem_id)
tab2 <- df %>% dplyr::group_by(plot_id) %>%
dplyr::summarise(dead_init_tf = sum(code_status %in% c("9991", "9990"))>0)
tab3 <- df_c%>% dplyr::group_by(plot_id) %>%
dplyr::summarise(xy_tf = sum(!is.na(x))>0)
tab <- dplyr::left_join(tab1, tab2, by = 'plot_id')
tab <- dplyr::left_join(tab, tab3, by = 'plot_id')
write.csv(tab, file.path('output', 'table_stand_allo.csv'), row.names = FALSE)
}
#
species_code <- function(df_c, path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
code_species <- read.xls(file.path(path_samba, 'données_autrestables',
'Code_species.xlsx'),
stringsAsFactors = FALSE)
names(code_species) <- c("code_species", "Latin.name")
df <- code_species[code_species$code_species %in% unique(df_c$code_species), ]
write.csv(df, file.path('output', 'species_code.csv'), row.names = FALSE)
}
status_code <- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
code_status <- read.xls(file.path(path_samba, 'données_autrestables',
'Code_status.xlsx'),
stringsAsFactors = FALSE)
write.csv(code_status, file.path('output', 'status_code.csv'), row.names = FALSE)
}
get_meta_c<- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
df <- read.xls(file.path(path_samba, 'données_autrestables',
'metadonnees_carto.xlsx'),
stringsAsFactors = FALSE)
write.csv(df, file.path('output', 'metadata_data_c.csv'), row.names = FALSE)
}
get_meta_m<- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
df <- read.xls(file.path(path_samba, 'données_autrestables',
'metadonnees_mesures.xlsx'),
stringsAsFactors = FALSE)
write.csv(df, file.path('output', 'metadata_data_m.csv'), row.names = FALSE)
}
get_meta_p<- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
df <- read.xls(file.path(path_samba, 'données_autrestables',
'metadonnees_plot.xlsx'),
stringsAsFactors = FALSE)
write.csv(df, file.path('output', 'metadata_data_p.csv'), row.names = FALSE)
}
| /R/ReadData.R | no_license | MarcFuhr/AlpineForestData | R | false | false | 26,178 | r | ###########################################
# Functions to read plots data ------------
###########################################
library(gdata)
# read plots coordinates TODO UPDATE BY MARC DONE
read_data_plot <- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
data_p <- read.xls(file.path(path_samba, 'données_autrestables',
'metadonnees_placette_2015.xlsx'),
sheet = "placettes",
stringsAsFactors = FALSE)
names(data_p) <- c('plot_id', 'paper_yn', 'owner_id', 'management', 'year_first_mes', 'N_census',
'area', "x_min", "x_max", "y_min", "y_max",
'elevation', 'GPS_loc',
'x_lamb93', 'y_lamb93')
return(data_p)
}
## path_samba <- "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"
#### functions read data
read_data_sites <- function(site, prefix, path = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
test_xls <- drop(file.exists(file.path(path, "données_carto&mesures", site,
paste0(prefix,
tolower(site), '.xls'))))
test_xlsx <- drop(file.exists(file.path(path, "données_carto&mesures", site,
paste0(prefix,
tolower(site), '.xlsx'))))
if(test_xls){
print('xls')
data_t <- read.xls(file.path(path, "données_carto&mesures", site,
paste0(prefix, site, '.xls')),
stringsAsFactors = FALSE)
names(data_t) <- tolower(names(data_t))
return(data_t)
}else{
if(test_xlsx){
print('xlsx')
data_t <- read.xls(file.path(path, "données_carto&mesures", site,
paste0(prefix, site, '.xlsx')),
stringsAsFactors = FALSE)
names(data_t) <- tolower(names(data_t))
return(data_t)
}else{
return(NA)
}
}
}
read_mesures_all <- function(path_s = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
sites <- list.dirs(path = file.path(path_s, "données_carto&mesures"),
full.names = FALSE,
recursive = FALSE)
list_mesures <- lapply(sites, read_data_sites, prefix = 'mesures_')
list_test_names <- lapply(list_mesures,
function(x, names_vec) all(names(x) == names_vec),
names_vec = names(list_mesures[[1]]))
if (!all(unlist(list_test_names))) stop('error not sames names in data')
data_mesures <- do.call(rbind, list_mesures)
return(data_mesures)
#done
}
read_carto_all <- function(path_s = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
sites <- list.dirs(path = file.path(path_s, "données_carto&mesures"),
full.names = FALSE,
recursive = FALSE)
list_carto <- lapply(sites, read_data_sites, prefix = 'Carto_')
list_test_names <- lapply(list_carto,
function(x, names_vec) all(names(x) == names_vec),
names_vec = names(list_carto[[1]]))
if (!all(unlist(list_test_names))) stop('error not sames names in data')
data_carto <- do.call(rbind, list_carto)
return(data_carto)
# done
}
# rename data
rename_data_c <- function(df){
names(df) <- c('map_year', 'map_id', 'plot_id', 'stem_id',
'quadrat_id', 'code_species',
'x', 'y', 'z', 'year_birth')
df$stem_id <- df$map_id
df$map_year<- NULL
df$map_id <- NULL
return(df)
}
rename_data_m <- function(df){
names(df) <- c('measure_id', 'plot_id', 'year', 'stem_id',
'code_status', 'code_diam', 'dbh', 'h_tot',
'crown_h1', 'crown_h2', 'crown_h3', 'crown_h4',
'crown_r1', 'crown_r2', 'crown_r3', 'crown_r4',
'base_crown_h', 'strate')
df$stem_id <- paste0(df$stem_id, df$plot_id)
return(df)
}
## FUNCTIONS TO CLEAN DATA
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
fix_species_code <- function(df){
old_name <- c('SOR', '', 'CHE', 'SA spp', 'ALI', 'BOU', 'ALIB', 'SO',
'AL spp', 'QU spp', 'IF', 'MER', 'SAP', 'ERP',
'FRE', 'BETU', 'CHA', 'ORM', 'SOAC')
new_name <- c('SOAU', 'ND', 'QUSP', 'SASP', 'SOAR', 'BESP', 'SOAR',
'SOAU', 'ALSP', 'QUSP', 'TABA', 'PRAV', 'ABAL',
'ACPL', 'FREX', 'BESP', 'CABE', 'ULSP', 'SOAU')
df$code_species <- trim(df$code_species)
df$code_species <- factor(df$code_species)
names_species <- levels(df$code_species)
names_species[match(old_name[old_name %in% names_species],
names_species)] <- new_name[old_name %in% names_species]
levels(df$code_species) <- names_species
df$code_species <- as.character(df$code_species)
return(df)
}
fix_code_status <- function(df){
df$code_status[df$code_status == '0000-'] <- '0000'
df$code_status[df$code_status == '0'] <- '0000'
df$code_status[df$code_status == '9992'] <- '9990'
return(df)
}
get_id_data_dbh_min<- function(data_m){
data_d_min<- tapply(data_m$dbh, INDEX = data_m$plot_id, min, na.rm = TRUE)
data_dbh_max_tree<- tapply(data_m$dbh,
INDEX = data_m$stem_id,
max, na.rm = TRUE)
data_dbh_max_tree[data_dbh_max_tree == -Inf] <- NA
stem_id_remove <- names(data_dbh_max_tree)[data_dbh_max_tree <7.5 &
!is.na(data_dbh_max_tree)]
return(stem_id_remove)
}
remove_tree_below_dbh_map <- function(data_c, data_m){
vec <- get_id_data_dbh_min(data_m)
return(data_c[!(data_c$stem_id %in% vec), ])
}
remove_tree_below_dbh <- function(data_m){
return(data_m[data_m$dbh>=7.5 & !is.na(data_m$dbh), ])
}
check_data_missing_map<- function(data_c, data_m){
data_missing_carto <- data_m[!data_m$stem_id %in% data_c$stem_id, ]
if(nrow(data_missing_carto)>0){
write.csv(data_missing_carto, file.path("output", "data_missing_carto.csv"), row.names = FALSE)
stop("error trees with measurement but no recorder in carto check output/data_missing_carto.csv")
}
}
rm_data_missing_measure<- function(data_c, data_m){
print(paste0('N removed tree with no measure ',
length(data_c[data_c$stem_id %in% data_m$stem_id, "stem_id"])))
return(data_c[data_c$stem_id %in% data_m$stem_id, ])
}
remove_site_m <- function(df_m,df_p){
sites_to_remove <- df_p$plot_id[df_p$paper_yn == 'no']
return(df_m[!(df_m$plot_id %in% sites_to_remove), ])
}
remove_site_c <- function(df_c, df_p){
sites_to_remove <- df_p$plot_id[df_p$paper_yn == 'no']
return(df_c[!(df_c$plot_id %in% sites_to_remove), ])
}
fix_all_c <- function(df_c, df_m){
df_c <- fix_species_code(df_c)
check_data_missing_map(df_c, df_m)
df_c <- rm_data_missing_measure(df_c, df_m)
df_c$x <- as.numeric(df_c$x)
df_c$y <- as.numeric(df_c$y)
df_c$x <- as.numeric(df_c$x)
df_c$z <- as.numeric(df_c$z)
df_c$year_birth <- as.integer(df_c$year_birth)
return(df_c)
}
fix_all_m <- function(df_c, df_m){
df_m <- fix_code_status(df_m)
df_m$year <- as.numeric(df_m$year)
df_m$dbh <- as.numeric(df_m$dbh)
df_m$base_crown_m <- NULL
if(sum(duplicated(df_m$measure_id))>0)
stop('duplicated measure_id')
return(df_m)
}
add_var_p <- function(df_p){
require(maptools)
require(sp)
require(rgdal)
df_p <- df_p[!is.na(df_p$x_lamb93), ]
coordinates(df_p) <- c('x_lamb93', 'y_lamb93')
proj4string(df_p) <- CRS("+init=epsg:2154")
df_p2 <- spTransform(df_p, CRS("+init=epsg:4326"))
df_p$lat <- df_p2$y_lamb93
df_p$long <- df_p2$x_lamb93
df_p<- GetTopo(df_p)
list_res <- GetClimate(df_p)
df_p$MAT <- list_res$MAT
df_p$MAP <- list_res$MAP
geol <- GetGeol(df_p)
df_p$geol <- geol
df <- as.data.frame(df_p)
nn <- names(df)
names(df)[nn == "x"] <- 'x_lamb93'
names(df)[nn == "y"] <- 'y_lamb93'
df <- df[, c(3:ncol(df), 1:2)]
return(as.data.frame(df))
}
# TEST if all site in meta
check_all_sites_in_c_m <- function(d_c, d_m, d_p){
if(sum(!unique(d_c$plot_id) %in% d_p$plot_id)) stop('missing site of c in p')
if(sum(!unique(d_m$plot_id) %in% d_p$plot_id)) stop('missing site of m in p')
}
## CHECK WRONG XY COORDINATES
plot_xy_map <- function(site, data, d_p){
df_t <- data[data$plot_id == site, ]
if(sum(!is.na(df_t$x))>0 & sum(!is.na(df_t$y))>0){
min_x <- d_p[d_p$plot_id == site, 'x_min']
min_y <- d_p[d_p$plot_id == site, 'y_min']
max_x <- d_p[d_p$plot_id == site, 'x_max']
max_y <- d_p[d_p$plot_id == site, 'y_max']
par(pty = 's')
plot(as.numeric(df_t$x), as.numeric(df_t$y),
main = site,
xlab = 'x', ylab = 'y',
xlim = range(c(df_t$x, min_x, max_x,
df_t$y, min_y, max_y),
na.rm = TRUE),
ylim = range(c(df_t$y, min_y, max_y,
df_t$x, min_x, max_x),
na.rm = TRUE))
polygon(c(min_x, max_x, max_x, min_x),
c(min_y, min_y, max_y, max_y),
border = 'red')
points_pb <- df_t$x > max_x | df_t$x < min_x| df_t$y > max_y | df_t$y < min_y
points(df_t$x[points_pb], df_t$y[points_pb], col = 'red', pch = 16)
}
}
## Delete tree outside mapping area
get_wrong_xy_stem_id<- function(site, data, d_p){
df_t <- data[data$plot_id == site, ]
if(sum(!is.na(df_t$x))>0 & sum(!is.na(df_t$y))>0){
min_x <- d_p[d_p$plot_id == site, 'x_min']
min_y <- d_p[d_p$plot_id == site, 'y_min']
max_x <- d_p[d_p$plot_id == site, 'x_max']
max_y <- d_p[d_p$plot_id == site, 'y_max']
points_pb <- (df_t$x > max_x | df_t$x < min_x |
df_t$y > max_y | df_t$y < min_y) &
!is.na(df_t$x) & !is.na(df_t$y)
stem_id_pb <- df_t$stem_id[points_pb]
}else{
stem_id_pb <- c()
}
return(stem_id_pb)
}
remove_wrong_xy_tree_m <- function(d_m, d_c, d_p){
vec_wrong_xy_stem_id <- unlist(lapply(unique(d_c$plot_id),
get_wrong_xy_stem_id,
data = d_c, d_p = d_p))
return(d_m[!d_m$stem_id %in% vec_wrong_xy_stem_id, ])
}
remove_wrong_xy_tree_c <- function(d_m, d_c, d_p){
vec_wrong_xy_stem_id <- unlist(lapply(unique(d_c$plot_id),
get_wrong_xy_stem_id,
data = d_c, d_p = d_p))
return(d_c[!d_c$stem_id %in% vec_wrong_xy_stem_id, ])
}
read_all_data_and_clean <- function(){
#### TEST
data_p <- read_data_plot()
data_m <- read_mesures_all()
data_c <- read_carto_all()
data_m <- rename_data_m(data_m)
data_c <- rename_data_c(data_c)
data_c <- fix_all_c(data_c, data_m)
data_m <- fix_all_m(data_c, data_m)
df_c <- remove_tree_below_dbh_map(data_c, data_m)
df_m <- remove_tree_below_dbh(data_m)
data_c <- remove_site_c(data_c, data_p)
data_m <- remove_site_m(data_m, data_p)
data_p <- data_p[data_p$paper_yn == "yes", ]
check_all_sites_in_c_m(data_c, data_m, data_p)
pdf(file.path('figures', 'map_site_error.pdf'))
lapply(unique(data_c$plot_id), plot_xy_map, data = data_c, d_p = data_p)
dev.off()
data_m <- remove_wrong_xy_tree_m(data_m, data_c, data_p)
data_c<- remove_wrong_xy_tree_c(data_m, data_c, data_p)
data_p <- add_var_p(data_p)
print('done')
saveRDS(list(c = data_c, m = data_m, p = data_p), file.path('output', 'list_data.rds'))
}
## list_data <- read_all_data_and_clean(path_samba, path_samba_r)
save_data_c <- function(){
list_d <- readRDS(file.path('output', 'list_data.rds'))
write.csv(list_d$c, file.path('output', 'data_c.csv'),
row.names = FALSE)
}
save_data_m <- function(){
list_d <- readRDS(file.path('output', 'list_data.rds'))
write.csv(list_d$m, file.path('output', 'data_m.csv'),
row.names = FALSE)
}
save_data_p <- function(){
list_d <- readRDS(file.path('output', 'list_data.rds'))
write.csv(list_d$p, file.path('output', 'data_p.csv'),
row.names = FALSE)
}
get_data_c <- function() read.csv(file.path('output', 'data_c.csv'),
stringsAsFactors = FALSE)
get_data_m <- function() read.csv(file.path('output', 'data_m.csv'),
stringsAsFactors = FALSE)
get_data_p <- function() read.csv(file.path('output', 'data_p.csv'),
stringsAsFactors = FALSE)
## GENERATE EMPTY METADATA
generate_metadata_and_save<- function(data, name_data){
metadata <- data.frame(variables = names(data),
type = unlist(lapply(data, class)),
unit = NA, definition = NA)
write.csv(metadata, file.path('output',
paste0('metadata_',
name_data,
'.csv')), , row.names = FALSE)
write.csv(data, file.path('output',
paste0(name_data,
'.csv')), , row.names = FALSE)
}
generate_metadata_c_e<- function(data, name_data = 'data_c') generate_metadata_and_save(data, name_data )
generate_metadata_m_e<- function(data, name_data = 'data_m') generate_metadata_and_save(data, name_data )
generate_metadata_p_e<- function(data, name_data = 'data_p') generate_metadata_and_save(data, name_data )
data_sp_code<- function(data_c,
path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
data_code <- read.xls(file.path(path_samba, 'données_autrestables',
'Code_species.xlsx'),
stringsAsFactors = FALSE)
sp_vec <- unique(data_c$code_species)
if(sum(!sp_vec %in% data_code$Code_species)>0)stop("missing species in Code_species.xlsx")
d <- data_code[data_code$Code_species %in% sp_vec, ]
names(d) <- c("code_species", "latin_name")
write.csv(d, file.path('output', 'data_sp_code.csv'), row.names = FALSE)
}
## Check Table
data_census <- function(data_m){
data_census <- table(data_m$plot_id, data_m$year)
write.csv(data_census, file.path('output', 'data_census.csv'), row.names = FALSE)
}
data_sp_site<- function(data_c){
data_sp <- table(data_c$code_species, data_c$plot_id)
write.csv(data_sp, file.path('output', 'data_sp_site.csv'), row.names = FALSE)
}
## print_table_control(data_m, data_c) TODO ADD TO REMAKE
### QUALITY CHECK
### Growth error
growth_dead_tree <- function(i, data, yy, j){
dbh1 <- data[data$stem_id== j &
data$year == yy[i], 'dbh']
dbh2 <- data[data$stem_id== j &
data$year == yy[i+1], 'dbh']
df <- data.frame(stem_id = j,
year1 = yy[i], year2 = yy[i+1],
dbh1 = dbh1, dbh2 = dbh2,
code_diam1 = data[data$stem_id== j &
data$year == yy[i], 'code_diam'],
code_diam2 = data[data$stem_id== j &
data$year == yy[i+1], 'code_diam'],
code_status1 = data[data$stem_id== j &
data$year == yy[i], 'code_status'],
code_status2 = data[data$stem_id== j &
data$year == yy[i+1], 'code_status']
)
return(df)
}
growth_tree_all <- function(j, df){
years <- sort(df[df$stem_id== j, ]$year)
list_t <- vector('list')
list_t <- lapply(seq_len(length(years) -1), growth_dead_tree, df, years, j)
res <- do.call(rbind, list_t)
return(res)
}
save_data_growth <- function(df){
require(parallel)
cl <- makeCluster(12, type="FORK")
trees_ids<- unique(df$stem_id)
list_all <- parLapply(cl, trees_ids, growth_tree_all, df)
stopCluster(cl)
res <- do.call(rbind, list_all)
res$G <- (res$dbh2-res$dbh1)/(res$year2-res$year1)
res$same_code_diam <- res$code_diam1 == res$code_diam2
write.csv(res, file.path('output', 'df_growth.csv'), row.names = FALSE)
}
get_data_growth <- function(){
read.csv(file.path('output', 'df_growth.csv'))
}
## df_growth <- data_growth(data_m)
cook_outlier_detec <- function(df, x, y){
require(MASS)
df <- df[complete.cases(df[, c(x, y)]), ]
fr <- as.formula(paste(y, " ~ ", x))
ols <- lm(fr, df)
d1 <- cooks.distance(ols)
r <- stdres(ols)
a <- cbind(df, d1, r)
a_out <- a[d1 > 6*mean(d1), ]
points(a_out[[x]], a_out[[y]], pch = 4)
return(a_out$stem_id)
}
plot_quant_reg <- function(df, x, y,
probs_vec = c(0.005, 0.995),
smooth_dg = 3){
require(quantreg)
require(splines)
df <- df[complete.cases(df[, c(x, y)]), ]
x_seq_pred <- seq(from = min(df[[x]], na.rm = TRUE),
to = max(df[[x]], na.rm = TRUE),
length.out = 100)
fr <- as.formula(paste(y, " ~ ", paste("bs(", x, ", df = smooth_dg)")))
df_pred <- data.frame( 0, x_seq_pred)
names(df_pred) <- c(y, x)
X <- model.matrix(fr, df_pred)
for(tau in probs_vec){
fit <- rq(fr, tau, data=df)
accel.fit <- X %*% fit$coef
lines(x_seq_pred,accel.fit, col = 'black')
if(tau == probs_vec[1]){
vec_pb <- df[[y]] < predict(fit)
}else{
vec_pb <- df[[y]] > predict(fit)
}
points(df[vec_pb, x], df[vec_pb, y], pch = 16, col = 'red')
df[[paste0('tau',tau)]] <- vec_pb
}
return(df$stem_id[apply(df[, paste0('tau', probs_vec)], MARGIN = 1, sum)>0])
}
plot_growth_error <- function(df){
plot(df$dbh1, df$G, cex = 0.2,
xlab = 'Intial DBH (cm)', ylab = 'Diameter Growth (cm/yr.)')
quant_id <- plot_quant_reg(df, 'dbh1', 'G')
cook_id <- cook_outlier_detec(df, 'dbh1', 'G')
}
save_growth_error <- function(df){
plot(df$dbh1, df$G, cex = 0.2,
col = c('green', 'black')[unclass(factor(df$same_code_diam))],
xlab = 'Intial DBH (cm)', ylab = 'Diameter Growth (cm/yr.)')
abline(h = quantile(df$G, probs = c(0.0025, 0.9975), na.rm = TRUE),
col = 'gray')
quant_id <- plot_quant_reg(df, 'dbh1', 'G')
cook_id <- cook_outlier_detec(df, 'dbh1', 'G')
all_id <- c(as.character(quant_id), as.character(cook_id))
write.csv(data.frame(stem_id = df[df$stem_id %in% all_id[duplicated(all_id)], ]),
file = file.path('output', 'tree_wrong_growth.csv'),
row.names = FALSE)
}
## check if dead tree are alive again TODO
save_stem_id_resurrected <- function(df){
d <- df[df$code_status1 %in% c('9990', '9991') &
df$code_status2 == '0000',]
print(dim(d))
write.csv(d, file.path('output', 'data_resurrected_tree.csv'), row.names = FALSE)
}
## save_stem_id_resurrected(df_growth)
## no trees
# check allometry TODO REMOVE base_crown_h
## test H tot and H crown
save_stem_id_wrong_crown_h<- function(df_m){
vec_pb <- df_m$h_tot/apply(df_m[ , paste0('crown_h', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)<1
d <- df_m$stem_id[ vec_pb & !is.na(vec_pb)]
print(dim(d))
write.csv(d, file.path('output', 'data_wrong_crown_h_tree.csv'), row.names = FALSE)
}
#save_stem_id_wrong_crown_h(data_m)
# plots
plot_allo_error <- function(data){
par(mfrow = c(2, 2))
plot(data$dbh, data$h_tot, xlab = 'dbh', ylab = 'h', cex = 0.3)
plot_quant_reg(data, 'dbh', 'h_tot')
cook_outlier_detec(data, 'dbh', 'h_tot')
data$crown_r <- apply(data[ , paste0('crown_r', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)
plot(data$dbh, data$crown_r,
xlab = 'dbh', ylab= 'crown radius',
cex = 0.3)
plot_quant_reg(data, 'dbh', 'crown_r')
cook_outlier_detec(data, 'dbh', 'crown_r')
data$crown_h <- apply(data[ , paste0('crown_h', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)
vec_pb <- data$h_tot/data$crown_h<1
plot(data$h_tot, data$crown_h,
xlab = 'h', ylab= 'crown height',
col = unclass(factor(vec_pb & !is.na(vec_pb))),
cex = 0.3)
plot_quant_reg(data, 'h_tot', 'crown_h')
cook_outlier_detec(data, 'h_tot', 'crown_h')
lines(0:100, 0:100, col = 'red')
}
save_allo_error <- function(data){
plot(data$dbh, data$h_tot, xlab = 'dbh', ylab = 'h', cex = 0.3,
col = unclass(factor(data$plot_id)))
abline(h=50)
quant_id_1<- plot_quant_reg(data, 'dbh', 'h_tot')
cook_outlier_detec(data, 'dbh', 'h_tot')
data$crown_r <- apply(data[ , paste0('crown_r', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)
plot(data$dbh, data$crown_r,
xlab = 'dbh', ylab= 'crown radius',
cex = 0.3)
quant_id_2 <- plot_quant_reg(data, 'dbh', 'crown_r')
cook_outlier_detec(data, 'dbh', 'crown_r')
vec_pb <- data$h_tot/apply(data[ , paste0('crown_h', 1:4)],
MARGIN = 1, mean, na.rm = TRUE)<1
outlier_3 <- data$stem_id[vec_pb & !is.na(vec_pb)]
write.csv(data.frame(stem_id = unique(c(quant_id_1, quant_id_2, outlier_3))),
file = file.path('output', 'tree_wrong_allo.csv'),
row.names = FALSE)
vec_pb <- (data$h_tot>50 & !is.na(data$h_tot)) | (data$crown_r>7 & !is.na(data$crown_r))
d <- data$stem_id[ vec_pb & !is.na(vec_pb)]
print(dim(d))
write.csv(d, file.path('output', 'data_wrong_allo2.csv'), row.names = FALSE)
}
## Tables for data paper
# TABLE 1 PLOT DESCRIPTION plot_id (check diff with plot_id) area, elvation, lat, long,
table_plot <- function(df_p){
table_p <- df_p[, c('plot_id', 'area', 'elevation', 'slope', 'aspect', 'lat',
'long', 'MAT', 'MAP', 'geol')]
write.csv(table_p, file.path('output', 'table_plot.csv'), row.names = FALSE)
}
# TABLE 2 plot_id year_first_meas N census, main species, N initial G initial
table_stand_descrip<- function(df_p, df_m, df_c, treshold_sp= 0.1){
require(dplyr)
table_p2 <- df_p[, c("plot_id", "area")]
df <- dplyr::left_join(df_m, df_c[, c('stem_id', 'code_species')], by = 'stem_id')
table_p3 <- df %>% dplyr::group_by(plot_id) %>%
dplyr::summarise(first_year = min(year),
n_census = n_distinct(year))
df <- df %>% dplyr::filter(code_status %in% c('0', '8881', '8882')) %>%
dplyr::arrange(year) %>% dplyr::distinct(stem_id)
main_sp <- tapply(df$code_species,
df$plot_id,
function(x) paste(names(table(x))[table(x)/
length(x)>treshold_sp],
collapse = ' and '))
n_init<- tapply(df$code_species,
df$plot_id,
length)
ba_init<- tapply(pi*df$dbh^2/4,
df$plot_id,
sum)
table_p4 <- data.frame(plot_id = names(main_sp),
main_sp = main_sp,
n_init = n_init, ba_init = ba_init,
stringsAsFactors = FALSE)
tab <- dplyr::left_join(table_p2, table_p3, by = 'plot_id')
tab <- dplyr::left_join(tab, table_p4, by = 'plot_id')
tab$ba_init <- tab$ba_init/(tab$area * 10000)
tab$n_init <- tab$n_init/(tab$area)
write.csv(tab, file.path('output', 'table_stand_descrip.csv'), row.names = FALSE)
}
# TABLE 3 diam min , n of height measure , n of crown radius measure, n of crown height measure, dead and stump atestablish Y/N , loc xy or quadrat
table_stand_allo<- function(df_p, df_m, df_c){
require(dplyr)
df_m <- df_m %>% dplyr::rowwise() %>%
dplyr::mutate(crown_h = mean(c(crown_h1, crown_h2,
crown_h3, crown_h4),
na.rm = TRUE),
crown_r = mean(c(crown_r1, crown_r2,
crown_r3, crown_r4),
na.rm = TRUE))
tab1 <- df_m %>% dplyr::group_by( plot_id) %>%
dplyr::summarise(n_h = sum(!is.na(h_tot)),
n_crown_h = sum(!is.na(crown_h)),
n_crown_r = sum(!is.na(crown_r)))
df <- df_m %>%
dplyr::arrange(year) %>% dplyr::distinct(stem_id)
tab2 <- df %>% dplyr::group_by(plot_id) %>%
dplyr::summarise(dead_init_tf = sum(code_status %in% c("9991", "9990"))>0)
tab3 <- df_c%>% dplyr::group_by(plot_id) %>%
dplyr::summarise(xy_tf = sum(!is.na(x))>0)
tab <- dplyr::left_join(tab1, tab2, by = 'plot_id')
tab <- dplyr::left_join(tab, tab3, by = 'plot_id')
write.csv(tab, file.path('output', 'table_stand_allo.csv'), row.names = FALSE)
}
#
species_code <- function(df_c, path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
code_species <- read.xls(file.path(path_samba, 'données_autrestables',
'Code_species.xlsx'),
stringsAsFactors = FALSE)
names(code_species) <- c("code_species", "Latin.name")
df <- code_species[code_species$code_species %in% unique(df_c$code_species), ]
write.csv(df, file.path('output', 'species_code.csv'), row.names = FALSE)
}
status_code <- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
code_status <- read.xls(file.path(path_samba, 'données_autrestables',
'Code_status.xlsx'),
stringsAsFactors = FALSE)
write.csv(code_status, file.path('output', 'status_code.csv'), row.names = FALSE)
}
get_meta_c<- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
df <- read.xls(file.path(path_samba, 'données_autrestables',
'metadonnees_carto.xlsx'),
stringsAsFactors = FALSE)
write.csv(df, file.path('output', 'metadata_data_c.csv'), row.names = FALSE)
}
get_meta_m<- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
df <- read.xls(file.path(path_samba, 'données_autrestables',
'metadonnees_mesures.xlsx'),
stringsAsFactors = FALSE)
write.csv(df, file.path('output', 'metadata_data_m.csv'), row.names = FALSE)
}
get_meta_p<- function(path_samba = "/run/user/1001/gvfs/smb-share:server=sdgrp1,share=services/EMGR/Projets/placette_foret/"){
df <- read.xls(file.path(path_samba, 'données_autrestables',
'metadonnees_plot.xlsx'),
stringsAsFactors = FALSE)
write.csv(df, file.path('output', 'metadata_data_p.csv'), row.names = FALSE)
}
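# Example usage (sketch, assuming the samba share is mounted at the default path):
# species_code(df_c); status_code(); get_meta_c(); get_meta_m(); get_meta_p()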
|
## plot1.R
## Read in the Electric Power Consumption dataset from the link below,
## unzip it, and place it in your working directory:
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
df1<-read.delim("household_power_consumption.txt", header = TRUE, sep = ";", quote = "\"",dec = ".", fill = TRUE, comment.char = "", na.strings="?")
##subset the data for the date range
df1sub <- subset(df1, as.Date(df1$Date, format="%d/%m/%Y") %in% as.Date(c("2007-02-01", "2007-02-02"), format="%Y-%m-%d"))
## remove any NA's
df1sub<-df1sub[complete.cases(df1sub),]
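## optional sanity check (sketch): the subset should span only the two target dates
## range(as.Date(df1sub$Date, format="%d/%m/%Y"))  # expect "2007-02-01" "2007-02-02"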
png("plot1.png",width=480,height=480)
##create histogram
hist(df1sub$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
##dev.copy(png,file="plot1.png",width=480,height=480)
dev.off()
| /plot1.R | no_license | cafefargo/ExData_Plotting1 | R | false | false | 875 | r |
|
#install.packages("stringr")
rm(list=ls())
setwd("C:/Users/Kate Hyun/Dropbox/Kate/ReID/TruckReid")
load("./ProcessedData/Jan0910/SOLCLoadinJan0910")
# functionbook2 must be sourced beforehand (it defines f.normalization and f.interpolation used below)
######## check duration and filter out (When WIM data comes in, step-by-step filtering approaches needed) #######
### Time Window ###
buffertimewindow=60; # min (WIM-WIM case)
bufferduration = 0.4; # min
buffernumpnt = 800
bufferlen = 12
bufferaspacing12 = 8
bufferaspacing23 = 5
bufferaspacing34 = 5
bufferaspacing45 = 5
buffergvw = 40
# bufferaweightl1 = 3
# bufferaweightr1 = 3
# bufferaweightl2 = 3
# bufferaweightr2 = 3
# bufferaweightl3 = 3
# bufferaweightr3 = 3
# bufferaweightl4 = 3
# bufferaweightr4 = 3
# bufferaweightl5 = 3
# bufferaweightr5 = 3
### input file - Jan 0910
Upheader = SO.Jan0910Header
Upheader[,14] <- SOJan_v1[,6][match( Upheader$sigid, SOJan_v1[,3])] # FHWA class
Upheader[,15] <- SOJan_v1[,8][match( Upheader$sigid, SOJan_v1[,3])] # length
Upheader[,16] <- SOJan_v1[,9][match( Upheader$sigid, SOJan_v1[,3])] # gvw
Upheader[,17] <- SOJan_v1[,10][match( Upheader$sigid, SOJan_v1[,3])] # axle spacing 1-2
Upheader[,18] <- SOJan_v1[,11][match( Upheader$sigid, SOJan_v1[,3])] # axle spacing 2-3
Upheader[,19] <- SOJan_v1[,12][match( Upheader$sigid, SOJan_v1[,3])] # axle spacing 3-4
Upheader[,20] <- SOJan_v1[,13][match( Upheader$sigid, SOJan_v1[,3])] # axle spacing 4-5
# Upheader[,21] <- SOJan_v1[,14][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 1 left
# Upheader[,22] <- SOJan_v1[,15][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 1 right
# Upheader[,23] <- SOJan_v1[,16][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 2 left
# Upheader[,24] <- SOJan_v1[,17][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 2 right
# Upheader[,25] <- SOJan_v1[,18][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 3 left
# Upheader[,26] <- SOJan_v1[,19][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 3 right
# Upheader[,27] <- SOJan_v1[,20][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 4 left
# Upheader[,28] <- SOJan_v1[,21][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 4 right
# Upheader[,29] <- SOJan_v1[,22][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 5 left
# Upheader[,30] <- SOJan_v1[,23][match( Upheader$sigid, SOJan_v1[,3])] # axle weight 5 right
Upheader_new <- subset(Upheader, Upheader[,8] > 100)         # keep signatures with > 100 data points
Upheader_new <- subset(Upheader_new, Upheader_new[,14] > 3)  # keep FHWA classes 4-14 (trucks/buses)
Upheader_new <- subset(Upheader_new, Upheader_new[,14] < 15)
Downheader = LC.Jan0910Header
Downheader[,14] <- LCJan_v1[,6][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,15] <- LCJan_v1[,8][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,16] <- LCJan_v1[,9][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,17] <- LCJan_v1[,10][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,18] <- LCJan_v1[,11][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,19] <- LCJan_v1[,12][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,20] <- LCJan_v1[,13][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,21] <- LCJan_v1[,14][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,22] <- LCJan_v1[,15][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,23] <- LCJan_v1[,16][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,24] <- LCJan_v1[,17][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,25] <- LCJan_v1[,18][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,26] <- LCJan_v1[,19][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,27] <- LCJan_v1[,20][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,28] <- LCJan_v1[,21][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,29] <- LCJan_v1[,22][match( Downheader$sigid, LCJan_v1[,3])]
Downheader[,30] <- LCJan_v1[,23][match( Downheader$sigid, LCJan_v1[,3])]
Downheader_new <-subset(Downheader, Downheader[,14] > 3)
Downheader_new <-subset(Downheader_new, Downheader_new[,14] < 15)
Downsig = LC.Jan0910sig
Downsig_IM=subset(Downsig, select=c(id,mag,sigid))
Downheader_ID=(Downheader_new$sigid)
# inspect traffic conditions
# set buffer
settime <- matrix(nrow=length(Downheader_ID), ncol=1)
setduration<- matrix(nrow=length(Downheader_ID), ncol=1)
setnumpnt<- matrix(nrow=length(Downheader_ID), ncol=1)
setlen<- matrix(nrow=length(Downheader_ID), ncol=1)
setgvw<- matrix(nrow=length(Downheader_ID), ncol=1)
setaspacing12 <- matrix(nrow=length(Downheader_ID), ncol=1)
setaspacing23 <- matrix(nrow=length(Downheader_ID), ncol=1)
setaspacing34 <- matrix(nrow=length(Downheader_ID), ncol=1)
setaspacing45 <- matrix(nrow=length(Downheader_ID), ncol=1)
#
# setaweightl1 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightr1 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightl2 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightr2 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightl3 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightr3 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightl4 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightr4 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightl5 <- matrix(nrow=length(Downheader_ID), ncol=1)
# setaweightr5 <- matrix(nrow=length(Downheader_ID), ncol=1)
lb <- matrix(nrow=length(Downheader_ID), ncol=1)
ld <- matrix(nrow=length(Downheader_ID), ncol=1)
ud <- matrix(nrow=length(Downheader_ID), ncol=1)
lp <- matrix(nrow=length(Downheader_ID), ncol=1)
up <- matrix(nrow=length(Downheader_ID), ncol=1)
ul <- matrix(nrow=length(Downheader_ID), ncol=1)
ll <- matrix(nrow=length(Downheader_ID), ncol=1)
ug <- matrix(nrow=length(Downheader_ID), ncol=1)
lg <- matrix(nrow=length(Downheader_ID), ncol=1)
la12 <- matrix(nrow=length(Downheader_ID), ncol=1)
ua12 <- matrix(nrow=length(Downheader_ID), ncol=1)
la23 <- matrix(nrow=length(Downheader_ID), ncol=1)
ua23 <- matrix(nrow=length(Downheader_ID), ncol=1)
la34 <- matrix(nrow=length(Downheader_ID), ncol=1)
ua34 <- matrix(nrow=length(Downheader_ID), ncol=1)
la45 <- matrix(nrow=length(Downheader_ID), ncol=1)
ua45 <- matrix(nrow=length(Downheader_ID), ncol=1)
#
# lwl1 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwl1 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwr1 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwr1 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwl2 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwl2 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwr2 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwr2 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwl3 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwl3 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwr3 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwr3 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwl4 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwl4 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwr4 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwr4 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwl5 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwl5 <- matrix(nrow=length(Downheader_ID), ncol=1)
# lwr5 <- matrix(nrow=length(Downheader_ID), ncol=1)
# uwr5 <- matrix(nrow=length(Downheader_ID), ncol=1)
for (j in 1: length(Downheader_ID)){
settime[j] <- as.numeric(Downheader_new[j,12])
lb[j] <- settime[j] - buffertimewindow * 60000
}
for (j in 1: length(Downheader_ID)){
setduration[j] <- as.numeric(Downheader_new[j,7])
ld[j] <- setduration[j] - bufferduration
ud[j] <- setduration[j] + bufferduration
}
for (j in 1: length(Downheader_ID)){
setnumpnt[j] <- as.numeric(Downheader_new[j,8])
lp[j] <- setnumpnt[j] - buffernumpnt
up[j] <- setnumpnt[j] + buffernumpnt
}
for (j in 1: length(Downheader_ID)){
setlen[j] <- as.numeric(Downheader_new[j,15])
ll[j] <- setlen[j] - bufferlen
ul[j] <- setlen[j] + bufferlen
}
for (j in 1: length(Downheader_ID)){
setgvw[j] <- as.numeric(Downheader_new[j,16])
lg[j] <- setgvw[j] - buffergvw
ug[j] <- setgvw[j] + buffergvw
}
for (j in 1: length(Downheader_ID)){
setaspacing12[j] <- as.numeric(Downheader_new[j,17])
setaspacing23[j] <- as.numeric(Downheader_new[j,18])
setaspacing34[j] <- as.numeric(Downheader_new[j,19])
setaspacing45[j] <- as.numeric(Downheader_new[j,20])
la12[j] <- setaspacing12[j] - bufferaspacing12
ua12[j] <- setaspacing12[j] + bufferaspacing12
la23[j] <- setaspacing23[j] - bufferaspacing23
ua23[j] <- setaspacing23[j] + bufferaspacing23
la34[j] <- setaspacing34[j] - bufferaspacing34
ua34[j] <- setaspacing34[j] + bufferaspacing34
la45[j] <- setaspacing45[j] - bufferaspacing45
ua45[j] <- setaspacing45[j] + bufferaspacing45
}
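# Note: the per-variable loops above could be vectorized equivalently, e.g. (sketch):
# settime <- as.numeric(Downheader_new[, 12]); lb <- settime - buffertimewindow * 60000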
# for (j in 1: length(Downheader_ID)){
# setaweightl1[j] <- as.numeric(Downheader_new[j,21])
# setaweightr1[j] <- as.numeric(Downheader_new[j,22])
# setaweightl2[j] <- as.numeric(Downheader_new[j,23])
# setaweightr2[j] <- as.numeric(Downheader_new[j,24])
# setaweightl3[j] <- as.numeric(Downheader_new[j,25])
# setaweightr3[j] <- as.numeric(Downheader_new[j,26])
# setaweightl4[j] <- as.numeric(Downheader_new[j,27])
# setaweightr4[j] <- as.numeric(Downheader_new[j,28])
# setaweightl5[j] <- as.numeric(Downheader_new[j,29])
# setaweightr5[j] <- as.numeric(Downheader_new[j,30])
# lwl1[j] <- setaweightl1[j] - bufferaweightl1
# uwl1[j] <- setaweightl1[j] + bufferaweightl1
# lwl2[j] <- setaweightl2[j] - bufferaweightl2
# uwl2[j] <- setaweightl2[j] + bufferaweightl2
# lwl3[j] <- setaweightl3[j] - bufferaweightl3
# uwl3[j] <- setaweightl3[j] + bufferaweightl3
# lwl4[j] <- setaweightl4[j] - bufferaweightl4
# uwl4[j] <- setaweightl4[j] + bufferaweightl4
# lwl5[j] <- setaweightl5[j] - bufferaweightl5
# uwl5[j] <- setaweightl5[j] + bufferaweightl5
#
# lwr1[j] <- setaweightr1[j] - bufferaweightr1
# uwr1[j] <- setaweightr1[j] + bufferaweightr1
# lwr2[j] <- setaweightr2[j] - bufferaweightr2
# uwr2[j] <- setaweightr2[j] + bufferaweightr2
# lwr3[j] <- setaweightr3[j] - bufferaweightr3
# uwr3[j] <- setaweightr3[j] + bufferaweightr3
# lwr4[j] <- setaweightr4[j] - bufferaweightr4
# uwr4[j] <- setaweightr4[j] + bufferaweightr4
# lwr5[j] <- setaweightr5[j] - bufferaweightr5
# uwr5[j] <- setaweightr5[j] + bufferaweightr5
# }
### time window - TIME & DURATION
Upsiglist <- list()
for (j in 1: length(Downheader_ID)){
Upsiglist[j] <- list(subset(Upheader_new$sigid, Upheader_new$utc > lb[j] & Upheader_new$utc <= settime[j]
& Upheader_new[,7] > ld[j] & Upheader_new[,7] < ud[j]
& Upheader_new[,8] > lp[j] & Upheader_new[,8] < up[j]
& Upheader_new[,14] == Downheader_new[j,14]
& Upheader_new[,15] > ll[j] & Upheader_new[,15] < ul[j]
& Upheader_new[,16] > lg[j] & Upheader_new[,16] < ug[j]
& Upheader_new[,17] > la12[j] & Upheader_new[,17] < ua12[j]
& Upheader_new[,18] > la23[j] & Upheader_new[,18] < ua23[j]
& Upheader_new[,19] > la34[j] & Upheader_new[,19] < ua34[j]
& Upheader_new[,20] > la45[j] & Upheader_new[,20] < ua45[j]
#
# & Upheader_new[,21] > lwl1[j] & Upheader_new[,21] < uwl1[j]
# & Upheader_new[,22] > lwr1[j] & Upheader_new[,22] < uwr1[j]
# & Upheader_new[,23] > lwl2[j] & Upheader_new[,23] < uwl2[j]
# & Upheader_new[,24] > lwr2[j] & Upheader_new[,24] < uwr2[j]
# & Upheader_new[,25] > lwl3[j] & Upheader_new[,25] < uwl3[j]
# & Upheader_new[,26] > lwr3[j] & Upheader_new[,26] < uwr3[j]
# & Upheader_new[,27] > lwl4[j] & Upheader_new[,27] < uwl4[j]
# & Upheader_new[,28] > lwr4[j] & Upheader_new[,28] < uwr4[j]
# & Upheader_new[,29] > lwl5[j] & Upheader_new[,29] < uwl5[j]
# & Upheader_new[,30] > lwr5[j] & Upheader_new[,30] < uwr5[j]
))
}
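# Upsiglist[[j]] now holds the candidate upstream sigids for downstream truck j;
# e.g. sapply(Upsiglist, length) gives the number of candidates per downstream record.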
### input files - DOWN
num <- 1000
no_round <- 1000
# find index for potential matching
Downidx <- match ( (Downheader_ID),Downsig_IM[,3] )
Downidx <- Downidx[!is.na(Downidx)]
Downindex <- c()
Downobjout <- c()
for (w in 1: length(Downidx)){
# w=1
Downindex <- Downidx[w]
inDownsig <- Downsig_IM[Downindex+1,]
Downindex <- Downindex+1
while (Downsig_IM[Downindex+1,1] < 100){
inDownsig <- rbind(inDownsig,Downsig_IM[Downindex+1,])
Downindex <- Downindex+1
}
inDownsig <- f.normalization(inDownsig)
splineDown <- f.interpolation(inDownsig,num,no_round)
colnames(splineDown) <- c("outDowntime", "outDownmag")
#write.table(inDownsig, "./ProcessedData/TestCode/inDownsig.txt", sep="\t")
#write.table(splineDown, "./ProcessedData/TestCode/splineDown.txt", sep="\t")
Downobj <- c(splineDown[,2])
Downobj <- t(Downobj)
Downobjout <-rbind(Downobjout, Downobj)
}
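# Downobjout now has one row per downstream truck, each row holding the 'num'
# interpolated magnitudes of its normalized inductive signature.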
### input files - UP
Upsig = SO.Jan0910sig # Mar 20
Upsig_IM=subset(Upsig, select=c(id,mag,sigid))
# match() is vectorized, so no loop over rows is needed here
Upidx <- match(Upheader_new$sigid, Upsig_IM$sigid)
UpheaderID <-Upsig_IM[Upidx,3]
Upindex <- c()
Upobjout <- c()
for (w in 1: length(Upidx)){
# for (w in 1: 20){
Upindex <- Upidx[w]
inUpsig <- Upsig_IM[Upindex+1,]
Upindex <- Upindex+1
while (Upsig_IM[Upindex+1,1] < 100){
inUpsig <- rbind(inUpsig,Upsig_IM[Upindex+1,])
Upindex <- Upindex+1
}
inUpsig <- f.normalization(inUpsig)
splineUp <- f.interpolation(inUpsig,num,no_round)
colnames(splineUp) <- c("outUptime", "outUpmag")
Upobj <- c(splineUp[,2])
Upobj <- t(Upobj)
Upobjout <-rbind(Upobjout, Upobj)
}
save(Upheader_new, file="./ProcessedData/Jan0910/Upheader_new.RData")
save(Downheader_new, file="./ProcessedData/Jan0910/Downheader_new.RData")
save(Upsiglist, file="./ProcessedData/Jan0910/Upsiglist.RData")
save.image("C:/Users/Kate Hyun/Dropbox/Kate/ReID/TruckReid/ProcessedData/Jan0910/10282014Jan0910.RData") # for Jan 0910
#0925 : 50 features
#0808 : 1000 features
save(Upobjout, file="./ProcessedData/Jan0910/Upobjout.RData")
save(Downobjout, file="./ProcessedData/Jan0910/Downobjout.RData")
save(matching, file="C:/Users/Kate Hyun/Dropbox/Kate/ReID/TruckReid/ProcessedData/Jan0910/matching.RData") # 'matching' is assumed to come from the loaded workspace
#####################################################################end
# remove files
# the axle-weight bound objects (lwl*/uwl*, lwr*/uwr*, uw*) are commented out above,
# so they are omitted here to avoid rm() warnings
rm(la12, la23, la34, la45, lb, ld, lg, ll, lp,
   ua12, ua23, ua34, ua45, ud, ug, ul, up)
rm(setaspacing12, setaspacing23, setaspacing34, setaspacing45,
   setduration, setgvw, setnumpnt, settime, setlen, uctJan09, uctJan10)
rm(LC.Jan09ML3Header1, LC.Jan09ML3Header2, LC.Jan09ML3sig1, LC.Jan09ML3sig2,
LC.Jan09ML4Header1, LC.Jan09ML4Header2, LC.Jan09ML4sig1, LC.Jan09ML4sig2,
LC.Jan10ML3Header1, LC.Jan10ML3Header2, LC.Jan10ML3Header3, LC.Jan10ML3Header4,
LC.Jan10ML3sig1, LC.Jan10ML3sig2, LC.Jan10ML3sig3, LC.Jan10ML3sig4,
LC.Jan10ML4Header1, LC.Jan10ML4Header2, LC.Jan10ML4Header3, LC.Jan10ML4Header4,
LC.Jan10ML4sig1, LC.Jan10ML4sig2, LC.Jan10ML4sig3, LC.Jan10ML4sig4)
rm(SO.Jan09ML3Header1, SO.Jan09ML3sig1, SO.Jan09ML4Header1, SO.Jan09ML4sig1,
SO.Jan10ML3Header1, SO.Jan10ML3sig1, SO.Jan10ML3Header2, SO.Jan10ML3sig2,
SO.Jan10ML4Header1, SO.Jan10ML4sig1, SO.Jan10ML4Header2, SO.Jan10ML4sig2)
rm(bufferaspacing12, bufferaspacing23, bufferaspacing34, bufferaspacing45,
   bufferduration, buffergvw, bufferlen, buffernumpnt, buffertimewindow)
rm(j, w, splineUp, splineDown, inUpsig, inDownsig)
| /FilteringWimToWim.R | no_license | katehyun/TruckReid2 | R | false | false | 16,082 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute.Davoli_IS.R
\name{compute.Davoli_IS}
\alias{compute.Davoli_IS}
\title{Compute Davoli immune signature}
\usage{
compute.Davoli_IS(RNA.tpm)
}
\arguments{
\item{RNA.tpm}{numeric matrix with rows=genes and columns=samples}
}
\value{
numeric matrix with rows=samples and columns=Davoli immune signature
}
\description{
\code{compute.Davoli_IS} computes the Davoli immune signature as the arithmetic mean of cytotoxic
immune infiltrate signature genes, after rank normalization (Davoli et al., 2017).
}
| /man/compute.Davoli_IS.Rd | permissive | shijianasdf/easier_manuscript | R | false | true | 578 | rd |
|
\name{print.TracksCollection}
\alias{print.TracksCollection}
\title{
Methods for class "TracksCollection"
}
\description{
Method to print an object of class "TracksCollection".
}
\usage{
print.TracksCollection(X)
}
\arguments{
\item{X}{
an object of class "TracksCollection"
}
}
\author{
Mohammad Mehdi Moradi <moradi@uji.es>
}
| /man/print.TracksCollection.Rd | no_license | Moradii/trajectories | R | false | false | 450 | rd |
|
#!/usr/bin/Rscript --max-ppsize=150000
#
# Example: ./scripts/postMUT.R <filename>
############################################################################
## Set parameters for estimation
##
## N.cores = Number of cores available to speed up estimation (e.g. 1, 4, 8)
## total.sims = Total number of simulations to run (must be divisible by N.cores)
## n.random.start.xy = Number of random starts in postMUT (simple)
## n.random.start.xyz = Number of random starts in postMUT
##
## N.sims = DO NOT CHANGE (Number of simulations to run per core)
## N.algo = DO NOT CHANGE (Number of in silico methods (SIFT, PPH2, Xvar -> N.algo = 3))
## N.par.xy = DO NOT CHANGE (Number of parameters in postMUT (simple) model)
## N.par.xyz = DO NOT CHANGE (Number of parameters in postMUT model)
############################################################################
N.cores = 4
total.sims = 100
n.random.start.xy = 10
n.random.start.xyz = 100
# Do not change the following:
N.sims = total.sims / N.cores
N.algo = 3
N.par.xy = 2*N.algo + 1
N.par.xyz = 2*N.algo + 3
############################################################################
## Load R packages, outside functions and create file names
##
## R packages: ggplot2, parallel
############################################################################
library(ggplot2)
library(parallel)
source("scripts/functions_EMAlgo.R")
args <- commandArgs(T)
if(length(args) < 1)
  stop("The script needs 1 input argument.")
input.file <- args[1]
### Create input and output file names
input.file.name <- paste(input.file, "postMUT-in.txt", sep="-")
output.file.name <- paste(input.file, "postMUT-out.txt", sep="-")
output.file.name.BM <- paste(input.file, "BM-postMUT-out.txt", sep="-")
output.file.name.ABM <- paste(input.file, "ABM-postMUT-out.txt", sep="-")
output.file.name.ABMab <- paste(input.file, "ABMab-postMUT-out.txt", sep="-")
figure.file.name <- paste(input.file, "postMUT-plot.pdf", sep="-")
start.file <- "input_files/postMUT_input"
end.file.parameter <- "output_files/postMUT_output/par_est"
end.file.post <- "output_files/postMUT_output/post_est"
end.file.post.all <- "output_files/postMUT_output/post"
end.file.figure <- "output_files/postMUT_output/plots"
############################################################################
## Import data, clean data, prepare data for estimation
##
## Column names 1-6: CHROM, POS, REF_AA, MUT_AA, GENOTYPE, LABEL
## Column names 7-8: SIFT_pred, SIFT_score
## Column names 9-10: Xvar_pred, Xvar_score
## Column names 11-12: PPH2_HD_pred, PPH2_HD_score
############################################################################
data <- read.table(paste(start.file, input.file.name, sep="/"), header = TRUE, sep="\t")
# Clean up predictions
data$SIFT_pred <- ifelse(data$SIFT_pred == "DAMAGING",1, ifelse(data$SIFT_pred == "TOLERATED", 0, NA))
data$PPH2_HD_pred <- ifelse(data$PPH2_HD_pred == "benign",0, ifelse(data$PPH2_HD_pred == "possiblydamaging" | data$PPH2_HD_pred == "probablydamaging", 1, NA))
data$Xvar_pred <- ifelse(data$Xvar_pred == "high" | data$Xvar_pred == "medium", 1, ifelse(data$Xvar_pred == "low" | data$Xvar_pred == "neutral", 0, NA))
# Combine predictions and filter for rows (i.e. mutations) with predictions from all three in silico methods
data_pred = data.frame(data[,1:6],"SIFT" = data$SIFT_pred, "Xvar" = data$Xvar_pred, "PPH2_HD" = data$PPH2_HD_pred, "SIFT_score" = data$SIFT_score, "Xvar_score" = data$Xvar_score, "PPH2_HD_score" = data$PPH2_HD_score)
data_pred = na.omit(data_pred)
############################################################################
## Estimate postMUT (simple) and postMUT parameters
##
############################################################################
N.size = nrow(data_pred[,7:9]);
sum.group <- group.counts(data_pred[,7:9], N.algo)
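# sum.group presumably tallies the 2^N.algo = 8 possible prediction patterns
# (see group.counts() in scripts/functions_EMAlgo.R)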
simCR <- function(N.sims, sum.group, N.par.xy, N.par.xyz, N.algo, n.random.start.xy, n.random.start.xyz){
source("scripts/functions_EMAlgo.R")
EM.Est.xy = array(0, dim = c(N.par.xy, N.sims))
EM.Est.xyz = array(0, dim = c(N.par.xyz, N.sims))
EM.Est.xyz.ab = array(0, dim = c(N.par.xyz - 2, N.sims))
for(k in 1:N.sims){
repeat{
output.xy <- capture.em.xy(sum.group, random.start = TRUE, n.algos = N.algo, ab_1 = FALSE, N.random.start = n.random.start.xy)
if(length(unlist(output.xy)) == N.par.xy){
if(all(output.xy[[1]] <= 0.5) & all(output.xy[[2]] >= 0.5)){ break }
}
}
if(length(unlist(output.xy)) == N.par.xy){
EM.Est.xy[,k] <- unlist(output.xy)
} else {
EM.Est.xy[,k] <- rep(NA,N.par.xy)
}
repeat{
output.xyz <- capture.em.xyz(sum.group, random.start = TRUE, n.algos = N.algo, de_1 = FALSE, N.random.start = n.random.start.xyz)
if(length(unlist(output.xyz)) == N.par.xyz){
if(all(a_b(output.xyz)[[1]] <= 0.5) & all(a_b(output.xyz)[[2]] >= 0.5) & unlist(output.xyz)[length(unlist(output.xyz)) - 1] > 0.75 ){ break }
}
}
if(length(unlist(output.xyz)) == N.par.xyz){
EM.Est.xyz[,k] <- unlist(output.xyz)
EM.Est.xyz.ab[,k] <- c(unlist(a_b(output.xyz)), unlist(output.xyz)[length(unlist(output.xyz))])
} else {
EM.Est.xyz[,k] <- rep(NA,N.par.xyz)
EM.Est.xyz.ab[,k] <- rep(NA,N.par.xyz - 2)
}
} # closes k for loop
out <- rbind(EM.Est.xy, EM.Est.xyz, EM.Est.xyz.ab)
return(out)
} # closes function
### Invoke use of multiple cores
cl <- makeCluster(N.cores)
sim.kk <- do.call("cbind", clusterCall(cl, simCR, N.sims, sum.group, N.par.xy, N.par.xyz, N.algo, n.random.start.xy, n.random.start.xyz) )
stopCluster(cl)
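# Each worker returns a (N.par.xy + N.par.xyz + N.par.xy) x N.sims matrix
# (note N.par.xyz - 2 == N.par.xy), so cbind over N.cores yields total.sims columns,
# e.g. stopifnot(ncol(sim.kk) == total.sims)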
summary.data <- data.frame("mean" = apply(sim.kk, 1, mean), "sd" = apply(sim.kk, 1, sd))
BM.mat <- summary.data[1:N.par.xy,]
ABM.mat <- summary.data[(N.par.xy + 1):(N.par.xy + N.par.xyz),]
ABM.ab.mat <- summary.data[(N.par.xy + N.par.xyz + 1): (N.par.xy + N.par.xyz + N.par.xy),]
############################################################################
## WRITE Sensitivity and Specificity estimates to output file
##
############################################################################
write.table(BM.mat, file = paste(end.file.parameter, output.file.name.BM, sep="/"), sep="\t", quote = FALSE, row.names = FALSE)
write.table(ABM.mat, file = paste(end.file.parameter, output.file.name.ABM, sep="/"), sep="\t", quote = FALSE, row.names = FALSE)
write.table(ABM.ab.mat, file = paste(end.file.parameter, output.file.name.ABMab, sep="/"), sep="\t", quote = FALSE, row.names = FALSE)
############################################################################
## Calculate Posterior Probabilities
##
############################################################################
post_short = data.frame(pred.mat(N.algo), "postMUT-simple" = round(posterior.xy(pred.mat(N.algo), BM.mat[,1], n.algos = N.algo),3), "postMUT" = round(posterior.xyz(pred.mat(N.algo), ABM.mat[,1], n.algos = N.algo), 3))
write.table(post_short, file = paste(end.file.post, output.file.name, sep="/"), sep="\t", quote = FALSE, row.names = FALSE)
post.out.xy = posterior.xy(data_pred[,7:9], BM.mat[,1], n.algos = N.algo) # Posterior Pr(D)
post.out.xyz = posterior.xyz(data_pred[,7:9], ABM.mat[,1], n.algos = N.algo) # Posterior Pr(D)
data_new = data.frame(data_pred, "postMUT-simple" = post.out.xy, "postMUT" = post.out.xyz)
write.table(data_new, file = paste(end.file.post.all, output.file.name, sep="/"), sep="\t", quote = FALSE, row.names = FALSE)
############################################################################
## Plot Sensitivity and Specificity parameter estimates
##
############################################################################
wc_EM.xy <- data.frame(Sensitivity = BM.mat[4:6,1], Specificity = 1-BM.mat[1:3,1])
wc_EM.xyz <- data.frame(Sensitivity = ABM.ab.mat[4:6,1], Specificity = 1-ABM.ab.mat[1:3,1])
cool <- data.frame(rbind(wc_EM.xy,wc_EM.xyz), Algorithm =c("postMUT (simple)","postMUT (simple)","postMUT (simple)","postMUT","postMUT","postMUT"))
pdf(file=paste(end.file.figure, figure.file.name, sep="/"))
plot(1-cool[1,2],cool[1,1], col = 1, xlim = c(0,1), ylim=c(0,1.01), main = " ", xlab = "False Positive Rate", ylab = "True Positive Rate", pch = 16, cex = 1.5, cex.axis = 1.2, cex.lab = 1.2)
points(1-cool[2,2],cool[2,1], col = 1, pch = 18, cex = 1.5)
points(1-cool[3,2],cool[3,1], col = 1, pch = 17, cex = 1.5)
points(1-cool[4,2],cool[4,1], col = 4, pch = 16, cex = 1.5)
points(1-cool[5,2],cool[5,1], col = 4, pch = 18, cex = 1.5)
points(1-cool[6,2],cool[6,1], col = 4, pch = 17, cex = 1.5)
abline(0,1,lty=2,col=80)
legend(.64,.42,c("SIFT", "MutationAsessor","PolyPhen-2"),col=c(1,1,1),pch=c(1,2,5),cex=1.2)
legend(.56,.20,c("postMUT (simple)", "postMUT"),col=c(1,4),lty=c(1,1),lwd=c(4,4),cex=1.2)
dev.off()
| /scripts/postMUT_tumor.R | no_license | stephaniehicks/postMUT | R | false | false | 8,770 | r |
|
#' Query the Banque Hydro to retrieve a time series of measurements for a gauging station.
#' This routine queries the Banque Hydro as many times as needed to complete the requested period.
#'
#' @param station Station code
#' @param DateHeureDeb Event start date as a string "DD/MM/YYYY HH:MM"
#' @param DateHeureFin Event end date as a string "DD/MM/YYYY HH:MM"
#' @param procedure Variable to import: "QTFIX", "QTVAR", "H-TEMPS" ("QTVAR" by default)
#' @param url Banque Hydro URL (http://www.hydro.eaufrance.fr by default)
#'
#' @return data frame holding the table shown on the Banque Hydro time-series page
#'
#' @examples dfx <- rbanqhydro.get("Y3204010", "16/03/2017 00:00", "05/04/2017 23:59")
#'
#' @author David Dorchies david.dorchies@irstea.fr
#' @date 13/04/2017 - 15/03/2019
rbanqhydro.get <- function (station, DateHeureDeb, DateHeureFin, procedure = "QTVAR", url="http://www.hydro.eaufrance.fr") {
PackageRequire("httr")
# Formulaire de sélection des stations
form0<- list(
cmd = "filtrer",
consulte = "rechercher",
code_station = "",
cours_d_eau = "Lez",
commune = "",
departement = "",
bassin_hydrographique = "",
station_en_service = "1",
station_hydrologique = "1",
btnValider = "Visualiser"
)
form0[["station[]"]] = station
url.selection = paste(url,"selection.php", sep = "/")
res <- POST(
url.selection,
body = form0, encode = "form", verbose()
)
# Variable selection form
form1 <- list(
categorie = "rechercher",
procedure = procedure
)
form1[["station[]"]] = station
url.procedure = paste(url,"presentation/procedure.php", sep = "/")
res <- POST(
url.procedure,
body = form1, encode = "form", verbose()
)
# Time-series extraction (repeated queries)
DateHeureDeb = as.POSIXct(DateHeureDeb, format = "%d/%m/%Y %H:%M", tz = "UTC")
DateHeureFin = as.POSIXct(DateHeureFin, format = "%d/%m/%Y %H:%M", tz = "UTC")
DateHeureDebOld = 0
df = data.frame(NULL)
while(DateHeureDeb < DateHeureFin & DateHeureDeb > DateHeureDebOld) {
DateHeureDebOld = DateHeureDeb
dfi = rbanquehydro.get.timeserie(url.procedure, procedure, DateHeureDeb, DateHeureFin)
if(nrow(dfi) > 0) {
df = rbind(df, dfi)
DateHeureDeb = tail(dfi,1)$Date + 60 # Last end time + 60 seconds
}
}
return (df)
}
#' Helper routine that queries the Banque Hydro for a time series for one station over a given period.
#' This routine is called from rbanqhydro.get.
#'
#' @param url.procedure URL of the time-series query form
#' @param procedure Variable to import: "QTFIX", "QTVAR", "H-TEMPS" ("QTVAR" by default)
#' @param DateHeureDeb Event start date in POSIX format
#' @param DateHeureFin Event end date in POSIX format
#'
#' @return data frame holding the table shown on the Banque Hydro time-series page
#'
#' @examples df <- rbanqhydro.get("Y3204010", "16/03/2017 00:00", "05/04/2017 23:59")
#'
#' @author David Dorchies david.dorchies@irstea.fr
#' @date 13/04/2017 - 15/03/2019
rbanquehydro.get.timeserie <- function (url.procedure, procedure, DateHeureDeb, DateHeureFin) {
# Date selection form
form2 <- list(
procedure = procedure,
affichage = 2,
echelle = 1,
date1 = format(DateHeureDeb, "%d/%m/%Y"),
heure1 = format(DateHeureDeb, "%H:%M"),
date2 = format(DateHeureFin, "%d/%m/%Y"),
heure2 = format(DateHeureFin, "%H:%M"),
precision = "00",
btnValider = "Valider"
)
res <- POST(
url.procedure,
body = form2, encode = "form", verbose()
)
PackageRequire("XML")
# Retrieve the data frame from the third table on the page
df = readHTMLTable(
content(res, type="text/plain", encoding="cp1252"),
stringsAsFactors = FALSE,
which = 3
)
df[,"Date"] = as.POSIXct(df[,"Date"], format = "%d/%m/%Y %H:%M", tz = "UTC")
return(df)
}
################################################################################
#' Check whether a package is installed, download it if needed, and load it.
#' The program stops on failure.
#' @param x Character string with the name of the package to load
#' @url http://stackoverflow.com/questions/9341635/how-can-i-check-for-installed-r-packages-before-running-install-packages
#' @date 31/07/2014
################################################################################
PackageRequire <- function(x)
{
if (!require(x,character.only = TRUE)) {
install.packages(x,dep=TRUE,repos="http://cran.r-project.org")
}
if(!require(x,character.only = TRUE)) {
stop("Package not found")
}
}
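# Example usage (sketch): variable-timestep discharge for one station
# dfx <- rbanqhydro.get("Y3204010", "16/03/2017 00:00", "05/04/2017 23:59")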
| /banquehydro_requester.R | no_license | jakediamond/loire_headwaters | R | false | false | 4,874 | r |
require(graphics)
require(stats)
"""
================================================================================
acf
================================================================================
"""
require(quantmod)
library(tseries) # for kpss.test and adf.test (Box.test is in base stats)
df_es <- read.table("df_es.csv", header = TRUE, sep = ",")
df_btcusd <- read.table("df_btcusd.csv", header = TRUE, sep = ",")
df_matrix <- read.table("df_btcusd_es.csv", header = TRUE, sep = ",")
# Python equivalent: s_btcusd_pctchg = df_btcusd.loc[:, 'Adj Close'].pct_change()
# ---------------------------------------------------------------
s_es_0930_pctchg = diff(df_es$X09.30)/df_es$X09.30[-length(df_es$X09.30)]
s_btcusd_pctchg = diff(df_btcusd)/df_btcusd[-nrow(df_btcusd),] * 100
s_btcusd_cls_pctchg = diff(df_btcusd$Adj.Close)/df_btcusd$Adj.Close[-nrow(df_btcusd)] * 100
s_btcusd_pctchg = df_btcusd$Adj.Close/lag(df_btcusd$Adj.Close,-1) - 1
s_btcusd_pctchg = Delt(df_btcusd$Adj.Close,type='arithmetic')
df_es_1615_logchg = diff(log(df_matrix$es))
data <- ts(data.frame(x1=c(1:10), x2=c(11:20), x3=c(21:30)), start = c(2010,3), frequency = 4)
data_pctchg = data/lag(data,-1) - 1
# equivalently: data_pctchg = diff(data)/data[-nrow(data),] * 100
df_es_chg = df_es$X16.15
# Python equivalents: df.shape, len(df)
num_rows = dim(s_btcusd_pctchg)[1]
# equivalently: nrow(data) or length(data)
min(s_btcusd_pctchg[2:num_rows,])
max(s_btcusd_pctchg[2:num_rows,])
hist(data, breaks=seq(0,80,l=6),
freq=FALSE,col="orange",main="Histogram",
xlab="x",ylab="f(x)",yaxs="i",xaxs="i")
number_of_bins = 30 # example value; choose to suit the data
hist(data,breaks=seq(min(data),max(data),l=number_of_bins+1),
freq=FALSE,col="orange",
main="Histogram",xlab="x",ylab="f(x)",yaxs="i",xaxs="i")
library(ggplot2)
qplot(s_btcusd_pctchg[2:num_rows,], geom="histogram")
qplot(s_btcusd_pctchg[2:num_rows,], geom="histogram", binwidth = 0.05, xlim=c(-1,1))
s_rand <- runif(1000, 0.0, 1.0)
s_rand_diff = diff(s_rand)
s_randn<-rnorm(1000)
hist(s_rand)
hist(s_randn)
hist(s_rand_diff)
acf(s_rand)
pacf(s_rand)
acf(s_randn)
pacf(s_randn)
acf(lh)
pacf(lh)
acf(ldeaths)
acf(ldeaths, ci.type = "ma")
acf(ts.union(mdeaths, fdeaths))
ccf(mdeaths, fdeaths, ylab = "cross-correlation")
# (just the cross-correlations)
presidents # contains missing values
acf(presidents, na.action = na.pass)
pacf(presidents, na.action = na.pass)
library(forecast) # Acf/Pacf/taperedacf and the wineind data
Acf(wineind)
Pacf(wineind)
taperedacf(wineind, nsim=50)
taperedpacf(wineind, nsim=50)
ccf(x, y, na.action=na.pass)
"""
================================================================================
corrlelation
running correlation
cor.test
================================================================================
"""
# ------------------------------------------------------------------------------
# cor.test
# ------------------------------------------------------------------------------
x <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)
y <- c( 2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8)
## The alternative hypothesis of interest is that the
## Hunter L value is positively associated with the panel score.
cor.test(x, y, method = "kendall", alternative = "greater")
## => p=0.05972
cor.test(x, y, method = "kendall", alternative = "greater",
exact = FALSE) # using large sample approximation
## => p=0.04765
## Compare this to
cor.test(x, y, method = "spearm", alternative = "g")
cor.test(x, y, alternative = "g")
## Formula interface.
require(graphics)
pairs(USJudgeRatings)
cor.test(~ CONT + INTG, data = USJudgeRatings)
# ------------------------------------------------------------------------------
# Correlation Matrix
# ------------------------------------------------------------------------------
set.seed(955)
vvar <- 1:20 + rnorm(20,sd=3)
wvar <- 1:20 + rnorm(20,sd=5)
xvar <- 20:1 + rnorm(20,sd=3)
yvar <- (1:20)/2 + rnorm(20, sd=10)
zvar <- rnorm(20, sd=6)
# A data frame with multiple variables
data <- data.frame(vvar, wvar, xvar, yvar, zvar)
head(data)
library(ellipse)
# Make the correlation table
ctab <- cor(data)
round(ctab, 2)
#> vvar wvar xvar yvar zvar
#> vvar 1.00 0.61 -0.85 0.75 -0.21
#> wvar 0.61 1.00 -0.81 0.54 -0.31
#> xvar -0.85 -0.81 1.00 -0.63 0.24
#> yvar 0.75 0.54 -0.63 1.00 -0.30
#> zvar -0.21 -0.31 0.24 -0.30 1.00
# Make the graph, with reduced margins
plotcorr(ctab, mar = c(0.1, 0.1, 0.1, 0.1))
# Do the same, but with colors corresponding to value
colorfun <- colorRamp(c("#CC0000","white","#3366CC"), space="Lab")
plotcorr(ctab, col=rgb(colorfun((ctab+1)/2), maxColorValue=255),
mar = c(0.1, 0.1, 0.1, 0.1))
# ------------------------------------------------------------------------------
# Rolling Correlation
# ------------------------------------------------------------------------------
install.packages("xml2")
install.packages("tidyverse")
install.packages("alphavantager")
library(tidyquant) # Loads tidyverse, tidyquant, financial pkgs, xts/zoo
library(cranlogs) # For inspecting package downloads over time
library(corrr) # Tidy correlation tables and correlation plotting
library(cowplot) # Multiple plots with plot_grid()
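# Minimal rolling-correlation sketch (added for illustration -- the original
# notes stop after the library() calls; the simulated series and the
# 30-observation window are arbitrary choices):
library(zoo)
x <- rnorm(200); y <- 0.5 * x + rnorm(200)
xy <- zoo(cbind(x, y))
roll_cor <- rollapply(xy, width = 30,
                      FUN = function(m) cor(m[, 1], m[, 2]),
                      by.column = FALSE, align = "right")
plot(roll_cor, type = "l", ylab = "rolling correlation (window = 30)")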
"""
================================================================================
Scatterplot, plot
lm - regression
================================================================================
"""
# c(w[-1], 0) # shifts a vector w left by one position, padding with 0
# Compare to good relationship
# Ex 0/
duration = faithful$eruptions
waiting = faithful$waiting
head(cbind(duration, waiting))
duration = faithful$eruptions # the eruption durations
waiting = faithful$waiting # the waiting interval
plot(duration, waiting,
xlab="Eruption duration",
ylab="Time waited")
abline(lm(waiting ~ duration))
# The scatter plot of the eruption durations and waiting intervals
# reveals a positive linear relationship between them.
fit = lm(waiting ~ duration)
summary(fit)
plot(fit)
# Test residuals
mod = lm(prices[,1] ~ prices[,2])
res = mod$res
n = length(res)
mod2 = lm(res[-n] ~ res[-1])
summary(mod2)
# Durbin-Watson test from the lmtest package
library(lmtest)
dwtest(prices[,1] ~ prices[,2])
# Ex 1a/
input <- mtcars[,c('wt','mpg')]
print(head(input))
# Give the chart file a name.
png(file = "scatterplot.png")
# Plot the chart for cars with weight between 2.5 to 5 and mileage between 15 and 30.
plot(x = input$wt,y = input$mpg,
xlab = "Weight",
ylab = "Milage",
xlim = c(2.5,5),
ylim = c(15,30),
main = "Weight vs Milage"
)
# Save the file.
dev.off()
# Ex 1b/
attach(mtcars)
plot(wt, mpg, main="Scatterplot Example",
xlab="Car Weight ", ylab="Miles Per Gallon ", pch=19)
# Add fit lines
abline(lm(mpg~wt), col="red") # regression line (y~x)
lines(lowess(wt,mpg), col="blue") # lowess line (x,y)
# The scatterplot() function in the car package offers many enhanced features,
# including fit lines, marginal box plots, conditioning on a factor, and
# interactive point identification. Each of these features is optional.
# Enhanced Scatterplot of MPG vs. Weight
# by Number of Car Cylinders
library(car)
scatterplot(mpg ~ wt | cyl, data=mtcars,
xlab="Weight of Car", ylab="Miles Per Gallon",
main="Enhanced Scatter Plot",
labels=row.names(mtcars))
# When we have more than 2 variables and we want the correlation between one
# variable and each of the remaining ones, we use a scatterplot matrix:
# the pairs() function creates matrices of scatterplots.
png(file = "scatterplot_matrices.png")
# Plot the matrices between 4 variables giving 12 plots.
# One variable with 3 others and total 4 variables.
pairs(~wt+mpg+disp+cyl,data = mtcars,
main = "Scatterplot Matrix")
pairs(~cyl+mpg+disp+drat+wt,data=mtcars,
main="Simple Scatterplot Matrix")
# Save the file.
dev.off()
shift <- function(x, lag) {
n <- length(x)
xnew <- rep(NA, n)
if (lag < 0) {
xnew[1:(n-abs(lag))] <- x[(abs(lag)+1):n]
} else if (lag > 0) {
xnew[(lag+1):n] <- x[1:(n-lag)]
} else {
xnew <- x
}
return(xnew)
}
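# Usage sketch for shift() (added for illustration):
shift(1:5, 1)   # NA  1  2  3  4  (positive lag shifts right)
shift(1:5, -1)  #  2  3  4  5 NA  (negative lag shifts left)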
# The lattice package provides options to condition the scatterplot matrix on a factor.
# Scatterplot Matrices from the lattice Package
library(lattice)
super.sym <- trellis.par.get("superpose.symbol")
splom(mtcars[c(1,3,5,6)], groups=cyl, data=mtcars,
panel=panel.superpose,
key=list(title="Three Cylinder Options",
columns=3,
points=list(pch=super.sym$pch[1:3],
col=super.sym$col[1:3]),
text=list(c("4 Cylinder","6 Cylinder","8 Cylinder"))))
# Ex 2/ rnorm
set.seed(955)
# Make some noisily increasing data
dat <- data.frame(xvar = 1:100 + rnorm(20,sd=3),
yvar = 1:100 + rnorm(20,sd=3),
zvar = 1:100 + rnorm(20,sd=3))
head(dat)
# Plot the points using the vectors xvar and yvar
plot(dat$xvar, dat$yvar)
# Same as previous, but with formula interface
plot(yvar ~ xvar, dat)
# Add a regression line
fitline <- lm(dat$yvar ~ dat$xvar)
abline(fitline)
# Matrices plot
plot(dat[,1:3])
# scatterplot matrix, with regression lines
# and histogram/boxplot/density/qqplot/none along the diagonal
library(car)
scatterplotMatrix(dat[,1:3],
diagonal="histogram",
smooth=FALSE)
| /flask_blueprint/apps/app_quant/r_statistics.R | no_license | daneschoen/quant | R | false | false | 9,083 | r |
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 7.68310886409106e-239, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result)
| /dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609868251-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,199 | r |
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(1.72348146030437e+218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
| /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615856851-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 391 | r |
# Script for formatting LVM segmentation phenotype data for GWAS
# Depends
library(data.table)
library(sktools)
# Load LVM segmentation data
seg_lvm <- fread(file='seg_lvm_std.csv')
# Load censor file (for age)
censor <- fread(file='censor_202106.csv')
# Load instance2 data (for age at MRI)
instance2 <- fread(file='instance2_dates.csv')
# Join to get MRI date
setkey(instance2,sample_id);setkey(seg_lvm,sample_id)
seg_lvm[instance2,':='(mri_date = i.value)]
# Join to get birthdate
setkey(censor,sample_id)
seg_lvm[censor,':='(birthdate = i.birthdate)]
# Remove withdrawals
withdrawals <- fread('w7089_20220222.csv') # UKBB withdrawals
seg_lvm <- seg_lvm[!(sample_id %in% withdrawals$V1)]
# Remove in plink but not imputed
not_imputed <- fread('bolt.in_plink_but_not_imputed.FID_IID.968.txt') # in PLINK but not imputed
seg_lvm <- seg_lvm[!(sample_id %in% not_imputed$V1)]
# Keep only in PLINK
gt <- fread(file='in_v2_gt.csv')
seg_lvm <- seg_lvm[(sample_id %in% gt$FID)]
# Remove individuals with genotype missingness > 0.1
missing_indiv <- fread(file='v2_gt_missing_indiv10.csv')
seg_lvm <- seg_lvm[!(sample_id %in% missing_indiv$IID)]
# Age at MRI
format_date(seg_lvm,cols=c('mri_date','birthdate'))
seg_lvm[,age_at_mri := as.numeric(mri_date - birthdate)/365.25]
## PLINK output function
create<-function(trait,exclude_all_both=NULL,exclude_all_cases=NULL,exclude_all_controls=NULL,
exclude_incident_cases=NULL,exclude_flexible=NULL,data){
##phenotype file
a <- data
a<-a[!is.na(get(trait))]
print(paste0('Total N:',nrow(a)))
##sample qc file
b<-fread("ukb_sqc_v2_7089.tsv",header=T)
setkey(a,'sample_id'); setkey(b,'eid')
ab <- a[b,nomatch=0]
ab[,':='(array_UKBB = ifelse(genotyping_array=='UKBB',1,0))]
print(paste0('N after merge with sample QC file:',nrow(ab)))
##remove poor quality
ab[,':='(ex_poor = ifelse(het_missing_outliers==1 | putative_sex_chromosome_aneuploidy==1,1,0),
ex_sex = ifelse(Submitted_Gender==Inferred_Gender,0,1),
ex_misKin = ifelse(ab$excluded_from_kinship_inference==1,1,0))]
#high quality data
ab <- ab[ab$ex_sex==0]
print(paste0('N after removal of sex mismatch:',nrow(ab)))
ab <- ab[ab$ex_poor==0]
print(paste0('N after removal of poor:',nrow(ab)))
#ab <- ab[ab$ex_misKin==0]
#print(paste0('N after removal of missing kinship inference:',nrow(ab)))
# Loop over "exclude all both" phenotypes - all individuals with exclusion phenotype at any time removed for both cases/controls
if (length(exclude_all_both)!=0){
for (i in exclude_all_both){
exclude <- fread(paste0("/phenotypes/",i,'.tab.tsv'),header=T)
setkey(ab,sample_id); setkey(exclude,sample_id)
ab[exclude,':='(exclude_prev=i.prevalent_disease,exclude_incd=i.incident_disease,exclude_censor = i.censor_age)]
ab[,exclude := ifelse(c(c(!is.na(exclude_prev) & exclude_prev==1) | c(!is.na(exclude_incd) & exclude_incd == 1)),1,0)]
print(paste0('I am going to exclude ',sum(ab$exclude),' individuals for diagnosis: ',i,' occurring at any time for cases/controls'))
ab <- ab[exclude==0]
ab <- ab[,!(c('exclude_prev','exclude_incd','exclude_censor','exclude'))]
}}
# Loop over "exclude all cases" phenotypes - all individuals with exclusion phenotype at any time removed for cases
if (length(exclude_all_cases)!=0){
for (i in exclude_all_cases){
exclude <- fread(paste0("/phenotypes/",i,'.tab.tsv'),header=T)
setkey(ab,sample_id); setkey(exclude,sample_id)
ab[exclude,':='(exclude_prev=i.prevalent_disease,exclude_incd=i.incident_disease,exclude_censor = i.censor_age)]
ab[,exclude := ifelse(c(c(!is.na(get(trait)) & get(trait)==1) &
c(c(!is.na(exclude_prev) & exclude_prev==1) | c(!is.na(exclude_incd) & exclude_incd==1))),1,0)]
print(paste0('I am going to exclude ',sum(ab$exclude),' individuals for diagnosis: ',i,' occurring at any time for cases only'))
ab <- ab[exclude==0]
ab <- ab[,!(c('exclude_prev','exclude_incd','exclude_censor','exclude'))]
}}
# Loop over "exclude all controls" phenotypes - all individuals with exclusion phenotype at any time removed for controls
if (length(exclude_all_controls)!=0){
for (i in exclude_all_controls){
exclude <- fread(paste0("/phenotypes/",i,'.tab.tsv'),header=T)
setkey(ab,sample_id); setkey(exclude,sample_id)
ab[exclude,':='(exclude_prev=i.prevalent_disease,exclude_incd=i.incident_disease,exclude_censor = i.censor_age)]
ab[,exclude := ifelse(c(c(get(trait)==0 | is.na(get(trait))) &
c(c(!is.na(exclude_prev) & exclude_prev==1) | c(!is.na(exclude_incd) & exclude_incd==1))),1,0)]
print(paste0('I am going to exclude ',sum(ab$exclude),' individuals for diagnosis: ',i,' occurring at any time for controls only'))
ab <- ab[exclude==0]
ab <- ab[,!(c('exclude_prev','exclude_incd','exclude_censor','exclude'))]
}}
# Loop over "exclude incident" - only cases with exclusion phenotype before disease removed
if (length(exclude_incident_cases)!=0){
for (i in exclude_incident_cases){
exclude <- fread(paste0("/phenotypes/",i,'.tab.tsv'),header=T)
setkey(ab,sample_id); setkey(exclude,sample_id)
ab[exclude,':='(exclude_disease = i.has_disease, exclude_prev = i.prevalent_disease, exclude_censor = i.censor_date)]
ab[,exclude := ifelse(c(c(!is.na(get(trait)) & get(trait)==1) &
c(c(!is.na(exclude_disease) & (exclude_censor <= censor_date)) |
c(!is.na(exclude_disease) & exclude_prev==1))),1,0)]
print(paste0('I am going to exclude ',sum(ab$exclude),' individuals for diagnosis: ',i,' occurring before case diagnosis'))
ab <- ab[exclude==0]
ab <- ab[,!(c('exclude_prev','exclude_censor','exclude_disease','exclude'))]
}}
# Loop over "exclude flexible" - excludes any instance of exclusion phenotype among controls, and only exclusion phenotype prior to disease for cases
if (length(exclude_flexible)!=0){
for (i in exclude_flexible){
exclude <- fread(paste0("/phenotypes/",i,'.tab.tsv'),header=T)
setkey(ab,sample_id); setkey(exclude,sample_id)
ab[exclude,':='(exclude_incd = i.incident_disease, exclude_disease = i.has_disease, exclude_prev = i.prevalent_disease, exclude_censor = i.censor_date)]
ab[,exclude := ifelse(c(!is.na(get(trait)) & get(trait)==1),
ifelse(c(!is.na(exclude_prev) & exclude_prev==1),1,
ifelse(c(!is.na(exclude_incd) & (exclude_incd==1) & (exclude_censor <= censor_date)),1,0)),
ifelse(c(!is.na(exclude_disease) & exclude_disease==1),1,0))]
print(paste0('I am going to exclude ',sum(ab$exclude),' individuals for diagnosis: ',i,' occurring before case diagnosis or at any time for controls'))
ab <- ab[exclude==0]
ab <- ab[,!(c('exclude_disease','exclude_prev','exclude_incd','exclude_censor','exclude'))]
}}
ab[,':='(male = ifelse(Inferred_Gender=='M',1,0))]
#######
###test AF related PCs in each cleaned dataset
#######
#dim(subset(ab, used_in_pca_calculation==1 & ex_sex==0 & ex_poor==0 & ex_misKin==0 & white==1))
#######
##all white, no relatives
#######
form1<-formula(paste0(trait,"~age_at_mri + ",paste0("PC",1:40,collapse="+"),"+ array_UKBB + male",collapse="+"))
s1<-summary(lm(form1,data=ab))$coefficients
s1<-s1[substring(rownames(s1),1,2)=="PC",]
ab.1<-ab[,.SD,.SDcols=c("age_at_mri",trait)]
allN_1<-nrow(ab.1)
male_1<-nrow(ab[ab[["male"]]==1 & !is.na(ab.1[[trait]])])
pcs1<-paste(rownames(s1)[1:5],collapse=",") # First 5 PCs
#######
##create summary file
#######
t1<-c("all",allN_1,"mean_age",round(mean(ab[!is.na(trait)]$age_at_mri),2),"sd_age",round(sd(ab[!is.na(trait)]$age_at_mri),2),"male_N",male_1,"male%",round(mean(ab[!is.na(trait)]$male)*100,2),"related-PCs",ifelse((pcs1!=""),pcs1,"None"))
write.table(t1,file=paste0("/summary_",trait,".txt"),row.names=F,quote=F,sep="\t")
#######
##create phenotype file
#######
## Choose columns
pheno<-ab[,c("sample_id",trait,"age_at_mri",rownames(s1)[1:5],"array_UKBB","male"),with=F]
## Format for PLINK
setnames(pheno,"sample_id","FID")
pheno[,':='(IID = FID)]
setcolorder(pheno,c('FID','IID'))
print(paste0('Final phenotype N: ',nrow(pheno)))
write.table(pheno,file=paste0('/lvm_gwas/',trait,".tsv"),sep="\t",col.names =T,row.names = F,quote = F)
}
create(trait="lvmi_seg_adjusted_27",data=seg_lvm)
# Exclusions list for BOLT
## Read processed phenotype
seg_lvm <- fread(file='lvmi_seg_adjusted_27.tsv')
## Load list of all individuals in UKBB (includes people in plink GT set that are not in censor files)
ukbb_all <- fread(file='ukbb_all.csv')
exclusions <- ukbb_all[!(FID %in% seg_lvm$FID)]
## Add individuals that are in the plink dataset but not the censor file
write.table(exclusions,file='lvmi_seg_exclusion_27.tsv',sep="\t",col.names =T,row.names = F,quote = F)
| /prep_gwas/prep_gwas_lvm_seg27_std.R | permissive | shaankhurshid/lvmass_gwas | R | false | false | 9,418 | r |
height <- data.frame(
id = 1:15,
group = rep(c("monitor", "hmd", "cave"), each = 5),
anx_lvl1 = c( 5, 5, 10, 10, 5, 10, 5, 15, 5, 5, 10, 15, 15, 5, 10),
anx_lvl2 = c(15, 5, 15, 10, 10, 25, 20, 30, 25, 25, 30, 40, 35, 30, 45),
anx_lvl3 = c(35, 35, 50, 40, 35, 50, 45, 50, 50, 55, 65, 40, 60, 65, 50)
)
height <- tidyr::gather(height, "level", "anxiety", anx_lvl1:anx_lvl3)
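# Quick check of the long format produced by gather() (illustrative addition,
# not part of the original data-raw script): mean anxiety by group and level
aggregate(anxiety ~ group + level, data = height, FUN = mean)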
| /data-raw/height.R | no_license | dgromer/psymisc | R | false | false | 390 | r |
## PLOT 1
## Call getDataset
source("getDataset.R")
## Store the data on "tabela"
tabela <- getDataset()
## Build the histogram on screen
hist(tabela$Global_active_power, breaks = 12, col = "red", xlab = NULL, main = NULL)
title(main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
## Store the graphic in "plot1.png", if it does not exists
if(!file.exists("plot1.png")){
dev.copy(png, file = "plot1.png", width = 480, height = 480, units = "px")
dev.off()
}
| /plot1.R | no_license | euvictorfarias/ExData_Plotting1 | R | false | false | 503 | r |
#mean = 100
#sigma = 15
#probability of IQ below 115
pnorm(115, 100, 15)
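#probability of IQ above 115 (complement; added for illustration)
pnorm(115, 100, 15, lower.tail = FALSE)
#IQ score at the 97.5th percentile
qnorm(0.975, 100, 15)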
| /REviewing Week 2 and 3/review.R | no_license | memasanz/stats420 | R | false | false | 74 | r |
JS.direct <- function (object) {
nRmrz <- JS.counts(object)
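# Standard Jolly-Seber summary statistics returned by JS.counts()
# (descriptive note): n = number caught, R = number released,
# m = marked animals among the n caught, r = of the R released, the number
# recaptured later, z = marked animals not caught now but caught later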
J <- nrow(nRmrz)
est <- with(nRmrz, {
M <- m + (R+1) * z / (r+1) # Seber 1982 bias adjusted formula
p <- m/M; p[c(1,J)] <- NA
varp <- p^2 * (1-p)^2 * (1/r - 1/R + 1/m + 1/z)
N <- ((n + 1) * M)/(m + 1); N[c(1,J)] <- NA
pm <- M / N
varN <- N * (N - n) * (((M - m + R) * (1/r - 1/R)) / M + (1 - pm)/ m)
phi <- M[-1] / (M[-J] - m[-J] + R[-J]); phi[J-1] <- NA
varphi <- phi[-J]^2 * ((M[-1]- m[-1]) * (M[-1]- m[-1] + R[-1]) *
(1/r[-1] - 1/R[-1]) / M[-1]^2 +
(M[-J] - m[-J]) * (1/r[-J] - 1/R[-J]) /
(M[-J] - m[-J] + R[-J]) + (1 - phi[-J]) / M[-1])
varphi <- c(varphi, NA)
phi <- c(phi, NA)
covphi <- ( - phi[-J] * phi[-1] * (N[-1] - m[-1]) * (1/r[-1] - 1/R[-1]) ) / M[-1]
covphi <- c(covphi, NA)
pm <- M/N
B <- N[-1] - phi[-J] * (N[-J] - n[-J] + R[-J])
B <- c(B, NA)
varB <- B[-J]^2 * (M[-1] - m[-1]) * (M[-1] - m[-1] + R[-1]) * (1/r[-1] - 1/R[-1]) / M[-1]^2 +
(M[-J] - m[-J]) * (phi[-J] * R[-J] * (N[-J] - M[-J]) / M[-J])^2 * (1/r[-J] - 1/R[-J]) / (M[-J] - m[-J] + R[-J]) +
(N[-J] - n[-J]) * (N[-1] - B[-J]) * (N[-J] - M[-J]) * (1 - phi[-J]) / N[-J] / (M[-J] - m[-J] + R[-J]) +
N[-1] * (N[-1] - n[-1]) * (N[-1] - M[-1]) / N[-1] / m[-1] +
phi[-J]^2 * N[-J] * (N[-J] - n[-J]) * (N[-J] - M[-J]) / N[-J] / m[-1]
varB <- c(varB, NA)
data.frame(
p = p, sep = varp^0.5,
N = N, seN = varN^0.5,
phi = phi, sephi = varphi^0.5, covphi = covphi,
B = B, seB = varB^0.5
)
})
cbind(nRmrz, est)
}
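# Usage sketch (illustrative; kept as a comment so nothing runs at package
# load). `ch` stands for a hypothetical multi-session capture-history object:
# est <- JS.direct(ch)
# est[, c("N", "seN", "phi", "sephi", "B", "seB")]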
| /R/JSdirect.R | no_license | MurrayEfford/openCR | R | false | false | 1,906 | r |
# load libraries
library(dplyr)
# Read input data
powerData <- read.table("household_power_consumption.txt",sep=";",header = TRUE)
powerDataNew <- mutate(powerData,xaxis = paste(powerData$Date,powerData$Time,sep=":"))
powerDataNew$Date <- as.Date(powerDataNew$Date , "%d/%m/%Y")
powerDataMini <- filter(powerDataNew, Date ==as.Date("2007-02-01") | Date == as.Date("2007-02-02"))
powerDataMini$xaxis <- strptime(powerDataMini$xaxis,format = "%d/%m/%Y:%H:%M:%S")
drawsubmetering <- function(){
with(powerDataMini,plot(xaxis,as.numeric(as.character(Sub_metering_1)),type ="l",ylab="Energy Sub Metering",xlab=""))
lines(powerDataMini$xaxis,as.numeric(as.character(powerDataMini$Sub_metering_2)),col="red")
lines(powerDataMini$xaxis,as.numeric(as.character(powerDataMini$Sub_metering_3)),col="blue")
legend("topright",lwd=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_1","Sub_metering_1"),cex=0.5,pt.cex=1)
}
par(mfrow=c(2,2),mar=c(4,4,2,1),oma=c(0,0,2,0))
with(powerDataMini,plot(xaxis,as.numeric(as.character(Global_active_power)),type ="l",xlab="",ylab="Global Active Power (kilowatts)"))
with(powerDataMini,plot(xaxis,as.numeric(as.character(Voltage)),type="l",ylab="Voltage",xlab=""))
drawsubmetering()
with(powerDataMini,plot(xaxis,as.numeric(as.character(Global_reactive_power)),type ="l",xlab="",ylab="Global Reactive Power (kW)"))
dev.copy(png,file="plot4.png",width=480,height=480)
dev.off()
| /plot4.R | no_license | obliex/ExData_Plotting1 | R | false | false | 1,460 | r |
#' Fit a Bayesian model to A/B test data.
#'
#' @description This function fits a Bayesian model to your A/B testing sample data. See \bold{Details} for more information on usage.
#'
#' @param A_data Vector of collected samples from recipe A
#' @param B_data Vector of collected samples from recipe B
#' @param priors Named vector or named list providing priors as required by the specified distribution:
#' \itemize{
#' \item For 'bernoulli' distribution \code{list("alpha" = val1, "beta" = val2)}
#' \item For 'normal' distribution \code{c("mu" = val1, "lambda" = val2, "alpha" = val3, "beta" = val4)}
#' \item For 'lognormal' distribution \code{c("mu" = val1, "lambda" = val2, "alpha" = val3, "beta" = val4)}
#' \item For 'poisson' distribution \code{c("shape" = val1, "rate" = val2)}
#' \item For 'exponential' distribution \code{list("shape" = val1, "rate" = val2)}
#' \item For 'uniform' distribution \code{c("xm" = val1, "alpha" = val2)}
#' \item For 'bernoulliC' distribution: same prior definitions as 'bernoulli'
#' \item For 'poissonC' distribution: same prior definitions as 'poisson'
#' }
#'
#' See \link{plotDistributions} or the \emph{Note} section of this help document for more info.
#' @param n_samples Number of posterior samples to draw. Should be large enough for the distribution to converge. 1e5 is a good rule of thumb.
#' Not used for closed form tests.
#' @param distribution Distribution of underlying A/B test data.
#' @return A \code{bayesTest} object of the appropriate distribution class.
#'
#' @details \code{bayesTest} is the main driver function of the \bold{bayesAB} package. The input takes two vectors of data,
#' corresponding to recipe A and recipe B of an A/B test. Order does not matter, except for interpretability of the final
#' plots and intervals/point estimates. The Bayesian model for each distribution uses conjugate priors which must
#' be specified at the time of invoking the function. Currently, there are \emph{eight} supported distributions for the underlying data:
#'
#' \itemize{
#'
#' \item Bernoulli: If your data is well modeled by 1s and 0s, according to a specific probability \code{p} of a 1 occurring
#' \itemize{\item For example, click-through rate / conversions for a page
#' \item Data \bold{must} be in a \{0, 1\} format where 1 corresponds to a 'success' as per the Bernoulli distribution
#' \item Uses a conjugate \code{Beta} distribution for the parameter \bold{p} in the Bernoulli distribution
#' \item \code{alpha} and \code{beta} must be set for a prior distribution over \bold{p}
#' \itemize{\item alpha = 1, beta = 1 can be used as a diffuse or uniform prior}}
#'
#' \item Normal: If your data is well modeled by the normal distribution, with parameters \eqn{\mu}, \eqn{\sigma^2} controlling mean and variance
#' of the underlying distribution
#' \itemize{\item Data \emph{can} be negative if it makes sense for your experiment
#' \item Uses a conjugate \code{NormalInverseGamma} distribution for the parameters \bold{\eqn{\mu}} and \bold{\eqn{\sigma^2}} in the
#' Normal Distribution.
#' \item \code{mu}, \code{lambda}, \code{alpha}, and \code{beta} must be set for prior
#' distributions over \bold{\eqn{\mu, \sigma^2}} in accordance with the parameters of the conjugate prior distributions:
#' \itemize{\item \eqn{\mu, \sigma^2} ~ NormalInverseGamma(mu, lambda, alpha, beta)}
#' \item This is a bivariate distribution (commonly used to model mean and variance of the normal distribution).
#' You may want to experiment with both this distribution and the \code{plotNormal} and \code{plotInvGamma} outputs
#' separately before arriving at a suitable set of priors for the Normal and LogNormal \code{bayesTest}}.
#'
#' \item LogNormal: If your data is well modeled by the log-normal distribution, with parameters \eqn{\mu}, \eqn{\sigma^2} as the \bold{parameters}
#' of the corresponding log-normal distribution (log of data is ~ N(\eqn{\mu}, \eqn{\sigma^2}))
#' \itemize{\item Support for a log-normal distribution is strictly positive
#' \item The Bayesian model requires same conjugate priors on \eqn{\mu}, \eqn{\sigma^2} as for the Normal Distribution priors
#' \item Note: The \eqn{\mu} and \eqn{\sigma^2} are not the mean/variance of lognormal numbers themselves but are rather the
#' corresponding parameters of the lognormal distribution. Thus, posteriors for the statistics 'Mean' and 'Variance'
#' are returned alongside 'Mu' and 'Sig_Sq' for interpretability.}
#'
#' \item Poisson: If your data is well modeled by the Poisson distribution, with parameter \eqn{\lambda} controlling the average number of events
#' per interval.
#' \itemize{\item For example, pageviews per session
#' \item Data \emph{must} be strictly integral or 0.
#' \item Uses a conjugate \code{Gamma} distribution for the parameter \bold{\eqn{\lambda}} in the Poisson Distribution
#' \item \code{shape} and \code{rate} must be set for prior distribution over \eqn{\lambda}}
#'
#' \item Exponential: If your data is well modeled by the Exponential distribution, with parameter \eqn{\lambda} controlling the
#' rate of decay.
#' \itemize{\item For example, time spent on a page or customers' LTV
#' \item Data \emph{must} be strictly >= 0
#' \item Uses a conjugate \code{Gamma} distribution for the parameter \bold{\eqn{\lambda}} in the Exponential Distribution
#' \item \code{shape} and \code{rate} must be set for prior distribution over \eqn{\lambda}}
#'
#' \item Uniform: If your data is well modeled by the Uniform distribution, with parameter \eqn{\theta} controlling the \emph{max} value.
#' \itemize{\item bayesAB has only implemented Uniform(0, \eqn{\theta}) forms
#' \item For example, estimating max/total inventory size from individually numbered snapshots
#' \item Data \emph{must} be strictly > 0
#' \item Uses a conjugate \code{Pareto} distribution for the parameter \bold{\eqn{\theta}} in the Uniform(0, \eqn{\theta}) Distribution
#' \item \code{xm} and \code{alpha} must be set for prior distribution over \eqn{\theta}}
#'
#' \item BernoulliC: Closed form (computational) calculation of the 'bernoulli' bayesTest. Same priors are required.
#' \item PoissonC: Closed form (computational) calculation of the 'poisson' bayesTest. Same priors are required.
#' }
#'
#' @note For 'closed form' tests, you do not get a distribution over the posterior, but simply P(A > B) for the parameter in question.
#'
#' Choosing priors correctly is very important. Please see http://fportman.com/blog/bayesab-0-dot-7-0-plus-a-primer-on-priors/ for a detailed example of choosing priors
#' within bayesAB. Here are some ways to leverage objective/diffuse (assigning equal probability to all values) priors:
#'
#' \itemize{\item \code{Beta}(1, 1)
#' \item \code{Gamma}(eps, eps) ~ \code{Gamma}(.00005, .00005) will be effectively diffuse
#' \item \code{InvGamma}(eps, eps) ~ \code{InvGamma}(.00005, .00005) will be effectively diffuse
#' \item \code{Pareto}(eps, eps) ~ \code{Pareto}(.005, .005) will be effectively diffuse}
#'
#' Keep in mind that the Prior Plots for bayesTest's run with diffuse priors may not plot correctly as they will not be truncated as they
#' approach infinity. See \link{plot.bayesTest} for how to turn off the Prior Plots.
#'
#' @examples
#' A_binom <- rbinom(100, 1, .5)
#' B_binom <- rbinom(100, 1, .6)
#'
#' A_norm <- rnorm(100, 6, 1.5)
#' B_norm <- rnorm(100, 5, 2.5)
#'
#' AB1 <- bayesTest(A_binom, B_binom,
#' priors = c('alpha' = 1, 'beta' = 1),
#' distribution = 'bernoulli')
#' AB2 <- bayesTest(A_norm, B_norm,
#' priors = c('mu' = 5, 'lambda' = 1, 'alpha' = 3, 'beta' = 1),
#' distribution = 'normal')
#'
#' print(AB1)
#' summary(AB1)
#' plot(AB1)
#'
#' summary(AB2)
#'
#' # Create a new variable that is the probability multiplied
#' # by the normally distributed variable (expected value of something)
#' AB3 <- combine(AB1, AB2, f = `*`, params = c('Probability', 'Mu'), newName = 'Expectation')
#'
#' print(AB3)
#' summary(AB3)
#' plot(AB3)
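#'
#' # Hypothetical extra example (not exhaustive): a Poisson test with
#' # effectively diffuse Gamma priors, following the note on objective priors
#' A_pois <- rpois(100, 6.5)
#' B_pois <- rpois(100, 5.5)
#' AB4 <- bayesTest(A_pois, B_pois,
#' priors = c('shape' = .00005, 'rate' = .00005),
#' distribution = 'poisson')
#' summary(AB4)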
#'
#' @export
bayesTest <- function(A_data,
B_data,
priors,
n_samples = 1e5,
distribution = c('bernoulli', 'normal', 'lognormal',
'poisson', 'exponential', 'uniform',
'bernoulliC', 'poissonC')) {
# Coerce inputs
distribution <- match.arg(distribution)
data <- list(A_data = A_data, B_data = B_data)
priors <- as.list(priors)
# Import generic data/priors checks
genericDataChecks <- list(
checkNumericData,
checkCompleteData
)
genericPriorChecks <- list(
checkNumericPriors
)
# Import and prepare distribution specific functions
Funcs <- getDistribution(distribution)
priorArgs <- names(formals(Funcs$posteriors))
priorArgs <- removeGenericArgs(priorArgs)
###
# Generic checks
###
funcLooper(data, genericDataChecks)
funcLooper(priors, genericPriorChecks)
# The following are explicit for clarity in error messages
if(length(priorArgs) != length(priors)) {
stop("Incorrect number of priors for supplied distribution.")
}
if(! all(priorArgs %in% names(priors))) {
stop("Misnamed priors provided for supplied distribution.")
}
###
# Distribution specific checks
###
funcLooper(data, Funcs$dataChecks)
funcLooper(priors, Funcs$priorChecks)
# Construct call for posterior
fcall <- c(data, priors)
if(!isClosed(distribution)) fcall <- c(fcall, n_samples = n_samples)
posteriors <- do.call(Funcs$posteriors, fcall)
result <- list(
inputs = list(
A_data = list(A = A_data),
B_data = list(B = B_data),
priors = priors,
n_samples = n_samples,
distribution = distribution
),
prior = Funcs$prior,
posteriors = posteriors
)
class(result) <- ifelse(isClosed(distribution), 'bayesTestClosed', 'bayesTest')
return(result)
}
| /R/bayesTest.R | no_license | Jmetrics86/bayesAB | R | false | false | 10,453 | r |
# FUNCTION TO GET FREDDIE MAC HPI DATA
# Seasonally Adjusted - State HPI
# Source: http://www.freddiemac.com/research/indices/house-price-index.page
#------------------------------------------------------------
if(!require(pacman)){
install.packages("pacman")
library(pacman)
}
p_load(tidyverse, lubridate, readr, readxl)
get_fre_state_hpi <- function(fre_url = NULL, from_yr = NULL){
if(missing(fre_url)){
fre_url <- "http://www.freddiemac.com/fmac-resources/research/docs/State_and_US_SA.xls"
}else{
fre_url <- fre_url
}
tf <- tempfile()
fname <- basename(fre_url)
download.file(fre_url, destfile = paste0(tf, fname), mode = "wb")
d_fre_load <- read_excel(paste0(tf, fname), skip = 5)
drop_rows <- seq(min(as.numeric(which(rowSums(is.na(d_fre_load)) >10,
arr.ind = T))),nrow(d_fre_load))
d_fre_load <- d_fre_load%>%
filter(!row.names(d_fre_load) %in% drop_rows)
dt_replace_mo <- str_sub(max(d_fre_load$Month, na.rm = T), start = -2, -1)
dt_replace_yr <- str_sub(max(d_fre_load$Month, na.rm = T), start = 0, end = 4)
new_date <- seq(as.Date("1975-01-01"),
as.Date(sprintf("%s-%s-01",dt_replace_yr, dt_replace_mo)), by = "month")
d_fre <- d_fre_load%>%
dplyr::select(-Month)%>%
mutate(date = new_date)%>%
mutate_if(is.character, as.numeric)%>%
pivot_longer(-date, names_to= "state_abb", values_to = "value")%>%
mutate(dt_mo = month(date),
dt_yr = year(date),
value = as.numeric(value))%>%
group_by(state_abb)%>%
mutate(hpa_yoy = value/lag(value, 12)-1,
hpa_mom1 = value/lag(value, 1)-1)%>%
ungroup()%>%
filter(!state_abb == "United States seasonally adjusted")
unlink(tf)
if(missing(from_yr)){
return(d_fre)
}else if (from_yr%>%as.numeric() > max(year(d_fre$date), na.rm = T) ||
from_yr%>%as.numeric() < min(year(d_fre$date), na.rm = T)) {
paste0("Variable dt_yr must be within ", min(year(d_fre$date), na.rm = T),
" and ", max(year(dt_fre$date), na.rm = T))
}else{
from_yr <- (from_yr%>%as.numeric()-1)%>%as.character()
from_dt <- ymd(sprintf("%s-12-01", from_yr))
d_fre_sub <- d_fre%>%
filter(date > from_dt)
return(d_fre_sub)
# print(paste0("Data set from: ",
# range(d_fre_sub$date)[1],
# " to ", range(d_fre_sub$date)[2]))
}
}
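# Usage sketch (hypothetical call; requires internet access to freddiemac.com):
# d_hpi <- get_fre_state_hpi(from_yr = 2010) # SA state HPI from Jan 2010 onward
# head(d_hpi)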
| /hpi/f_fre_state_hpi_data.R | no_license | stuartiquinn/R_my_functions | R | false | false | 2,536 | r |
#' Bdgramr
#'
#' This function returns a dataframe with the coordinates of a unique body diagram.
#'
#' @param data A dataframe. The dataframe with all the coordinate points. Must be equal to `data`.
#' @param model A character string. One of the available models. Check model_types(data = data) if not sure. It defaults to 'futuristic_male'.
#'
#' @return A data frame:
#' \describe{
#' \item{Id}{Numeric. Unique id of each muscle area.}
#' \item{View}{A character String. The type of view (Anterior, Posterior, Left or Right)}
#' \item{Part}{A character string. Upper or Lower Body.}
#' \item{Group}{A character String. The name of the muscle group.}
#' \item{Muscle}{A character String. The name of the muscle}
#' \item{Side}{A character String. Whether it it left or right side of the body}
#' \item{x}{A number. x coordinates}
#' \item{y}{A number. y coordinates}
#' }
#'
#'
#' @export
#' @examples
#' bdgramr(data = data)
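#' # Illustrative sketch (assumes ggplot2 is loaded; x, y and Id are the
#' # coordinate and id columns documented in @return above):
#' # ggplot(bdgramr(data = data), aes(x, y, group = Id)) + geom_polygon()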
#'
bdgramr <- function(data = data, model = "futuristic_male"){
result <- tryCatch({
data <- data %>% dplyr::filter(Model == model) %>%
dplyr::select(-Model)
data
}, error = function(cond){
message("Check that data is equal data and model is equal to one of the models in `model_types(data = data)`")
return(NA)
})
return(result)
}
| /R/bodygram.R | permissive | pateibe/bdgramR | R | false | false | 1,385 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/delim_record_spec.R
\name{delim_record_spec}
\alias{delim_record_spec}
\alias{csv_record_spec}
\alias{tsv_record_spec}
\title{Specification for reading a record from a text file with delimited values}
\usage{
delim_record_spec(example_file, delim = ",", skip = 0, names = NULL,
types = NULL, defaults = NULL)
csv_record_spec(example_file, skip = 0, names = NULL, types = NULL,
defaults = NULL)
tsv_record_spec(example_file, skip = 0, names = NULL, types = NULL,
defaults = NULL)
}
\arguments{
\item{example_file}{File that provides an example of the records to be read.
If you don't explicitly specify names and types (or defaults) then this
file will be read to generate default values.}
\item{delim}{Character delimiter to separate fields in a record (defaults to
",")}
\item{skip}{Number of lines to skip before reading data. Note that if
\code{names} is explicitly provided and there are column names within the
file then \code{skip} should be set to 1 to ensure that the column names are
bypassed.}
\item{names}{Character vector with column names (or \code{NULL} to automatically
detect the column names from the first row of \code{example_file}).
If \code{names} is a character vector, the values will be used as the names of
the columns, and the first row of the input will be read into the first row
of the dataset. Note that if the underlying text file also includes column
names in its first row, this row should be skipped explicitly with \code{skip = 1}.
If \code{NULL}, the first row of the example_file will be used as the column
names, and will be skipped when reading the dataset.}
\item{types}{Column types. If \code{NULL} and \code{defaults} is specified then types
will be imputed from the defaults. Otherwise, all column types will be
imputed from the first 1000 rows of the \code{example_file}. This is convenient
(and fast), but not robust. If the imputation fails, you'll need to supply
the correct types yourself.
Types can be explicitly specified in a character vector as "integer",
"double", and "character" (e.g. \code{types = c("double", "double", "integer")}).
Alternatively, you can use a compact string representation where each
character represents one column: c = character, i = integer, d = double
(e.g. \code{types = "ddi"}).}
\item{defaults}{List of default values which are used when data is
missing from a record (e.g. \code{list(0, 0, 0L)}). If \code{NULL} then defaults will
be automatically provided based on \code{types} (\code{0} for numeric columns and
\code{""} for character columns).}
}
\description{
Specification for reading a record from a text file with delimited values
}
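% Usage sketch: the file name and compact type string below are hypothetical.
\examples{
\dontrun{
spec <- csv_record_spec("records.csv", types = "ddi")
}
}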
| /man/delim_record_spec.Rd | no_license | GRSEB9S/tfdatasets | R | false | true | 2,719 | rd |
calculate_corrected_dissimilatity <- function(point, points) {
dissimilatity = point@dissimilatity
points = calculate_dissimilatity_for_points(point, points)
points = sort_list_by_dissimilatity(points)
# drop the point itself (position 1) and keep its nearest neighbours;
# neighbor_number is assumed to be defined in the calling environment
neighborhood = get_neighborhood(points[2:length(points)], neighbor_number)
# largest dissimilarity within the neighborhood (the list is sorted ascending)
max_dissimilatity = neighborhood[[length(neighborhood)]]@dissimilatity
if (max_dissimilatity > dissimilatity) {
return(max_dissimilatity)
} else {
return(dissimilatity)
}
}
| /calculate_corrected_dissimilatity.R | no_license | kaczmarekkacper/LocalOutlierFactor | R | false | false | 478 | r |
#To determine the salary of the person having 14.5 years of experience in a particular field at Significance level of 0.05.
data<- read.csv(file.choose())
View(data)
attach(data)
names(data)
str(data)
data$YrsExp=as.numeric(data$YrsExp)
data$sal=as.numeric(data$sal)
str(data)
data1<-data[c(-1,-2)]
summary(data1)
#to check missing values
sapply(data1, function(x) sum(is.na(x)))
attach(data1)
library(lmtest)
#x=independent=YrsExp, y= dependent=sal
plot(sal~YrsExp)
plot(data1)
cor(data1)
input<-data1
sal.lm= lm(sal~YrsExp, data= input) #### why data=input, why not data1=input
summary(sal.lm)
#accuracy of model=95.54%. it is overfitting, so we are doing data partitioning
library(caret)
Train<- createDataPartition(data1$sal, p=0.70, list=FALSE)
training<- data1[Train,]
testing<- data1[-Train,]
cor(training)
sal.lm=lm(sal~YrsExp, data= training)
summary(sal.lm)
##### again the accuracy is ~94%, so we try transforming the response variable next.
hist(training$sal)
hist(1/(training$sal))
hist(log(training$sal))
summary(log(training$sal))
sal.lm<- step(lm(log(sal)~.,data=training), direction = "backward")
summary(sal.lm)
#now we can find the model is 91% accurate. and p value <alpha.
#so we are rejecting H0.
library(car)
# vif(sal.lm) # output error: vif() needs at least two predictors, this model has only one
# y = B0 + B1*x
y= 25792.2 + 9450.0*14.5 #(without data partition)
y
# y1 = B0 + B1*x (coefficients of the log-scale model)
y1= 10.481+0.13*14.5
y1 #(after data partition)
y2<-exp(y1)
y2
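# Equivalent check via predict() (sketch): supply the predictor by name and
# back-transform from the log scale
pred_log <- predict(sal.lm, newdata = data.frame(YrsExp = 14.5))
exp(pred_log)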
anova(sal.lm)
#here p value<alpha(0.05), so reject H0. means there is a linear correlation between sal and yrs of exp
plot(sal~YrsExp)
abline(lm(sal~YrsExp), col="red") # abline() needs a fitted model (or intercept/slope), not a bare formula
par(mfrow=c(2,2))
plot(sal.lm)
#conclusion= after 14.5 yrs of exp, salary will be 234685.1 with 0.05 significance level
| /1 simple linear regression dataset salary.R | no_license | Abhipsanayak92/My-Practice-Folder-R | R | false | false | 1,767 | r |
setwd("R:/Business Strategy_Analytics/Corp Market Analyses/Corporate Merge (tix and location data)/Data Files")
library(ggplot2)
library(maps)
#load us map data
all_states <- map_data("state")
#plot all states with ggplot
p <- ggplot()
p <- p + geom_polygon( data=all_states, aes(x=long, y=lat, group = group),colour="white", fill="grey10" )
states <- subset(all_states, region %in% c( "maryland", "virginia", "district of columbia") )
p <- ggplot()
p <- p + geom_polygon( data=states, aes(x=long, y=lat, group = group),colour="white", fill="gray50" )
p
library(xlsx)
mydata<-read.xlsx("Corporate Accts.xlsx", sheetName='CORP ACCTS',stringsAsFactors=FALSE)
colnames(mydata)[colnames(mydata) == 'Lat'] <- 'lat'
colnames(mydata)[colnames(mydata) == 'Lon'] <- 'long'
colnames(mydata)[colnames(mydata) == 'Ticket.Package.Amount'] <- 'PACKAGE_TICKET_AMT'
p <- ggplot()
p <- p + geom_polygon( data=states, aes(x=long, y=lat, group = group),colour="white" )
#COMPLETED MERGE
basic <- p + geom_point( data=mydata, aes(x=long, y=lat), color="coral1") + scale_size(name="Revenue")
Revenue_map <- p + geom_point( data=mydata, aes(x=long, y=lat, size = Revenue), color="coral1") + scale_size(name="Revenue")
TicketPackage_map <- p + geom_point( data=mydata, aes(x=long, y=lat, size = PACKAGE_TICKET_AMT), color="coral1") + scale_size(name="Ticket Package Total")
Employee_map <- p + geom_point( data=mydata, aes(x=long, y=lat, size = Employee.Total), color="coral1") + scale_size(name="Employee Total")
basic
Revenue_map
TicketPackage_map
Employee_map
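# To save the maps to disk (sketch; file name and dimensions are arbitrary):
# ggsave("revenue_map.png", Revenue_map, width = 8, height = 6)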
| /Analysis of Corporate Prospects/Corporate Mapping.R | no_license | garrettpgeorgia/Washington-Nationals-R-Programming | R | false | false | 1,601 | r |
load_libraries <- function(){
library(sp)
library(raster)
library(rgdal)
library(parallel)
library(maptools)
}
set_clusters <- function(n){
num_clusters <<- n
}
# NOTE: this helper masks base::log(); rename it if numeric logarithms are needed
log <- function(...){
arguments <- paste(list(...),collapse= ' ')
print(arguments)
}
load_preproc_raster <- function(raster_name){
# This function returns rasters
raster_path <<- paste('preproc_rasters/',raster_name,sep='')
log("loading",raster_path)
return (raster(raster_path))
}
load_forest_data <- function(){
# loading forest data rasters
log("loading forest data")
fl <<- list.files(pattern=glob2rx("Total*.tif$"))
fl_1 <<- raster(fl[1])
# All the rasters have fl_l's projection
fl.ele <<- load_preproc_raster("ele_raster.tif") # elevation
fl.slp <<- load_preproc_raster("slp_raster.tif") # slope
fl.asp <<- load_preproc_raster("asp_raster.tif") # aspect
fl.sit <<- load_preproc_raster("sit_raster.tif") # sit
log("Making a raster brick")
fl.stack <<- stack(fl)
fl.stack <<- stack(fl.stack, fl.sit)
fl.brick <<- brick(fl.stack)
}
load_cdl <- function(){
log("loading cdl ")
cdl.prj.mask <<- load_preproc_raster("cdl_prj_mask.tif")
}
load_wilderness <- function(){
log("loading wilderness")
wild.prj <<- readOGR("wilderness_proj.shp") # wilderness areas shapefile
}
load_ownership_nps <- function(){
log("loading ownership data")
own.prj <<- load_preproc_raster("own_proj.tif")
# get nps only land from ownership
#own_nps_ex <<- load_preproc_raster("own_nps_ex.tif")
#own_nps_in <<- mask(own.prj,own_nps_ex,inverse=T) # only nps
#writeRaster(own_nps_in,"own_nps_in.tif")
#load_preproc_raster(own_nps_in)
# NOTE: own_nps_in must already exist here -- on a fresh run, uncomment the lines above first
fl.ele.nps_ex <<- mask(fl.ele, own_nps_in, inverse=T) # all fl.ele without nps
#writeRaster(fl.ele.nps_ex,"fl_ele_nps_ex.tif",overwrite=T)
#fl.ele.nps_ex <<- load_preproc_raster("fl_ele_nps_ex.tif")
#own_stack = stack(own_nps_ex,own_nps_in,own.prj)
#fl_ele_stack = stack(fl.ele,fl.ele.nps_ex)
#plot(fl_ele_stack)
#plot(own_stack)
}
# TRUE wherever at least one layer of the pixel is non-zero (NAs treated as 0)
f1 <- function(x){
x[is.na(x)]<-0
sum(x) > 0
}
#fl.ele.wild.cdl.own.masked <<- mask(fl.ele.wild.cdl.own, fl.brick.masked, maskvalue = 0)
#writeRaster(fl.ele.wild.cdl.own.masked,"fl_ele_wild_cdl_own_masked_ex.tif",overwrite=TRUE
trim_data <- function(){
log("trimming data")
# Get information from brick
fl.brick.subset <<- subset(fl.brick,grep("BMSTM",names(fl.brick)))
fl.brick.masked <<- clusterR(fl.brick.subset, calc, args=list(fun=f1))
fl.ele.nps_ex.mask <<- mask(fl.ele.nps_ex,fl.brick.masked, maskvalue = 0)
writeRaster(fl.ele.nps_ex.mask,"fl_ele_nps_ex_mask",overwrite=TRUE)
df.ele <<- rasterToPoints(fl.ele.nps_ex.mask, spatial = T)
}
load_shapefile <- function(){
log("Loading Counties shapefile")
shp <<- readOGR("CA_Counties/CA_Counties_TIGER2016.shp")
log("preparing some raster files for processing")
fhz <<- "fhszl06_132sn/c32fhszl06_1.shp"
fl.fhz <<- spTransform(readOGR(fhz), proj4string(fl.ele))
fl.raster <<- raster(fl.fhz,res=30)
fl.fhz.raster <<- rasterize(fl.fhz, fl.raster, "HAZ_CODE")
}
load_county <- function(county_name){
county_name <<- "Alpine"
# loading the shapefile of the county
log("Extracting",county_name,"shape file")
shp.curr_county <- shp[shp$NAME==county_name,]
log("Transforming the projection")
shp.curr_county.prj <<- spTransform(shp.curr_county, proj4string(fl.ele)) # global: merge_own_wild_df() reads this later
log("Getting county's elevation data")
df.ele.curr_county <<- over(df.ele, shp.curr_county.prj)
df.ele.curr_county.na.omit <<- na.omit(cbind(df.ele@data,df.ele@coords,df.ele.curr_county$NAME))
log("Writing into csv file")
county_dir <- paste("counties/",county_name,sep="")
write.csv(df.ele.curr_county.na.omit, paste(county_dir,"/F3_data_ele_v3.csv",sep = ""))
log("Transforming coordinates")
coordinates(df.ele.curr_county.na.omit) <- ~x+y
proj4string(df.ele.curr_county.na.omit) <- proj4string(fl.ele)
df.data.curr_county <<- extract(fl.brick, df.ele.curr_county.na.omit, sp=T)
log("Writing output into csv files")
fl.fhz.data <<- extract(fl.fhz.raster, df.ele.curr_county.na.omit, df=T)
write.csv(fl.fhz.data, paste(county_dir,"/F3_data_fhz_v3.csv",sep=""))
fl.slp.data <<- extract(fl.slp, df.ele.curr_county.na.omit, df=T)
write.csv(fl.slp.data, paste(county_dir,"/F3_data_slp_v3.csv",sep=""))
fl.asp.data <<- extract(fl.asp, df.ele.curr_county.na.omit, df=T)
write.csv(fl.asp.data, paste(county_dir,"/F3_data_asp_v3.csv",sep=""))
log("Getting the right crs for data frame")
df.data.curr_county.latlon <<- spTransform(df.data.curr_county, CRS("+proj=longlat +datum=WGS84"))
}
Visualization <- function(county_name){
log("Preparing kmeans raster file for",county_name)
county_dir <- paste("counties/",county_name,sep="")
log("Countery directory is",county_dir)
plot_file <- paste("/plot/",county_name,"_all_nps_ex.tif",sep="")
log("Plot file location is",plot_file)
f3_data_file <- paste(county_dir,"/F3_data_kmeans100_v7.csv",sep="")
log("kmeans csv location is",f3_data_file)
plot.kmeans.fhz <- read.csv(f3_data_file)
raster.kmeans100.fhz <- rasterFromXYZ(plot.kmeans.fhz[c(4,5,8)])
proj4string(raster.kmeans100.fhz) <- proj4string(raster(fl[1]))
log("Writing kmeans raster for",county_name,"in to memory.","File name is : ",plot_file)
writeRaster(raster.kmeans100.fhz, paste(county_dir,plot_file,sep=""), overwrite=T)
}
dbSafeNames = function(names) {
names = gsub('[^a-z0-9]+','_',tolower(names))
names = make.names(names, unique=TRUE, allow_=TRUE)
names = gsub('.','_',names, fixed=TRUE)
names
}
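# e.g. dbSafeNames(c("Wilderness Areas", "HAZ-CODE")) returns c("wilderness_areas", "haz_code")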
Output <- function(county_name){
log("Preparing kmeans data frame for county,",county_name)
county_dir <<- paste("counties/",county_name,sep="")
# reading kmeans csv
test.kmeans100 <- read.csv(paste(county_dir,"/F3_data_kmeans100_v7.csv",sep=""))
test.kmeans100.filt <- test.kmeans100[,c('Cluster1','Cluster2')]
test.kmeans100.filt<-spCbind(df.data.curr_county.latlon,test.kmeans100.filt) #Error
# convert into data frame
test.kmeans100.filt.df <<- as.data.frame(test.kmeans100.filt)
test.kmeans100.filt.df[is.na(test.kmeans100.filt.df)]<-0
colnames(test.kmeans100.filt.df)<-dbSafeNames(colnames(test.kmeans100.filt.df))
test.kmeans100.filt.df$cluster_no <- test.kmeans100.filt.df$cluster1 * 2500 + test.kmeans100.filt.df$cluster2
headers <<- paste0(colnames(test.kmeans100.filt.df), " ", "REAL", " ", "NOT NULL")
writeLines(headers, "table_headers.txt")
write.csv(test.kmeans100.filt.df, paste(county_dir,"/",county_name,"_all_nps_ex.csv",sep=""), row.names = F)
merge_own_wild_df(county_name=county_name,
test.kmeans100.filt=test.kmeans100.filt,
test.kmeans100.filt.df=test.kmeans100.filt.df)
}
merge_own_wild_df <- function(county_name,test.kmeans100.filt,test.kmeans100.filt.df){
log("Merging ownership and wilderness data for",county_name)
county_dir <<- paste("counties/",county_name,sep="")
log("Getting ownership data")
own_cdl <<- load_preproc_raster("fl_ele_own_cdl.tif")
proj4string(own_cdl) = proj4string(own.prj)
log("Getting wilderness data")
wild_cdl = load_preproc_raster("fl_ele_wild_cdl.tif")
proj4string(wild_cdl) = proj4string(own.prj)
# making use of existing dataframe
helper <- raster(test.kmeans100.filt)
# changing the projection of shapefile
shp.curr_county.prj2 <- spTransform(shp.curr_county.prj, proj4string(helper))
# cropping wilderness according to common extent & matching it's projection
wild_kmeans_prj = projectRaster(wild_cdl,helper)
wild_cropped <<- crop(wild_kmeans_prj, extent(shp.curr_county.prj2))
own_kmeans_prj = projectRaster(own_cdl,helper)
own_cropped <<- crop(own_kmeans_prj,extent(shp.curr_county.prj2))
# stacking cropped ownership and wilderness rasters
com_st = stack(wild_cropped,own_cropped)
# Giving the stack layers meaningfull names
names(com_st) = c("Wilderness_Areas","Ownership_Areas")
# plot(com_st)
# converting raster stack to spatial data frame
com_st_df = rasterToPoints(com_st,spatial=T)
# converting raster stack to normal data frame
com_st_normal = as.data.frame(com_st_df)
# merging
log("Merging")
merged_df = merge(x=test.kmeans100.filt.df,y=com_st_normal,by=c("x","y"),all.x=TRUE,all.y=TRUE)
# writing the merged data frame into memory
log("Writing merged data frame into memory")
write.csv(merged_df, paste(county_dir,"/",county_name,"_merged_all_nps_ex.csv",sep=""), row.names = F)
}
load_data <- function(){
log("Starting")
# loading data
load_forest_data()
load_cdl()
load_wilderness()
load_ownership_nps()
# preparing data
trim_data()
load_shapefile() # for all counties
}
save_results <- function(county_name){
Visualization(county_name = county_name)
Output(county_name = county_name)
}
#county_names <<- c("Alpine","Amador","Butte","Calaveras","El Dorado","Fresno","Inyo","Kern","Madera","Mariposa",
# "Mono","Nevada","Placer","Plumas","Sierra","Tulare","Tuolumne","Yuba")
# load libraries
setup <- function(){
load_libraries()
beginCluster(num_clusters)
rasterOptions(maxmemory = 1e+09) # comment this out if the machine is not powerful enough
setwd("/gpfs/data1/cmongp/ujjwal/cec/Forest Data/") # sets the directory path
}
# loads libraries and sets clusters
set_clusters(35) # change num_clusters here
setup()
load_data()
# data prep
county_name <<- 'Plumas'
load_county(county_name)
save_results(county_name)
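# Batch sketch (inert by default; flip FALSE to TRUE to run): processes every
# county from the commented list above, assuming each fits in cluster memory.
if (FALSE) {
county_names <- c("Alpine","Amador","Butte","Calaveras","El Dorado","Fresno",
"Inyo","Kern","Madera","Mariposa","Mono","Nevada","Placer",
"Plumas","Sierra","Tulare","Tuolumne","Yuba")
for (cn in county_names) {
load_county(cn)
save_results(cn)
}
}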
endCluster()
| /cec_script_ver_ex_nps.R | no_license | Dr-Varaprasad-Bandaru-Research-Group/cec_dss | R | false | false | 9,654 | r |
suppressPackageStartupMessages(library(magrittr))
suppressPackageStartupMessages(library(utils))
suppressPackageStartupMessages(library(AnnotationDbi))
suppressPackageStartupMessages(library(DOSE))
suppressPackageStartupMessages(library(GOSemSim))
get_KEGG_Env <- function() {
if (! exists(".KEGG_clusterProfiler_Env", envir = .GlobalEnv)) {
pos <- 1
envir <- as.environment(pos)
assign(".KEGG_clusterProfiler_Env", new.env(), envir=envir)
}
get(".KEGG_clusterProfiler_Env", envir = .GlobalEnv)
}
download_KEGG <- function(species, keggType="KEGG", keyType="kegg") {
KEGG_Env <- get_KEGG_Env()
use_cached <- FALSE
if (exists("organism", envir = KEGG_Env, inherits = FALSE) &&
exists("_type_", envir = KEGG_Env, inherits = FALSE) ) {
org <- get("organism", envir=KEGG_Env)
type <- get("_type_", envir=KEGG_Env)
if (org == species && type == keggType &&
exists("KEGGPATHID2NAME", envir=KEGG_Env, inherits = FALSE) &&
exists("KEGGPATHID2EXTID", envir=KEGG_Env, inherits = FALSE)) {
use_cached <- TRUE
}
}
if (use_cached) {
KEGGPATHID2EXTID <- get("KEGGPATHID2EXTID", envir=KEGG_Env)
KEGGPATHID2NAME <- get("KEGGPATHID2NAME", envir=KEGG_Env)
} else {
if (keggType == "KEGG") {
kres <- download.KEGG.Path(species)
} else {
kres <- download.KEGG.Module(species)
}
KEGGPATHID2EXTID <- kres$KEGGPATHID2EXTID
KEGGPATHID2NAME <- kres$KEGGPATHID2NAME
assign("organism", species, envir=KEGG_Env)
assign("_type_", keggType, envir=KEGG_Env)
assign("KEGGPATHID2NAME", KEGGPATHID2NAME, envir=KEGG_Env)
assign("KEGGPATHID2EXTID", KEGGPATHID2EXTID, envir=KEGG_Env)
}
if (keyType != "kegg") {
need_idconv <- FALSE
idconv <- NULL
if (use_cached &&
exists("key", envir=KEGG_Env, inherits = FALSE) &&
exists("idconv", envir=KEGG_Env, inherits = FALSE)) {
key <- get("key", envir=KEGG_Env)
if (key == keyType) {
idconv <- get("idconv", envir=KEGG_Env)
} else {
need_idconv <- TRUE
}
} else {
need_idconv <- TRUE
}
if (need_idconv || is.null(idconv)) {
idconv <- KEGG_convert("kegg", keyType, species)
assign("key", keyType, envir=KEGG_Env)
assign("idconv", idconv, envir=KEGG_Env)
}
colnames(KEGGPATHID2EXTID) <- c("from", "kegg")
KEGGPATHID2EXTID <- merge(KEGGPATHID2EXTID, idconv, by.x='kegg', by.y='from')
KEGGPATHID2EXTID <- unique(KEGGPATHID2EXTID[, -1])
}
return(list(KEGGPATHID2EXTID = KEGGPATHID2EXTID,
KEGGPATHID2NAME = KEGGPATHID2NAME))
}
prepare_KEGG <- function(species, KEGG_Type="KEGG", keyType="kegg") {
kegg <- download_KEGG(species, KEGG_Type, keyType)
build_Anno(kegg$KEGGPATHID2EXTID,
kegg$KEGGPATHID2NAME)
}
download.KEGG.Path <- function(species) {
keggpathid2extid.df <- kegg_link(species, "pathway")
if (is.null(keggpathid2extid.df))
stop("'species' should be one of organisms listed in 'http://www.genome.jp/kegg/catalog/org_list.html'...")
keggpathid2extid.df[,1] %<>% gsub("[^:]+:", "", .)
keggpathid2extid.df[,2] %<>% gsub("[^:]+:", "", .)
keggpathid2name.df <- kegg_list("pathway")
keggpathid2name.df[,1] %<>% gsub("path:map", species, .)
## if 'species="ko"', ko and map path are duplicated, only keep ko path.
##
## http://www.kegg.jp/dbget-bin/www_bget?ko+ko00010
## http://www.kegg.jp/dbget-bin/www_bget?ko+map0001
##
keggpathid2extid.df <- keggpathid2extid.df[keggpathid2extid.df[,1] %in% keggpathid2name.df[,1],]
return(list(KEGGPATHID2EXTID=keggpathid2extid.df,
KEGGPATHID2NAME=keggpathid2name.df))
}
download.KEGG.Module <- function(species) {
keggmodule2extid.df <- kegg_link(species, "module")
if (is.null(keggmodule2extid.df)) {
stop("'species' should be one of organisms listed in 'http://www.genome.jp/kegg/catalog/org_list.html'...")
}
keggmodule2extid.df[,1] %<>% gsub("[^:]+:", "", .) %>% gsub(species, "", .) %>% gsub("^_", "", .)
keggmodule2extid.df[,2] %<>% gsub("[^:]+:", "", .)
keggmodule2name.df <- kegg_list("module")
keggmodule2name.df[,1] %<>% gsub("md:", "", .)
return(list(KEGGPATHID2EXTID=keggmodule2extid.df,
KEGGPATHID2NAME =keggmodule2name.df))
}
browseKEGG <- function(x, pathID) {
url <- paste0("http://www.kegg.jp/kegg-bin/show_pathway?", pathID, '/', x[pathID, "geneID"])
browseURL(url)
invisible(url)
}
search_kegg_organism <- function(str, by="scientific_name", ignore.case=FALSE) {
by <- match.arg(by, c("kegg_code", "scientific_name", "common_name"))
kegg_species <- kegg_species_data()
idx <- grep(str, kegg_species[, by], ignore.case = ignore.case)
kegg_species[idx,]
}
kegg_species_data <- function() {
utils::data(list="kegg_species", package="clusterProfiler")
get("kegg_species", envir = .GlobalEnv)
}
get_kegg_species <- function() {
pkg <- "XML"
requireNamespace(pkg)
readHTMLTable <- eval(parse(text="XML::readHTMLTable"))
x <- readHTMLTable("http://www.genome.jp/kegg/catalog/org_list.html")
y <- get_species_name(x[[2]], "Eukaryotes")
y2 <- get_species_name(x[[3]], 'Prokaryotes')
sci_name <- gsub(" \\(.*$", '', y[,2])
com_name <- gsub("[^\\(]+ \\(([^\\)]+)\\)$", '\\1', y[,2])
eu <- data.frame(kegg_code=unlist(y[,1]),
scientific_name = sci_name,
common_name = com_name,
stringsAsFactors = FALSE)
pr <- data.frame(kegg_code=unlist(y2[,1]),
scientific_name = unlist(y2[,2]),
common_name = NA,
stringsAsFactors = FALSE)
kegg_species <- rbind(eu, pr)
save(kegg_species, file="kegg_species.rda")
invisible(kegg_species)
}
get_species_name <- function(y, table) {
idx <- get_species_name_idx(y, table)
t(sapply(1:nrow(idx), function(i) {
y[] = lapply(y, as.character)
y[i, idx[i,]]
}))
}
get_species_name_idx <- function(y, table='Eukaryotes') {
table <- match.arg(table, c("Eukaryotes", "Prokaryotes"))
t(apply(y, 1, function(x) {
ii <- which(!is.na(x))
n <- length(ii)
if (table == "Eukaryotes") {
return(ii[(n-2):(n-1)])
} else {
return(ii[(n-3):(n-2)])
}
}))
}
kegg_rest <- function(rest_url) {
content <- tryCatch(suppressWarnings(readLines(rest_url)), error=function(e) NULL)
if (is.null(content))
return(content)
content %<>% strsplit(., "\t") %>% do.call('rbind', .)
res <- data.frame(from=content[,1],
to=content[,2])
return(res)
}
## http://www.genome.jp/kegg/rest/keggapi.html
## kegg_link('hsa', 'pathway')
kegg_link <- function(target_db, source_db) {
url <- paste0("http://rest.kegg.jp/link/", target_db, "/", source_db, collapse="")
kegg_rest(url)
}
kegg_list <- function(db) {
url <- paste0("http://rest.kegg.jp/list/", db, collapse="")
kegg_rest(url)
}
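## e.g. kegg_list("pathway") returns a two-column data.frame (from = pathway IDs, to = pathway names)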
ko2name <- function(ko) {
p <- kegg_list('pathway')
ko2 <- gsub("^ko", "path:map", ko)
ko.df <- data.frame(ko=ko, from=ko2)
res <- merge(ko.df, p, by = 'from', all.x=TRUE)
res <- res[, c("ko", "to")]
colnames(res) <- c("ko", "name")
return(res)
}
idType <- function(OrgDb = "org.Hs.eg.db") {
db <- load_OrgDb(OrgDb)
keytypes(db)
}
bitr <- function(geneID, fromType, toType, OrgDb, drop=TRUE) {
idTypes <- idType(OrgDb)
msg <- paste0("should be one of ", paste(idTypes, collapse=", "), ".")
if (! fromType %in% idTypes) {
stop("'fromType' ", msg)
}
if (! all(toType %in% idTypes)) {
stop("'toType' ", msg)
}
geneID %<>% as.character %>% unique
db <- load_OrgDb(OrgDb)
res <- suppressWarnings(select(db,
keys = geneID,
keytype = fromType,
columns=c(fromType, toType)))
ii <- which(is.na(res[,2]))
if (length(ii)) {
n <- res[ii, 1] %>% unique %>% length
if (n) {
            warning(paste0(round(n/length(geneID)*100, 2), "%"), " of input gene IDs failed to map...")
}
if (drop) {
res <- res[-ii, ]
}
}
return(res)
}
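## Usage sketch (assumes org.Hs.eg.db is installed; the gene symbols are
## illustrative):
## bitr(c("TP53", "BRCA1"), fromType = "SYMBOL",
##      toType = c("ENTREZID", "ENSEMBL"), OrgDb = "org.Hs.eg.db")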
bitr_kegg <- function(geneID, fromType, toType, organism, drop=TRUE) {
id_types <- c("Path", "Module", "ncbi-proteinid", "ncbi-geneid", "uniprot", "kegg")
fromType <- match.arg(fromType, id_types)
toType <- match.arg(toType, id_types)
if (fromType == toType)
stop("fromType and toType should not be identical...")
if (fromType == "Path" || fromType == "Module") {
idconv <- KEGG_path2extid(geneID, organism, fromType, toType)
} else if (toType == "Path" || toType == "Module") {
idconv <- KEGG_extid2path(geneID, organism, toType, fromType)
} else {
idconv <- KEGG_convert(fromType, toType, organism)
}
res <- idconv[idconv[,1] %in% geneID, ]
n <- sum(!geneID %in% res[,1])
if (n > 0) {
        warning(paste0(round(n/length(geneID)*100, 2), "%"), " of input gene IDs failed to map...")
}
if (! drop && n > 0) {
misHit <- data.frame(from = geneID[!geneID %in% res[,1]],
to = NA)
res <- rbind(res, misHit)
}
colnames(res) <- c(fromType, toType)
rownames(res) <- NULL
return(res)
}
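## Usage sketch (assumes a live KEGG REST connection; "672" is the human
## Entrez/KEGG ID of BRCA1):
## bitr_kegg("672", fromType = "kegg", toType = "uniprot", organism = "hsa")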
KEGG_convert <- function(fromType, toType, species) {
if (fromType == "kegg" || toType != "kegg") {
turl <- paste("http://rest.kegg.jp/conv", toType, species, sep='/')
tidconv <- kegg_rest(turl)
if (is.null(tidconv))
stop(toType, " is not supported for ", species, " ...")
idconv <- tidconv
}
if (toType == "kegg" || fromType != "kegg") {
furl <- paste("http://rest.kegg.jp/conv", fromType, species, sep='/')
fidconv <- kegg_rest(furl)
if (is.null(fidconv))
stop(fromType, " is not supported for ", species, " ...")
idconv <- fidconv
}
if (fromType != "kegg" && toType != "kegg") {
idconv <- merge(fidconv, tidconv, by.x='from', by.y='from')
idconv <- idconv[, -1]
} else if (fromType != "kegg") {
idconv <- idconv[, c(2,1)]
}
colnames(idconv) <- c("from", "to")
idconv[,1] %<>% gsub("[^:]+:", "", .)
idconv[,2] %<>% gsub("[^:]+:", "", .)
return(idconv)
}
KEGG_path2extid <- function(keggID, species=sub("\\d+$", "", keggID),
keggType = "Path", keyType = "kegg") {
path2extid <- KEGGPATHID2EXTID(species, keggType, keyType)
path2extid[path2extid$from %in% keggID, ]
}
KEGG_extid2path <- function(geneID, species, keggType = "Path", keyType = "kegg") {
path2extid <- KEGGPATHID2EXTID(species, keggType, keyType)
res <- path2extid[path2extid$to %in% geneID, ]
res <- res[, c(2,1)]
colnames(res) <- colnames(path2extid)
return(res)
}
KEGGPATHID2EXTID <- function(species, keggType = "Path", keyType = "kegg") {
keggType <- match.arg(keggType, c("Path", "Module"))
if (keggType == "Path") {
keggType <- "KEGG"
} else {
keggType <- "MKEGG"
}
kegg <- download_KEGG(species, keggType, keyType)
return(kegg$KEGGPATHID2EXTID)
}
build_Anno <- DOSE:::build_Anno
get_organism <- DOSE:::get_organism
# KEGG_DATA <- prepare_KEGG(species = 'mmu', keyType = 'kegg')
| /RTrans_main_scripts/kegg.clusterProfiler.R | no_license | gskrasnov/RTrans | R | false | false | 11,123 | r |
\name{Communities}
\alias{Communities}
\title{
Search and visualize community-structures
}
\description{
Function that searches for and visualizes community-structures in graphs.
}
\usage{
Communities(P, graph = TRUE, lay = "layout_with_fr", coords = NULL,
Vsize = 15, Vcex = 1, Vcolor = "orangered",
VBcolor = "darkred", VLcolor = "black", main = "")
}
\arguments{
\item{P}{
Sparsified precision \code{matrix}
}
\item{graph}{
A \code{logical} indicating if the results should be visualized.
}
\item{lay}{
A \code{character} mimicking a call to \code{\link{igraph}} layout functions.
Determines the placement of vertices.
}
\item{coords}{
A \code{matrix} containing coordinates.
Alternative to the lay-argument for determining the placement of vertices.
}
\item{Vsize}{
A \code{numeric} determining the vertex size.
}
\item{Vcex}{
A \code{numeric} determining the size of the vertex labels.
}
\item{Vcolor}{
A \code{character} (scalar or vector) determining the vertex color.
}
\item{VBcolor}{
A \code{character} determining the color of the vertex border.
}
\item{VLcolor}{
A \code{character} determining the color of the vertex labels.
}
\item{main}{
A \code{character} giving the main figure title.
}
}
\details{
Communities in a network are groups of vertices (modules) that are densely connected within.
Community search is performed by the Girvan-Newman algorithm (Newman and Girvan, 2004).
When \code{graph = TRUE} the community structure in the graph is visualized.
The default layout is according to the Fruchterman-Reingold algorithm (1991).
Most layout functions supported by \code{\link{igraph}} are supported (the function is partly a wrapper around certain \code{\link{igraph}} functions).
The igraph layouts can be invoked by a \code{character} that mimics a call to an \code{\link{igraph}} layout function in the \code{lay} argument.
When using \code{lay = NULL} one can specify the placement of vertices with the \code{coords} argument.
The row dimension of this matrix should equal the number of vertices.
The column dimension then should equal 2 (for 2D layouts) or 3 (for 3D layouts).
The \code{coords} argument can also be viewed as a convenience argument as it enables one, e.g., to lay out a graph
according to the coordinates of a previous call to \code{Ugraph}.
If both the lay and the coords arguments are not \code{NULL}, the lay argument takes precedence.
Communities are indicated by color markings.
}
\value{
An object of class list:
\item{membership}{\code{numeric} vector indicating, for each vertex, community membership.}
\item{modularityscore}{\code{numeric} scalar indicating the modularity value of the community structure.}
When \code{graph = TRUE} the function also returns a graph.
}
\references{
Csardi, G. and Nepusz, T. (2006).
The igraph software package for complex network research.
InterJournal, Complex Systems 1695.
http://igraph.sf.net
Fruchterman, T.M.J., and Reingold, E.M. (1991).
Graph Drawing by Force-Directed Placement.
Software: Practice & Experience, 21: 1129-1164.
Newman, M. and Girvan, M. (2004).
Finding and evaluating community structure in networks.
Physical Review E, 69: 026113.
}
\author{
Carel F.W. Peeters <cf.peeters@vumc.nl>
}
\seealso{
\code{\link{Ugraph}}
}
\examples{
## Obtain some (high-dimensional) data
p = 25
n = 10
set.seed(333)
X = matrix(rnorm(n*p), nrow = n, ncol = p)
colnames(X)[1:25] = letters[1:25]
## Obtain regularized precision under optimal penalty
OPT <- optPenalty.LOOCV(X, lambdaMin = .5, lambdaMax = 30, step = 100)
## Determine support regularized standardized precision under optimal penalty
PC0 <- sparsify(symm(OPT$optPrec), threshold = "localFDR")$sparseParCor
## Search and visualize communities
Commy <- Communities(PC0)
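## Inspect the returned list (see the Value section above); sketch:
## Commy$membership      - community assignment of each vertex
## Commy$modularityscore - modularity of the detected partition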
}
| /man/Communities.Rd | no_license | AEBilgrau/rags2ridges | R | false | false | 3,781 | rd |
#' Extract content from a Gavagai tonality object
#'
#' @param x a \code{gavagai_tonality} object.
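#' @examples
#' \dontrun{
#' ## Hypothetical usage sketch: 'tono' is assumed to be a "gavagai_tonality"
#' ## object obtained elsewhere from the Gavagai API wrappers.
#' tonality_document(tono)
#' tonality_sentence(tono)
#' tonality_ngram(tono)
#' }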
#' @export
tonality_document <- function(x){
checkmate::assert_class(x, "gavagai_tonality")
dplyr::bind_rows(lapply(x$texts, extract_tonality_score_by_doc))
}
#' @rdname tonality_document
#' @export
tonality_sentence <- function(x){
checkmate::assert_class(x, "gavagai_tonality")
dplyr::bind_rows(lapply(x$texts, extract_tonality_score_by_sentence))
}
#' @rdname tonality_document
#' @export
tonality_ngram <- function(x){
checkmate::assert_class(x, "gavagai_tonality")
dplyr::bind_rows(lapply(x$texts, extract_tonality_ngrams))
}
| /RPackage/R/extract_tonality.R | no_license | MansMeg/gavagair | R | false | false | 647 | r | #' Extract content from a Gavagai tonality object
#'
#' @param x a \code{gavagai_tonality} object.
#' @export
tonality_document <- function(x){
checkmate::assert_class(x, "gavagai_tonality")
dplyr::bind_rows(lapply(x$texts, extract_tonality_score_by_doc))
}
#' @rdname tonality_document
#' @export
tonality_sentence <- function(x){
checkmate::assert_class(x, "gavagai_tonality")
dplyr::bind_rows(lapply(x$texts, extract_tonality_score_by_sentence))
}
#' @rdname tonality_document
#' @export
tonality_ngram <- function(x){
checkmate::assert_class(x, "gavagai_tonality")
dplyr::bind_rows(lapply(x$texts, extract_tonality_ngrams))
}
|
\name{draw_title-HeatmapList-method}
\alias{draw_title,HeatmapList-method}
\title{
Draw heatmap list title
}
\description{
Draw heatmap list title
}
\usage{
\S4method{draw_title}{HeatmapList}(object,
which = c("column", "row"))}
\arguments{
\item{object}{a \code{\link{HeatmapList-class}} object}
\item{which}{whether the title is put on the row or on the column of the heatmap}
}
\details{
A viewport is created which contains heatmap list title.
This function is only for internal use.
}
\value{
This function returns no value.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
# no example for this internal method
NULL
}
| /man/draw_title-HeatmapList-method.rd | no_license | Yixf-Self/ComplexHeatmap | R | false | false | 620 | rd |
#' Convert lengths
#'
#' Convert lengths from character vector, containing values like "3/4" and "hd"
#' into a numeric vector.
#'
#' @details This function makes some assumptions, so may need tweaking over time.
#' For each string in a vector it is first split up by the spaces in it, so "1 3/4"
#' is split into "1" "3/4", if there are no spaces then it assesses the single
#' number/letters against a list of common length margins. If the split vector
#' has length 2 then it assumes the first element is a whole number, and the
#' second is a fraction (like "3/4"), and it calculates the total margin. There
#' are no doubt edge cases that this function doesn't cover, which will cause
#' errors; the function was harder to write than initially thought and will
#' likely need improvements as edge cases are encountered.
#'
#' @param lengths character vector of lengths
#'
#' @export
#' @examples
#' \dontrun{
#' lengths <- c("0", "nse", "hd", "3/4", "1 1/2")
#' conv_len(lengths = lengths)
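#' ## expected, given the margin table in helper_len (sketch):
#' ## c(0, 0.02, 0.1, 0.75, 1.5)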
#' }
conv_len <- function(lengths) {
lens <- sapply(lengths, function(x) {
helper_len(x)
})
lens <- as.vector(lens)
return(lens)
}
#' Helper function for conv_len
#'
#' @param length character string representing a margin in lengths
helper_len <- function(length) {
lenlist <- list("0" = 0, "dh" = 0, "nse" = 0.02, "shd" = 0.05, "sh" = 0.05,
"hd" = 0.1, "nk" = 0.2, "1/4" = 0.25, "1/2" = 0.5, "3/4" = 0.75)
if(nchar(length) == 0) {
len <- 0
return(len)
}
if(grepl(pattern = "[[:alpha:]]", length)) {
len <- tolower(length)
len <- lenlist[[len]]
return(len)
}
if(grepl(pattern = "[[:digit:]]/[[:digit:]]", x = length)) {
if(grepl(pattern = "\\s+|-", length)) {
x <- unlist(strsplit(x = length, split = "\\s+|-"))
frac <- x[2]
frac <- lenlist[[frac]]
len <- as.numeric(x[1]) + frac
return(len)
} else {
len <- lenlist[[length]]
return(len)
}
}
if(grepl(pattern = "[[:digit:]]+(\\.[[:digit:]]+)?", x = length)) {
len <- as.numeric(length)
return(len)
}
len <- NA
return(len)
}
| /R/conv_lengths.R | no_license | seqva/RcappeR | R | false | false | 2,231 | r |
## Measurements at thread level
# Assumed dependencies: the pipelines below use dplyr/tidyr/ggplot2 (tidyverse)
# and binom::binom.confint().
library(tidyverse)
library(binom)
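# Throughout, "group_by(Id) ... filter(row_number()==1)" keeps one row per
# participant: the record with the most previous contributions.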
pop_discipline = data_orig0 %>%
group_by(Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1) %>%
ungroup() %>%
group_by(Discipline) %>%
summarize(DisciplinePopSize = n(),
FractionFemale = sum(Female,na.rm=TRUE)/sum(Male + Female, na.rm=TRUE)) %>%
mutate(DisciplinePopSize = DisciplinePopSize/sum(DisciplinePopSize))
# Counting participants by discipline in each thread
thread_pop = data_orig0 %>%
group_by(ThreadId,Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1) %>%
ungroup() %>%
group_by(ThreadId,Discipline) %>%
summarize(n=n()) %>%
left_join(pop_discipline, by='Discipline')
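# A discipline is flagged over-represented in a thread when the lower bound of
# the Wilson score interval for its within-thread share exceeds its population
# share, and under-represented when the upper bound falls below it.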
thread_overrep = thread_pop %>%
group_by(ThreadId) %>%
mutate(OverRep = binom.confint(n,sum(n),methods='wilson')$lower > DisciplinePopSize,
Discipline_OverRep = paste0('OverRep_',Discipline)) %>%
select(-n,-DisciplinePopSize,-Discipline) %>%
spread(Discipline_OverRep,OverRep,fill=0) %>%
ungroup()
thread_underrep = thread_pop %>%
group_by(ThreadId) %>%
mutate(UnderRep = binom.confint(n,sum(n),methods='wilson')$upper < DisciplinePopSize,
Discipline_UnderRep = paste0('UnderRep_',Discipline)) %>%
select(-n,-DisciplinePopSize,-Discipline) %>%
spread(Discipline_UnderRep,UnderRep,fill=0) %>%
ungroup()
thread_discipline = thread_overrep %>%
left_join(thread_underrep, by='ThreadId')
# Counting participants by job in each thread
thread_job = data_orig0 %>%
group_by(ThreadId,Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1) %>%
ungroup() %>%
group_by(ThreadId,Job_Title_S) %>%
summarize(n=n()) %>%
mutate(Job_Title_S = paste0('Thread_Job_',Job_Title_S)) %>%
spread(Job_Title_S,n,fill=0)
# Summary stats for text of each thread
thread_text = data_orig0 %>%
group_by(ThreadId) %>%
summarise_at(.vars = 69:150,
.funs = c(Min="min",Max="max",Mean="mean"))
names(thread_text) = paste0('Thread_Text_',names(thread_text))
# Counting other participant metrics by thread
thread_other = data_orig0 %>%
group_by(ThreadId,Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1) %>%
ungroup() %>%
group_by(ThreadId) %>%
summarize(Thread_Academic = sum(Academic=='1', na.rm=TRUE),
Thread_FemaleAcademic = sum((Academic=='1')*Female, na.rm=TRUE),
Thread_Citations = sum(Total_Citations, na.rm=TRUE),
Thread_FemaleCitations = sum(Total_Citations*Female, na.rm=TRUE),
Thread_Contributions = sum(ContributionsThisYear+PreviousContributions, na.rm=TRUE),
Thread_FemaleContributions = sum((ContributionsThisYear+PreviousContributions)*Female, na.rm=TRUE),
Thread_Threads = sum((ThreadsThisYear+PreviousThreads), na.rm=TRUE),
Thread_FemaleThreads = sum((ThreadsThisYear+PreviousThreads)*Female, na.rm=TRUE))
# Join thread data back to main table
# data_thread = data_orig0 %>%
# left_join(thread_discipline,by='ThreadId') %>%
# left_join(thread_job, by= 'ThreadId') %>%
# left_join(thread_other, by= 'ThreadId') %>%
# mutate_at(.cols = vars(starts_with('Thread_')),
# .funs = funs(./UniqueContributors)) %>%
# left_join(thread_text, by=c('ThreadId'='Thread_Text_ThreadId'))
data_thread = data_orig0 %>%
left_join(thread_discipline,by='ThreadId') %>%
left_join(thread_job, by= 'ThreadId') %>%
left_join(thread_other, by= 'ThreadId') %>%
left_join(thread_text, by=c('ThreadId'='Thread_Text_ThreadId'))
## Measurements at participant population level
uniquify_contrib = data_orig0 %>%
group_by(Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1)
ggplot(uniquify_contrib, aes(reorder(Discipline, Male, sum))) +
geom_bar() +
xlab('Discipline') +
theme(axis.text.x = element_text(angle = 45, hjust=1))
ggplot(uniquify_contrib, aes(reorder(Job_Title_S, Male, sum))) +
geom_bar() +
xlab('Job Title') +
theme(axis.text.x = element_text(angle = 45, hjust=1))
uniquify_thread = data_orig0 %>%
group_by(ThreadId) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1)
ggplot(uniquify_thread, aes(Year)) + geom_bar()
ggplot(uniquify_thread, aes(DebateSize)) +
geom_histogram(bins = 25)+
scale_y_log10() +
scale_x_log10()
pop_discipline_conf_data = data_orig0 %>%
group_by(Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1) %>%
ungroup() %>%
group_by(Discipline) %>%
summarize(DisciplinePopSize = sum(Male + Female, na.rm=TRUE),
DisciplineFemSize = sum(Female,na.rm=TRUE)) %>%
mutate(DisciplinePopFrac = DisciplinePopSize/sum(DisciplinePopSize))
pop_discipline_conf0 = binom.confint(pop_discipline_conf_data$DisciplineFemSize,pop_discipline_conf_data$DisciplinePopSize,methods='wilson')[4:6]
pop_discipline_conf = cbind(as.character(pop_discipline_conf_data$Discipline),pop_discipline_conf_data$DisciplinePopFrac, pop_discipline_conf0)
names(pop_discipline_conf)[1]='Discipline'
names(pop_discipline_conf)[2]='DisciplinePopFrac'
ggplot(pop_discipline_conf, aes(reorder(Discipline,mean),group=1)) +
geom_bar(aes(y=DisciplinePopFrac),stat='identity') +
geom_line(aes(y=mean)) +
geom_errorbar(aes(ymin = lower, ymax = upper)) +
xlab('Discipine') +
scale_y_continuous("Discipline Community Size",
sec.axis = sec_axis(~ ., name = "Fraction Female")) +
theme(axis.text.x = element_text(angle = 45, hjust=1))
# Counting participants by job overall
pop_job = data_orig0 %>%
group_by(Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1) %>%
ungroup() %>%
mutate(Job_Title_S = ifelse(grepl('Entrepreneur|Management|Founder',Job_Title_S),'Management',
ifelse(grepl('Postdoctoral|Student', Job_Title_S),'Trainee',
ifelse(grepl('Other|Not Available', Job_Title_S) | Job_Title_S=='','Other',
Job_Title_S)))) %>%
group_by(Job_Title_S) %>%
summarize(JobPopSize = n(),
FractionFemale = sum(Female,na.rm=TRUE)/sum(Male + Female, na.rm=TRUE)) %>%
mutate(JobPopSize = JobPopSize/sum(JobPopSize))
ggplot(pop_job, aes(reorder(Job_Title_S,FractionFemale),group=1)) +
geom_bar(aes(y=JobPopSize),stat='identity') +
geom_line(aes(y=FractionFemale)) +
xlab('Job Title') +
scale_y_continuous("Fraction with This Job",
sec.axis = sec_axis(~ ., name = "Fraction Female")) +
theme(axis.text.x = element_text(angle = 45, hjust=1))
pop_job_conf_data = data_orig0 %>%
group_by(Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1) %>%
ungroup() %>%
mutate(Job_Title_S = ifelse(grepl('Entrepreneur|Management|Founder',Job_Title_S),'Management',
ifelse(grepl('Postdoctoral|Student', Job_Title_S),'Trainee',
ifelse(grepl('Other|Not Available', Job_Title_S) | Job_Title_S=='','Other',
Job_Title_S)))) %>%
group_by(Job_Title_S) %>%
summarize(JobPopSize = sum(Male + Female, na.rm=TRUE),
JobFemSize = sum(Female,na.rm=TRUE)) %>%
mutate(JobPopFrac = JobPopSize/sum(JobPopSize))
pop_job_conf0 = binom.confint(pop_job_conf_data$JobFemSize,pop_job_conf_data$JobPopSize,methods='wilson')[4:6]
pop_job_conf = cbind(as.character(pop_job_conf_data$Job_Title_S),pop_job_conf_data$JobPopFrac, pop_job_conf0)
names(pop_job_conf)[1]='JobTitle'
names(pop_job_conf)[2]='JobPopFrac'
ggplot(pop_job_conf, aes(reorder(JobTitle,mean),group=1)) +
geom_bar(aes(y=JobPopFrac),stat='identity') +
geom_line(aes(y=mean)) +
geom_errorbar(aes(ymin = lower, ymax = upper)) +
xlab('Job Title') +
scale_y_continuous("Fraction of All Users with This Job",
sec.axis = sec_axis(~ ., name = "Fraction Female")) +
theme(axis.text.x = element_text(angle = 45, hjust=1))
ggplot(data_orig0, aes(Year,FemaleParticipation)) +
stat_summary_bin()
gender_uniquecontrib = data_orig0 %>%
group_by(ThreadId,Id) %>%
arrange(desc(PreviousContributions)) %>%
filter(row_number()==1) %>%
ungroup() %>%
mutate(Job_Title_S = ifelse(grepl('Entrepreneur|Management|Founder',Job_Title_S),'Management',
ifelse(grepl('Postdoctoral|Student', Job_Title_S),'Trainee',
ifelse(grepl('Other|Not Available', Job_Title_S) | Job_Title_S=='','Other',
Job_Title_S)))) %>%
group_by(Id) %>%
summarize(Female = mean(Female),
Job_Title_S = max(Job_Title_S),
Discipline = max(Discipline),
TotalUniqueContributions = n(),
YearsContributing = max(Year)-min(Year))
ggplot(filter(gender_uniquecontrib),
aes(YearsContributing,TotalUniqueContributions)) +
geom_jitter()+
geom_smooth()
ggplot(filter(gender_uniquecontrib),
aes(TotalUniqueContributions)) +
geom_bar()
ggplot(filter(gender_uniquecontrib,Discipline==''),
aes(YearsContributing,TotalUniqueContributions,color = (Female==1))) +
geom_jitter()+
geom_smooth()
ggplot(filter(gender_uniquecontrib,Discipline!=''),aes(YearsContributing,TotalUniqueContributions,color = (Female==1))) +
geom_jitter()+
geom_smooth()+
facet_grid(Discipline ~ .)
ggplot(filter(gender_uniquecontrib,YearsContributing<5,!is.na(Female)),aes(Female==1,TotalUniqueContributions)) +
stat_summary(fun.y = mean, geom = "bar") +
stat_summary(fun.data = mean_cl_normal, geom = "errorbar") +
facet_grid(. ~ Discipline)+
theme(axis.text.x = element_text(angle = 45, hjust=1))
ggplot(filter(gender_uniquecontrib,YearsContributing>10,!is.na(Female)),aes(Female==1,TotalUniqueContributions)) +
stat_summary(fun.y = mean, geom = "bar") +
stat_summary(fun.data = mean_cl_normal, geom = "errorbar") +
facet_grid(. ~ Discipline)+
theme(axis.text.x = element_text(angle = 45, hjust=1))
ggplot(gender_uniquecontrib,aes(YearsContributing)) +
geom_bar()
## Normalize thread measurements to participant population
# Normalizing thread discipline representation to the overall community size of discipline
thread_to_discipline = data_thread %>%
select(ThreadId,starts_with('Thread_Discipline')) %>%
unique()
tmp = sweep(thread_to_discipline[,-1], 2, pop_discipline$DisciplinePopSize, `/`)
thread_to_discipline = cbind(thread_to_discipline %>% select(ThreadId), tmp)
colnames(tmp) = gsub("^.*?_","",gsub("^.*?_","",colnames(tmp)))
thread_to_discipline$Thread_OverrepresentedDiscipline = colnames(tmp)[max.col(tmp,ties.method="first")]
thread_to_discipline$Thread_UnderrepresentedDiscipline = colnames(tmp)[max.col(-tmp,ties.method="first")]
# Normalizing thread job representation to the overall community size of job
thread_to_job = data_thread %>%
select(ThreadId,starts_with('Thread_Job')) %>%
unique()
tmp = sweep(thread_to_job[,-1], 2, pop_job$JobPopSize, `/`)
thread_to_job = cbind(thread_to_job %>% select(ThreadId), tmp)
colnames(tmp) = gsub("^.*?_","",gsub("^.*?_","",colnames(tmp)))
thread_to_job$Thread_OverrepresentedJob = colnames(tmp)[max.col(tmp,ties.method="first")]
thread_to_job$Thread_UnderrepresentedJob = colnames(tmp)[max.col(-tmp,ties.method="first")]
# Joining normalized discipline and job metrics to main table
data_thread = data_thread %>%
select(-starts_with('Thread_Discipline'),-starts_with('Thread_Job')) %>%
left_join(thread_to_discipline, by='ThreadId')%>%
left_join(thread_to_job, by='ThreadId')
| /edge_preprocessing_thread.R | no_license | lots-of-things/edge_forum | R | false | false | 11,511 | r |
# Ozone - Salvo, 2017
# Random forest
# Libraries ---------------------------------------------------------------
library(tidyverse)
library(caret)
library(recipes)
library(lime)
library(patchwork)
library(iml)
source("scripts/salvo-2017/salvo-utils.R")
# Data ----------------------------------------------------------------------
df_model <- read_rds("data/artaxo-salvo-geiger/data-asg-model.rds")
# Formula -----------------------------------------------------------------
formula <- df_model %>%
select(
-siteid,
-date, -o3_mass_conc, -dayofweek,
-starts_with("congestion"),
-dv_kmregion_am_18_max, -dv_kmcity_am_80_max,
-pp, -dv_pp_20_150,
-dv_sun_reg,
-year, -month, -day, -dv_weekday_regular, -dv_yearendvacation
) %>%
names() %>%
str_c(collapse = " + ") %>%
str_c("o3_mass_conc ~ ", .) %>%
as.formula()
rec <-
df_model %>%
na.omit() %>%
recipe(formula = formula) %>%
step_naomit(all_predictors(), all_outcomes()) %>%
step_dummy(stationname, week, one_hot = TRUE)
# Model -------------------------------------------------------------------
train_control <- trainControl(method = "cv", number = 5)
tuning_grid <- expand.grid(
splitrule = "variance",
mtry = 60,
min.node.size = 3
)
# Caret
set.seed(5893524)
model <- train(
x = rec,
data = na.omit(df_model),
method = "ranger",
trControl = train_control,
tuneGrid = tuning_grid,
importance = 'impurity'
)
model
# RMSE: 14.11
# MAE: 10.20
# % var: 85.72%
# share_gas importance rank: 6th
model$finalModel
varImp(model)
pred_obs_plot(
obs = na.omit(df_model)$o3_mass_conc,
pred = predict(model, newdata = na.omit(df_model))
)
ggsave(
filename = "text/figuras/cap-comb-forest-pred-obs-plot.pdf",
width = 6,
height = 4
)
# Interpretation -------------------------------------------------------------
df_train <- rec %>%
prep(df_model) %>%
bake(df_model)
X <- df_train %>%
select(-o3_mass_conc) %>%
as.matrix()
train_control <- trainControl(method = "cv", number = 5)
tuning_grid <- expand.grid(
splitrule = "variance",
mtry = 60,
min.node.size = 3
)
model <- train(
x = X,
y = df_train$o3_mass_conc,
method = "ranger",
trControl = train_control,
tuneGrid = tuning_grid,
importance = 'impurity'
)
predictor <- Predictor$new(model, data = df_train, y = df_train$o3_mass_conc)
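# PDP averages model predictions over the marginal distribution of the other
# features, while ALE (further below) accumulates local effects and tends to
# be more reliable when predictors are correlated.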
# PDP
pdp <- FeatureEffect$new(predictor, feature = "share_gas", method = "pdp", grid.size = 20)
p_pdp <- pdp$plot() +
theme_bw() +
labs(x = "Proporção estimada de carros a gasolina") +
scale_y_continuous(name = expression(paste(O[3], " estimado (", mu, "g/", m^3, ")"))) +
ggtitle("PDP")
# ALE
ale <- FeatureEffect$new(predictor, feature = "share_gas", grid.size = 20)
p_ale <- ale$plot() +
theme_bw() +
labs(x = "Proporção estimada de carros a gasolina") +
scale_y_continuous(name = "Diferença em relação à predição média") +
ggtitle("ALE")
p_pdp + p_ale
ggsave(
filename = "text/figuras/cap-comb-rf-graficos-iml.pdf",
width = 7, height = 5
)
# ALE (English-language labels)
ale <- FeatureEffect$new(predictor, feature = "share_gas", grid.size = 20)
p_ale <- ale$plot() +
theme_bw() +
labs(x = "shareE25") +
scale_y_continuous(name = "ALE") +
ggtitle("Random forest")
p_ale_r <- p_ale
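# p_ale_x is assumed to be created by the companion xgboost script (see the
# rf-xgboost figure file below); run that script first so the patchwork
# composition works.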
p_ale_r + p_ale_x
ggsave(
filename = "text/figuras/cap-comb-rf-xgboost-p-ale.pdf",
width = 7, height = 5
)
# Scenarios with low and high share -----------------------------------------
prep <- prep(rec, na.omit(df_model))
# Low
df_share_baixo <- df_model %>%
mutate(share_gas = 0.2) %>%
na.omit
df_share_alto <- df_model %>%
mutate(share_gas = 0.7) %>%
na.omit
df_model %>%
na.omit %>%
mutate(
pred_baixa = predict(model, df_share_baixo),
pred_alta = predict(model, df_share_alto),
pred = predict(model, na.omit(df_model))
) %>%
group_by(stationname) %>%
gather(var, o3, pred, pred_baixa, pred_alta) %>%
ggplot(aes(x = date, y = o3, color = var)) +
geom_smooth(se = FALSE) +
facet_wrap(~stationname) +
labs(color = "Cenário", x = "Ano", y = "Ozônio predito") +
scale_color_discrete(labels = c(
"Proporção observada", "Proporção Alta", "Proporção baixa"
)) +
theme(legend.position = "bottom")
ggsave(
filename = "text/figuras/cap-comb-random-forest-cenarios.pdf",
width = 9,
height = 6
) | /scripts/salvo-2017/salvo-random-forest.R | permissive | williamorim/tese | R | false | false | 4,341 | r | # Ozônio - Salvo, 2017
# Random forest
# Libraries ---------------------------------------------------------------
library(tidyverse)
library(caret)
library(recipes)
library(lime)
library(patchwork)
library(iml)
source("scripts/salvo-2017/salvo-utils.R")
# Dados -------------------------------------------------------------------
df_model <- read_rds("data/artaxo-salvo-geiger/data-asg-model.rds")
# Formula -----------------------------------------------------------------
formula <- df_model %>%
select(
-siteid,
-date, -o3_mass_conc, -dayofweek,
-starts_with("congestion"),
-dv_kmregion_am_18_max, -dv_kmcity_am_80_max,
-pp, -dv_pp_20_150,
-dv_sun_reg,
-year, -month, -day, -dv_weekday_regular, -dv_yearendvacation
) %>%
names() %>%
str_c(collapse = " + ") %>%
str_c("o3_mass_conc ~ ", .) %>%
as.formula()
rec <-
df_model %>%
na.omit() %>%
recipe(formula = formula) %>%
step_naomit(all_predictors(), all_outcomes()) %>%
step_dummy(stationname, week, one_hot = TRUE)
# Model -------------------------------------------------------------------
train_control <- trainControl(method = "cv", number = 5)
tuning_grid <- expand.grid(
splitrule = "variance",
mtry = 60,
min.node.size = 3
)
# Caret
set.seed(5893524)
model <- train(
x = rec,
data = na.omit(df_model),
method = "ranger",
trControl = train_control,
tuneGrid = tuning_grid,
importance = 'impurity'
)
model
# RMSE: 14.11
# MAE: 10.20
# % var: 85.72%
# share_gas imp: 6ª
model$finalModel
varImp(model)
pred_obs_plot(
obs = na.omit(df_model)$o3_mass_conc,
pred = predict(model, newdata = na.omit(df_model))
)
ggsave(
filename = "text/figuras/cap-comb-forest-pred-obs-plot.pdf",
width = 6,
height = 4
)
# Interpretação ------------------------------------------------------------
df_train <- rec %>%
prep(df_model) %>%
bake(df_model)
X <- df_train %>%
select(-o3_mass_conc) %>%
as.matrix()
train_control <- trainControl(method = "cv", number = 5)
tuning_grid <- expand.grid(
splitrule = "variance",
mtry = 60,
min.node.size = 3
)
model <- train(
x = X,
y = df_train$o3_mass_conc,
method = "ranger",
trControl = train_control,
tuneGrid = tuning_grid,
importance = 'impurity'
)
predictor <- Predictor$new(model, data = df_train, y = df_train$o3_mass_conc)
# PDP
pdp <- FeatureEffect$new(predictor, feature = "share_gas", method = "pdp", grid.size = 20)
p_pdp <- pdp$plot() +
theme_bw() +
labs(x = "Proporção estimada de carros a gasolina") +
scale_y_continuous(name = expression(paste(O[3], " estimado (", mu, "g/", m^3, ")"))) +
ggtitle("PDP")
# ALE
ale <- FeatureEffect$new(predictor, feature = "share_gas", grid.size = 20)
p_ale <- ale$plot() +
theme_bw() +
labs(x = "Proporção estimada de carros a gasolina") +
scale_y_continuous(name = "Diferença em relação à predição média") +
ggtitle("ALE")
p_pdp + p_ale
ggsave(
filename = "text/figuras/cap-comb-rf-graficos-iml.pdf",
width = 7, height = 5
)
# ALE (inglês)
ale <- FeatureEffect$new(predictor, feature = "share_gas", grid.size = 20)
p_ale <- ale$plot() +
theme_bw() +
labs(x = "shareE25") +
scale_y_continuous(name = "ALE") +
ggtitle("Random forest")
p_ale_r <- p_ale
p_ale_r + p_ale_x
ggsave(
filename = "text/figuras/cap-comb-rf-xgboost-p-ale.pdf",
width = 7, height = 5
)
# Cenários com baixo e alto share -----------------------------------------
prep <- prep(rec, na.omit(df_model))
# Baixo
df_share_baixo <- df_model %>%
mutate(share_gas = 0.2) %>%
na.omit
df_share_alto <- df_model %>%
mutate(share_gas = 0.7) %>%
na.omit
df_model %>%
na.omit %>%
mutate(
pred_baixa = predict(model, df_share_baixo),
pred_alta = predict(model, df_share_alto),
pred = predict(model, na.omit(df_model))
) %>%
group_by(stationname) %>%
gather(var, o3, pred, pred_baixa, pred_alta) %>%
ggplot(aes(x = date, y = o3, color = var)) +
geom_smooth(se = FALSE) +
facet_wrap(~stationname) +
labs(color = "Cenário", x = "Ano", y = "Ozônio predito") +
scale_color_discrete(labels = c(
"Proporção observada", "Proporção Alta", "Proporção baixa"
)) +
theme(legend.position = "bottom")
ggsave(
filename = "text/figuras/cap-comb-random-forest-cenarios.pdf",
width = 9,
height = 6
)
##--------------------------------------##
## Figures for density functions ##
##--------------------------------------##
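# EHE.dens: Monte Carlo approximation of the marginal density of the EHE
# error (a scale mixture of normals). The latent variance is
# U = (1-Z)*1 + Z*U2 with Z ~ Bernoulli(s), where U2 comes from the gamma
# hierarchy W ~ Ga(gam, 1), V ~ Ga(W, 1), U2 ~ Ga(1, V); the density at each
# grid point is the average of N(0, U) normal densities over the simulated U's.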
EHE.dens <- function(s=0.1, gam=1, ep=seq(-4, 4, by=0.1)){
mc <- 10000
W <- rgamma(mc, gam, 1)
V <- rgamma(mc, W, 1)
V[V<0.001] <- 0.001
U2 <- rgamma(mc, 1, V)
U1 <- rep(1, mc)
Z <- rbinom(mc, 1, s)
UU <- (1-Z)*U1 + Z*U2
L <- length(ep)
U.mat <- matrix(rep(UU, L), mc, L)
ep.mat <- t( matrix(rep(ep, mc), L, mc) )
dens <- apply(dnorm(ep.mat, 0, sqrt(U.mat)), 2, mean)
return(dens)
}
ep <- seq(-10, 10, by=0.1)
s.set <- c(0.05, 0.1, 0.2)
J <- length(s.set)
dens <- matrix(NA, length(ep), J)
for(k in 1:J){
dens[,k] <- EHE.dens(s=s.set[k], gam=1, ep=ep)
}
N.dens <- dnorm(ep)
Dens <- cbind(N.dens, dens)
# figure 1 in the main document
pdf("density.pdf", width=12, height=8, pointsize=13)
par(mfcol=c(1,2))
matplot(ep[abs(ep)<4], Dens[abs(ep)<4,], type="l", lty=1, col=1:(J+1), xlab="x", ylab="density")
legend("topright", legend=c("Normal", paste0("EHE (s=",s.set,")")), lty=1, col=1:(J+1))
matplot(ep[ep>4], Dens[ep>4,], type="l", lty=1, col=1:(J+1), xlab="x", ylab="density")
legend("topright", legend=c("Normal", paste0("EHE (s=",s.set,")")), lty=1, col=1:(J+1))
dev.off()
##--------------------------------------##
## Figures for CDF ##
##--------------------------------------##
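# H.dist: closed-form CDF used here for the latent scale,
# F(x) = 1 - (1 + log(1 + x))^(-gam).
# rEHE: samples from the EHE error by drawing the latent scale UU
# (U1 ~ Ga(a, a) with a = 10^8 approximates a point mass at 1) and then
# X | UU ~ N(0, UU).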
H.dist <- function(gam=1, ep=seq(0, 4, by=0.1)){
dist <- 1-( 1 + log(1+ep) )^(-gam)
return(dist) # return explicitly; the original relied on the assignment's invisible value
}
rEHE <- function(mc=100000, s=0.1, gam=1){
a <- 10^8
W <- rgamma(mc, gam, 1)
V <- rgamma(mc, W, 1)
V[V<0.001] <- 0.001
U2 <- rgamma(mc, 1, V)
U1 <- rgamma(mc, a, a)
Z <- rbinom(mc, 1, s)
UU <- (1-Z)*U1 + Z*U2
X <- rnorm(mc,0,sqrt(UU))
return(X)
}
ep <- seq(0, 10, by=0.01)
s.set <- c(0.5, 1, 2)
J <- length(s.set)
dist <- matrix(NA, length(ep), J+1)
dist[,1] <- 1 - pgamma(1/ep, 0.5, rate = 0.5)
for(k in 1:J){
dist[,k+1] <- H.dist(gam=s.set[k], ep=ep)
}
ep2 <- seq(-10, 10, by=0.1)
s.set2 <- c(0.1, 0.5, 0.8)
J2 <- length(s.set2)
DIST <- matrix(NA, length(ep2), J2+1)
DIST[,1] <- pcauchy(ep2) #pnorm(ep2)
for(k in 1:J2){
DIST[,k+1] <- ecdf(rEHE(s=s.set2[k], gam=1))(ep2)
}
Lab1 <- c("IG(1/2,1/2)", expression(H (gamma==0.5)), expression(H (gamma==1)), expression(H (gamma==2)))
Lab2 <- c("Cauchy", expression(EHE (s==0.1)), expression(EHE (s==0.5)), expression(EHE (s==0.8)))
# figure 2 in the main document
pdf("dist.pdf", width=12, height=8, pointsize=13)
par(mfcol=c(1,2))
matplot(ep[abs(ep)<6], dist[abs(ep)<6,], type="l", lty=1, col=1:(J+1), xlab="x", ylab="CDF")
legend("bottomright", Lab1, lty=1, col=1:(J+1))
matplot(ep2[abs(ep2)<8], DIST[abs(ep2)<8,], type="l", lty=1, col=1:(J2+1), xlab="x", ylab="CDF")
legend("bottomright", Lab2, lty=1, col=1:(J2+1))
dev.off()
## end of source file /Fig.R (repo: sshonosuke/EHE; no license declared) ##
library(ape)
mytree <- read.tree("N_S_remove.tree") # newick format; file.choose() takes no file-name argument, so the path is passed directly
plot(mytree, cex=0.7)
#order is pc bc hp pl af pp
mycol<-c("skyblue1","skyblue1", "skyblue1", "red", "red", "red")
png("nj.N_S_remove.png",h=1000, w=1000, pointsize=20 )
plot(mytree, type="unrooted",
show.tip.label=FALSE,
edge.width=3,
edge.color="black",
adj=0.5, label.offset=1.5, lwd=2.5)
tiplabels(pch=21, col="black",adj=0.5, bg=mycol, cex=4)
tiplabels(mytree$tip.label,col="black", adj=0.3,bg=mycol, frame="none")
dev.off()
# end of source file /R_scripts/NJ.plot.R (repo: rsbrennan/AdmixtureMapping; no license declared)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.pinpointemail_operations.R
\name{list_dedicated_ip_pools}
\alias{list_dedicated_ip_pools}
\title{List all of the dedicated IP pools that exist in your Amazon Pinpoint account in the current AWS Region}
\usage{
list_dedicated_ip_pools(NextToken = NULL, PageSize = NULL)
}
\arguments{
\item{NextToken}{A token returned from a previous call to \code{ListDedicatedIpPools} to indicate the position in the list of dedicated IP pools.}
\item{PageSize}{The number of results to show in a single call to \code{ListDedicatedIpPools}. If the number of results is larger than the number you specified in this parameter, then the response includes a \code{NextToken} element, which you can use to obtain additional results.}
}
\description{
List all of the dedicated IP pools that exist in your Amazon Pinpoint account in the current AWS Region.
}
\section{Accepted Parameters}{
\preformatted{list_dedicated_ip_pools(
NextToken = "string",
PageSize = 123
)
}
}
% end of source file /service/paws.pinpointemail/man/list_dedicated_ip_pools.Rd (repo: CR-Mercado/paws; permissive license)
#---1---1-------------------------------------------------------------------------------------------#
# Options Links and Datasets
require(ggplot2)
require(plyr)
require(reshape2)
require(colorspace) #Load the library necessary for creating tightly-controlled palettes.
# Values and palettes
attcol8<-c("blue3","blue","lightblue", "snow", "lightpink", "red" ,"red3", "red4")
attcol3<-c("blue2","blue2","snow", "snow", "snow", "red2" ,"red2", "red2")
prevcol3<-c("blue2","snow","red2")
categories8long<-c("Never",
"Once or twice","Less than once a month/ 3-12 times",
"About once a month/ 12 times",
"About tiwce a month/ 24 times",
"About once a week",
"Several times a week",
"Everyday"
)
categories8<- c("Never ",
"1-2/year ",
"3-12/year",
"~1/month ",
"~2/month ",
"~1/week ",
"2-3/week ",
"Everyday "
)
categories3<-c("Goers","Irregulars","Absentees")
categories38<-c("Absentees","Absentees","Irregulars","Irregulars","Irregulars","Goers","Goers","Goers")
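# categories38 collapses the 8 attendance levels into 3 groups (levels 1-2 ->
# Absentees, 3-5 -> Irregulars, 6-8 -> Goers) for relabelling 8-level plots.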
# Graphs
#---platonic of the MovingWristBand drawing--------------------------------begin-----------#
drawing<-"MovingWristBand" # name of drawing and folder in "03 writing and drawing" directory
pathImageOut<-file.path(getwd(),"03 writing and drawing",drawing) # defined after `drawing`, which it uses
cohortYear <- 1984
allCohorts<-c("1980","1981","1982","1983","1984" )
cohorts<-cohortYear # cohortYear or allCohorts - marks the drawing with cohort info
slide<-"slide00"
pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear,".png")) # defined after `slide`, which it uses
dsFORp <- subset(dsL_attend, cohort %in% allCohorts) # dsL_attend is assumed to be created by an upstream data script
#------------------------------------------------------#
p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
geom_bar(position="fill")+
scale_fill_manual(values = attcol8,
labels=categories8long,
name="Category" )+
scale_y_continuous("Prevalence: proportion of total",
limits=c(0, 1),
breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
scale_x_discrete("Age in years",
limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
labs(title=paste0("In the past year, how often have you attended a worship service?",
" cohort:",cohorts))
p
#------------------------------------------------------#
png(filename = pathFileOut,
width =800, height =450 , units = "px")
plot(p)
dev.off()
#---platonic of the MovingWristBand drawing---------------------------------end------------#
#
# cattransPrev<-c("G","I","A")
# cattransTrans<-c("gg","gi","ga","ig","ii","ia","ag","ai","aa" )
# proportions<-c("cohort","cattrans","pt00","pt01","pt02","pt03","pt04","pt05","pt06","pt07","pt08")
# counts <-c("cohort","cattrans","t00" , "t01", "t02", "t03", "t04", "t05", "t06", "t07", "t08")
# slide<-"01_"
# cohortYear <- 1984
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories8long,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
#
# slide<-"02"
# cohortYear <- 1984
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories8,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
# slide<-"03"
# cohortYear <- 1984
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories8,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
# slide<-"04"
# cohortYear <- 1983
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories8,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
# slide<-"05"
# cohortYear <- 1982
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories8,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
# slide<-"06"
# cohortYear <- 1981
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories8,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
# slide<-"07"
# cohortYear <- 1980
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories8,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
# slide<-"08"
# cohortYear <- 1980
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol3,
# labels=categories38,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
#
# slide<-"09"
# cohortYear <- 1981
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol3,
# labels=categories38,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
#
# slide<-"11"
# cohortYear <- 1983
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol3,
# labels=categories38,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
#
# slide<-"12"
# cohortYear <- 1984
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-cohortYear
# # cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% cohortYear)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol3,
# labels=categories38,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
#
# slide<-"13"
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% allCohorts)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol3,
# labels=categories38,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
#
# slide<-"14"
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% allCohorts)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories38,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
#
#
# slide<-"15"
# allCohorts<-c("1980","1981","1982","1983","1984" )
# cohorts<-c("all")
# #------------------------------------------------------#
# dsFORp <- subset(dsLong, cohort %in% allCohorts)
# p<-ggplot(dsFORp, aes(x=factor(age), fill=factor(attendence)))+
# geom_bar(position="fill")+
# scale_fill_manual(values = attcol8,
# labels=categories8,
# name="Category" )+
# scale_y_continuous("Prevalence: proportion of total",
# limits=c(0, 1),
# breaks=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))+
# scale_x_discrete("Age in years",
# limits=c("16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"))+
# labs(title=paste0("In the past year, how often have you attended a worship service?"," cohort:",cohorts))
# p
# plast<-p
# pathImageOut<-file.path(getwd(),"EMOSA_ggplot/ggplot_graphs/prevalences")
# pathFileOut<-file.path(pathImageOut,paste0(slide,"_",cohortYear, ".png"))
# png(filename = pathFileOut,
# width =800, height =450 , units = "px")
# plot(plast)
# dev.off()
# #------------------------------------------------------#
# end of source file /03 writing and drawing/MovingWristBand-GiA332.R (repo: andkov/NLSY-97_Religiosity; no license declared)
getwd()
setwd("/Users/krishna/MOOC/Edge/Data")
gerber = read.csv("gerber.csv")
str(gerber)
# Problem 1.1 - Exploration and Logistic Regression
sum(gerber$voting)/nrow(gerber)
# > sum(gerber$voting)/nrow(gerber)
# [1] 0.3158996
# Problem 1.2 - Exploration and Logistic Regression
table(gerber$civicduty,gerber$voting)
table(gerber$hawthorne,gerber$voting)
table(gerber$self,gerber$voting)
table(gerber$neighbors,gerber$voting)
tapply(gerber$voting, gerber$civicduty, mean)
tapply(gerber$voting, gerber$hawthorne, mean)
tapply(gerber$voting, gerber$self, mean)
tapply(gerber$voting, gerber$neighbors, mean)
# Problem 1.3 - Exploration and Logistic Regression
model1 = glm(voting ~ hawthorne + civicduty + neighbors + self, gerber, family=binomial() )
summary(model1)
# Problem 1.4 - Exploration and Logistic Regression
trainPredict = predict(model1 , type="response")
op = table(gerber$voting,trainPredict >0.3)
# FALSE TRUE
# 0 134513 100875
# 1 56730 51966
# Problem 1.5 - Exploration and Logistic Regression
op2 = table(gerber$voting,trainPredict >0.5)
op2
235388/sum(op2)
# [1] 0.6841004
# Problem 1.6 - Exploration and Logistic Regression
# > table(gerber$voting)
op3 = table(gerber$voting)
235388/sum(op3)
# > 235388/sum(op3)
# [1] 0.6841004
library(ROCR)
ROCRpred = prediction(trainPredict, gerber$voting)
as.numeric(performance(ROCRpred,"auc")@y.values)
# > as.numeric(performance(ROCRpred,"auc")@y.values)
# [1] 0.5308461
# Even though all of the variables are significant, this is a weak predictive model. - correct
# Problem 2.1 - Trees
library(rpart)
library(rpart.plot)
CARTmodel = rpart(voting ~ civicduty + hawthorne + self + neighbors, data=gerber)
rpart.plot(CARTmodel)
# Problem 2.2 - Trees
CARTmodel2 = rpart(voting ~ civicduty + hawthorne + self + neighbors, data=gerber, cp=0.0)
rpart.plot(CARTmodel2)
# Neighbor is the first split, civic duty is the last.
# Problem 2.3 - Trees
# 0.31
# Problem 2.4 - Trees
CARTmodel3 = rpart(voting ~ civicduty + hawthorne + self + neighbors + sex, data=gerber, cp=0.0)
rpart.plot(CARTmodel3)
# Men
# Problem 3.1 - Interaction Terms
CARTmodel4 = rpart(voting ~ control, data=gerber, cp=0.0)
CARTmodel5 = rpart(voting ~ control + sex, data=gerber, cp=0.0)
rpart.plot(CARTmodel4)
prp(CARTmodel4,digits = 6)
rpart.plot(CARTmodel5)
abs(0.296638 - 0.34)
# > abs(0.296638 - 0.34)
# [1] 0.043362
# Problem 3.2 - Interaction Terms
rpart.plot(CARTmodel5)
# Problem 3.3 - Interaction Terms
model6 = glm(voting ~ control + sex, gerber, family=binomial() )
summary(model6)
# Coefficient is negative, reflecting that women are less likely to vote
# Problem 3.4 - Interaction Terms
Possibilities = data.frame(sex=c(0,0,1,1),control=c(0,1,0,1))
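# Rows of Possibilities are (sex, control) = (0,0), (0,1), (1,0), (1,1);
# element [4] below is therefore a woman in the control group.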
predict(model6, newdata=Possibilities, type="response")[4] -
predict(CARTmodel5, newdata=Possibilities)[4]
# 4
# 0.0003506733
# Problem 3.5 - Interaction Terms
LogModel2 = glm(voting ~ sex + control + sex:control, data=gerber, family="binomial")
summary(LogModel2)
# If a person is a woman and in the control group, the chance that she voted goes down.
# Problem 3.6 - Interaction Terms
predict(LogModel2, newdata=Possibilities, type="response")[4] -
predict(CARTmodel5, newdata=Possibilities)[4]
# end of source file /Analytics Edge/NUnit4_Assignment.R (repo: krishnakalyan3/Edge; no license declared)
enumerationValueApi <- R6Class("enumerationValueApi",
private = list(),
public = list(
serviceBase = NULL,
authType = NULL,
username = NULL,
password = NULL,
validateSSL = NULL,
debug = NULL,
initialize = function(baseUrl, authType, username, password, validateSSL, debug) {
self$serviceBase <- baseUrl
self$username <- username
self$password <- password
self$authType <- authType
self$validateSSL <- validateSSL
self$debug <- debug
},
getByPath = function(path, selectedFields, webIdType) {
qs <- customQueryString$new()
if (is.null(path) || path == "") {
return (paste0("Error: required parameter path was null or undefined"))
}
if (is.character(path) == FALSE) {
return (print(paste0("Error: path must be a string.")))
}
qs$add('path', path, FALSE);
localVarPath <- paste(c(self$serviceBase, '/enumerationvalues'), collapse = "")
if (missing(selectedFields) == FALSE && is.null(selectedFields) == FALSE && selectedFields != "") {
qs$add('selectedFields', selectedFields, FALSE);
if (is.character(selectedFields) == FALSE) {
return (print(paste0("Error: selectedFields must be a string.")))
}
}
if (missing(webIdType) == FALSE && is.null(webIdType) == FALSE && webIdType != "") {
qs$add('webIdType', webIdType, FALSE);
if (is.character(webIdType) == FALSE) {
return (print(paste0("Error: webIdType must be a string.")))
}
}
res <- getHttpRequest(localVarPath, qs$getQueryParameters(), self$username, self$password, self$authType, self$validateSSL, self$debug)
contentResponse <- content(res)
if (res$status == 200) {
attr(contentResponse, "className") <- "PIEnumerationValue"
}
return (contentResponse)
},
get = function(webId, selectedFields, webIdType) {
qs <- customQueryString$new()
if (is.null(webId) || webId == "") {
return (paste0("Error: required parameter webId was null or undefined"))
}
if (is.character(webId) == FALSE) {
return (print(paste0("Error: webId must be a string.")))
}
localVarPath <- paste(c(self$serviceBase, '/enumerationvalues/', webId), collapse = "")
if (missing(selectedFields) == FALSE && is.null(selectedFields) == FALSE && selectedFields != "") {
qs$add('selectedFields', selectedFields, FALSE);
if (is.character(selectedFields) == FALSE) {
return (print(paste0("Error: selectedFields must be a string.")))
}
}
if (missing(webIdType) == FALSE && is.null(webIdType) == FALSE && webIdType != "") {
qs$add('webIdType', webIdType, FALSE);
if (is.character(webIdType) == FALSE) {
return (print(paste0("Error: webIdType must be a string.")))
}
}
res <- getHttpRequest(localVarPath, qs$getQueryParameters(), self$username, self$password, self$authType, self$validateSSL, self$debug)
contentResponse <- content(res)
if (res$status == 200) {
attr(contentResponse, "className") <- "PIEnumerationValue"
}
return (contentResponse)
},
updateEnumerationValue = function(webId, PIEnumerationValue) {
qs <- customQueryString$new()
if (is.null(webId) || webId == "") {
return (paste0("Error: required parameter webId was null or undefined"))
}
if (is.character(webId) == FALSE) {
return (print(paste0("Error: webId must be a string.")))
}
if (is.null(PIEnumerationValue) || PIEnumerationValue == "") {
return (paste0("Error: required parameter PIEnumerationValue was null or undefined"))
}
className <- attr(PIEnumerationValue, "className")
if ((is.null(className)) || (className != "PIEnumerationValue")) {
return (print(paste0("Error: the class from the parameter PIEnumerationValue should be PIEnumerationValue.")))
}
localVarPath <- paste(c(self$serviceBase, '/enumerationvalues/', webId), collapse = "")
res <- patchHttpRequest(localVarPath, qs$getQueryParameters(), PIEnumerationValue, self$username, self$password, self$authType, self$validateSSL, self$debug)
return (res)
},
deleteEnumerationValue = function(webId) {
qs <- customQueryString$new()
if (is.null(webId) || webId == "") {
return (paste0("Error: required parameter webId was null or undefined"))
}
if (is.character(webId) == FALSE) {
return (print(paste0("Error: webId must be a string.")))
}
localVarPath <- paste(c(self$serviceBase, '/enumerationvalues/', webId), collapse = "")
res <- deleteHttpRequest(localVarPath, qs$getQueryParameters(), self$username, self$password, self$authType, self$validateSSL, self$debug)
return (res)
}
)
)
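# Usage sketch (hypothetical values; assumes this package's HTTP helpers,
# e.g. getHttpRequest() and customQueryString, are loaded as elsewhere in
# this repo):
# api <- enumerationValueApi$new("https://myserver/piwebapi", "basic", "user", "password", TRUE, FALSE)
# val <- api$getByPath("\\\\AssetServer\\Database\\EnumSet|OnState")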
# end of source file /R/enumerationValueApi.r (repo: frbl/PI-Web-API-Client-R; permissive license)
#' Retrieve the content of a gene set
#'
#' @param i The i-th gene set to probe.
#' @param design The design matrix.
#' @param array The gene-wise information. The order of genes should be the same as in the design matrix.
#' @return A vector containing the result.
#' @export
getterminfo <- function(i, design, array) {
info = array[which(design[, i]!=0)]
return(info)
}
#' Parse the HiSig results into a dataframe
#'
#' @param data A named list containing `design` and `response`.
#' @param impact The impact matrix; 1st column being the main fit; 2nd column to the end being the null model.
#' @param term.names a vector of term (gene set) names
#' @param gene.names a vector of gene names
#' @param signal2 if not NA, a second numerical signal to print for each gene set. Default is NA.
#' @param gene.as.term if the design matrix treats each individual gene as its own gene set, set this to TRUE to compute the statistics correctly. Default is FALSE.
#' @param showedge the number of top genes to print when the gene sets are big
#' @return A dataframe, sorted by q values.
#' @export
parse_hisig <- function(data, impact, term.names, gene.names, signal2=NA, gene.as.term=F, showedge=20) {
data$response = round(data$response, 2)
ngenes = length(gene.names)
nterms = length(term.names)
stopifnot(ngenes==length(data$response))
# print(dim(data$design))
term.sizes = Matrix::colSums(data$design!=0)
# calculate p.values
impact.main = impact[,1]
impact.rand = impact[,2:dim(impact)[2]]
pval <- rowSums(impact.rand > impact.main)/(dim(impact)[2]-1)
qval = p.adjust(pval, method='BH')
df = cbind(impact.main, pval, qval)
df = round(df, 4)
df = as.data.frame(df)
names(df) = c("Selection.pressure", "p", "q")
if (gene.as.term) {
df = df[(ngenes+1):dim(impact)[1], ]
}
df$System.names = term.names
nsys = dim(df)[1]
# list the genes in the systems
allranks = rank(-abs(data$response), ties.method = 'max')
if (gene.as.term) {
genesets = lapply(1:nsys, getterminfo, data$design[,(ngenes+1):dim(impact)[1]], gene.names)
generanks = lapply(1:nsys, getterminfo, data$design[,(ngenes+1):dim(impact)[1]], allranks)
signal1 = lapply(1:nsys, getterminfo, data$design[,(ngenes+1):dim(impact)[1]], data$response)
term.sizes = term.sizes[(ngenes+1):dim(impact)[1]]
}
else {
genesets = lapply(1:nsys, getterminfo, data$design, gene.names)
generanks = lapply(1:nsys, getterminfo, data$design, allranks)
signal1 = lapply(1:nsys, getterminfo, data$design, data$response)
}
gene_sort_ids = lapply(signal1, function(x) order(abs(x), decreasing=T)) # all sorted by signal1
genesets_sorted = lapply(1:nsys, function(x) genesets[[x]][gene_sort_ids[[x]]] )
generanks_sorted = lapply(1:nsys, function(x) generanks[[x]][gene_sort_ids[[x]]] )
signal1_sorted = lapply(1:nsys, function(x) signal1[[x]][gene_sort_ids[[x]]] )
genesets_sorted = as.character(lapply(genesets_sorted, function(x) paste(x[1:showedge][is.na(x[1:showedge])==F], collapse = '|')))
generanks_sorted = as.character(lapply(generanks_sorted, function(x) paste(x[1:showedge][is.na(x[1:showedge])==F], collapse = '|')))
signal1_sorted = as.character(lapply(signal1_sorted, function(x) paste(x[1:showedge][is.na(x[1:showedge])==F], collapse='|')))
df$No.genes = term.sizes
df$Genes = genesets_sorted
df$Gene.ranks = generanks_sorted
df$Signal = signal1_sorted
if (!all(is.na(signal2))) {
stopifnot(ngenes == length(signal2))
if (gene.as.term) {
signal2 = lapply(1:nsys, getterminfo, data = data$design[,(ngenes+1):dim(impact)[1]], array=signal2)
}
else {
signal2 = lapply(1:nsys, getterminfo, data = data$design, array = signal2)
}
signal2_sorted = lapply(1:nsys, function(x) signal2[[x]][gene_sort_ids[[x]]] )
signal2_sorted = as.character(lapply(signal2_sorted, function(x) paste(x[1:showedge], collapse='|')))
df$Signal.2 = signal2_sorted
}
df <- df %>% filter(Selection.pressure > 0)
df <- df %>% arrange(q, p, desc(Selection.pressure))
return(df)
}
#' Parse HiSig results in multi-sample mode
#'
#' @param beta_sample An impact matrix (No. regulons x No. samples)
#' @param beta_null An impact matrix (No. regulons x No. permutations)
#' @return Two dataframes (No. regulons x No. samples). The first one for normalized scores, and the second one for q values. The normalized scores may contain 'NA'.
#' @export
parse_hisig_ms <- function(beta_sample, beta_null) {
nsample = ncol(beta_sample)
npermute = ncol(beta_null)
p.vals <- sapply(1:nsample,
function(x) {rowSums(beta_null >= beta_sample[,x])/npermute})
q.vals <- sapply(1:nsample,
function(x) {p.adjust(p.vals[,x], method ='BH')}) # TODO: think about it
nes <- sapply(1:nsample,
function(x) {npermute * beta_sample[,x]/rowSums(beta_null)})
return(list(nes, q.vals))
}
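# Minimal usage sketch (hypothetical matrices): `beta_sample` holds one
# impact column per sample and `beta_null` one column per permutation,
# with rows aligned on the same regulons.
# res <- parse_hisig_ms(beta_sample, beta_null)
# nes <- res[[1]]; q.vals <- res[[2]]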
#' Save the dataframe.
#'
#' @param df The dataframe generated by `parse_hisig`.
#' @param out The output filename.
#' @return No return value; the table is written to \code{out}.
#' @export
output <- function(df, out) {
colorder= c("System.names", "No.genes", "Selection.pressure", "p", "q", "Genes", "Gene.ranks", "Signal")
if ("Signal.2" %in% names(df) ) {
colorder = c(colorder, "Signal.2")
}
write.table(df[,colorder], file=out, quote=F, row.names = F, sep="\t")
}
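# End-to-end sketch (hypothetical inputs): `design` is a genes x terms
# indicator matrix, `response` the per-gene signal, and `impact` holds the
# main fit in column 1 followed by the permutation (null) fits.
# data <- list(design = design, response = response)
# df <- parse_hisig(data, impact, term.names = colnames(design),
#                   gene.names = rownames(design))
# output(df, "hisig_results.txt")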
| /HiSig/R/parse.R | no_license | n4wilson/HiSig | R | false | false | 5,295 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(ggplot2)
shinyUI(fluidPage(
# Application title
titlePanel("Canadian Football Expected vs. Actual Win Ratio (2015)"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
selectInput("team", "Team:",
c("Saskatchewan Roughriders" = 'SSK',
"BC Lions" = 'BC',
"Calgary Stampeders"= "CGY",
"Edmonton Eskimos" = "EDM",
"Winnipeg Blue Bombers" = "WPG",
"Hamilton Tiger Cats" = "HAM",
"Toronto Argonauts" = 'TOR',
"Montreal Alouettes" = 'MTL',
"Ottawa REDBLACKS" = 'ORB'
)
),
checkboxInput("showactual", "Show Actual Win %", FALSE),
helpText(
"This application is an exploration of the effectiveness of
Pythagorean Expectation for prediction of wins in pro football.
Select a Canadian Football League (CFL) team from above to display their
expected win percentage throughout the 2015 season. Then select the
'Show Actual Win %' checkbox to compare the predicted vs. actual win %."
)
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("SeasonPyExpPlot")
)
)
))
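# To run the app locally (assuming the matching server.R, which renders
# "SeasonPyExpPlot", sits in the same directory): shiny::runApp()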
| /ui.R | no_license | andrewjdyck/Coursera_DDP_Project | R | false | false | 1,519 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{fit_ridge_regression}
\alias{fit_ridge_regression}
\title{Training function for a Ridge regression model}
\usage{
fit_ridge_regression(
X,
y,
w = rep(0, dim(X)[2]),
learning_rate = 0.001,
lambda = 10,
model_train_error = 1e-05,
verbose = FALSE
)
}
\arguments{
\item{X}{the data set}
\item{y}{the label vector}
\item{learning_rate}{the learning rate for the optimization algorithm}
\item{lambda}{the regularization constant for the l2 penalty}
\item{model_train_error}{the training error we expect from the model (stopping criterion)}
\item{verbose}{when set to TRUE, prints details about the training run}
\item{w}{the initial weight vector (defaults to a zero vector)}
}
\description{
Training function for a Ridge regression model
}
\examples{
fit_ridge_regression(X, y, verbose = TRUE)
}
| /man/fit_ridge_regression.Rd | no_license | bogdanmacovei/dm-lrfs | R | false | true | 903 | rd |
library(rethinking)
globe.qa <- quap(
alist(
W ~ dbinom(W + L, p),
p ~ dunif(0,1)
),
data=list(W=6, L=3)
)
precis(globe.qa)
W <- 6
L <- 3
curve(dbeta(x, W+1, L+1), from=0, to=1)
curve(dnorm(x, 0.67, 0.16), lty=2, add=TRUE)
# posterior predictive distribution
# `samples` was undefined here; draw them from the quadratic approximation
# found above (mean 0.67, sd 0.16) so the rbinom() call below is runnable
samples <- rnorm(1e4, 0.67, 0.16)
w <- rbinom(1e4, size=9, prob=samples)
simplehist(w)
| /Follow_through/Lecture3_follow_through.R | no_license | nichi97/Statistical_rethinking- | R | false | false | 333 | r |
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/jtapi.R
\name{writeoutputargs}
\alias{writeoutputargs}
\title{Jterator API.
Writing output arguments to HDF5 file using the location specified in "handles".}
\usage{
writeoutputargs(handles, output_args)
}
\arguments{
\item{handles}{List of module input/output information.}
\item{output_args}{List of temporary pipeline output arguments that should be written to the HDF5 file.}
}
\description{
Jterator API.
Writing output arguments to HDF5 file using the location specified in "handles".
}
| /api/r/jtapi/man/writeoutputargs.Rd | permissive | brainy-minds/Jterator | R | false | false | 565 | rd |
library(testthat)
library(parsnip)
library(workflows)
set.seed(1234)
knn_spec <- nearest_neighbor() %>%
set_mode("regression") %>%
set_engine("kknn")
knn_fit <- workflow() %>%
add_formula(mpg ~ disp) %>%
add_model(knn_spec) %>%
fit(mtcars)
test_that("viz_fitted_line works", {
vdiffr::expect_doppelganger(
"viz_fitted_line simple",
viz_fitted_line(knn_fit, mtcars),
"viz_fitted_line"
)
vdiffr::expect_doppelganger(
"viz_fitted_line resolution",
viz_fitted_line(knn_fit, mtcars, resolution = 20),
"viz_fitted_line"
)
vdiffr::expect_doppelganger(
"viz_fitted_line expand",
viz_fitted_line(knn_fit, mtcars, expand = 1),
"viz_fitted_line"
)
vdiffr::expect_doppelganger(
"viz_fitted_line style",
viz_fitted_line(knn_fit, mtcars, color = "pink", size = 4),
"viz_fitted_line"
)
})
| /tests/testthat/test-viz_fitted_line.R | permissive | EmilHvitfeldt/horus | R | false | false | 857 | r |
testlist <- list(a = 6.32404026676796e-322, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result) | /BayesMRA/inst/testfiles/rmvn_arma_scalar/AFL_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1615926868-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 117 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listSymbols.R
\name{listSymbols}
\alias{listSymbols}
\title{listSymbols}
\usage{
listSymbols(...)
}
\arguments{
\item{...}{Vector(s) or list(s) of character strings.
Multiple genes can overlap at a given position on the genome. Therefore,
it is hard to associate directly a single-base TSS to a single gene symbol.
In our workflows we prepare gene expression tables where TSS counts are pooled
per gene symbol. If a position belongs to more than one gene, an artificial
ad-hoc symbol is created by concatenating the symbols with commas. For
instance, \sQuote{7SK,ACTR5}. As a result, one can not infer the number of
detected genes by simply counting the number of rows where expression is
higher than zero.
\code{listSymbols} is the solution to that problem. It will concatenate
with commas a list of row names from such gene expression tables, and then
expand it again and remove duplicates. That is, \sQuote{"7SK,ACTR5",
"7SK,ADAM10"} becomes \sQuote{"7SK,ACTR5,7SK,ADAM10"} and then
\sQuote{"7SK" ,ACTR5", "ADAM10"}. \code{listSymbols} will also search
and remove the \dQuote{.} gene symbol, which is a special artefact of
our workflows.}
}
\value{
Returns a vector of unique character strings, or \code{NULL} if
the input contained no string, the empty string alone, or the special
symbol \dQuote{.}.
}
\description{
List unique gene symbols, some of which can be comma-separated.
}
\details{
Takes a series of strings containing either one gene symbol or
comma-separated gene symbols, and returns a character vector of unique gene symbols.
}
\examples{
listSymbols("7SK,ACTR5", "7SK,ADAM10")
length(listSymbols("7SK,ACTR5", "7SK,ADAM10"))
listSymbols("")
length(listSymbols(""))
}
| /man/listSymbols.Rd | permissive | charles-plessy/smallCAGEqc | R | false | true | 1,756 | rd |
library(reReg)
### Name: simDat
### Title: Function to generate simulated data
### Aliases: simDat
### ** Examples
set.seed(123)
simDat(200, c(-1, 1), c(-1, 1), summary = TRUE)
| /data/genthat_extracted_code/reReg/examples/simDat.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 184 | r |
library("DALEX")
library("randomForest")
library("gbm")
library("e1071")
library("rms")
link_to_models <- "models/models_apartments.rda"
if (file.exists(link_to_models)) {
load(link_to_models)
} else {
# lm
apartments_lm_v5 <- lm(m2.price ~ ., data = apartments)
model_apartments_lm <- apartments_lm_v5
explain_apartments_lm_v5 <- explain(model = apartments_lm_v5,
data = apartments_test,
y = apartments_test$m2.price,
label = "Linear Regression")
# random forest
set.seed(72)
apartments_rf_v5 <- randomForest(m2.price ~ ., data = apartments)
explain_apartments_rf_v5 <- explain(model = apartments_rf_v5,
data = apartments_test,
y = apartments_test$m2.price,
label = "Random Forest")
# svm
apartments_svm_v5 <- svm(m2.price ~ construction.year + surface + floor +
no.rooms + district, data = apartments)
explain_apartments_svm_v5 <- explain(model = apartments_svm_v5,
data = apartments_test,
y = apartments_test$m2.price,
label = "Support Vector Machines")
}
model_apartments_lm <- apartments_lm_v5
model_apartments_rf <- apartments_rf_v5
model_apartments_svm <- apartments_svm_v5
| /models/models_apartments.R | no_license | zhanghonglishanzai/ema | R | false | false | 1,497 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_ICNARC.R
\name{label.icnarc.dc2}
\alias{label.icnarc.dc2}
\title{Label up ICNARC organ level code}
\usage{
label.icnarc.dc2(dt, dc.2.col = "dc.2")
}
\description{
Label up ICNARC organ level code
}
| /man/label.icnarc.dc2.Rd | no_license | alistairewj/ccfun | R | false | true | 283 | rd |
data<-read.table("household_power_consumption.txt", header=T, sep=";", stringsAsFactors=F, na.strings="?")
data1<-subset(data, Date %in% c("1/2/2007", "2/2/2007"))
data2<-as.numeric(data1$Global_active_power)
time<-strptime(paste(data1$Date, data1$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("plot3.png", width=480, height=480)
plot(time, data1$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(time, data1$Sub_metering_2, type="l", col="red")
lines(time, data1$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
| /plot3.R | no_license | flyfatasy/ExData_Plotting1 | R | false | false | 648 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GCaMP-data.R
\docType{data}
\name{GCaMP}
\alias{GCaMP}
\title{Fitted GCaMP fluorescence waveform data}
\format{
A data frame with 814 rows and 11 columns
\describe{
\item{Time}{observed times of Trial.n values, in seconds}
\item{Trial.n}{values of calcium activity signals for trial n}
...
}
}
\source{
Dr. David Root, Maier-Watkins laboratory
}
\usage{
data(GCaMP)
}
\description{
A dataset containing calcium activity and
observed times during a behavioral paradigm pilot study;
organized into 10 individual trials
}
\keyword{datasets}
| /man/GCaMP.Rd | no_license | atamalu/fluoR | R | false | true | 622 | rd |
# install these packages if they are not already
# install.packages("plyr")
# install.packages("ggplot2")
# install.packages("date")
# loads the required packages
library("plyr")
library("ggplot2")
library("date")
# imports the raw data
# the raw file is not tracked in the git repo
data <- data.frame(read.table("transactions.txt"))
colnames(data) <- c("userID", "actionType", "date", "centAmount")
# normalizing dates to integers for help later on
# and set the start date to zero for help with regression below
data$date <- as.integer(as.date(as.character(data$date), order = "ymd"))
data$date <- data$date - min(data$date)
# converting the data to a table with each row as a summary of
# a particular user's total purchasing behavior
purchases <- ddply(data[which(data$actionType == "Purchase"), ], "userID", summarise,
purchases = sum(centAmount),
avg.purchase = mean(centAmount),
num.purchases = sum(centAmount) / mean(centAmount)
)
# similar for sales
sales <- ddply(data[which(data$actionType == "Sale"), ], "userID", summarise,
sales = sum(centAmount),
avg.sale = mean(centAmount),
num.sales = sum(centAmount) / mean(centAmount)
)
# finding the date and amount of the first purchase for each user
purchases.data <- data[data$actionType == "Purchase", ]
first.purchase <- ddply(purchases.data[!duplicated(purchases.data$userID), ], "userID", summarise,
first.purchase=centAmount,
date.first.purchase = date
)
# finding the date and amount of the first sale for each user
sales.data <- data[data$actionType == "Sale", ]
first.sale <- ddply(sales.data[!duplicated(sales.data$userID), ], "userID", summarise,
first.sale=centAmount,
date.first.sale = date
)
# these lines are simply merging the tables produced above in an
# outer join on userID
lifetimeData <- merge(purchases, sales, by = "userID", all = TRUE)
lifetimeData <- merge(lifetimeData, first.purchase, by = "userID", all = TRUE)
lifetimeData <- merge(lifetimeData, first.sale, by = "userID", all = TRUE)
# adding columns for helpful booleans for each user
lifetimeData$both.actions <- !is.na(lifetimeData$purchases) & !is.na(lifetimeData$sales)
lifetimeData$is.repeat.purchaser <- lifetimeData$num.purchases > 1 & !is.na(lifetimeData$purchases)
lifetimeData$is.repeat.seller <- lifetimeData$num.sales > 1 & !is.na(lifetimeData$sales)
# splitting the dataset in two for use in regressions below
purchases.data <- lifetimeData[!is.na(lifetimeData$purchases) & !is.na(lifetimeData$first.purchase) &
lifetimeData$first.purchase > 0, ]
sales.data <- lifetimeData[!is.na(lifetimeData$sales) & !is.na(lifetimeData$first.sale) &
lifetimeData$first.sale > 0, ]
### MODELS
## purchases
# this is a multiple regression to estimate the payout over a 175 day period to a given purchaser
full.purchases.model <- lm(log1p(purchases) ~ date.first.purchase + log1p(first.purchase) +
both.actions + is.repeat.purchaser + date.first.purchase*is.repeat.purchaser,
data = purchases.data
)
# helpful statistics to estimate the proceeds to a representative user
percent.repeat <- sum(purchases.data$is.repeat.purchaser)/length(purchases.data$is.repeat.purchaser)
percent.both <- sum(purchases.data$both.actions)/length(purchases.data$both.actions)
avg.first.purchase <- mean(purchases.data$first.purchase)
# extracting the coefficients from the linear model above
full.intercept <- coefficients(full.purchases.model)[1]
full.first.purchase <- coefficients(full.purchases.model)[3]
full.both.actions <- coefficients(full.purchases.model)[4]
full.is.repeat <- coefficients(full.purchases.model)[5]
# putting it all together to get an estimate for 175-day proceeds
# note we omit the date term to represent a user purchasing today
log.estimate <- full.intercept + full.first.purchase * log1p(avg.first.purchase) +
percent.repeat * full.is.repeat + percent.both * full.both.actions
estimate <- exp(log.estimate)
# extrapolating behavior forward to get an estimate of lifetime value
# we use the cohort of the first 10 days to estimate the 175-day churn rate
cohort.10d <- purchases.data[purchases.data$date.first.purchase<10, ]
# we then have to adjust up the value by the unobserved transactions
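# NOTE (assumption): `half.life` below is never defined in this script; it is
# presumably the number of days until half of repeat purchasers buy again,
# estimated beforehand (e.g. from the repeat-purchase timing in the
# cohort.10d subset above). A purely hypothetical placeholder: half.life <- 30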
churn.rate.175 <- (sum(purchases.data$num.purchases>1)*(1 + 0.5^(175/half.life))) / sum(purchases.data$num.purchases>0)
multiplier <- sum(churn.rate.175^c(0,1,2,3))
two.year.ltv <- multiplier * estimate
## sales
# because so few sellers are not also buyers, the "both.actions" dummy is not linearly independent
# so it is not included in the model below
# the code below follows exactly the same pattern as the purchases model above
# regression model
full.sales.model <- lm(log1p(sales) ~ date.first.sale + log1p(first.sale) +
is.repeat.seller + is.repeat.seller*date.first.sale,
data = sales.data
)
# statistics for an average seller
percent.repeat.seller <- sum(sales.data$is.repeat.seller)/length(sales.data$is.repeat.seller)
avg.first.sale <- mean(sales.data$first.sale)
# extracting model coefficients
sm.intercept <- coefficients(full.sales.model)[1]
sm.first.sale <- coefficients(full.sales.model)[3]
sm.repeat <- coefficients(full.sales.model)[4]
# estimate for a 175 day period
log.sm.estimate <- sm.intercept + sm.first.sale * log1p(avg.first.sale) +
sm.repeat * percent.repeat.seller
sm.estimate <- exp(log.sm.estimate)
# extrapolating forward to a two year period
sm.churn.rate.175 <- percent.repeat.seller
sm.multiplier <- sum(sm.churn.rate.175 ^ c(0,1,2,3))
two.year.ltp <- sm.estimate * sm.multiplier
| /clv_challenge.r | no_license | tvladeck/clv_challenge | R | false | false | 5,651 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tk_xts.R
\name{tk_xts}
\alias{tk_xts}
\alias{tk_xts_}
\title{Coerce time series objects and tibbles with date/date-time columns to xts.}
\usage{
tk_xts(data, select = NULL, date_var = NULL, silent = FALSE, ...)
tk_xts_(data, select = NULL, date_var = NULL, silent = FALSE, ...)
}
\arguments{
\item{data}{A time-based tibble or time-series object.}
\item{select}{\strong{Applicable to tibbles and data frames only}.
The column or set of columns to be coerced to \code{ts} class.}
\item{date_var}{\strong{Applicable to tibbles and data frames only}.
Column name to be used to \code{order.by}.
\code{NULL} by default. If \code{NULL}, function will find the date or date-time column.}
\item{silent}{Used to toggle printing of messages and warnings.}
\item{...}{Additional parameters to be passed to \code{xts::xts()}. Refer to \code{xts::xts()}.}
}
\value{
Returns a \code{xts} object.
}
\description{
Coerce time series objects and tibbles with date/date-time columns to xts.
}
\details{
\code{tk_xts} is a wrapper for \code{xts::xts()} that is designed
to coerce \code{tibble} objects that have a "time-base" (meaning the values vary with time)
to \code{xts} class objects. There are three main advantages:
\enumerate{
\item Non-numeric columns that are not removed via \code{select} are dropped and the user is warned.
This prevents an error or coercion issue from occurring.
\item The date column is auto-detected if not specified by \code{date_var}. This takes
the effort off the user to assign a date vector during coercion.
\item \code{ts} objects are automatically coerced if a "timetk index" is present. Refer to \code{\link[=tk_ts]{tk_ts()}}.
}
The \code{select} argument can be used to select subsets
of columns from the incoming data.frame.
Only columns containing numeric data are coerced.
The \code{date_var} can be used to specify the column with the date index.
If \code{date_var = NULL}, the date / date-time column is interpreted.
Optionally, the \code{order.by} argument from the underlying \code{xts::xts()} function can be used.
The user must pass a vector of dates or date-times if \code{order.by} is used.
For non-data.frame object classes (e.g. \code{xts}, \code{zoo}, \code{timeSeries}, etc) the objects are coerced
using \code{xts::xts()}.
\code{tk_xts_} is a nonstandard evaluation method.
}
\examples{
library(tidyverse)
library(timetk)
### tibble to xts: Comparison between tk_xts() and xts::xts()
data_tbl <- tibble::tibble(
date = seq.Date(as.Date("2016-01-01"), by = 1, length.out = 5),
x = rep("chr values", 5),
y = cumsum(1:5),
z = cumsum(11:15) * rnorm(1))
# xts: Character columns cause coercion issues; order.by must be passed a vector of dates
xts::xts(data_tbl[,-1], order.by = data_tbl$date)
# tk_xts: Non-numeric columns automatically dropped; No need to specify date column
tk_xts(data_tbl)
# ts can be coerced back to xts
data_tbl \%>\%
tk_ts(start = 2016, freq = 365) \%>\%
tk_xts()
### Using select and date_var
tk_xts(data_tbl, select = y, date_var = date)
### NSE: Enables programming
date_var <- "date"
select <- "y"
tk_xts_(data_tbl, select = select, date_var = date_var)
}
\seealso{
\code{\link[=tk_tbl]{tk_tbl()}}, \code{\link[=tk_zoo]{tk_zoo()}}, \code{\link[=tk_zooreg]{tk_zooreg()}}, \code{\link[=tk_ts]{tk_ts()}}
}
| /man/tk_xts.Rd | no_license | EstellaGuan/timetk | R | false | true | 3,395 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/three-class.R, R/light-method.R, R/lines-method.R, R/points-method.R, R/text-method.R, R/three.R
\docType{class}
\name{three-class}
\alias{light}
\alias{light,three-method}
\alias{lines,three-method}
\alias{points,three-method}
\alias{show,three-method}
\alias{store}
\alias{store,three-method}
\alias{text,three-method}
\alias{three}
\alias{three-class}
\title{three class for preparing three.js scripts}
\usage{
\S4method{show}{three}(object)
\S4method{store}{three}(object, directory = NULL, filePrefix = NULL)
\S4method{light}{three}(x, pointLight = list(x = 0, y = 0, z = 0))
\S4method{lines}{three}(x, from, to, color = "0x000000", lwd = 5)
\S4method{points}{three}(x, coords, size = 5, color = "0xcccccc")
\S4method{text}{three}(x, .data, color = "0x000000", size = 15,
offset = c(x = 5, y = 5, z = 5))
three(type = "base", bgColor = "0x000000", jsUrlPrefix = NULL,
adjust = list())
}
\arguments{
\item{object}{a three object}
\item{directory}{evident}
\item{filePrefix}{evident, too}
\item{x}{a three object}
\item{pointLight}{a list with the x,y,z-coordinates of a point light}
\item{from}{data frame with coordinates of starting points of lines}
\item{to}{data frame with coordinates of end points of lines}
\item{color}{color of line / point etc.}
\item{lwd}{line width, defaults to 5}
\item{coords}{coordinates of points}
\item{size}{point size}
\item{.data}{a data.frame with three columns providing x/y/z coordinates, and rownames providing text}
\item{offset}{offset of the text}
\item{type}{character vector providing display mode ("base", "anaglyph", "stereo")}
\item{bgColor}{the background color}
\item{jsUrlPrefix}{if not provided, the js files in the package will be linked, if provided
the respective web folder will be referenced}
\item{adjust}{named list, names are replaced by value}
}
\description{
to be displayed with a browser
create a three-object
}
\section{Slots}{
\describe{
\item{\code{js}}{character, the JavaScript code}
\item{\code{json}}{a list with character strings}
\item{\code{type}}{the display mode}
\item{\code{threejsDir}}{directory with js files, may be a web folder}
\item{\code{htmlDir}}{directory where to put the files}
}}
\examples{
\dontrun{
vis <- three(type="base", bgColor="0xcccccc")
vis <- light(vis)
coords <- mtcars[, c("mpg", "wt", "cyl")]
coords <- coords * 10
colnames(coords) <- c("x", "y", "z")
vis <- points(vis, coords=coords, color="#1111FF")
vis <- three:::text(vis, .data=data.frame(coords, row.names=rownames(mtcars)), color="0xff111111")
vis
}
}
| /man/three.Rd | no_license | ablaette/three | R | false | true | 2,629 | rd |
#' @title roll replacement method
#' @description replacement method to replace the value of a given roll
#' @param x object of class "rolls"
#' @param i index of the rolls to be changed
#' @param value new value to be inputted
#' @return new rolls with replaced value
#' @export
#' @examples
#' device2 <- device()
#' roll2 <- roll(device2, 20)
#'
#' # replace the 2nd result with 2
#' roll2[2] <- 2
"[<-.rolls" <- function(x, i, value) {
if (i > x$total) {
stop("\nGiven index is out of range")
}
if (sum(value == x$sides) == 0) {
stop("\nGiven value is invalid choice of side")
}
x$rolls[i] <- value
newroll <- list(rolls = x$rolls,
sides = x$sides,
prob = x$prob,
total = x$total
)
class(newroll) <- "rolls"
newroll
}
#' @title roll extraction method
#' @description extract the value of given index in the rolls
#' @param x object of class "rolls"
#' @param i index of the rolls to be changed
#' @return value of the rolls at the given index
#' @export
#' @examples
#' device2 <- device()
#' roll2 <- roll(device2, 20)
#'
#' #get the 10th result of roll2
#' roll2[10]
"[.rolls" <- function(x, i) {
x$rolls[i]
}
#' @title roll addition method
#' @description addition method to add more rolls
#' @param x object of class "rolls"
#' @param n number of additional rolls to add
#' @return new 'rolls' object with added rolls
#' @export
#' @examples
#' device2 <- device()
#' roll2 <- roll(device2, 20)
#'
#' #add 10 more rolls
#' roll2 + 10
"+.rolls" <- function(x, n) {
if (n <= 0) {
stop("\ninvalid value. Must be positive")
}
dev <- device(x$sides, x$prob)
more_rolls <- roll(dev, n)
new_roll <- list(rolls = c(x$rolls, more_rolls$rolls),
sides = x$sides,
prob = x$prob,
total = x$total + n)
class(new_roll) <- 'rolls'
new_roll
}
| /Courseworks/Stat133_Statistical_Data_Computation/Workout/roller/R/additional-methods-roll.R | no_license | duilee/workspace | R | false | false | 1,896 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme_dpi.R
\name{theme_dpi_map}
\alias{theme_dpi_map}
\title{a deprecated ggplot2 theme developed for PDF or SVG maps}
\source{
For more information see https://github.com/hadley/ggplot2/wiki/Themes
}
\usage{
theme_dpi_map(base_size = 14, base_family = "")
}
\arguments{
\item{base_size}{numeric, specify the font size, default is 14}
\item{base_family}{character, specify the font family, this value is optional}
}
\value{
A theme object which is a list of attributes applied to a ggplot2 object.
}
\description{
This is a deprecated ggplot2 theme developed for the Wisconsin
Department of Public Instruction for making PDF maps
}
\details{
All values are optional
}
\seealso{
This uses \code{\link{unit}} from the grid package extensively.
See also \code{\link{theme_bw}} from the ggplot2 package.
}
\author{
Jared E. Knowles
}
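% Usage sketch (an assumption, not from the original file): the theme is
% deprecated, so the example is wrapped in \dontrun.
\examples{
\dontrun{
library(ggplot2)
ggplot(mtcars, aes(x = wt, y = mpg)) +
  geom_point() +
  theme_dpi_map(base_size = 12)
}
}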
| /man/theme_dpi_map.Rd | no_license | cran/eeptools | R | false | true | 911 | rd |
# instructor's script ####
##########
install.packages('microbenchmark')
library(microbenchmark)
# export in R's native format
saveRDS(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife.rds")
# export in tabular format (.csv) - the standard for interoperability
write.csv2(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife.csv")
# load the data set in R's native format
sinistrosRecife <- readRDS('bases_tratadas/sinistrosRecife.rds')
# load the data set in tabular format (.csv) - the standard for interoperability
sinistrosRecife <- read.csv2('bases_tratadas/sinistrosRecife.csv', sep = ';')
# compare the two export processes using the microbenchmark function
microbenchmark(a <- saveRDS(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife.rds"), b <- write.csv2(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife.csv"), times = 30L)
microbenchmark(a <- readRDS('bases_tratadas/sinistrosRecife.rds'), b <- read.csv2('bases_tratadas/sinistrosRecife.csv', sep = ';'), times = 10L)
# exercise ####
# This time, state one advantage and one disadvantage of each file type (native, and flat with interoperability) and add to the code one more way of exporting and reading, with the corresponding comparison using the microbenchmark function. Remember to share a github link!!!
# ANSWER:
# The microbenchmark library evaluates how well each option is optimised: it compares the result of each call and how long each function takes to run. Overall, CSV has interoperability as its advantage, while RDS is far more efficient both in storage size and in execution time.
# To continue the exercise, I will save the same data set as xlsx, Excel's native format. My expectation is that it will be heavier, besides losing interoperability.
if(require(openxlsx) == F) install.packages('openxlsx'); require(openxlsx)
write.xlsx(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife.xlsx")
microbenchmark(a <- write.xlsx(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife.xlsx"), b <- write.csv2(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife.csv"), times = 30L)
microbenchmark(a <- read.xlsx('bases_tratadas/sinistrosRecife.xlsx'), b <- read.csv2('bases_tratadas/sinistrosRecife.csv', sep = ';'), times = 10L)
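# A further sketch (my addition, not part of the original exercise): saveRDS
# compresses by default; compress = FALSE usually trades file size for speed,
# and microbenchmark can quantify that trade-off the same way as above.
microbenchmark(a <- saveRDS(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife.rds"), b <- saveRDS(sinistrosRecifeRaw, "bases_tratadas/sinistrosRecife_raw.rds", compress = FALSE), times = 30L)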
| /scripts/leitura1_exercicio.R | no_license | ullerafaga/EletivaAnaliseDeDados | R | false | false | 2,311 | r |
\name{TTLFaliasList}
\alias{TTLFaliasList}
\title{
List registered Modules for TableToLongForm
}
\description{
TableToLongForm is partially modular and can be extended in some ways
with external modules. This function is used to list currently
registered modules.
}
\details{
For more details on modules, refer to the ``Working with Modules''
document on the website:
\url{https://www.stat.auckland.ac.nz/~joh024/Research/TableToLongForm/}
}
| /man/TTLFaliasList.Rd | no_license | cran/TableToLongForm | R | false | false | 469 | rd |
## makeCacheMatrix wraps a matrix in getter and setter functions while
## cacheSolve returns the inverse using cached results or solve()
## Takes a matrix x and wraps it in getter and setter functions for the
## matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setInverse <- function(inverse) i <<- inverse
getInverse <- function() i
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Takes the output of makeCacheMatrix and returns stored inverse if there,
## otherwise finds inverse via solve() then caches and returns it
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)) {
# message("using cached inverse")
return(inv)
}
m8trix <- x$get()
inv <- solve(m8trix,...)
x$setInverse(inv)
inv
}
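## Usage sketch: the second cacheSolve() call is served from the cache.
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)  # computes the inverse via solve() and stores it
## cacheSolve(m)  # returns the stored inverse without recomputing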
| /cachematrix.R | no_license | raydante/ProgrammingAssignment2 | R | false | false | 1,054 | r |
#' `bcbioSingleCell` Additional bcbio Run Data Accessor
#'
#' @rdname bcbio
#' @name bcbio
#' @author Michael Steinbaugh
#'
#' @inheritParams AllGenerics
#' @param type Type of count data to retrieve.
#'
#' @return [bcbioSingleCell].
NULL
# Methods ====
#' @rdname bcbio
#' @export
setMethod(
"bcbio",
signature("bcbioSingleCell"),
function(object, type) {
if (missing(type)) {
return(slot(object, "bcbio"))
}
if (type %in% names(slot(object, "bcbio"))) {
slot(object, "bcbio")[[type]]
} else {
stop(paste(type, "not found"), call. = FALSE)
}
})
#' @rdname bcbio
#' @export
setMethod(
"bcbio<-",
signature(object = "bcbioSingleCell",
value = "ANY"),
function(object, type, value) {
slot(object, "bcbio")[[type]] <- value
validObject(object)
object
})
# Legacy class support ====
# Package versions prior to 0.0.19 used `callers` to define the extra bcbio
# slot. The structure of the object is otherwise the same.
#' @rdname bcbio
#' @export
setMethod(
"bcbio",
signature("bcbioSCDataSet"),
function(object, type) {
if (missing(type)) {
return(slot(object, "callers"))
}
if (type %in% names(slot(object, "callers"))) {
slot(object, "callers")[[type]]
} else {
stop(paste(type, "not found"), call. = FALSE)
}
})
#' @rdname bcbio
#' @export
setMethod(
"bcbio<-",
signature(object = "bcbioSCDataSet",
value = "ANY"),
function(object, type, value) {
slot(object, "callers")[[type]] <- value
validObject(object)
object
})
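# Accessor sketch (hedged: `bcb` stands for an existing bcbioSingleCell object;
# construction is not shown and the available slot names depend on the run):
# bcbio(bcb)                       # entire list stored in the extra slot
# bcbio(bcb, "tx2gene")            # one element by name; errors if absent
# bcbio(bcb, "notes") <- "QC pass" # replacement method, then validObject()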
| /R/methods-bcbio.R | permissive | larryyang1980/bcbioSingleCell | R | false | false | 1,721 | r |
library(ggplot2)
library(dplyr)
#1
ggplot(mpg, aes(x = cty, y= hwy)) +geom_point(col = "blue")
ggsave("문1.png")
#2
cls <- ggplot(mpg, aes(x= class))
cls + geom_bar(aes(fill = drv))
ggsave("문2.png")
#3
options(scipen = 99)
ggplot(midwest, aes(x= poptotal, y = popasian)) + geom_point() + xlim(0, 500000) + ylim(0,10000)
ggsave("문3.png")
#4
mpg <- as.data.frame(ggplot2::mpg)
mpg <- mpg %>%
filter(class == "compact" | class == "subcompact" | class == "suv")
ggplot(data = mpg, aes(x = class, y = hwy)) + geom_boxplot()
ggsave("문4.png")
#5
(prodd <- read.table("product_click.log", col.names = c("Purdate", "click")))
ggplot(prodd, aes(x = click)) + geom_bar(aes(fill = click))
ggsave("문5.png")
#6
(prodd <- read.table("product_click.log", col.names = c("Purdate", "click")))
prodd %>%
group_by(click) %>%
summarise(n = n())
prodd$Purdate <- strptime(prodd$Purdate, "%Y%m%d%H%M")
prodd$weekdays <- weekdays(prodd$Purdate)
ggplot(data = prodd, aes(x = weekdays)) + geom_bar(aes(fill = weekdays))
ggsave("문6.png")
| /R_training/실습제출/최승민/2019-11-07/ggplot2_lab.R | no_license | BaeYS-marketing/R | R | false | false | 1,124 | r |
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{ra2}
\alias{ra2}
\title{RA2 Function}
\usage{
ra2(m = matrix(rpois(80, 1), nrow = 10))
}
\description{
Randomizes a numeric utilization matrix m by replacing non-zero elements with a random uniform [0,1]
}
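% Usage sketch (an assumption, mirroring the default shown in \usage):
\examples{
ra2(m = matrix(rpois(80, 1), nrow = 10))
}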
| /man/RA2.Rd | no_license | littlecharlotte/EcoSimR | R | false | false | 266 | rd |
\name{stackDatabases}
\alias{stackDatabases}
\title{Combine edges from databases into a data.table}
\description{
Retrieves edges from the specified databases and stacks them into one data.table. This is a helper function used in \code{prepareAdjMat} and should not be called by the user.}
\usage{
stackDatabases(databases)
}
\arguments{
\item{databases}{Character vector of databases to compile. Should be one of the hsapiens options from \code{graphite::pathwayDatabases()}}
}
\details{
This function compiles all the edges from all databases specified into one data.table
}
\value{
A data.table with columns:
\item{database}{Which database the edge comes from}
\item{src}{Source gene}
\item{src_type}{Source gene identifier type}
\item{dest}{Destination gene}
\item{dest_type}{Destination gene identifier type}
\item{direction}{Direction of edge. Either Directed or Undirected}
}
\references{
Ma, J., Shojaie, A. & Michailidis, G. (2016) Network-based pathway enrichment analysis with incomplete network information. Bioinformatics 32(20):3165--3174.
}
\author{
Michael Hellstern
}
\seealso{
\code{\link{obtainEdgeList}}
}
| /man/stackDatabases.Rd | no_license | mikehellstern/netgsa | R | false | false | 1,134 | rd |
test_that("Test benchmark_io", {
skip_on_cran()
expect_error(benchmark_io(size = 1))
res = benchmark_io(runs = 1, size = 5)
res2 = benchmark_io(runs = 1, size = 5, cores = 2)
expect_equal(nrow(res), 2)
expect_equal(ncol(res), 6)
expect_equal(nrow(res2), 2)
expect_equal(ncol(res2), 6)
})
| /tests/testthat/test-benchmark_io.R | no_license | izahn/benchmarkme | R | false | false | 304 | r |
#Author: Roel Elbers MSc.
#email: r.j.h.elbers@umcutrecht.nl
#Organisation: UMC Utrecht, Utrecht, The Netherlands
#Date: 15/07/2021
if(SUBP){
SCHEME_04 <- copy(subpopulation_meanings)
SCHEME_04 <- SCHEME_04[, ':=' (file_in = paste0(subpopulations,"_source_population.rds"), file_out = paste0(subpopulations,"_study_population.rds"), folder_out = "populations") ]
}
if(!SUBP) SCHEME_04 <- data.frame(subpopulations = c("ALL"),file_in = "ALL_source_population.rds", file_out = "ALL_study_population.rds",folder_out = "populations")
SCHEME_04$nrows <- as.integer(NA)
SCHEME_04$ncols <- as.integer(NA)
SCHEME_04$ncolsneeded <- 23
FlowChartSourcetoStudy <- list()
for(i in 1:nrow(SCHEME_04)){
SOURCE <- readRDS(paste0(std_pop_tmp, SCHEME_04[["file_in"]][i]))
print('Exclude patients according to SelectionCriteria specified in to_run file')
for (j in 1:length(SelectionCriteria)){
before <- nrow(SOURCE)
SOURCE <- SOURCE[eval(SelectionCriteria[[j]]),]
after <- nrow(SOURCE)
FlowChartSourcetoStudy[[paste0(names(SelectionCriteria[j]),"_",SCHEME_04[["subpopulations"]][i])]]$step <- "04_CreateStudyPopulation"
FlowChartSourcetoStudy[[paste0(names(SelectionCriteria[j]),"_",SCHEME_04[["subpopulations"]][i])]]$population <- SCHEME_04[["subpopulations"]][i]
FlowChartSourcetoStudy[[paste0(names(SelectionCriteria[j]),"_",SCHEME_04[["subpopulations"]][i])]]$before <- before
FlowChartSourcetoStudy[[paste0(names(SelectionCriteria[j]),"_",SCHEME_04[["subpopulations"]][i])]]$after <- after
rm(before,after)
gc()
}
FlowChart3 <- list()
print(paste0("Set start_follow up date and end follow_up_date ",SCHEME_04[["subpopulations"]][i]))
STUDY_POPULATION <- SOURCE[,start_follow_up := max(start_study_date,op_start_date+lookback_period,date_min),by = list(row.names(SOURCE))]
STUDY_POPULATION <- STUDY_POPULATION[,end_follow_up := min(end_study_date,op_end_date,date_creation,recommended_end_date,date_max),by = list(row.names(SOURCE))]
rm(SOURCE)
gc()
before <- nrow(STUDY_POPULATION)
STUDY_POPULATION <- STUDY_POPULATION[start_follow_up < end_follow_up ,]
STUDY_POPULATION <- STUDY_POPULATION[(start_follow_up - op_start_date) >= lookback_period ,]
after <- nrow(STUDY_POPULATION)
FlowChart3[[paste0("End_look_back_period_after_end_follow_up_",SCHEME_04[["subpopulations"]][i])]]$step <- "04_CreateStudyPopulation"
FlowChart3[[paste0("End_look_back_period_after_end_follow_up_",SCHEME_04[["subpopulations"]][i])]]$population <- SCHEME_04[["subpopulations"]][i]
FlowChart3[[paste0("End_look_back_period_after_end_follow_up_",SCHEME_04[["subpopulations"]][i])]]$before <- before
FlowChart3[[paste0("End_look_back_period_after_end_follow_up_",SCHEME_04[["subpopulations"]][i])]]$after <- after
rm(before,after)
gc()
print(paste0("Calculate age at start and end follow up ",SCHEME_04[["subpopulations"]][i]))
STUDY_POPULATION <- STUDY_POPULATION[, ':='
(age_start_follow_up = floor(time_length(interval(birth_date, start_follow_up),"year")),
age_end_follow_up = floor(time_length(interval(birth_date, end_follow_up),"year"))
)
]
STUDY_POPULATION <- STUDY_POPULATION[, Population := SCHEME_04[["subpopulations"]][i]]
SCHEME_04[i,"nrows"] <- nrow(STUDY_POPULATION)
SCHEME_04[i,"ncols"] <- ncol(STUDY_POPULATION)
saveRDS(STUDY_POPULATION,file = paste0(populations_dir,SCHEME_04[["file_out"]][i]))
rm(STUDY_POPULATION)
gc()
}
saveRDS(FlowChart3,file = paste0(std_pop_tmp,"FlowChart3.rds"))
saveRDS(FlowChartSourcetoStudy,file = paste0(std_pop_tmp,"FlowChartSourcetoStudy.rds"))
saveRDS(SCHEME_04,file = paste0(std_pop_tmp,"SCHEME_04.rds"))
rm(FlowChart3,FlowChartSourcetoStudy,SCHEME_04)
gc()
| /Level_3_to_be_deployed1.0/p_steps/Old/Step_04_CreateStudyPopulation_old.R | permissive | vjolahoxhaj/LOT4STUDIES | R | false | false | 3,988 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lognorm.R
\name{getLognormMoments}
\alias{getLognormMoments}
\alias{getLognormMedian}
\alias{getLognormMode}
\title{Compute summary statistics of a log-normal distribution}
\usage{
getLognormMoments(mu, sigma)
getLognormMedian(mu, sigma)
getLognormMode(mu, sigma)
}
\arguments{
\item{mu}{numeric vector: location parameter}
\item{sigma}{numeric vector: scale parameter}
}
\value{
for \code{getLognormMoments} a numeric matrix with columns
\code{mean} (expected value at original scale)
, \code{var} (variance at original scale)
, and \code{cv} (coefficient of variation: sqrt(var)/mean).
For the other functions a numeric vector of the required summary.
}
\description{
Compute summary statistics of a log-normal distribution
}
\section{Functions}{
\itemize{
\item \code{getLognormMoments}: get the expected value, variance, and coefficient of variation
\item \code{getLognormMedian}: get the median
\item \code{getLognormMode}: get the mode
}}
\examples{
# start by estimating lognormal parameters from moments
.mean <- 1
.var <- c(1.3,2)^2
parms <- getParmsLognormForMoments(.mean, .var)
#
# computed moments must equal previous ones
(ans <- getLognormMoments(parms[,"mu"], parms[,"sigma"]))
cbind(.var, ans[,"var"])
#
getLognormMedian(mu = log(1), sigma = log(2))
getLognormMode(mu = log(1), sigma = c(log(1.2),log(2)))
}
\references{
\code{Limpert E, Stahel W & Abbt M (2001)
Log-normal Distributions across the Sciences: Keys and Clues.
Oxford University Press (OUP) 51, 341,
10.1641/0006-3568(2001)051[0341:lndats]2.0.co;2}
}
| /man/getLognormMoments.Rd | no_license | RCFilm/lognorm | R | false | true | 1,638 | rd |
cutGenome <- function(bs, pattern, overhang=4L)
# This finds the target cut sites in the genome. It currently only searches the
# sense strand, which is fine because the pattern is an inverse palindrome.
# Otherwise, there may be some problems as you'd need to search the reverse
# strand as well.
#
# written by Aaron Lun
# a long time ago.
# last modified 22 March 2017
{
if (nchar(pattern)%%2L!=0) { stop("recognition site must be even in size") }
ps <- DNAString(pattern)
if (reverseComplement(ps)!=ps) { stop("recognition site must be an inverse palindrome") }
overhang <- as.integer(overhang)
if (overhang > nchar(pattern) || overhang < 0L || overhang%%2L!=0) { stop("overhang must be a non-negative even integer that is not greater than pattern length") }
remainder <- (nchar(pattern)-overhang)/2L
if (is(bs, "BSgenome")) {
ref.names <- seqnames(bs)
gen <- genome(bs)
} else {
bs <- readDNAStringSet(bs)
ref.names <- names(bs)
gen <- NA
}
nchrs <- length(ref.names)
original <- vector("list", nchrs)
for (i in seq_len(nchrs)) {
chr <- ref.names[i]
x <- matchPattern(pattern, bs[[chr]])
match.start <- start(x)
if (is.unsorted(match.start)) { match.start <- sort(match.start) }
chrlen <- length(bs[[chr]])
starts <- c(1L, match.start+remainder)
ends <- c(match.start+remainder-1L+overhang, chrlen)
original[[i]] <- GRanges(chr, IRanges(starts, ends))
}
suppressWarnings(original <- do.call(c, original))
seqlevels(original) <- ref.names
suppressWarnings(seqinfo(original) <- Seqinfo(ref.names, seqlengths=seqlengths(bs), genome=gen))
return(original)
}
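# Usage sketch (assumes a BSgenome package is installed; "AAGCTT" is the
# palindromic HindIII site, which leaves a 4 nt overhang):
# library(BSgenome.Hsapiens.UCSC.hg19)
# hind3 <- cutGenome(BSgenome.Hsapiens.UCSC.hg19, pattern="AAGCTT", overhang=4L)
# hind3  # GRanges of restriction fragments across all chromosomes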
| /R/cutGenome.R | no_license | hwick/diffHic | R | false | false | 1,651 | r |
methods::setClass(
Class = "polynomial",
contains = c("numbers", "oldClass"),
slots = c(exponents = "integer"),
prototype = methods::prototype(.S3Class = "polynomial")
)
methods::setMethod(
f = "coerce",
signature = c(from = "ANY", to = "polynomial"),
definition = function (from, to, strict = TRUE)
{
value <- as.polynomial(from)
if (strict)
attributes(value) <- attributes(value)[c("names", "class", "exponents")]
value
})
polynomial <- function (length = 0L)
.polynomial(numeric(length))
as.polynomial <- function (x, exponents = NULL, ...)
{
if (missing(x))
return(polynomial(0))
if (inherits(x, "polynomial"))
return(x)
x <- structure(as.numbers(x = x, ...), dim = dim(x), dimnames = dimnames(x),
names = names(x), class = "polynomial")
exponents(x) <- exponents
x
}
is.polynomial <- function (x)
inherits(x, "polynomial")
.polynomial <- function (xx, exponents = integer(0), cl = "polynomial")
{
class(xx) <- cl
attr(xx, "exponents") <- exponents
xx
}
is.numeric.polynomial <- function (x)
FALSE
is.complex.polynomial <- function (x)
FALSE
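# Abel polynomial A_n(x; a) = x * (x - a*n)^(n - 1); coefficients are returned
# in increasing powers of x, e.g. Abel(2) == c(0, -2, 1), i.e. x^2 - 2x.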
Abel <- function (n, a = 1)
{
n <- as.scalar.integer(n)
if (is.na(n) || n < 0L)
stop("'n' must be a non-negative integer")
if (n == 0L) {
1
}
else if (n == 1L) {
c(0, 1)
}
else {
a <- as.scalar.number(a)
k <- (n - 1L):0
c(0, choose(n - 1, k) * (-a * n)^k)
}
}
Bessel <- function (n)
{
n <- as.scalar.integer(n)
if (is.na(n) || n < 0L)
stop("'n' must be a non-negative integer")
vapply(X = 0:n, FUN = function(k) {
prod(seq.int(to = n + k, length.out = 2L * k)/c(seq_len(k), rep(2L, k)))
}, FUN.VALUE = 0)
}
rBessel <- function (n)
rev(Bessel(n))
Hermite <- function (n)
.Call(C_hermite, n)
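# .Chebyshev(x) is a closure factory for the shared Chebyshev recurrence
# P_{n+1}(t) = 2*t*P_n(t) - P_{n-1}(t) with P_0 = 1 and P_1(t) = x*t:
# x = 1 yields the first kind (T_n), x = 2 the second kind (U_n).
# Coefficients (increasing powers) are memoised in the closure's Ts list.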
.Chebyshev <- function (x)
{
value <- function(n) NULL
body(value) <- substitute({
n <- as.scalar.integer(n)
if (is.na(n) || n < 0L)
stop("'n' must be a non-negative integer")
Ts <- vector("list", length = n + 1L)
Ts[1:2] <- list(1, c(0, x))
fun <- function(n) {
if (!is.null(t <- Ts[[n]]))
t
else (Ts[[n]] <<- c(0, 2 * fun(n - 1L)) - c(fun(n - 2L), 0, 0))
}
fun(n + 1L)
}, list(x = x))
environment(value) <- parent.frame()
return(value)
}
Chebyshev1 <- ChebyshevT <- .Chebyshev(1)
Chebyshev2 <- ChebyshevU <- .Chebyshev(2)
remove(.Chebyshev)
gcd <- function (..., na.rm = FALSE)
.External(C_gcd, na.rm, ...)
pgcd <- function (..., na.rm = FALSE)
.External(C_pgcd, na.rm, ...)
lcm <- function (..., na.rm = FALSE)
.External(C_lcm, na.rm, ...)
plcm <- function (..., na.rm = FALSE)
.External(C_plcm, na.rm, ...)
coprime <- function (...)
gcd(...) == 1L
pcoprime <- function (...)
pgcd(...) == 1L
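# Cyclotomic(n): coefficients of the n-th cyclotomic polynomial, obtained by
# multiplying the linear factors (t - zeta) over the primitive n-th roots of
# unity and rounding the numerically real result back to integers.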
Cyclotomic <- function (n)
{
n <- as.scalar.integer(n)
if (is.na(n) || n <= 0L)
stop("'n' must be a positive integer")
k <- seq_len(n)
k <- lapply(X = -exp(2i * pi * k[pcoprime(k, n)]/n), FUN = "c", 1)
value <- k[[1L]]
fun <- function(x, y) {
value <- numeric(length(x) + 1L)
value[-length(value)] <- value[-length(value)] + x * y[1L]
value[-1L] <- value[-1L] + x * y[2L]
value
}
for (y in k[-1L]) value <- fun(value, y)
round(Re(value))
}
degree <- function (x)
{
if (!length(k <- attr(x, "exponents")))
max(0L, length(x) - 1L)
else max(0L, exponents(x), na.rm = TRUE)
}
exponents <- function (x)
{
if (length(value <- attr(x, "exponents")))
value
else seq.int(0L, along.with = x)
}
"exponents<-" <- function (x, value)
{
if (!is.polynomial(x))
x <- as.polynomial(x)
if (is.null(value)) {
attr(x, "exponents") <- integer(0)
return(x)
}
value <- as.integer(value)
if (length(value) != length(x))
stop("invalid 'exponents' length")
else if (anyNA(value))
stop("missing values in 'exponents' are not allowed")
else if (any(value < 0L))
stop("negative values in 'exponents' are not allowed")
attr(x, "exponents") <- value
x
}
as.body <- function (x, var = "x", ...)
{
x <- reduce(x = x, ...)
if (is.expression(var))
var <- aslength1(var)[[1L]]
if (!is.call(var))
var <- as.symbol(var)
if (!length(x))
return(substitute(fun(length(var)), list(fun = if (mode(x) ==
"complex") as.symbol("complex") else as.symbol("numeric"),
var = var)))
dim(x) <- dimnames(x) <- names(x) <- NULL
a <- coef(x)
k <- exponents(x)
if (length(x) == 1L) {
if (k != 1L)
var <- call("^", var, Re(k))
return(if (is.na(a) || is.complex(x))
call("*", a, var)
else if (a == 1)
var
else if (a == -1)
call("-", var)
else call("*", a, var))
}
vars <- lapply(X = Re(k), FUN = function(y) {
if (y == 1)
var
else if (!y)
NULL
else call("^", var, y)
})
if (all(k == 0L))
vars[[length(vars)]] <- call("^", var, 0)
if (is.numeric(a)) {
signs <- sign(a)[-1L]
signs[is.na(signs)] <- 1
a <- c(1, signs) * a
}
else {
signs <- sign(Re(a))[-1L]
i <- is.na(signs) | signs == 0
signs[i] <- sign(Im(a))[-1L][i]
signs[is.na(signs)] <- 1
a <- complex(real = c(1, signs) * Re(a),
imaginary = c(1, signs) * Im(a))
a <- lapply(a, function(x) {
if (!is.na(x) && !Im(x))
Re(x)
else x
})
if (!any(vapply(a, "is.complex", NA)))
a[[length(a)]] <- a[[length(a)]] + 0i
}
vars <- .mapply(function(e1, e2) {
if (is.null(e2))
e1
else if (!is.na(e1) && e1 == 1)
e2
else call("*", e1, e2)
}, list(a, vars), NULL)
i <- signs == -1
signs[i] <- "-"
signs[!i] <- "+"
value <- vars[[1L]]
for (i in seq_along(signs)) {
value <- call(signs[[i]], value, vars[[i + 1L]])
}
return(value)
}
integral <- function (x, lower, upper, C, ...)
{
if (!is.polynomial(x))
x <- as.polynomial(x)
a <- coef(x)
k <- exponents(x) + 1L
if (missing(C))
return(diff(predict(object = .polynomial(a/k, k), newdata = c(lower, upper), ...)))
C <- as.scalar.number(C)
if (!length(attr(x, "exponents")))
.polynomial(c(C, a/k))
else .polynomial(c(C, a/k), c(0L, k))
}
removeInvalidExponents <- function (x)
{
if (!length(k <- attr(x, "exponents")))
x
else if (any(i <- is.na(k) | k < 0L))
x[!i]
else x
}
sortExponents <- function (x, decreasing = FALSE)
{
x[order(exponents(x), decreasing = decreasing)]
}
fixDuplicateExponents <- function (x)
{
if (!length(k <- attr(x, "exponents"))) {
x
}
else if (anyDuplicated(k)) {
dim(x) <- dimnames(x) <- names(x) <- NULL
a <- coef(x)
dk <- duplicated(k)
u <- unique(k[dk])
i <- k %in% u
a[match(u, k)] <- vapply(X = split(a[i], factor(k[i], u)),
FUN = "sum", FUN.VALUE = vector(mode = mode(a), length = 1L))
.polynomial(a[!dk], k[!dk])
}
else x
}
withDefaultExponents <- function (x)
{
if (!length(k <- attr(x, "exponents")))
return(x)
value <- polynomial(max(k, na.rm = TRUE) + 1L)
value[k + 1L] <- coef(x)
return(value)
}
removeUnwantedCoefficients <- function (x, zero.rm = TRUE, na.rm = FALSE, finite = FALSE)
{
i <- if (zero.rm) {
if (finite)
is.finite(x) & x
else if (na.rm)
!is.na(x) & x
else is.na(x) | x
}
else if (finite) {
is.finite(x)
}
else if (na.rm) {
!is.na(x)
}
else return(x)
if (all(i))
x
else x[i]
}
maybeDefaultExponents <- function (x)
{
if (length(x) == 0L)
x
else if ((degree(x) + 1L)/2 <= length(x))
withDefaultExponents(x)
else x
}
reduce <- function (x, zero.rm = TRUE, na.rm = FALSE, finite = FALSE, dflt.exp = FALSE,
fix.dup.exp = is.na(dflt.exp) || dflt.exp, decreasing = NA,
...)
{
if (!is.polynomial(x))
x <- as.polynomial(x = x, ...)
x <- removeInvalidExponents(x)
x <- removeUnwantedCoefficients(x, zero.rm = zero.rm, na.rm = na.rm,
finite = finite)
dflt.exp <- as.scalar.logical(dflt.exp)
if (fix.dup.exp) {
x <- fixDuplicateExponents(x)
x <- removeUnwantedCoefficients(x, zero.rm = zero.rm,
na.rm = na.rm, finite = finite)
if (is.na(dflt.exp))
x <- maybeDefaultExponents(x)
else if (dflt.exp)
x <- withDefaultExponents(x)
}
else if (!anyDuplicated(exponents(x))) {
if (is.na(dflt.exp))
x <- maybeDefaultExponents(x)
else if (dflt.exp)
x <- withDefaultExponents(x)
}
decreasing <- as.scalar.logical(decreasing)
if (is.na(decreasing))
x
else sortExponents(x, decreasing = decreasing)
}
as.function.polynomial <- function (x, envir = parent.frame(), xname = "x", var = xname,
...)
{
xname <- as.symbol(xname)
value <- function(x) NULL
names(formals(value)) <- as.character(xname)
body(value) <- as.body(x = x, var = var, ...)
environment(value) <- envir
return(value)
}
as.list.polynomial <- function (x, ...)
{
value <- .mapply(.polynomial, list(coef(x), exponents(x)), NULL)
names(value) <- names(x)
return(value)
}
print.polynomial <- function (x, ...)
{
if (length(x) == 0L) {
if (length(d <- dim(x)) > 1L)
cat(sprintf("<%s polynomial>\n", paste0(d, collapse = " x ")))
else if (!is.null(names(x)))
cat("named polynomial(0)\n")
else cat("polynomial(0)\n")
}
else {
xx <- coef(x)
keepAttrs <- setdiff(names(attributes(x)), c("exponents",
"class"))
attributes(xx)[keepAttrs] <- attributes(x)[keepAttrs]
print(xx)
cat("Exponents:\n")
print(exponents(x))
}
invisible(x)
}
"-.polynomial" <- function (e1, e2)
{
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (nargs() == 1L)
return(.polynomial(-coef(e1), attr(e1, "exponents")))
if (!is.polynomial(e2))
e2 <- as.polynomial(e2)
if (!length(e2)) {
e1
}
else if (!length(e1)) {
.polynomial(-coef(e2), attr(e2, "exponents"))
}
else if (!length(attr(e1, "exponents")) && !length(attr(e2, "exponents"))) {
e1 <- coef(e1)
e2 <- coef(e2)
if (length(e1) > length(e2)) {
names(e2) <- NULL
e2 <- c(e2, rep(0, length(e1) - length(e2)))
}
else if (length(e1) < length(e2)) {
names(e1) <- NULL
e1 <- c(e1, rep(0, length(e2) - length(e1)))
}
.polynomial(e1 - e2)
}
else reduce(.polynomial(c(coef(e1), -coef(e2)), c(exponents(e1), exponents(e2))), zero.rm = FALSE, fix.dup.exp = TRUE)
}
"*.polynomial" <- function (e1, e2)
{
if (nargs() == 1L)
stop("invalid unary operator")
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (!is.polynomial(e2))
e2 <- as.polynomial(e2)
if (!length(e1) || !length(e2)) {
polynomial(0)
}
else if (!length(attr(e1, "exponents")) && !length(attr(e2, "exponents"))) {
e1 <- coef(e1)
e2 <- coef(e2)
if (length(e1) != 1L && length(e2) != 1L) {
value <- numeric(length(e1) + length(e2) - 1L)
n <- seq.int(0L, along.with = e1)
for (i in seq_along(e2)) {
value[n + i] <- value[n + i] + e1 * e2[[i]]
}
.polynomial(value)
}
else .polynomial(e1 * e2)
}
else reduce(.polynomial(do.call("c", c(lapply(coef(e1), "*", coef(e2)), list(use.names = FALSE))),
do.call("c", c(lapply(exponents(e1), "+", exponents(e2)), list(use.names = FALSE)))),
zero.rm = FALSE, fix.dup.exp = TRUE)
}
"/.polynomial" <- function (e1, e2)
{
if (nargs() == 1L)
stop("invalid unary operator")
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (!is.polynomial(e2))
e2 <- as.polynomial(e2)
if (!length(e1) || !length(e2))
return(polynomial(0))
if (!length(attr(e2, "exponents")) && length(e2) == 1L)
return(.polynomial(coef(e1)/coef(e2), attr(e1, "exponents")))
e1 <- rev(coef(reduce(e1, dflt.exp = TRUE)))
e2 <- rev(coef(reduce(e2, dflt.exp = TRUE)))
if (!length(e1) || !length(e2))
return(polynomial(0))
len <- max(0L, length(e1) - length(e2) + 1L)
value <- vector(mode(e1[0L] + e2[0L]), len)
for (i in seq.int(to = 1L, by = -1L, length.out = len)) {
value[[i]] <- e1[[1L]]/e2[[1L]]
e1 <- e1[-1L] - c(value[[i]] * e2[-1L], integer(i - 1L))
}
if (anyNA(e1) || any(e1 != 0))
warning("non-zero remainder in polynomial division")
.polynomial(value)
}
"[.polynomial" <- function (x, ...)
{
k <- exponents(x)
a <- x <- coef(x)
suppressWarnings(storage.mode(a) <- "integer")
a[] <- seq_along(a)
.polynomial(x[...], k[as.vector(a[...])])
}
"[[.polynomial" <- function (x, ...)
{
k <- exponents(x)
a <- x <- coef(x)
a[] <- seq_along(a)
.polynomial(x[[...]], k[[as.vector(a[[...]])]])
}
"+.polynomial" <- function (e1, e2)
{
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (nargs() == 1L)
return(.polynomial(+coef(e1), attr(e1, "exponents")))
if (!is.polynomial(e2))
e2 <- as.polynomial(e2)
if (!length(e2)) {
e1
}
else if (!length(e1)) {
.polynomial(+coef(e2), attr(e2, "exponents"))
}
else if (!length(attr(e1, "exponents")) && !length(attr(e2, "exponents"))) {
e1 <- coef(e1)
e2 <- coef(e2)
if (length(e1) > length(e2)) {
names(e2) <- NULL
e2 <- c(e2, rep(0, length(e1) - length(e2)))
}
else if (length(e1) < length(e2)) {
names(e1) <- NULL
e1 <- c(e1, rep(0, length(e2) - length(e1)))
}
.polynomial(e1 + e2)
}
else reduce(.polynomial(c(coef(e1), coef(e2)), c(exponents(e1), exponents(e2))), zero.rm = FALSE, fix.dup.exp = TRUE)
}
"^.polynomial" <- function (e1, e2)
{
if (nargs() == 1L)
stop("invalid unary operator")
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (is.polynomial(e2)) {
if (length(attr(e2, "exponents"))) {
tmp <- coef(e2)[!exponents(e2) %in% 0L]
if (anyNA(tmp) || any(tmp != 0))
warning("non-constant parts of 'e2' discarded in coercion")
e2 <- as.integer(sum(coef(e2)[exponents(e2) %in% 0L]))
}
else if (length(e2)) {
e2 <- as.integer(coef(e2)[[1L]])
}
else return(polynomial(0))
}
else if (length(e2)) {
e2 <- as.scalar.integer(e2)
}
else return(polynomial(0))
if (is.na(e2)) {
e1[] <- NA
return(e1)
}
if (e2 < 0L) {
warning("'e2' should be a non-negative integer")
return(polynomial(0))
}
if (e2 == 0L)
return(as.polynomial(1))
if (e2 == 1L)
return(e1)
value <- e1
for (i in logical(e2 - 1L)) value <- value * e1
return(value)
}
plot.polynomial <- function (x, y = NULL, to = NULL, from = y, n = 101, add = FALSE,
type = "l", xname = "x", xlab = xname, ylab = NULL, log = NULL,
xlim = NULL, ...)
{
if (dev.cur() == 1L && !isFALSE(add)) {
warning("'add' will be ignored as there is no existing plot")
add <- FALSE
}
addF <- isFALSE(add)
if (is.null(ylab) && !isTRUE(add))
try(ylab <- as.body(signif(x, 7L), var = xname, finite = TRUE))
if (is.null(from) || is.null(to)) {
xl <- if (!is.null(xlim))
xlim
else if (!addF) {
pu <- par("usr")[1:2]
if (par("xaxs") == "r")
pu <- extendrange(pu, f = -1/27)
if (par("xlog"))
10^pu
else pu
}
else c(0, 1)
if (is.null(from))
from <- xl[1L]
if (is.null(to))
to <- xl[2L]
}
lg <- if (length(log))
log
else if (!addF && par("xlog"))
"x"
else ""
y <- as.polynomial(x)
if (grepl("x", lg, fixed = TRUE)) {
if (from <= 0 || to <= 0)
stop("'from' and 'to' must be > 0 with log=\"x\"")
x <- exp(seq.int(log(from), log(to), length.out = n))
}
else x <- seq.int(from, to, length.out = n)
y <- predict(y, x, finite = TRUE)
if (isTRUE(add))
lines(x = x, y = y, type = type, ...)
else plot(x = x, y = y, type = type, xlab = xlab, ylab = ylab,
xlim = xlim, log = lg, ...)
invisible(list(x = x, y = y))
}
points.polynomial <- function (x, y = NULL, to = NULL, from = y, n = 101, add = FALSE,
type = "p", ...)
{
if (dev.cur() == 1L)
stop("plot.new has not been called yet")
plot.polynomial(x = x, y = y, to = to, from = from, n = n,
add = TRUE, type = type, ...)
}
lines.polynomial <- function (x, y = NULL, to = NULL, from = y, n = 101, add = FALSE,
type = "l", ...)
{
if (dev.cur() == 1L)
stop("plot.new has not been called yet")
plot.polynomial(x = x, y = y, to = to, from = from, n = n,
add = TRUE, type = type, ...)
}
coef.polynomial <- function (object, ...)
object@.Data
"coef<-" <- "coefficients<-" <- function (object, ..., value)
UseMethod("coef<-")
"coef<-.default" <- function (object, ..., value)
{
if (length(object) != length(value))
stop(sprintf("invalid 'value', length of value [%d] should be equal to length of object [%d]",
length(value), length(object)))
if (!is.polynomial(object))
object <- as.polynomial(object)
value <- as.numbers(x = value, ...)
attributes(value) <- c(attributes(object), class = "polynomial")
return(value)
}
deriv.polynomial <- function (expr, ...)
{
k <- exponents(expr)
reduce(.polynomial(k * coef(expr), k - 1L), zero.rm = FALSE)
}
predict.polynomial <- function (object, newdata = seq.int(0, 1, 0.01), ...)
{
object <- reduce(x = object, ...)
value <- vector(mode = mode(object), length = length(newdata))
for (e2 in .mapply(function(a, k) {
a * newdata^k
}, list(coef(object), exponents(object)), NULL)) {
value <- value + e2
}
return(value)
}
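# Quick tour (sketch) of the class defined above:
# p <- as.polynomial(c(1, -3, 2))  # 1 - 3x + 2x^2 (increasing powers)
# q <- as.polynomial(c(-1, 1))     # x - 1
# p / q                            # exact division: c(-1, 2), i.e. 2x - 1
# predict(p, newdata = 0:3)        # evaluates to 1 0 3 10
# as.function(p)(10)               # compiled closure: 171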
| /R/main.R | no_license | cran/polynomial | R | false | false | 19,552 | r | methods::setClass(
Class = "polynomial",
contains = c("numbers", "oldClass"),
slots = c(exponents = "integer"),
prototype = methods::prototype(.S3Class = "polynomial")
)
methods::setMethod(
f = "coerce",
signature = c(from = "ANY", to = "polynomial"),
definition = function (from, to, strict = TRUE)
{
value <- as.polynomial(from)
if (strict)
attributes(value) <- attributes(value)[c("names", "class", "exponents")]
value
})
polynomial <- function (length = 0L)
.polynomial(numeric(length))
as.polynomial <- function (x, exponents = NULL, ...)
{
if (missing(x))
return(polynomial(0))
if (inherits(x, "polynomial"))
return(x)
x <- structure(as.numbers(x = x, ...), dim = dim(x), dimnames = dimnames(x),
names = names(x), class = "polynomial")
exponents(x) <- exponents
x
}
is.polynomial <- function (x)
inherits(x, "polynomial")
.polynomial <- function (xx, exponents = integer(0), cl = "polynomial")
{
class(xx) <- cl
attr(xx, "exponents") <- exponents
xx
}
is.numeric.polynomial <- function (x)
FALSE
is.complex.polynomial <- function (x)
FALSE
Abel <- function (n, a = 1)
{
n <- as.scalar.integer(n)
if (is.na(n) || n < 0L)
stop("'n' must be a non-negative integer")
if (n == 0L) {
1
}
else if (n == 1L) {
c(0, 1)
}
else {
a <- as.scalar.number(a)
k <- (n - 1L):0
c(0, choose(n - 1, k) * (-a * n)^k)
}
}
Bessel <- function (n)
{
n <- as.scalar.integer(n)
if (is.na(n) || n < 0L)
stop("'n' must be a non-negative integer")
vapply(X = 0:n, FUN = function(k) {
prod(seq.int(to = n + k, length.out = 2L * k)/c(seq_len(k), rep(2L, k)))
}, FUN.VALUE = 0)
}
rBessel <- function (n)
rev(Bessel(n))
Hermite <- function (n)
.Call(C_hermite, n)
.Chebyshev <- function (x)
{
value <- function(n) NULL
body(value) <- substitute({
n <- as.scalar.integer(n)
if (is.na(n) || n < 0L)
stop("'n' must be a non-negative integer")
Ts <- vector("list", length = n + 1L)
Ts[1:2] <- list(1, c(0, x))
fun <- function(n) {
if (!is.null(t <- Ts[[n]]))
t
else (Ts[[n]] <<- c(0, 2 * fun(n - 1L)) - c(fun(n - 2L), 0, 0))
}
fun(n + 1L)
}, list(x = x))
environment(value) <- parent.frame()
return(value)
}
Chebyshev1 <- ChebyshevT <- .Chebyshev(1)
Chebyshev2 <- ChebyshevU <- .Chebyshev(2)
remove(.Chebyshev)
gcd <- function (..., na.rm = FALSE)
.External(C_gcd, na.rm, ...)
pgcd <- function (..., na.rm = FALSE)
.External(C_pgcd, na.rm, ...)
lcm <- function (..., na.rm = FALSE)
.External(C_lcm, na.rm, ...)
plcm <- function (..., na.rm = FALSE)
.External(C_plcm, na.rm, ...)
coprime <- function (...)
gcd(...) == 1L
pcoprime <- function (...)
pgcd(...) == 1L
Cyclotomic <- function (n)
{
n <- as.scalar.integer(n)
if (is.na(n) || n <= 0L)
stop("'n' must be a positive integer")
k <- seq_len(n)
k <- lapply(X = -exp(2i * pi * k[pcoprime(k, n)]/n), FUN = "c", 1)
value <- k[[1L]]
fun <- function(x, y) {
value <- numeric(length(x) + 1L)
value[-length(value)] <- value[-length(value)] + x * y[1L]
value[-1L] <- value[-1L] + x * y[2L]
value
}
for (y in k[-1L]) value <- fun(value, y)
round(Re(value))
}
degree <- function (x)
{
if (!length(k <- attr(x, "exponents")))
max(0L, length(x) - 1L)
else max(0L, exponents(x), na.rm = TRUE)
}
exponents <- function (x)
{
if (length(value <- attr(x, "exponents")))
value
else seq.int(0L, along.with = x)
}
"exponents<-" <- function (x, value)
{
if (!is.polynomial(x))
x <- as.polynomial(x)
if (is.null(value)) {
attr(x, "exponents") <- integer(0)
return(x)
}
value <- as.integer(value)
if (length(value) != length(x))
stop("invalid 'exponents' length")
else if (anyNA(value))
stop("missing values in 'exponents' are not allowed")
else if (any(value < 0L))
stop("negative values in 'exponents' are not allowed")
attr(x, "exponents") <- value
x
}
as.body <- function (x, var = "x", ...)
{
x <- reduce(x = x, ...)
if (is.expression(var))
var <- aslength1(var)[[1L]]
if (!is.call(var))
var <- as.symbol(var)
if (!length(x))
return(substitute(fun(length(var)), list(fun = if (mode(x) ==
"complex") as.symbol("complex") else as.symbol("numeric"),
var = var)))
dim(x) <- dimnames(x) <- names(x) <- NULL
a <- coef(x)
k <- exponents(x)
if (length(x) == 1L) {
if (k != 1L)
var <- call("^", var, Re(k))
return(if (is.na(a) || is.complex(x))
call("*", a, var)
else if (a == 1)
var
else if (a == -1)
call("-", var)
else call("*", a, var))
}
vars <- lapply(X = Re(k), FUN = function(y) {
if (y == 1)
var
else if (!y)
NULL
else call("^", var, y)
})
if (all(k == 0L))
vars[[length(vars)]] <- call("^", var, 0)
if (is.numeric(a)) {
signs <- sign(a)[-1L]
signs[is.na(signs)] <- 1
a <- c(1, signs) * a
}
else {
signs <- sign(Re(a))[-1L]
i <- is.na(signs) | signs == 0
signs[i] <- sign(Im(a))[-1L][i]
signs[is.na(signs)] <- 1
a <- complex(real = c(1, signs) * Re(a),
imaginary = c(1, signs) * Im(a))
a <- lapply(a, function(x) {
if (!is.na(x) && !Im(x))
Re(x)
else x
})
if (!any(vapply(a, "is.complex", NA)))
a[[length(a)]] <- a[[length(a)]] + 0i
}
vars <- .mapply(function(e1, e2) {
if (is.null(e2))
e1
else if (!is.na(e1) && e1 == 1)
e2
else call("*", e1, e2)
}, list(a, vars), NULL)
i <- signs == -1
signs[i] <- "-"
signs[!i] <- "+"
value <- vars[[1L]]
for (i in seq_along(signs)) {
value <- call(signs[[i]], value, vars[[i + 1L]])
}
return(value)
}
integral <- function (x, lower, upper, C, ...)
{
if (!is.polynomial(x))
x <- as.polynomial(x)
a <- coef(x)
k <- exponents(x) + 1L
if (missing(C))
return(diff(predict(object = .polynomial(a/k, k), newdata = c(lower, upper), ...)))
C <- as.scalar.number(C)
if (!length(attr(x, "exponents")))
.polynomial(c(C, a/k))
else .polynomial(c(C, a/k), c(0L, k))
}
removeInvalidExponents <- function (x)
{
if (!length(k <- attr(x, "exponents")))
x
else if (any(i <- is.na(k) | k < 0L))
x[!i]
else x
}
sortExponents <- function (x, decreasing = FALSE)
{
x[order(exponents(x), decreasing = decreasing)]
}
fixDuplicateExponents <- function (x)
{
if (!length(k <- attr(x, "exponents"))) {
x
}
else if (anyDuplicated(k)) {
dim(x) <- dimnames(x) <- names(x) <- NULL
a <- coef(x)
dk <- duplicated(k)
u <- unique(k[dk])
i <- k %in% u
a[match(u, k)] <- vapply(X = split(a[i], factor(k[i], u)),
FUN = "sum", FUN.VALUE = vector(mode = mode(a), length = 1L))
.polynomial(a[!dk], k[!dk])
}
else x
}
withDefaultExponents <- function (x)
{
if (!length(k <- attr(x, "exponents")))
return(x)
value <- polynomial(max(k, na.rm = TRUE) + 1L)
value[k + 1L] <- coef(x)
return(value)
}
removeUnwantedCoefficients <- function (x, zero.rm = TRUE, na.rm = FALSE, finite = FALSE)
{
i <- if (zero.rm) {
if (finite)
is.finite(x) & x
else if (na.rm)
!is.na(x) & x
else is.na(x) | x
}
else if (finite) {
is.finite(x)
}
else if (na.rm) {
!is.na(x)
}
else return(x)
if (all(i))
x
else x[i]
}
maybeDefaultExponents <- function (x)
{
if (length(x) == 0L)
x
else if ((degree(x) + 1L)/2 <= length(x))
withDefaultExponents(x)
else x
}
reduce <- function (x, zero.rm = TRUE, na.rm = FALSE, finite = FALSE, dflt.exp = FALSE,
fix.dup.exp = is.na(dflt.exp) || dflt.exp, decreasing = NA,
...)
{
if (!is.polynomial(x))
x <- as.polynomial(x = x, ...)
x <- removeInvalidExponents(x)
x <- removeUnwantedCoefficients(x, zero.rm = zero.rm, na.rm = na.rm,
finite = finite)
dflt.exp <- as.scalar.logical(dflt.exp)
if (fix.dup.exp) {
x <- fixDuplicateExponents(x)
x <- removeUnwantedCoefficients(x, zero.rm = zero.rm,
na.rm = na.rm, finite = finite)
if (is.na(dflt.exp))
x <- maybeDefaultExponents(x)
else if (dflt.exp)
x <- withDefaultExponents(x)
}
else if (!anyDuplicated(exponents(x))) {
if (is.na(dflt.exp))
x <- maybeDefaultExponents(x)
else if (dflt.exp)
x <- withDefaultExponents(x)
}
decreasing <- as.scalar.logical(decreasing)
if (is.na(decreasing))
x
else sortExponents(x, decreasing = decreasing)
}
as.function.polynomial <- function (x, envir = parent.frame(), xname = "x", var = xname,
...)
{
xname <- as.symbol(xname)
value <- function(x) NULL
names(formals(value)) <- as.character(xname)
body(value) <- as.body(x = x, var = var, ...)
environment(value) <- envir
return(value)
}
as.list.polynomial <- function (x, ...)
{
value <- .mapply(.polynomial, list(coef(x), exponents(x)), NULL)
names(value) <- names(x)
return(value)
}
print.polynomial <- function (x, ...)
{
if (length(x) == 0L) {
if (length(d <- dim(x)) > 1L)
cat(sprintf("<%s polynomial>\n", paste0(d, collapse = " x ")))
else if (!is.null(names(x)))
cat("named polynomial(0)\n")
else cat("polynomial(0)\n")
}
else {
xx <- coef(x)
keepAttrs <- setdiff(names(attributes(x)), c("exponents",
"class"))
attributes(xx)[keepAttrs] <- attributes(x)[keepAttrs]
print(xx)
cat("Exponents:\n")
print(exponents(x))
}
invisible(x)
}
"-.polynomial" <- function (e1, e2)
{
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (nargs() == 1L)
return(.polynomial(-coef(e1), attr(e1, "exponents")))
if (!is.polynomial(e2))
e2 <- as.polynomial(e2)
if (!length(e2)) {
e1
}
else if (!length(e1)) {
.polynomial(-coef(e2), attr(e2, "exponents"))
}
else if (!length(attr(e1, "exponents")) && !length(attr(e2, "exponents"))) {
e1 <- coef(e1)
e2 <- coef(e2)
if (length(e1) > length(e2)) {
names(e2) <- NULL
e2 <- c(e2, rep(0, length(e1) - length(e2)))
}
else if (length(e1) < length(e2)) {
names(e1) <- NULL
e1 <- c(e1, rep(0, length(e2) - length(e1)))
}
.polynomial(e1 - e2)
}
else reduce(.polynomial(c(coef(e1), -coef(e2)), c(exponents(e1), exponents(e2))), zero.rm = FALSE, fix.dup.exp = TRUE)
}
"*.polynomial" <- function (e1, e2)
{
if (nargs() == 1L)
stop("invalid unary operator")
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (!is.polynomial(e2))
e2 <- as.polynomial(e2)
if (!length(e1) || !length(e2)) {
polynomial(0)
}
else if (!length(attr(e1, "exponents")) && !length(attr(e2, "exponents"))) {
e1 <- coef(e1)
e2 <- coef(e2)
if (length(e1) != 1L && length(e2) != 1L) {
value <- numeric(length(e1) + length(e2) - 1L)
n <- seq.int(0L, along.with = e1)
for (i in seq_along(e2)) {
value[n + i] <- value[n + i] + e1 * e2[[i]]
}
.polynomial(value)
}
else .polynomial(e1 * e2)
}
else reduce(.polynomial(do.call("c", c(lapply(coef(e1), "*", coef(e2)), list(use.names = FALSE))),
do.call("c", c(lapply(exponents(e1), "+", exponents(e2)), list(use.names = FALSE)))),
zero.rm = FALSE, fix.dup.exp = TRUE)
}
"/.polynomial" <- function (e1, e2)
{
if (nargs() == 1L)
stop("invalid unary operator")
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (!is.polynomial(e2))
e2 <- as.polynomial(e2)
if (!length(e1) || !length(e2))
return(polynomial(0))
if (!length(attr(e2, "exponents")) && length(e2) == 1L)
return(.polynomial(coef(e1)/coef(e2), attr(e1, "exponents")))
e1 <- rev(coef(reduce(e1, dflt.exp = TRUE)))
e2 <- rev(coef(reduce(e2, dflt.exp = TRUE)))
if (!length(e1) || !length(e2))
return(polynomial(0))
len <- max(0L, length(e1) - length(e2) + 1L)
value <- vector(mode(e1[0L] + e2[0L]), len)
for (i in seq.int(to = 1L, by = -1L, length.out = len)) {
value[[i]] <- e1[[1L]]/e2[[1L]]
e1 <- e1[-1L] - c(value[[i]] * e2[-1L], integer(i - 1L))
}
if (anyNA(e1) || any(e1 != 0))
warning("non-zero remainder in polynomial division")
.polynomial(value)
}
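# Worked check of the synthetic-division loop (illustrative; coefficients are
# stored in ascending exponent order):
# as.polynomial(c(-1, 0, 1)) / as.polynomial(c(-1, 1))  # (x^2 - 1)/(x - 1) = 1 + x
# A non-zero remainder only triggers the warning; the quotient is still returned.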
"[.polynomial" <- function (x, ...)
{
k <- exponents(x)
a <- x <- coef(x)
suppressWarnings(storage.mode(a) <- "integer")
a[] <- seq_along(a)
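# 'a' mirrors x's shape but holds positional indices, so a[...] maps the
# selected coefficients back to their matching exponents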
.polynomial(x[...], k[as.vector(a[...])])
}
"[[.polynomial" <- function (x, ...)
{
k <- exponents(x)
a <- x <- coef(x)
a[] <- seq_along(a)
.polynomial(x[[...]], k[[as.vector(a[[...]])]])
}
"+.polynomial" <- function (e1, e2)
{
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (nargs() == 1L)
return(.polynomial(+coef(e1), attr(e1, "exponents")))
if (!is.polynomial(e2))
e2 <- as.polynomial(e2)
if (!length(e2)) {
e1
}
else if (!length(e1)) {
.polynomial(+coef(e2), attr(e2, "exponents"))
}
else if (!length(attr(e1, "exponents")) && !length(attr(e2, "exponents"))) {
e1 <- coef(e1)
e2 <- coef(e2)
if (length(e1) > length(e2)) {
names(e2) <- NULL
e2 <- c(e2, rep(0, length(e1) - length(e2)))
}
else if (length(e1) < length(e2)) {
names(e1) <- NULL
e1 <- c(e1, rep(0, length(e2) - length(e1)))
}
.polynomial(e1 + e2)
}
else reduce(.polynomial(c(coef(e1), coef(e2)), c(exponents(e1), exponents(e2))), zero.rm = FALSE, fix.dup.exp = TRUE)
}
"^.polynomial" <- function (e1, e2)
{
if (nargs() == 1L)
stop("invalid unary operator")
if (!is.polynomial(e1))
e1 <- as.polynomial(e1)
if (is.polynomial(e2)) {
if (length(attr(e2, "exponents"))) {
tmp <- coef(e2)[!exponents(e2) %in% 0L]
if (anyNA(tmp) || any(tmp != 0))
warning("non-constant parts of 'e2' discarded in coercion")
e2 <- as.integer(sum(coef(e2)[exponents(e2) %in% 0L]))
}
else if (length(e2)) {
e2 <- as.integer(coef(e2)[[1L]])
}
else return(polynomial(0))
}
else if (length(e2)) {
e2 <- as.scalar.integer(e2)
}
else return(polynomial(0))
if (is.na(e2)) {
e1[] <- NA
return(e1)
}
if (e2 < 0L) {
warning("'e2' should be a non-negative integer")
return(polynomial(0))
}
if (e2 == 0L)
return(as.polynomial(1))
if (e2 == 1L)
return(e1)
value <- e1
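# logical(e2 - 1L) is just a length-(e2 - 1) dummy vector: multiply e2 - 1 times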
for (i in logical(e2 - 1L)) value <- value * e1
return(value)
}
plot.polynomial <- function (x, y = NULL, to = NULL, from = y, n = 101, add = FALSE,
type = "l", xname = "x", xlab = xname, ylab = NULL, log = NULL,
xlim = NULL, ...)
{
if (dev.cur() == 1L && !isFALSE(add)) {
warning("'add' will be ignored as there is no existing plot")
add <- FALSE
}
addF <- isFALSE(add)
if (is.null(ylab) && !isTRUE(add))
try(ylab <- as.body(signif(x, 7L), var = xname, finite = TRUE))
if (is.null(from) || is.null(to)) {
xl <- if (!is.null(xlim))
xlim
else if (!addF) {
pu <- par("usr")[1:2]
if (par("xaxs") == "r")
pu <- extendrange(pu, f = -1/27)
if (par("xlog"))
10^pu
else pu
}
else c(0, 1)
if (is.null(from))
from <- xl[1L]
if (is.null(to))
to <- xl[2L]
}
lg <- if (length(log))
log
else if (!addF && par("xlog"))
"x"
else ""
y <- as.polynomial(x)
if (grepl("x", lg, fixed = TRUE)) {
if (from <= 0 || to <= 0)
stop("'from' and 'to' must be > 0 with log=\"x\"")
x <- exp(seq.int(log(from), log(to), length.out = n))
}
else x <- seq.int(from, to, length.out = n)
y <- predict(y, x, finite = TRUE)
if (isTRUE(add))
lines(x = x, y = y, type = type, ...)
else plot(x = x, y = y, type = type, xlab = xlab, ylab = ylab,
xlim = xlim, log = lg, ...)
invisible(list(x = x, y = y))
}
points.polynomial <- function (x, y = NULL, to = NULL, from = y, n = 101, add = FALSE,
type = "p", ...)
{
if (dev.cur() == 1L)
stop("plot.new has not been called yet")
plot.polynomial(x = x, y = y, to = to, from = from, n = n,
add = TRUE, type = type, ...)
}
lines.polynomial <- function (x, y = NULL, to = NULL, from = y, n = 101, add = FALSE,
type = "l", ...)
{
if (dev.cur() == 1L)
stop("plot.new has not been called yet")
plot.polynomial(x = x, y = y, to = to, from = from, n = n,
add = TRUE, type = type, ...)
}
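# Usage sketch (assumed API, mirroring plot.polynomial above):
# p <- as.polynomial(c(0, -1, 1))         # x^2 - x
# plot(p, from = -1, to = 2)
# lines(as.polynomial(c(0, 1)), col = 2)  # overlay y = x on the same device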
coef.polynomial <- function (object, ...)
object@.Data
"coef<-" <- "coefficients<-" <- function (object, ..., value)
UseMethod("coef<-")
"coef<-.default" <- function (object, ..., value)
{
if (length(object) != length(value))
stop(sprintf("invalid 'value', length of value [%d] should be equal to length of object [%d]",
length(value), length(object)))
if (!is.polynomial(object))
object <- as.polynomial(object)
value <- as.numbers(x = value, ...)
attributes(value) <- c(attributes(object), class = "polynomial")
return(value)
}
deriv.polynomial <- function (expr, ...)
{
k <- exponents(expr)
reduce(.polynomial(k * coef(expr), k - 1L), zero.rm = FALSE)
}
predict.polynomial <- function (object, newdata = seq.int(0, 1, 0.01), ...)
{
object <- reduce(x = object, ...)
value <- vector(mode = mode(object), length = length(newdata))
for (e2 in .mapply(function(a, k) {
a * newdata^k
}, list(coef(object), exponents(object)), NULL)) {
value <- value + e2
}
return(value)
}
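# e.g. predict(as.polynomial(c(1, 0, 2)), newdata = c(0, 1, 2))
# evaluates 1 + 2x^2 at those points, giving 1, 3, 9 (illustrative).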
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ampute.default.R
\name{ampute.default.patterns}
\alias{ampute.default.patterns}
\title{Default \code{patterns} in \code{ampute}}
\usage{
ampute.default.patterns(n)
}
\arguments{
\item{n}{A scalar specifying the #variables in the data.}
}
\value{
A square matrix of size #variables where \code{0} indicates a variable
that should have missing values and \code{1} indicates a variable that
should remain complete.
}
\description{
This function creates a default pattern matrix for the multivariate
amputation function \code{ampute()}.
}
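\examples{
# Illustrative only: default pattern matrix for a 3-variable dataset
ampute.default.patterns(n = 3)
}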
\seealso{
\code{\link{ampute}}, \code{\link{md.pattern}}
}
\author{
Rianne Schouten, 2016
}
| /man/ampute.default.patterns.Rd | no_license | HBGDki/mice | R | false | true | 611 | rd |
library(testthat)
context("rounding functions")
test_that("ceiling_digits returns a ceiled value with desired decimal", {
# Given
value <- 1234.1234
# When
result_zero <- ceiling_digits(value)
result_one <- ceiling_digits(value, 1)
result_two <- ceiling_digits(value, 2)
result_three <- ceiling_digits(value, 3)
result_four <- ceiling_digits(value, 4)
# Then
expect_equal(result_zero, 1235)
expect_equal(result_one, 1234.2)
expect_equal(result_two, 1234.13)
expect_equal(result_three, 1234.124)
expect_equal(result_four, 1234.1234)
})
test_that("floor_digits returns a ceiled value with desired decimal", {
# Given
value <- 1234.1234
# When
result_zero <- floor_digits(value)
result_one <- floor_digits(value, 1)
result_two <- floor_digits(value, 2)
result_three <- floor_digits(value, 3)
result_four <- floor_digits(value, 4)
# Then
expect_equal(result_zero, 1234)
expect_equal(result_one, 1234.1)
expect_equal(result_two, 1234.12)
expect_equal(result_three, 1234.123)
expect_equal(result_four, 1234.1234)
})
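# A minimal reference implementation consistent with these tests
# (hypothetical; the package's actual definitions may differ):
# ceiling_digits <- function(x, digits = 0) ceiling(x * 10^digits) / 10^digits
# floor_digits   <- function(x, digits = 0) floor(x * 10^digits) / 10^digits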
| /tests/testthat/test-001-round.R | permissive | jakubnowicki/dibbler | R | false | false | 1,073 | r |
#' @title imputeMidPoint
#'
#' @description Impute a mid-point seroconversion date within the censored interval.
#'
#' @param dat dataset from \code{\link{getRTData}}.
#'
#' @return data.frame
#'
#' @export
imputeMidPoint <- function(dat) {
dat$sero_date <-
(as.numeric(dat$late_neg) + as.numeric(dat$early_pos))/2
dat$sero_date <- as.Date(dat$sero_date, origin='1970-01-01')
tibble::as_tibble(dat)
}
#' @title imputeRandomPoint
#'
#' @description Impute random seroconversion date(s) within the censored interval.
#'
#' @param dat dataset from \code{\link{getRTData}}.
#'
#' @return data.frame
#'
#' @export
imputeRandomPoint <- function(dat) {
idat <- split(dat, as.factor(dat$IIntID))
Fun1 <- function(idat) {
if (is.na(idat$early_pos)) {
with(idat, cbind(IIntID, sero_date=NA))
} else {
if (idat$early_pos - idat$late_neg <= 1) {
rpoint <- idat$early_pos
} else {
rpoint <- sample((idat$late_neg + 1):idat$early_pos, 1)
}
# return an (IIntID, sero_date) row for both branches
with(idat, cbind(IIntID, sero_date = rpoint))
}
}
sdat <- lapply(idat, Fun1)
sdat <- data.frame(do.call("rbind", sdat ))
dat <- merge(dat, sdat, by="IIntID", all.x=TRUE)
dat$sero_date <- as.Date(dat$sero_date, origin = "1970-01-01")
tibble::as_tibble(dat)
}
#' @title imputeEndPoint
#'
#' @description Impute an end-point seroconversion date within the censored interval.
#'
#' @param dat dataset from \code{\link{getRTData}}.
#'
#' @return data.frame
#'
#' @export
imputeEndPoint <- function(dat) {
dat$sero_date <- dat$early_pos
tibble::as_tibble(dat)
}
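# Usage sketch (assumes getRTData() from this package returns the
# late_neg/early_pos interval data these functions expect):
# rt <- getRTData(dat)
# imputeMidPoint(rt); imputeRandomPoint(rt); imputeEndPoint(rt)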
| /R/imputeMethods.R | no_license | hkim207/ahri-1 | R | false | false | 1,655 | r |
#' EMD decomposition
#'
#' Decompose input data to Intrinsic Mode Functions (IMFs) with the
#' Empirical Mode Decomposition algorithm.
#'
#' This is a wrapper around \code{eemd} with \code{ensemble_size = 1} and \code{noise_strength = 0}.
#'
#' @export
#' @name emd
#' @param input Vector of length N. The input signal to decompose.
#' @param num_imfs Number of Intrinsic Mode Functions (IMFs) to compute. If num_imfs is set to zero, a value of
#' num_imfs = emd_num_imfs(N) will be used, which corresponds to a maximal number of
#' IMFs. Note that the final residual is also counted as an IMF in this
#' respect, so you most likely want at least num_imfs=2.
#' @param S_number Integer. Use the S-number stopping criterion [1] for the EMD procedure with the given values of S.
#' That is, iterate until the number of extrema and zero crossings in the
#' signal differ at most by one, and stay the same for S consecutive
#' iterations. Typical values are in the range 3--8. If \code{S_number} is
#' zero, this stopping criterion is ignored. Default is 4.
#' @param num_siftings Use a maximum number of siftings as a stopping criterion. If
#' \code{num_siftings} is zero, this stopping criterion is ignored. Default is 50.
#' @return Time series object of class \code{"mts"} where series corresponds to
#' IMFs of the input signal, with the last series being the final residual.
#' @references
#' \enumerate{
#' \item{N. E. Huang, Z. Shen and S. R. Long, "A new view of nonlinear water
#' waves: The Hilbert spectrum", Annual Review of Fluid Mechanics, Vol. 31
#' (1999) 417--457}
#' }
#' @seealso \code{\link{eemd}}, \code{\link{ceemdan}}
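#' @examples
#' \dontrun{
#' # Illustrative sketch: decompose a noisy sinusoid into a few IMFs.
#' x <- sin(2 * pi * seq_len(128) / 32) + rnorm(128, sd = 0.1)
#' imfs <- emd(x, num_imfs = 3)
#' plot(imfs)
#' }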
emd <- function(input, num_imfs = 0, S_number = 4L, num_siftings = 50L) {
if (!all(is.finite(input)))
stop("'input' must contain finite values only.")
if (num_imfs < 0)
stop("Argument 'num_imfs' must be non-negative integer.")
if (S_number < 0)
stop("Argument 'S_number' must be non-negative integer.")
if (num_siftings < 0)
stop("Argument 'num_siftings' must be non-negative integer.")
output <- eemdR(input, num_imfs, ensemble_size = 1L,
noise_strength = 0L, S_number, num_siftings,
rng_seed = 0L, threads = 0L)
if (inherits(input, "ts")) {
tsp(output) <- tsp(input)
} else tsp(output) <- c(1, nrow(output), 1)
if (ncol(output) > 1) {
class(output) <- c("mts", "ts", "matrix")
colnames(output) <- c(paste("IMF", 1:(ncol(output) - 1)), "Residual")
} else class(output) <- "ts"
output
}
| /R/emd.R | no_license | helske/Rlibeemd | R | false | false | 2,627 | r |
# Quantification of significant sites in binding motifs
library(data.table)
filtered_mut <- readRDS("../Mutation_effects/Softmax_modelling/distribution_cutoffs.rds")
library(dplyr)
library(tidyr)
library(GenomicRanges)
# Get model data from the minigene --------------------------------
# Filter only single point mutations
filtered_mut <- filtered_mut %>%
filter(nchar(ALT) == 1 & nchar(REF) == 1)
# Get the significant ones (cutoff empirical > 0)
sig_pos_scores <- filtered_mut %>%
filter(reps_cutoff_empirical>0) %>%
select(isoform, ALT, POS) %>%
unique()
isofact<-levels(filtered_mut$isoform)
sig_pos_scores$isoform <- factor(sig_pos_scores$isoform,
levels = isofact,
labels = isofact,
ordered = T
)
sig_pos.gr <- GRanges(
seqnames = "minigene",
ranges = IRanges(
start = sig_pos_scores$POS,
end = sig_pos_scores$POS
),
strand = "+",
ALT = sig_pos_scores$ALT,
isoforms = sig_pos_scores$isoform
)
# chr16:28943192-28944465:+ (minigene)
cd19_nts_hg19 <- seq(28943192, 28944465)
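# match() against this vector turns a genomic coordinate into a 1-based
# minigene position, e.g. match(28943192, cd19_nts_hg19) gives 1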
### AtTRACT binding sites -------------------------------------------------------------------------------------------------------------------------
attract_motifs <- fread("data/CD19_merged_bs_wSeq.bed")
colnames(attract_motifs) <- c("chr", "hg38_start", "hg38_end", "RBP_motif", "merged_motifs", "strand")
attract_motifs <- attract_motifs %>%
separate(RBP_motif, c("RBP", "motif")) %>%
mutate(
minigene_st = match(hg38_start, cd19_nts_hg19),
minigene_end = match(hg38_end, cd19_nts_hg19)
)
attract.gr <- GRanges(
seqnames = "minigene",
ranges = IRanges(start = attract_motifs$minigene_st, end = attract_motifs$minigene_end),
strand = "+",
RBP = attract_motifs$RBP
)
ovp_bs <- findOverlapPairs(attract.gr, sig_pos.gr)
ovp_df <- cbind(
(first(ovp_bs) %>%
as.data.frame()),
second(ovp_bs) %>%
as.data.frame()
)
colnames(ovp_df) <- paste0(colnames(ovp_df), c(rep(c("_attract", ""), each = 6), ""))
ovp_df <- ovp_df %>%
select(start_attract, end_attract, width_attract, RBP_attract, start, ALT, isoforms)
not_ov_df <- as.data.frame(attract.gr[!(attract.gr %in% first(ovp_bs))] )
colnames(not_ov_df) <- paste0(colnames(not_ov_df), "_attract")
not_ov_df <- not_ov_df %>%
select(start_attract, end_attract, width_attract, RBP_attract)
ovp_df <- rbindlist(list(ovp_df, not_ov_df), fill = T)
ovp_df <- ovp_df %>%
mutate(isoforms = forcats::fct_explicit_na(isoforms, na_level = "No significant"))
# Only considering the positions, not the mutation (A site can be significant in up to 3 different mutations)
attract_sig_per_siteiso <- ovp_df %>%
select(-ALT) %>%
unique() %>%
dplyr::count(start_attract, end_attract, width_attract, RBP_attract, isoforms, name = "sig_sites") %>%
mutate(sig_sites = ifelse(isoforms == "No significant", 0, sig_sites))
attract_sig_per_site <- ovp_df %>%
mutate(sig_bs = ifelse(isoforms != "No significant", "Sig", "NoSig")) %>%
select(-ALT, -isoforms, -start) %>%
unique()
# Counts how many are significant per isoform
attract_sig_per_RBPiso <- ovp_df %>%
select(-ALT) %>%
unique() %>%
dplyr::count(RBP_attract, isoforms, name = "sig_bs")
attract_total_bs <- ovp_df %>%
select(start_attract, end_attract, width_attract, RBP_attract) %>%
unique() %>%
count(RBP_attract, name = "total_bs")
attract_sig_per_RBPiso <- left_join(attract_sig_per_RBPiso, attract_total_bs, by = "RBP_attract")
# Significant and not significant per RBP
attract_sig_per_RBP <- ovp_df %>%
mutate(sig_bs = ifelse(isoforms != "No significant", "Total_Any", "Total_NoSig")) %>%
select(-ALT, -isoforms, -start) %>%
unique() %>%
dplyr::count(RBP_attract, sig_bs, name = "total_sig") %>%
pivot_wider(names_from = sig_bs, values_from = total_sig, values_fill = 0)
colnames(attract_sig_per_RBP) <- gsub("_attract", "", colnames(attract_sig_per_RBP))
colnames(attract_sig_per_RBPiso) <- gsub("_attract", "", colnames(attract_sig_per_RBPiso))
colnames(attract_sig_per_site) <- gsub("_attract", "", colnames(attract_sig_per_site))
colnames(attract_sig_per_siteiso) <- gsub("_attract", "", colnames(attract_sig_per_siteiso))
fwrite(attract_sig_per_site, "output/tab/motif_sig_pos/Empirical_Cutoff/AtTRACT_sig_pos_per_site.tab")
fwrite(attract_sig_per_siteiso, "output/tab/motif_sig_pos/Empirical_Cutoff/AtTRACT_sig_pos_per_site_iso.tab")
fwrite(attract_sig_per_RBPiso, "output/tab/motif_sig_pos/Empirical_Cutoff/AtTRACT_sig_pos_per_RBP.tab")
fwrite(attract_sig_per_RBP, "output/tab/motif_sig_pos/Empirical_Cutoff/AtTRACT_sig_pos_per_RBPiso.tab")
## oRNAment binding sites ---------------------------------------------------------------------------------------------
ornament_motifs <- fread("data/motifs_exon1-3.cleaned.tsv")
ornament_motifs <- ornament_motifs %>%
mutate(RBP = gsub("\\s\\(.*\\)", "", motif))
ornament.gr <- GRanges(
seqnames = "minigene",
ranges = IRanges(start = ornament_motifs$minigene_pos_st, end = ornament_motifs$minigene_pos_end),
strand = "+",
RBP = ornament_motifs$RBP
)
ovp_bs <- findOverlapPairs(ornament.gr, sig_pos.gr)
ovp_df <- cbind(
(first(ovp_bs) %>%
as.data.frame()),
second(ovp_bs) %>%
as.data.frame()
)
colnames(ovp_df) <- paste0(colnames(ovp_df), c(rep(c("_ornament", ""), each = 6), ""))
ovp_df <- ovp_df %>%
select(start_ornament, end_ornament, width_ornament, RBP_ornament, start, ALT, isoforms)
not_ov_df <- as.data.frame(ornament.gr[!(ornament.gr %in% first(ovp_bs))] )
colnames(not_ov_df) <- paste0(colnames(not_ov_df), "_ornament")
not_ov_df <- not_ov_df %>%
select(start_ornament, end_ornament, width_ornament, RBP_ornament)
ovp_df <- rbindlist(list(ovp_df, not_ov_df), fill = T)
ovp_df <- ovp_df %>%
mutate(isoforms = forcats::fct_explicit_na(isoforms, na_level = "No significant"))
# Only considering the positions, not the mutation (A site can be significant in up to 3 different mutations)
ornament_sig_per_siteiso <- ovp_df %>%
select(-ALT) %>%
unique() %>%
dplyr::count(start_ornament, end_ornament, width_ornament, RBP_ornament, isoforms, name = "sig_sites") %>%
mutate(sig_sites = ifelse(isoforms == "No significant", 0, sig_sites))
ornament_sig_per_site <- ovp_df %>%
mutate(sig_bs = ifelse(isoforms != "No significant", "Sig", "NoSig")) %>%
select(-ALT, -isoforms, -start) %>%
unique()
# Counts how many are significant per isoform
ornament_sig_per_RBPiso <- ovp_df %>%
select(-ALT) %>%
unique() %>%
dplyr::count(RBP_ornament, isoforms, name = "sig_bs")
ornament_total_bs <- ovp_df %>%
select(start_ornament, end_ornament, width_ornament, RBP_ornament) %>%
unique() %>%
count(RBP_ornament, name = "total_bs")
ornament_sig_per_RBPiso <- left_join(ornament_sig_per_RBPiso, ornament_total_bs, by = "RBP_ornament")
# Significant and not significant per RBP
ornament_sig_per_RBP <- ovp_df %>%
mutate(sig_bs = ifelse(isoforms != "No significant", "Total_Any", "Total_NoSig")) %>%
select(-ALT, -isoforms, -start) %>%
unique() %>%
dplyr::count(RBP_ornament, sig_bs, name = "total_sig") %>%
pivot_wider(names_from = sig_bs, values_from = total_sig, values_fill = 0)
colnames(ornament_sig_per_RBP) <- gsub("_ornament", "", colnames(ornament_sig_per_RBP))
colnames(ornament_sig_per_RBPiso) <- gsub("_ornament", "", colnames(ornament_sig_per_RBPiso))
colnames(ornament_sig_per_site) <- gsub("_ornament", "", colnames(ornament_sig_per_site))
colnames(ornament_sig_per_siteiso) <- gsub("_ornament", "", colnames(ornament_sig_per_siteiso))
fwrite(ornament_sig_per_site, "output/tab/motif_sig_pos/Empirical_Cutoff/oRNAment_sig_pos_per_site.tab")
fwrite(ornament_sig_per_siteiso, "output/tab/motif_sig_pos/Empirical_Cutoff/oRNAment_sig_pos_per_site_iso.tab")
fwrite(ornament_sig_per_RBPiso, "output/tab/motif_sig_pos/Empirical_Cutoff/oRNAment_sig_pos_per_RBP.tab")
fwrite(ornament_sig_per_RBP, "output/tab/motif_sig_pos/Empirical_Cutoff/oRNAment_sig_pos_per_RBPiso.tab")
#### Significant sites in DeepRiPe predictions---------------------------------------------------------############
deepripe <- fread("data/rbp_variant_scores_DeepRiPe.csv") %>%
mutate(POS = gsub("[A-Z]([0-9]+)[A-Z]", "\\1", variant)) %>%
separate(ref_alt, c("REF", "ALT"))
deepripe$POS <- as.numeric(deepripe$POS)
deepripe_sig_mut <- left_join(deepripe,
sig_pos_scores %>%
mutate(sig_mut = 1),
by = c("POS", "ALT")
)
deepripe_bs <- left_join(deepripe,
sig_pos_scores %>%
mutate(sig_mut = 1),
by = c("POS", "ALT")
) %>%
group_by(RBP, set) %>%
summarise(
total_pos = n(),
total_sig_pos = sum(sig_mut, na.rm = T)
) %>%
ungroup()
# Vector with unique significant variants in the dataset
deepripe_sig <- left_join(deepripe,
sig_pos_scores %>%
mutate(sig_mut = 1),
by = c("POS", "ALT")
) %>%
filter(sig_mut == 1) %>%
select(isoform, variant) %>%
unique()
deepripe_sig.list <- split(deepripe_sig$variant, deepripe_sig$isoform)
# GRanges with all the positions
deepripe.grl <- deepripe %>%
mutate(strand = ifelse(score > 0, "+", "-")) %>%
select(POS, variant, set, strand, RBP) %>%
unique() %>%
mutate(
seqnames = "minigene",
start = POS,
end = POS,
rbp_set = paste0(RBP, "_", set)
) %>%
makeGRangesListFromDataFrame(.,
keep.extra.columns = T,
split.field = "rbp_set",
names.field = "rbp_set"
)
deepripe.grl
# Granges reduced (to get clusters)
deepripe_red.grl <- reduce(deepripe.grl, drop.empty.ranges = F, with.revmap = T)
# Getting the information of the records that where collapsed
deepripe_red.grl_var <- mapply(FUN = function(set, red) {
variants <- sapply(red$revmap, function(ind) {
paste(set[ind]$variant, collapse = ",")
})
red$variants <- variants
red
}, red = deepripe_red.grl, set = deepripe.grl, USE.NAMES = T)
deepripe_red.grl_var <- GenomicRangesList(deepripe_red.grl_var) %>%
unlist()
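# Minimal illustration of reduce() + revmap (hypothetical positions):
# gr <- GRanges("minigene", IRanges(c(1, 2, 3, 10), width = 1))
# r <- reduce(gr, with.revmap = TRUE)
# width(r)  # 3 and 1: one 3-nt cluster plus a singleton
# r$revmap  # IntegerList(1:3, 4): original rows behind each cluster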
# Filtering motifs for any cluster with at least 4 consecutive positions
deepripe_motifs <- deepripe_red.grl_var[width(deepripe_red.grl_var) >= 4]
# Number of variants in a motif: variants are comma-separated, so
# the comma count plus one gives the number of variants
deepripe_motifs$n_variants <- sapply(
deepripe_motifs$variants,
function(x) {
stringr::str_count(x, ",") + 1L
}
)
deepripe_motifs$RBP_set <- names(deepripe_motifs)
# Convert the motif GR to a data frame
deepripe_motifs.df <- as_tibble(deepripe_motifs) %>%
select(-seqnames, -revmap) %>%
separate(RBP_set, into = c("RBP", "set"), sep = "_", extra = "merge")
# Quantification of total motifs and total significant motifs
sig_var_iso <- lapply(deepripe_sig.list, function(iso) {
# Getting the significant variants that overlap the detected with DeepRiPe
sig_list <- lapply(
strsplit(deepripe_motifs.df$variants, ","),
function(v) {
int_var <- intersect(iso, v)
sig_var <- paste(int_var, collapse = ",")
n_sig <- length(int_var)
n_var <- length(v)
return(list(sig_var, n_sig, n_var))
}
)
# Adding them as new columns
deepripe_motifs.df$n_sig_variants <- sapply(sig_list, `[[`, 2)
deepripe_motifs.df$sig_variants <- sapply(sig_list, `[[`, 1)
deepripe_motifs.df$n_variants <- sapply(sig_list, `[[`, 3)
deepripe_motifs.df
})
# Convert the list to dataframe
deepripe_motifs_iso.df <- rbindlist(sig_var_iso, use.names = T, idcol = "isoform")
# Count significant motifs per binding site and isoform
deepripe_motifs_tab <- left_join(deepripe_motifs_iso.df %>%
dplyr::count(RBP, isoform, set, name = "n_motifs"),
deepripe_motifs_iso.df %>%
filter(n_sig_variants > 0) %>%
dplyr::count(RBP, isoform, set, name = "n_motifs_with_sig_var"),
by = c("RBP", "set", "isoform")
)
# Add variant ratios: each motif position can mutate to 3 alternative bases, so total_pos_var = width * 3
deepripe_motifs_iso.df <- deepripe_motifs_iso.df %>%
mutate(
total_pos_var = width * 3,
raw_var_ratio = n_sig_variants / n_variants,
norm_var_ratio = n_sig_variants / total_pos_var
)
var_ratio <- deepripe_motifs_iso.df %>%
group_by(RBP, set) %>%
summarise(
max_raw_var_ratio = max(raw_var_ratio),
min_raw_var_ratio = min(raw_var_ratio),
max_norm_var_ratio = max(norm_var_ratio),
min_norm_var_ratio = min(norm_var_ratio)
) %>%
ungroup()
# Merge with the individual variants
# deepripe_bs<-left_join(
# deepripe_bs,
# left_join(var_ratio, deepripe_motifs_tab, by=c("RBP", "set", "isoform")),
# by=c("RBP", "set", "isoform"))
fwrite(deepripe_sig_mut, "output/tab/motif_sig_pos/Empirical_Cutoff/DeepRiPe_predictions_sig_mut.tab")
fwrite(deepripe_motifs_iso.df, "output/tab/motif_sig_pos/Empirical_Cutoff/DeepRiPe_predictions_sig_var_and_isoforms.tab")
fwrite(deepripe_bs, "output/tab/motif_sig_pos/Empirical_Cutoff/DeepRiPe_predictions_sig_var.tab")
fwrite(deepripe_motifs.df, "output/tab/motif_sig_pos/Empirical_Cutoff/DeepRiPe_predictions_sig_var_perbs.tab")
| /RBP_predictions/sig_bs_per_motif_EmpiricalCutoff_v2.R | permissive | mcortes-lopez/CD19_splicing_mutagenesis | R | false | false | 13,390 | r |
#airquality = read.csv('path/airquality.csv',header=TRUE, sep=",")
data()
airquality <- datasets::airquality
####Top 10 rows and last 10 rows
head(airquality,10)
tail(airquality,10)
######Columns
airquality[,c(1,2)]
df<-airquality[,-6]
summary(airquality[1,])
airquality$Wind
###########Summary of the data#########
summary(airquality$Temp)
summary(airquality)
summary(airquality$Wind)
#####################
plot(airquality$Wind)
plot(airquality$Temp,airquality$Wind,type="p")
plot(airquality)
# points and lines
plot(airquality$Wind, type= "b") # p: points, l: lines,b: both
plot(airquality$Wind, xlab = 'No of Instances',
ylab = 'ozone Concentration', main = 'Ozone levels in NY city',
col = 'blue')
# Horizontal bar plot
barplot(airquality$Ozone, main = 'Ozone Concentration in air',
ylab = 'ozone levels', col= 'blue',horiz = F,axes=T)
#Histogram
hist(airquality$Temp)
hist(airquality$Temp,
main = 'Solar Radiation values in air',
xlab = 'Solar rad.', col='blue')
#Single box plot
boxplot(airquality$Temp,main="Boxplot")
# Multiple box plots
boxplot(airquality[,1:4],main='Multiple')
##Home work
e_quakes<-datasets::quakes
#margin of the grid(mar),
#no of rows and columns(mfrow),
#whether a border is to be included(bty)
#and position of the
#labels(las: 1 for horizontal, las: 0 for vertical)
#bty - box around the plot
par(mfrow=c(3,3),mar=c(2,5,2,1), las=0, bty="o")
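# mfrow=c(3,3) lays the next nine plots on a 3x3 grid; reset with par(mfrow=c(1,1))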
plot(airquality$Ozone)
plot(airquality$Ozone, airquality$Wind)
plot(airquality$Ozone, type= "l")
plot(airquality$Ozone, type= "l")
plot(airquality$Ozone, type= "l")
barplot(airquality$Ozone, main = 'Ozone Concentration in air',
xlab = 'ozone levels', col='green',horiz = TRUE)
hist(airquality$Solar.R)
boxplot(airquality$Solar.R)
boxplot(airquality[,0:4], main='Multiple Box plots')
sd(airquality$Ozone,na.rm = T)
| /Classes/R-studio/Descriptive_Vis.R | no_license | VishnubhotlaBharadwaj/Data-Science | R | false | false | 1,946 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nonlinear_KLDE.R
\name{do.klde}
\alias{do.klde}
\title{Kernel Local Discriminant Embedding}
\usage{
do.klde(X, label, ndim = 2, t = 1, numk = max(ceiling(nrow(X)/10),
2), preprocess = c("center", "scale", "cscale", "decorrelate",
"whiten"), ktype = c("gaussian", 1), kcentering = TRUE)
}
\arguments{
\item{X}{an \eqn{(n\times p)} matrix or data frame whose rows are observations.}
\item{label}{a length-\eqn{n} vector of data class labels.}
\item{ndim}{an integer-valued target dimension.}
\item{t}{kernel bandwidth in \eqn{(0,\infty)}.}
\item{numk}{the number of neighboring points for k-nn graph construction.}
\item{preprocess}{an additional option for preprocessing the data.
Default is "center". See also \code{\link{aux.preprocess}} for more details.}
\item{ktype}{a vector containing name of a kernel and corresponding parameters. See also \code{\link{aux.kernelcov}} for complete description of Kernel Trick.}
\item{kcentering}{a logical; \code{TRUE} to use centered Kernel matrix, \code{FALSE} otherwise.}
}
\value{
a named list containing
\describe{
\item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
\item{trfinfo}{a list containing information for out-of-sample prediction.}
}
}
\description{
Kernel Local Discriminant Embedding (KLDE) is a variant of Local Discriminant Embedding in that
it aims to preserve inter- and intra-class neighborhood information in a nonlinear manner using
kernel trick. \emph{Note} that the combination of kernel matrix and its eigendecomposition
often suffers from lacking numerical rank. For such case, our algorithm returns a warning message and
algorithm stops working any further due to its innate limitations of constructing weight matrix.
}
\examples{
\dontrun{
## generate data of 2 types with clear difference
diff = 5
dt1 = aux.gensamples(n=123)-diff;
dt2 = aux.gensamples(n=123)+diff;
## merge the data and create a label correspondingly
Y = rbind(dt1,dt2)
label = c(rep(1,123), rep(2,123))
## try different neighborhood size
out1 <- do.klde(Y, label, kcentering=TRUE, numk=5)
out2 <- do.klde(Y, label, numk=10)
out3 <- do.klde(Y, label, numk=25)
## visualize
par(mfrow=c(1,3))
plot(out1$Y[,1], out1$Y[,2], main="k=5")
plot(out2$Y[,1], out2$Y[,2], main="k=10")
plot(out3$Y[,1], out3$Y[,2], main="k=25")
}
}
\references{
\insertRef{hwann-tzong_chen_local_2005}{Rdimtools}
}
\author{
Kisung You
}
| /man/nonlinear_KLDE.Rd | no_license | rcannood/Rdimtools | R | false | true | 2,476 | rd |
44e41c21dcb3c74a9814c79b153dc904 mult_bool_matrix_14_14_14.sat.qdimacs 41362 122310 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Tentrup/mult-matrix/mult_bool_matrix_14_14_14.sat/mult_bool_matrix_14_14_14.sat.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 83 | r |
pkgname <- "plsgenomics"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
library('plsgenomics')
assign(".oldSearch", search(), pos = 'CheckExEnv')
###############################################################################################
cleanEx()
nameEx("Colon")
### * Colon
flush(stderr()); flush(stdout())
### Name: Colon
### Title: Gene expression data from Alon et al. (1999)
### Aliases: Colon
### Keywords: datasets
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load data set
data(Colon)
# how many samples and how many genes ?
dim(Colon$X)
# how many samples of class 1 and 2 respectively ?
sum(Colon$Y==1)
sum(Colon$Y==2)
###############################################################################################
cleanEx()
nameEx("Ecoli")
### * Ecoli
flush(stderr()); flush(stdout())
### Name: Ecoli
### Title: Ecoli gene expression and connectivity data from Kao et al.
### (2003)
### Aliases: Ecoli
### Keywords: datasets
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load data set
data(Ecoli)
# how many genes and how many transcription factors ?
dim(Ecoli$CONNECdata)
###############################################################################################
cleanEx()
nameEx("SRBCT")
### * SRBCT
flush(stderr()); flush(stdout())
### Name: SRBCT
### Title: Gene expression data from Khan et al. (2001)
### Aliases: SRBCT
### Keywords: datasets
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load data set
data(SRBCT)
# how many samples and how many genes ?
dim(SRBCT$X)
# how many samples of class 1, 2, 3 and 4, respectively ?
sum(SRBCT$Y==1)
sum(SRBCT$Y==2)
sum(SRBCT$Y==3)
sum(SRBCT$Y==4)
###############################################################################################
cleanEx()
nameEx("TFA.estimate")
### * TFA.estimate
flush(stderr()); flush(stdout())
### Name: TFA.estimate
### Title: Prediction of Transcription Factor Activities using PLS
### Aliases: TFA.estimate
### Keywords: regression
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load Ecoli data
data(Ecoli)
# estimate TFAs based on 3 latent components
TFA.estimate(Ecoli$CONNECdata,Ecoli$GEdata,ncomp=3,nruncv=0)
# estimate TFAs and determine the best number of latent components simultaneously
TFA.estimate(Ecoli$CONNECdata,Ecoli$GEdata,ncomp=1:5,nruncv=20)
###############################################################################################
cleanEx()
nameEx("gsim")
### * gsim
flush(stderr()); flush(stdout())
### Name: gsim
### Title: GSIM for binary data
### Aliases: gsim
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load Colon data
data(Colon)
IndexLearn <- c(sample(which(Colon$Y==2),12),sample(which(Colon$Y==1),8))
Xtrain <- Colon$X[IndexLearn,]
Ytrain <- Colon$Y[IndexLearn]
Xtest <- Colon$X[-IndexLearn,]
# preprocess data
resP <- preprocess(Xtrain= Xtrain, Xtest=Xtest,Threshold = c(100,16000),Filtering=c(5,500),log10.scale=TRUE,row.stand=TRUE)
# perform prediction by GSIM
res <- gsim(Xtrain=resP$pXtrain,Ytrain= Ytrain,Xtest=resP$pXtest,Lambda=10,hA=50,hB=NULL)
res$Cvg
sum(res$Ytest!=Colon$Y[-IndexLearn])
###############################################################################################
cleanEx()
nameEx("gsim.cv")
### * gsim.cv
flush(stderr()); flush(stdout())
### Name: gsim.cv
### Title: Determination of the ridge regularization parameter and the
### bandwidth to be used for classification with GSIM for binary data
### Aliases: gsim.cv
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load Colon data
data(Colon)
IndexLearn <- c(sample(which(Colon$Y==2),12),sample(which(Colon$Y==1),8))
Xtrain <- Colon$X[IndexLearn,]
Ytrain <- Colon$Y[IndexLearn]
Xtest <- Colon$X[-IndexLearn,]
# preprocess data
resP <- preprocess(Xtrain= Xtrain, Xtest=Xtest,Threshold = c(100,16000),Filtering=c(5,500),log10.scale=TRUE,row.stand=TRUE)
# Determine optimum h and lambda
hl <- gsim.cv(Xtrain=resP$pXtrain,Ytrain=Ytrain,hARange=c(7,20),LambdaRange=c(0.1,1),hB=NULL)
# perform prediction by GSIM
res <- gsim(Xtrain=resP$pXtrain,Ytrain=Ytrain,Xtest=resP$pXtest,Lambda=hl$Lambda,hA=hl$hA,hB=NULL)
res$Cvg
sum(res$Ytest!=Colon$Y[-IndexLearn])
###############################################################################################
cleanEx()
nameEx("leukemia")
### * leukemia
flush(stderr()); flush(stdout())
### Name: leukemia
### Title: Gene expression data from Golub et al. (1999)
### Aliases: leukemia
### Keywords: datasets
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load data set
data(leukemia)
# how many samples and how many genes ?
dim(leukemia$X)
# how many samples of class 1 and 2, respectively ?
sum(leukemia$Y==1)
sum(leukemia$Y==2)
###############################################################################################
cleanEx()
nameEx("mgsim")
### * mgsim
flush(stderr()); flush(stdout())
### Name: mgsim
### Title: GSIM for categorical data
### Aliases: mgsim
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load SRBCT data
data(SRBCT)
IndexLearn <- c(sample(which(SRBCT$Y==1),10),sample(which(SRBCT$Y==2),4),sample(which(SRBCT$Y==3),7),sample(which(SRBCT$Y==4),9))
# perform prediction by MGSIM
res <- mgsim(Ytrain=SRBCT$Y[IndexLearn],Xtrain=SRBCT$X[IndexLearn,],Lambda=0.001,h=19,Xtest=SRBCT$X[-IndexLearn,])
res$Cvg
sum(res$Ytest!=SRBCT$Y[-IndexLearn])
# prediction for another sample
Xnew <- SRBCT$X[83,]
# projection of Xnew onto the estimated direction
Xproj <- Xnew %*% res$beta
# Compute the linear predictor for each class except class 1
eta <- diag(cbind(rep(1,3),t(Xproj)) %*% res$Coefficients)
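# class 1 is the reference class with linear predictor 0, hence the leading 0 in which.max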
Ypred <- which.max(c(0,eta))
Ypred
SRBCT$Y[83]
###############################################################################################
cleanEx()
nameEx("mgsim.cv")
### * mgsim.cv
flush(stderr()); flush(stdout())
### Name: mgsim.cv
### Title: Determination of the ridge regularization parameter and the
### bandwidth to be used for classification with GSIM for categorical
### data
### Aliases: mgsim.cv
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load SRBCT data
data(SRBCT)
IndexLearn <- c(sample(which(SRBCT$Y==1),10),sample(which(SRBCT$Y==2),4),
sample(which(SRBCT$Y==3),7),sample(which(SRBCT$Y==4),9))
### Determine optimum h and lambda
# /!\ takes about 30 seconds to run
#hl <- mgsim.cv(Ytrain=SRBCT$Y[IndexLearn],Xtrain=SRBCT$X[IndexLearn,],
# LambdaRange=c(0.1),hRange=c(7,20))
### perform prediction by MGSIM
#res <- mgsim(Ytrain=SRBCT$Y[IndexLearn],Xtrain=SRBCT$X[IndexLearn,],Lambda=hl$Lambda,
# h=hl$h,Xtest=SRBCT$X[-IndexLearn,])
#res$Cvg
#sum(res$Ytest!=SRBCT$Y[-IndexLearn])
###############################################################################################
cleanEx()
nameEx("mrpls")
### * mrpls
flush(stderr()); flush(stdout())
### Name: mrpls
### Title: Ridge Partial Least Square for categorical data
### Aliases: mrpls
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load SRBCT data
data(SRBCT)
IndexLearn <- c(sample(which(SRBCT$Y==1),10),sample(which(SRBCT$Y==2),4),sample(which(SRBCT$Y==3),7),sample(which(SRBCT$Y==4),9))
# perform prediction by MRPLS
res <- mrpls(Ytrain=SRBCT$Y[IndexLearn],Xtrain=SRBCT$X[IndexLearn,],Lambda=0.001,ncomp=2,Xtest=SRBCT$X[-IndexLearn,])
sum(res$Ytest!=SRBCT$Y[-IndexLearn])
# prediction for another sample
Xnew <- SRBCT$X[83,]
# Compute the linear predictor for each class except class 1
eta <- diag(t(cbind(c(1,Xnew),c(1,Xnew),c(1,Xnew))) %*% res$Coefficients)
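# class 1 is the reference class with linear predictor 0, hence the leading 0 in which.max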
Ypred <- which.max(c(0,eta))
Ypred
SRBCT$Y[83]
###############################################################################################
cleanEx()
nameEx("mrpls.cv")
### * mrpls.cv
flush(stderr()); flush(stdout())
### Name: mrpls.cv
### Title: Determination of the ridge regularization parameter and the
### number of PLS components to be used for classification with RPLS for
### categorical data
### Aliases: mrpls.cv
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load SRBCT data
data(SRBCT)
IndexLearn <- c(sample(which(SRBCT$Y==1),10),sample(which(SRBCT$Y==2),4),sample(which(SRBCT$Y==3),7),sample(which(SRBCT$Y==4),9))
# Determine optimum ncomp and Lambda
nl <- mrpls.cv(Ytrain=SRBCT$Y[IndexLearn],Xtrain=SRBCT$X[IndexLearn,],LambdaRange=c(0.1,1),ncompMax=3)
# perform prediction by MRPLS
res <- mrpls(Ytrain=SRBCT$Y[IndexLearn],Xtrain=SRBCT$X[IndexLearn,],Lambda=nl$Lambda,ncomp=nl$ncomp,Xtest=SRBCT$X[-IndexLearn,])
sum(res$Ytest!=SRBCT$Y[-IndexLearn])
###############################################################################################
cleanEx()
nameEx("pls.lda")
### * pls.lda
flush(stderr()); flush(stdout())
### Name: pls.lda
### Title: Classification with PLS Dimension Reduction and Linear
### Discriminant Analysis
### Aliases: pls.lda
### Keywords: multivariate
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load leukemia data
data(leukemia)
# Classify observations 1,2,3 (test set) using observations 4 to 38 (training set), with 2 PLS components
pls.lda(Xtrain=leukemia$X[-(1:3),],Ytrain=leukemia$Y[-(1:3)],Xtest=leukemia$X[1:3,],ncomp=2,nruncv=0)
# Classify observations 1,2,3 (test set) using observations 4 to 38 (training set), with the best number of components as determined by cross-validation
pls.lda(Xtrain=leukemia$X[-(1:3),],Ytrain=leukemia$Y[-(1:3)],Xtest=leukemia$X[1:3,],ncomp=1:4,nruncv=20)
###############################################################################################
cleanEx()
nameEx("pls.lda.cv")
### * pls.lda.cv
flush(stderr()); flush(stdout())
### Name: pls.lda.cv
### Title: Determination of the number of latent components to be used for
### classification with PLS and LDA
### Aliases: pls.lda.cv
### Keywords: multivariate
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load leukemia data
data(leukemia)
# Determine the best number of components to be used for classification using the cross-validation procedure
# choose the best number from 2,3,4
pls.lda.cv(Xtrain=leukemia$X,Ytrain=leukemia$Y,ncomp=2:4,nruncv=20)
# choose the best number from 1,2,3
pls.lda.cv(Xtrain=leukemia$X,Ytrain=leukemia$Y,ncomp=3,nruncv=20)
###############################################################################################
cleanEx()
nameEx("pls.regression")
### * pls.regression
flush(stderr()); flush(stdout())
### Name: pls.regression
### Title: Multivariate Partial Least Squares Regression
### Aliases: pls.regression
### Keywords: multivariate
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load the Ecoli data
data(Ecoli)
# perform pls regression
# with unit latent components
pls.regression(Xtrain=Ecoli$CONNECdata,Ytrain=Ecoli$GEdata,Xtest=Ecoli$CONNECdata,ncomp=1:3,unit.weights=FALSE)
# with unit weight vectors
pls.regression(Xtrain=Ecoli$CONNECdata,Ytrain=Ecoli$GEdata,Xtest=Ecoli$CONNECdata,ncomp=1:3,unit.weights=TRUE)
###############################################################################################
cleanEx()
nameEx("pls.regression.cv")
### * pls.regression.cv
flush(stderr()); flush(stdout())
### Name: pls.regression.cv
### Title: Determination of the number of latent components to be used in
### PLS regression
### Aliases: pls.regression.cv
### Keywords: multivariate
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load Ecoli data
data(Ecoli)
# determine the best number of components for PLS regression using the cross-validation approach
# choose the best number from 1,2,3,4
pls.regression.cv(Xtrain=Ecoli$CONNECdata,Ytrain=Ecoli$GEdata,ncomp=4,nruncv=20)
# choose the best number from 2,3
pls.regression.cv(Xtrain=Ecoli$CONNECdata,Ytrain=Ecoli$GEdata,ncomp=c(2,3),nruncv=20)
###############################################################################################
cleanEx()
nameEx("preprocess")
### * preprocess
flush(stderr()); flush(stdout())
### Name: preprocess
### Title: preprocess for microarray data
### Aliases: preprocess
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load Colon data
data(Colon)
IndexLearn <- c(sample(which(Colon$Y==2),27),sample(which(Colon$Y==1),14))
Xtrain <- Colon$X[IndexLearn,]
Ytrain <- Colon$Y[IndexLearn]
Xtest <- Colon$X[-IndexLearn,]
# preprocess data
resP <- preprocess(Xtrain= Xtrain, Xtest=Xtest,Threshold = c(100,16000),Filtering=c(5,500),log10.scale=TRUE,row.stand=TRUE)
# how many genes after preprocess?
dim(resP$pXtrain)[2]
###############################################################################################
cleanEx()
nameEx("rpls")
### * rpls
flush(stderr()); flush(stdout())
### Name: rpls
### Title: Ridge Partial Least Square for binary data
### Aliases: rpls
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load Colon data
data(Colon)
IndexLearn <- c(sample(which(Colon$Y==2),12),sample(which(Colon$Y==1),8))
# preprocess data
res <- preprocess(Xtrain= Colon$X[IndexLearn,], Xtest=Colon$X[-IndexLearn,],Threshold = c(100,16000),Filtering=c(5,500),log10.scale=TRUE,row.stand=TRUE)
# the results are given in res$pXtrain and res$pXtest
# perform prediction by RPLS
resrpls <- rpls(Ytrain=Colon$Y[IndexLearn],Xtrain=res$pXtrain,Lambda=0.6,ncomp=1,Xtest=res$pXtest)
resrpls$hatY
sum(resrpls$Ytest!=Colon$Y[-IndexLearn])
# prediction for another sample
Xnew <- res$pXtest[1,]
# Compute the linear predictor for each class except class 0
eta <- c(1,Xnew) %*% resrpls$Coefficients
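# the reference class has linear predictor 0, hence the leading 0 in which.max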
Ypred <- which.max(c(0,eta))
Ypred
###############################################################################################
cleanEx()
nameEx("rpls.cv")
### * rpls.cv
flush(stderr()); flush(stdout())
### Name: rpls.cv
### Title: Determination of the ridge regularization parameter and the
### number of PLS components to be used for classification with RPLS for
### binary data
### Aliases: rpls.cv
### ** Examples
# load plsgenomics library
library(plsgenomics)
# load Colon data
data(Colon)
IndexLearn <- c(sample(which(Colon$Y==2),12),sample(which(Colon$Y==1),8))
# preprocess data
res <- preprocess(Xtrain= Colon$X[IndexLearn,], Xtest=Colon$X[-IndexLearn,],Threshold = c(100,16000),Filtering=c(5,500),log10.scale=TRUE,row.stand=TRUE)
# the results are given in res$pXtrain and res$pXtest
# Determine optimum ncomp and lambda
nl <- rpls.cv(Ytrain=Colon$Y[IndexLearn],Xtrain=res$pXtrain,LambdaRange=c(0.1,1),ncompMax=3)
# perform prediction by RPLS
resrpls <- rpls(Ytrain=Colon$Y[IndexLearn],Xtrain=res$pXtrain,Lambda=nl$Lambda,ncomp=nl$ncomp,Xtest=res$pXtest)
sum(resrpls$Ytest!=Colon$Y[-IndexLearn])
###############################################################################################
cleanEx()
nameEx("variable.selection")
### * variable.selection
flush(stderr()); flush(stdout())
### Name: variable.selection
### Title: Variable selection using the PLS weights
### Aliases: variable.selection
### Keywords: multivariate
### ** Examples
# load plsgenomics library
library(plsgenomics)
# generate X and Y (4 observations and 3 variables)
X<-matrix(c(4,3,3,4,1,0,6,7,3,5,5,9),4,3,byrow=FALSE)
Y<-c(1,1,2,2)
# select the 2 best variables
variable.selection(X,Y,nvar=2)
# order the 3 variables
variable.selection(X,Y)
# load the leukemia data
data(leukemia)
# select the 50 best variables from the leukemia data
variable.selection(leukemia$X,leukemia$Y,nvar=50)
###############################################################################################
cleanEx()
nameEx("rirls.spls")
### * rirls.spls
flush(stderr()); flush(stdout())
### Name: rirls.spls
### Title: Classification by Ridge Iteratively Reweighted Least Squares
### followed by Adaptive Sparse PLS regression for binary response
### Aliases: rirls.spls
### Keywords: multivariate
### ** Examples
### load plsgenomics library
library(plsgenomics)
### generating data
n <- 50
p <- 100
sample1 <- sample.bin(n=n, p=p, kstar=20, lstar=2, beta.min=0.25, beta.max=0.75,
mean.H=0.2, sigma.H=10, sigma.F=5)
X <- sample1$X
Y <- sample1$Y
### splitting between learning and testing set
index.train <- sort(sample(1:n, size=round(0.7*n)))
index.test <- (1:n)[-index.train]
Xtrain <- X[index.train,]
Ytrain <- Y[index.train,]
Xtest <- X[index.test,]
Ytest <- Y[index.test,]
### fitting the model, and predicting new observations
model1 <- rirls.spls(Xtrain=Xtrain, Ytrain=Ytrain, lambda.ridge=2, lambda.l1=0.5, ncomp=2,
Xtest=Xtest, adapt=TRUE, maxIter=100, svd.decompose=TRUE)
str(model1)
### prediction error rate
sum(model1$hatYtest!=Ytest) / length(index.test)
###############################################################################################
cleanEx()
nameEx("rirls.spls.tune")
### * rirls.spls.tune
flush(stderr()); flush(stdout())
### Name: rirls.spls.tune
### Title: Tuning parameters (ncomp, lambda.l1, lambda.ridge) for Ridge Iteratively Reweighted Least Squares
### followed by Adaptive Sparse PLS regression for binary response, by K-fold cross-validation
### Aliases: rirls.spls.tune
### Keywords: multivariate
### ** Examples
### load plsgenomics library
library(plsgenomics)
### generating data
n <- 50
p <- 100
sample1 <- sample.bin(n=n, p=p, kstar=20, lstar=2, beta.min=0.25, beta.max=0.75, mean.H=0.2,
sigma.H=10, sigma.F=5)
X <- sample1$X
Y <- sample1$Y
### hyper-parameters values to test
lambda.l1.range <- seq(0.05,0.95,by=0.3) # between 0 and 1
ncomp.range <- 1:2
# log-linear range between 0.01 and 1000 for lambda.ridge.range
logspace <- function( d1, d2, n) exp(log(10)*seq(d1, d2, length.out=n))
lambda.ridge.range <- signif(logspace(d1=-2, d2=3, n=6), digits=3)
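# i.e. 0.01, 0.1, 1, 10, 100, 1000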
### tuning the hyper-parameters
cv1 <- rirls.spls.tune(X=X, Y=Y, lambda.ridge.range=lambda.ridge.range,
lambda.l1.range=lambda.l1.range, ncomp.range=ncomp.range,
adapt=TRUE, maxIter=100, svd.decompose=TRUE,
return.grid=TRUE, ncores=1, nfolds=10)
str(cv1)
###############################################################################################
### * <FOOTER>
###
cat("Time elapsed: ", proc.time() - get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
###
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "\\(> \\)?### [*]+" ***
### End: ***
quit('no')
| /plsgenomics/tests/plsgenomics-Ex.R | no_license | ingted/R-Examples | R | false | false | 18,641 | r |
## These functions create a special "matrix" object that caches its inverse,
## so the inverse is computed at most once and reused on later calls.
## makeCacheMatrix creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve returns a matrix that is the inverse of 'x': it computes the
## inverse on the first call and returns the cached value on subsequent calls.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if (!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
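## A minimal usage sketch (the 2x2 matrix below is purely illustrative):
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(m) # computes the inverse via solve() and caches it
cacheSolve(m) # prints "getting cached data" and returns the cached inverse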
| /cachematrix.R | no_license | mouraaex/ProgrammingAssignment2 | R | false | false | 939 | r |