content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Tune envelope dimensions jointly via BIC
#'
#' Exhaustive grid search over every combination of candidate envelope
#' dimensions (one candidate vector per tensor mode): a TEMM model is fit
#' for each combination and the combination with the smallest BIC wins.
#'
#' @param u_candi list of candidate envelope dimensions, one numeric
#'   vector per tensor mode of X
#' @param K number of mixture components (clusters)
#' @param X data tensor; the last dimension indexes the n observations
#' @param iter.max maximum number of iterations forwarded to TEMM
#' @param stop convergence tolerance forwarded to TEMM
#' @param trueY optional true cluster labels, forwarded to TEMM
#' @return list with opt.u (best dimensions), opt.id (cluster assignment),
#'   opt.Mu (estimated means), bic (minimal BIC). opt.id/opt.Mu are NULL
#'   when no candidate attains a BIC below the 1e9 initial bound.
#' @export
tune_u_joint = function(u_candi, K, X, iter.max=500, stop=1e-3, trueY=NULL){
  ## u_candi is a list of candidate envelope dimensions
  dimen = dim(X)[-length(dim(X))]   # mode dimensions of one observation
  dim_u = sapply(u_candi, length)   # number of candidates per mode
  n = dim(X)[length(dim(X))]        # sample size (last dimension of X)
  M = length(dim(X))-1              # number of tensor modes
  # unfold each observation into one column of a (prod(dimen) x n) matrix
  Xnl = asplit(X,M+1)
  Xm = sapply(Xnl,as.vector)
  opt.bic = 1e9
  opt.u = rep(0,M)
  # BUG FIX: initialize so the return value is well defined even when no
  # candidate improves on the initial BIC bound (previously these were
  # unset, which made the final return() error out in that case).
  opt.id = NULL
  opt.Mu = NULL
  for(i in seq_len(prod(dim_u))) {
    # map the flat index i to one candidate index per mode
    u_ind = as.vector(arrayInd(i, dim_u))
    u_now = rep(0,M)
    for (m in 1:M) {
      u_now[m] = u_candi[[m]][u_ind[m]]
    }
    # effective number of parameters used in the BIC penalty
    Ku = (K-1)*prod(u_now) + sum(dimen*(dimen+1))/2
    env = TEMM(Xn=X, u=u_now, K=K, initial="kmeans", iter.max=iter.max, stop=stop, trueY=trueY)
    loglk = logMixTenGau(Xm, env$pi, env$eta, env$Mu.est, env$SIG.est)
    bic_now = -2*loglk + log(n)*Ku
    if(bic_now<opt.bic){
      opt.bic = bic_now
      opt.u = u_now
      opt.id = env$id
      opt.Mu = env$Mu.est
    }
  }
  return(list(opt.u=opt.u, opt.id=opt.id, opt.Mu=opt.Mu, bic=opt.bic))
}
| /R/tune_u_joint.R | permissive | azuryee/TensorClustering | R | false | false | 2,336 | r | #' @export
## Jointly tune envelope dimensions by BIC: fit a TEMM model for every
## combination of candidate dimensions (u_candi holds one candidate vector
## per tensor mode) and return the combination minimizing BIC.
tune_u_joint = function(u_candi, K, X, iter.max=500, stop=1e-3, trueY=NULL){
  full_dim = dim(X)
  M = length(full_dim) - 1            # number of tensor modes
  dimen = full_dim[seq_len(M)]        # dimensions of a single observation
  n = full_dim[M + 1]                 # sample size (last dimension)
  p = prod(dimen)
  dim_u = sapply(u_candi, length)     # candidates available per mode
  # unfold every observation into one column of a p x n matrix
  Xm = sapply(asplit(X, M + 1), as.vector)
  opt.bic = 1e9
  opt.u = rep(0, M)
  # walk the Cartesian product of candidate dimensions via a flat index
  for (flat in 1:prod(dim_u)) {
    pos = as.vector(arrayInd(flat, dim_u))
    u_now = vapply(seq_len(M), function(m) u_candi[[m]][pos[m]], numeric(1))
    # effective parameter count for the BIC penalty
    Ku = (K - 1) * prod(u_now) + sum(dimen * (dimen + 1)) / 2
    env = TEMM(Xn = X, u = u_now, K = K, initial = "kmeans",
               iter.max = iter.max, stop = stop, trueY = trueY)
    loglk = logMixTenGau(Xm, env$pi, env$eta, env$Mu.est, env$SIG.est)
    bic_now = -2 * loglk + log(n) * Ku
    if (bic_now < opt.bic) {
      opt.bic = bic_now
      opt.u = u_now
      opt.id = env$id
      opt.Mu = env$Mu.est
    }
  }
  list(opt.u = opt.u, opt.id = opt.id, opt.Mu = opt.Mu, bic = opt.bic)
}
|
# Auto-extracted example script for the `melsyd` dataset (from its Rd file).
library(fpp)  # the fpp package ships the melsyd dataset
### Name: melsyd
### Title: Total weekly air passenger numbers on Ansett airline flights
### between Melbourne and Sydney, 1987-1992.
### Aliases: melsyd
### Keywords: datasets
### ** Examples
# Plot the weekly passenger-number time series
plot(melsyd)
| /data/genthat_extracted_code/fpp/examples/melsyd.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 228 | r | library(fpp)
### Name: melsyd
### Title: Total weekly air passenger numbers on Ansett airline flights
### between Melbourne and Sydney, 1987-1992.
### Aliases: melsyd
### Keywords: datasets
### ** Examples
# Plot the melsyd series (requires the fpp package to be attached)
plot(melsyd)
|
## Build a cache-aware wrapper around a matrix. The returned list holds
## four closures that share one enclosing environment:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL  # NULL means "inverse not computed yet"
  set <- function(y) {
    # replacing the matrix invalidates the cache
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inv) cached_inv <<- inv
  getinverse <- function() cached_inv
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. The first call computes the inverse with solve() and
## stores it via x$setinverse(); later calls reuse the cached copy.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  z <- x$getinverse()
  if(!is.null(z)){
    # cache hit: skip the (potentially expensive) solve() call
    message("Retrieving cached data")  # BUG FIX: was "Retreiving cashed data"
    return(z)
  }
  data <- x$get()
  z <- solve(data,...)
  x$setinverse(z)  # memoize for subsequent calls
  z
}
| /cachematrix.R | no_license | bahani/ProgrammingAssignment2 | R | false | false | 881 | r | ## This R program solves for the inverse of a matrix and caches it
## makeCacheMatrix function stores a given matrix and its inverse
## Returns a list of closures (set/get/setinverse/getinverse) sharing one
## environment; `z` in that environment caches the inverse (NULL until it
## is computed, and reset to NULL whenever set() replaces the matrix).
makeCacheMatrix <- function(x = matrix()) {
  z <- NULL
  set <- function(y){
    x <<- y
    z <<- NULL
  }
  get <- function() x
  setinverse <- function(inv) z <<- inv
  getinverse <- function() z
  list(set = set, get = get,
       setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve function solves for the inverse of a given matrix
## and stores the results in makeCacheMatrix
## Extra arguments in ... are passed through to solve().
## NOTE(review): the message below has typos ("Retreiving cashed"); it is
## left unchanged here because it is a runtime string, not a comment.
cacheSolve <- function(x, ...) {
  z <- x$getinverse()
  if(!is.null(z)){
    # cache hit: reuse the stored inverse
    message("Retreiving cashed data")
    return(z)
  }
  data <- x$get()
  z <- solve(data,...)
  x$setinverse(z)  # memoize for later calls
  z
}
|
library(survival)
#
# A test of nesting. It makes sure that model.frame is built correctly
#
# Wrapper: calls survfit() from inside a function, so `fit` and `mydata`
# are local variables rather than objects in the global environment.
tfun <- function(fit, mydata) {
    survfit(fit, newdata=mydata)
}
myfit <- coxph(Surv(time, status) ~ age + factor(sex), lung)
# The nested call (temp1) must produce the same object as the direct
# call (temp2), apart from the stored call itself.
temp1 <- tfun(myfit, lung[1:5,])
temp2 <- survfit(myfit, lung[1:5,])
indx <- match('call', names(temp1)) #the call components won't match
all.equal(unclass(temp1)[-indx], unclass(temp2)[-indx])
| /scripts/AltDatabase/tools/R/PC/library/survival/tests/nested.R | no_license | venkatmi/oncosplice | R | false | false | 424 | r | library(survival)
#
# A test of nesting. It makes sure tha model.frame is built correctly
#
tfun <- function(fit, mydata) {
survfit(fit, newdata=mydata)
}
myfit <- coxph(Surv(time, status) ~ age + factor(sex), lung)
temp1 <- tfun(myfit, lung[1:5,])
temp2 <- survfit(myfit, lung[1:5,])
indx <- match('call', names(temp1)) #the call components won't match
all.equal(unclass(temp1)[-indx], unclass(temp2)[-indx])
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(leaflet)
library(shiny)     # (was loaded twice in the original; once is enough)
library(lubridate)
library(tidyverse)

# State-level eviction data; read_csv() expects the file next to app.R.
state_map_data <- read_csv(".//state_map_data")
state_map_data

# UI: year slider, layer selector, state selector, and the leaflet map.
ui <- fluidPage(
   sliderInput(inputId = "year",
               label = "Select a Year:",
               min = min(state_map_data$year),
               max = max(state_map_data$year),
               value = 2010,
               step = 1),
   radioButtons(inputId = "layer",
                label = "Select a Dataset to View:",
                choices = c("Eviction Filing Rate", "Percent Rent Burden",
                            "Percent Renter Occupied", "Poverty Rate")),
   selectInput(inputId = "state",
               label = "Select a State:",
               # BUG FIX: the original referenced `eviction_state`, which is
               # never defined in this script (only `state_map_data` is read
               # above), so the app crashed while building the UI. Assumes
               # the CSV has a `name` column -- confirm against the data.
               choices = unique(state_map_data$name)),
   mainPanel(
      leafletOutput("map"))
)
# Server: render the base map centered on the continental US.
server <- function(input, output, session) {
   output$map <- renderLeaflet({
      leaflet() %>%
         addProviderTiles('Hydda.Full') %>%
         # NOTE(review): leaflet's `fill` argument is logical; to shade the
         # polygons by poverty rate this should probably be `fillColor`
         # with a color palette.
         addPolygons(data = state_map_data, fill = state_map_data$poverty_rate) %>%
         setView(lat = 39.8283, lng = -98.5795, zoom = 4)
   })
}
# Run the application
shinyApp(ui = ui, server = server)
| /Eviction_Shiny/Eviction_Shiny/app.R | no_license | monipip3/eviction_lab_project | R | false | false | 1,760 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(leaflet)
library(shiny)
library(lubridate)
library(tidyverse)
# state-level data; read_csv() expects the file next to app.R
state_map_data <- read_csv(".//state_map_data")
state_map_data
# NOTE(review): library(shiny) is loaded a second time here (harmless).
library(shiny)
# UI: year slider, layer selector, state selector, and the leaflet map.
# NOTE(review): `eviction_state` (used in selectInput below) is never
# defined in this script -- only `state_map_data` is read above -- so the
# app errors while building the UI. Presumably this should be
# `state_map_data$name`; confirm against the data.
ui <- fluidPage(
   sliderInput(inputId = "year",
               label = "Select a Year:",
               min = min(state_map_data$year),
               max = max(state_map_data$year),
               value = 2010,
               step = 1),
   radioButtons(inputId = "layer",
                label = "Select a Dataset to View:",
                choices = c("Eviction Filing Rate", "Percent Rent Burden",
                            "Percent Renter Occupied", "Poverty Rate")),
   selectInput(inputId = "state",
               label = "Select a State:",
               choices = unique(eviction_state$name)),
   mainPanel(
      leafletOutput("map"))
)
# Server: renders the base map centered on the continental US.
server <- function(input, output, session) {
   output$map <- renderLeaflet({
      leaflet() %>%
         addProviderTiles('Hydda.Full') %>%
         # NOTE(review): leaflet's `fill` argument is logical; `fillColor`
         # with a palette is likely intended here -- confirm.
         addPolygons(data = state_map_data, fill = state_map_data$poverty_rate)%>%
         setView(lat = 39.8283, lng = -98.5795, zoom = 4)
   })
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/umap_naive.R
\name{umap.naive.predict}
\alias{umap.naive.predict}
\title{predict embedding of new data given an existing umap object}
\usage{
umap.naive.predict(umap, data)
}
\arguments{
\item{umap}{object of class umap}
\item{data}{matrix with new data}
}
\value{
matrix with embedding coordinates
}
\description{
predict embedding of new data given an existing umap object
}
\keyword{internal}
| /man/umap.naive.predict.Rd | permissive | JenniferSLyon/umap | R | false | true | 475 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/umap_naive.R
\name{umap.naive.predict}
\alias{umap.naive.predict}
\title{predict embedding of new data given an existing umap object}
\usage{
umap.naive.predict(umap, data)
}
\arguments{
\item{umap}{object of class umap}
\item{data}{matrix with new data}
}
\value{
matrix with embedding coordinates
}
\description{
predict embedding of new data given an existing umap object
}
\keyword{internal}
|
# Plotting utilities: package and font setup --------------------------------
library(ggplot2)
library(extrafont)
library(sysfonts)
library(showtext)
loadfonts(device = "win")  # NOTE(review): "win" device is Windows-specific
# add the Arial font (assumes the .ttf files are findable by sysfonts)
font_add("Arial", regular = "arial.ttf", bold = "arialbd.ttf", italic = "ariali.ttf", bolditalic = "arialbi.ttf")
# function to plot CCM results
# Base-graphics plot of cross-map skill (rho) against cross-map lag with
# +/- 1 sd error bars, for the rows of `ccm_result_data` whose `library`
# column equals `variable`. `species` becomes the plot title.
# Reads columns: library, tar.lag, rho, sd.rho.
# NOTE(review): plotting rho against -max_lag:0 assumes the subset rows are
# ordered by tar.lag and cover every lag from -max_lag to 0 -- confirm.
plot_ccm_result = function(ccm_result_data, variable, species){
  # AgeDiversity/Abundance xmap fishing mortality
  subdata = subset(ccm_result_data, subset = ccm_result_data$library == variable)
  max_lag = max(abs(subdata$tar.lag))
  plot(x = -max_lag:0, y = subdata$rho, type = 'l', col = 'blue', ylim = c(-0.5,1), xaxt = 'n',
       xlab = expression(paste('Cross map lag (', italic('l'), ')')), main = species,
       ylab = expression(paste('Correlation coefficient ( ', rho, ' )')))
  axis(1, at = seq(-max_lag, 0, 1))
  # vertical error bar plus top/bottom caps at each lag
  segments(-max_lag:0, subdata[, 'rho'] - subdata[, 'sd.rho'],
           -max_lag:0, subdata[, 'rho'] + subdata[, 'sd.rho'], col = 'blue')
  segments(-max_lag:0 - 0.1, subdata[, 'rho'] - subdata[, 'sd.rho'],
           -max_lag:0 + 0.1, subdata[, 'rho'] - subdata[, 'sd.rho'], col = 'blue')
  segments(-max_lag:0 - 0.1, subdata[, 'rho'] + subdata[, 'sd.rho'],
           -max_lag:0 + 0.1, subdata[, 'rho'] + subdata[, 'sd.rho'], col = 'blue')
  abline(h = 0)
  legend(x = -max_lag, y = 0.98, legend = paste0(variable, ' xmap fishingM'), text.col = c('blue'))
}
# ggplot function to plot CCM results
# Expects species_list$ccm (data frame with columns library, tar.lag, rho,
# sd.rho, kendall.tau, significance) and species_list$species (title).
# rho values failing either test (kendall.tau >= 0.1 or significance >= 0.1)
# are blanked to NA so they are not drawn.
gplot_ccm_result = function(species_list){
  # AgeDiversity/Abundance xmap fishing mortality
  data = species_list$ccm
  data[data$kendall.tau >= 0.1 | data$significance >= 0.1, c('rho')] = NA
  var_order = c('AgeDiversity', 'Abundance')
  data$library = factor(data$library, levels=var_order)
  species = species_list$species
  # points with hand-drawn error bars (three geom_segment calls: top cap,
  # bottom cap, vertical bar)
  ggplot(aes(x = tar.lag, y = rho, color=library), data = data) +
    geom_hline(aes(yintercept = 0), linetype = 'dashed', color = 'black') +
    geom_point(size = 5) +
    geom_segment(aes(x=tar.lag-0.1, y=rho+sd.rho, xend=tar.lag+0.1, yend=rho+sd.rho), size=1) +
    geom_segment(aes(x=tar.lag-0.1, y=rho-sd.rho, xend=tar.lag+0.1, yend=rho-sd.rho), size=1) +
    geom_segment(aes(x=tar.lag, y=rho-sd.rho, xend=tar.lag, yend=rho+sd.rho), size=1) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          panel.border = element_rect(size = 1.1),
          axis.line = element_line(color = 'black'),
          axis.title = element_text(color = 'black', size = 16),
          axis.text = element_text(color = 'black', size = 14),
          plot.title = element_text(hjust = 0.5, vjust = 3, size = 18, face = 'bold.italic'),
          legend.position = c(0.025, 0.9),
          legend.background = element_blank(),
          legend.title = element_blank(),
          legend.text = element_text(size = 14),
          legend.justification = c(0, 0)) +
    scale_y_continuous(limits = c(-1, 1.2)) +
    scale_color_manual(labels = paste0(var_order, ' xmap F'), values = c('red', 'green')) +
    labs(x = 'Lag of fishing mortality', y = expression(rho), title = species)
}
# plot S-map coefficients
# Reshapes the S-map coefficient matrix in `smap_result_list` to long form
# and dispatches to smap_timeseries_plot (mode="series") or smap_boxplot
# (mode="box"). `colors`/`shapes` are indexed by position in the global
# `variables` vector.
# NOTE(review): relies on two names not defined in this file -- the global
# `variables` (variable ordering) and `melt` (presumably reshape2::melt);
# confirm both are available where this is sourced.
plotSmapCoeff = function(
  smap_result_list,
  species,
  colors,
  shapes,
  mode="series")
{
  # extract lags for each variable
  data_for_smap = smap_result_list$data
  library_var = names(data_for_smap)[1]
  target_vars = names(data_for_smap)[-1]
  num_na = colSums(is.na(data_for_smap))
  lag_of_var = as.numeric(num_na[target_vars])
  # coefficients of S-map model without library variable and constant
  data_of_coeff = smap_result_list$coefficients
  N = nrow(na.omit(data_of_coeff))  # effective sample size (complete rows)
  data_of_coeff = data_of_coeff[target_vars]
  rho = round(smap_result_list$rho, 2)
  # sort data according to customized order of variables
  order_var = variables[sort(match(names(data_of_coeff), variables))]
  lag_of_var = lag_of_var[order(match(names(data_of_coeff), order_var))]
  data_of_coeff = data_of_coeff[, order_var, drop = FALSE]
  ntime = dim(data_of_coeff)[1]
  nvar = dim(data_of_coeff)[2]
  # long format: one row per (time, variable) pair
  coeff.melt = cbind(date = rep(1:ntime, nvar), melt(data_of_coeff))
  whichvar = match(names(data_of_coeff), variables)
  cl = rep(colors[whichvar], each = ntime)
  sh = rep(shapes[whichvar], each = ntime)
  if (mode == "series"){
    return(smap_timeseries_plot(data=coeff.melt,
                                lib_var=library_var,
                                cl=cl,
                                sh=sh,
                                species=species,
                                rho=rho,
                                N=N))
  } else if (mode == "box"){
    return(smap_boxplot(data=coeff.melt,
                        lib_var=library_var,
                        cl=cl,
                        sh=sh,
                        n_var=length(unique(colors)),
                        species=species,
                        rho=rho,
                        N=N))
  } else {
    stop("mode must be either 'series' or 'box'")
  }
}
# plot time series
# One line+point series per variable of long-format `data` (columns: date,
# variable, value). `cl`/`sh` supply per-row colors and shapes; the title
# shows species (italic), rho, and N. Returns the ggplot object.
smap_timeseries_plot = function(data, lib_var, cl, sh, species, rho, N){
  # symmetric y-axis around zero
  max_value = max(abs(data$value), na.rm = TRUE)
  scaleFUN = function(x){sprintf("%.2f", x)}  # 2-decimal axis labels
  smaptime =
    ggplot(data=data, aes(x=date, y=value, shape=variable, color=variable, fill=variable)) +
    geom_point(size=4) +
    geom_line(size=1) +
    geom_hline(yintercept=0, linetype='dashed') +
    theme(plot.title = element_text(hjust = 0.5, size = 24),
          axis.title = element_text(size = 22, face = "bold"),
          axis.text = element_text(size = 20, colour = "black"),
          panel.border = element_rect(size = 1.1, fill = NA, colour = 'black'),
          panel.background = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          aspect.ratio = 0.8,
          legend.position = "none",
          text = element_text(family='Arial')) +
    labs(x = 'Time', y = 'S-map coefficients') +
    ggtitle(bquote(paste(italic(.(species)), ", ", rho, " = ", .(rho), " ",
                         italic("N"), " = ", .(N)))) +
    scale_y_continuous(labels=scaleFUN, limits=c(-max_value, max_value)) +
    scale_shape_manual(values=unique(sh)) +
    scale_color_manual(values=unique(cl)) +
    scale_fill_manual(values=unique(cl))
  return(smaptime)
}
# legend for time series plot
# Builds a dummy plot whose only purpose is to carry the legend mapping
# each entry of the global `variables` (NOTE(review): defined elsewhere)
# to its color/shape, labeled "<variable> effect on <lib_var>".
smap_timeseries_legend = function(lib_var, colors, shapes){
  data = data.frame(cbind(colors, shapes))
  data$x = 0
  data$y = 0
  data$variable = factor(c(1:dim(data)[1]))
  legend_labels = paste0(variables, " effect on ", lib_var)
  smaptime =
    ggplot(data=data, aes(x=x, y=y, shape=variable, color=variable, fill=variable)) +
    geom_point(size=4) +
    geom_line() +
    theme_bw() +
    theme(legend.background = element_blank(),
          legend.text = element_text(size = 16),
          legend.key.size = unit(1, 'cm')) +
    scale_shape_manual(labels=legend_labels, values=shapes) +
    scale_color_manual(labels=legend_labels, values=colors) +
    scale_fill_manual(labels=legend_labels, values=colors)
  return(smaptime)
}
# plot box plot
# Boxplot of S-map coefficients per variable (long-format `data` with
# columns variable/value). The x-axis order depends on whether the target
# variables include SST (sea surface) or SBT (sea bottom) temperature.
# NOTE(review): the two print() calls look like leftover debug output, and
# `n`/`n_var` are computed/passed but never used.
smap_boxplot = function(data, lib_var, cl, sh, n_var, species, rho, N){
  max_value = max(abs(data$value), na.rm = TRUE)  # symmetric y-axis bound
  tar_vars = levels(data$variable)
  if (length(grep("SST", tar_vars)) > 0){
    limits = c("AgeDiversity", "Abundance", "AMO", "SST", "CVofSST")
  } else {
    limits = c("AgeDiversity", "Abundance", "AMO", "SBT", "CVofSBT")
  }
  print(limits)
  print(tar_vars)
  n = length(unique(data$variable))
  scaleFUN = function(x){sprintf("%.2f", x)}  # 2-decimal axis labels
  smapbox =
    ggplot(data=data, aes(x=variable, y=value)) +
    geom_boxplot(aes(color=variable), na.rm=T, lwd=1, width=0.6, outlier.shape=NA) +
    geom_point(aes(color=variable)) +
    geom_hline(yintercept=0, linetype='dashed') +
    theme(plot.title = element_text(hjust = 0.5, size = 24),
          axis.title = element_text(size = 22, face = 'bold'),
          axis.title.x = element_blank(),
          axis.text = element_text(size = 20, colour = 'black'),
          panel.border = element_rect(size = 1.1, fill = NA, colour = 'black'),
          panel.background = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          aspect.ratio = 1.1,
          legend.position = "none",
          text = element_text(family='Arial')) +
    labs(y = 'S-map coefficients') +
    ggtitle(bquote(paste(italic(.(species)), ", ", rho, " = ", .(rho), " ",
                         italic("N"), " = ", .(N)))) +
    scale_y_continuous(labels=scaleFUN, limits=c(-max_value, max_value)) +
    coord_cartesian(xlim = c(1, 5)) +
    scale_x_discrete(breaks=tar_vars, labels=tar_vars, limits=limits) +
    scale_color_manual(values=unique(cl))
  return(smapbox)
}
# legend for box plot
# Dummy plot carrying only the legend: one colored box per entry of the
# global `variables` (NOTE(review): defined elsewhere), labeled
# "<variable> effect on <lib_var>".
smap_boxplot_legend = function(lib_var, colors){
  data = data.frame(colors)
  data$y = 0
  data$variable = factor(c(1:dim(data)[1]))
  legend_labels = paste0(variables, " effect on ", lib_var)
  smapbox =
    ggplot(data=data, aes(x=variable, y=y, color=variable)) +
    geom_boxplot(na.rm=T, lwd=1) +
    theme_bw() +
    theme(legend.background = element_blank(),
          legend.text = element_text(size = 16),
          legend.key.size = unit(1, 'cm')) +
    scale_color_manual(labels=legend_labels, values=colors)
  return(smapbox)
}
| /script/utils/plot.r | permissive | snakepowerpoint/SpatialVariability | R | false | false | 9,993 | r | library(ggplot2)
library(extrafont)
library(sysfonts)
library(showtext)
loadfonts(device = "win")  # NOTE(review): Windows-specific font device
# add the Arial font
font_add("Arial", regular = "arial.ttf", bold = "arialbd.ttf", italic = "ariali.ttf", bolditalic = "arialbi.ttf")
# function to plot CCM results
# Base-graphics plot of cross-map skill (rho) vs lag with +/- 1 sd error
# bars, for rows of ccm_result_data whose `library` column equals `variable`.
plot_ccm_result = function(ccm_result_data, variable, species){
  # AgeDiversity/Abundance xmap fishing mortality
  subdata = subset(ccm_result_data, subset = ccm_result_data$library == variable)
  max_lag = max(abs(subdata$tar.lag))
  plot(x = -max_lag:0, y = subdata$rho, type = 'l', col = 'blue', ylim = c(-0.5,1), xaxt = 'n',
       xlab = expression(paste('Cross map lag (', italic('l'), ')')), main = species,
       ylab = expression(paste('Correlation coefficient ( ', rho, ' )')))
  axis(1, at = seq(-max_lag, 0, 1))
  segments(-max_lag:0, subdata[, 'rho'] - subdata[, 'sd.rho'],
           -max_lag:0, subdata[, 'rho'] + subdata[, 'sd.rho'], col = 'blue')
  segments(-max_lag:0 - 0.1, subdata[, 'rho'] - subdata[, 'sd.rho'],
           -max_lag:0 + 0.1, subdata[, 'rho'] - subdata[, 'sd.rho'], col = 'blue')
  segments(-max_lag:0 - 0.1, subdata[, 'rho'] + subdata[, 'sd.rho'],
           -max_lag:0 + 0.1, subdata[, 'rho'] + subdata[, 'sd.rho'], col = 'blue')
  abline(h = 0)
  legend(x = -max_lag, y = 0.98, legend = paste0(variable, ' xmap fishingM'), text.col = c('blue'))
}
# ggplot function to plot CCM results
# rho values failing either filter (kendall.tau >= 0.1 or significance
# >= 0.1) are blanked to NA before plotting.
gplot_ccm_result = function(species_list){
  # AgeDiversity/Abundance xmap fishing mortality
  data = species_list$ccm
  data[data$kendall.tau >= 0.1 | data$significance >= 0.1, c('rho')] = NA
  var_order = c('AgeDiversity', 'Abundance')
  data$library = factor(data$library, levels=var_order)
  species = species_list$species
  ggplot(aes(x = tar.lag, y = rho, color=library), data = data) +
    geom_hline(aes(yintercept = 0), linetype = 'dashed', color = 'black') +
    geom_point(size = 5) +
    geom_segment(aes(x=tar.lag-0.1, y=rho+sd.rho, xend=tar.lag+0.1, yend=rho+sd.rho), size=1) +
    geom_segment(aes(x=tar.lag-0.1, y=rho-sd.rho, xend=tar.lag+0.1, yend=rho-sd.rho), size=1) +
    geom_segment(aes(x=tar.lag, y=rho-sd.rho, xend=tar.lag, yend=rho+sd.rho), size=1) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          panel.border = element_rect(size = 1.1),
          axis.line = element_line(color = 'black'),
          axis.title = element_text(color = 'black', size = 16),
          axis.text = element_text(color = 'black', size = 14),
          plot.title = element_text(hjust = 0.5, vjust = 3, size = 18, face = 'bold.italic'),
          legend.position = c(0.025, 0.9),
          legend.background = element_blank(),
          legend.title = element_blank(),
          legend.text = element_text(size = 14),
          legend.justification = c(0, 0)) +
    scale_y_continuous(limits = c(-1, 1.2)) +
    scale_color_manual(labels = paste0(var_order, ' xmap F'), values = c('red', 'green')) +
    labs(x = 'Lag of fishing mortality', y = expression(rho), title = species)
}
# plot S-map coefficients
# Reshapes the coefficient matrix to long form and dispatches to
# smap_timeseries_plot or smap_boxplot. NOTE(review): depends on the
# global `variables` and on `melt` (presumably reshape2), neither of
# which is defined in this file.
plotSmapCoeff = function(
  smap_result_list,
  species,
  colors,
  shapes,
  mode="series")
{
  # extract lags for each variable
  data_for_smap = smap_result_list$data
  library_var = names(data_for_smap)[1]
  target_vars = names(data_for_smap)[-1]
  num_na = colSums(is.na(data_for_smap))
  lag_of_var = as.numeric(num_na[target_vars])
  # coefficients of S-map model without library variable and constant
  data_of_coeff = smap_result_list$coefficients
  N = nrow(na.omit(data_of_coeff))
  data_of_coeff = data_of_coeff[target_vars]
  rho = round(smap_result_list$rho, 2)
  # sort data according to customized order of variables
  order_var = variables[sort(match(names(data_of_coeff), variables))]
  lag_of_var = lag_of_var[order(match(names(data_of_coeff), order_var))]
  data_of_coeff = data_of_coeff[, order_var, drop = FALSE]
  ntime = dim(data_of_coeff)[1]
  nvar = dim(data_of_coeff)[2]
  coeff.melt = cbind(date = rep(1:ntime, nvar), melt(data_of_coeff))
  whichvar = match(names(data_of_coeff), variables)
  cl = rep(colors[whichvar], each = ntime)
  sh = rep(shapes[whichvar], each = ntime)
  if (mode == "series"){
    return(smap_timeseries_plot(data=coeff.melt,
                                lib_var=library_var,
                                cl=cl,
                                sh=sh,
                                species=species,
                                rho=rho,
                                N=N))
  } else if (mode == "box"){
    return(smap_boxplot(data=coeff.melt,
                        lib_var=library_var,
                        cl=cl,
                        sh=sh,
                        n_var=length(unique(colors)),
                        species=species,
                        rho=rho,
                        N=N))
  } else {
    stop("mode must be either 'series' or 'box'")
  }
}
# plot time series
# One line+point series per variable of long-format `data`; returns the
# ggplot object with a symmetric y-axis around zero.
smap_timeseries_plot = function(data, lib_var, cl, sh, species, rho, N){
  max_value = max(abs(data$value), na.rm = TRUE)
  scaleFUN = function(x){sprintf("%.2f", x)}
  smaptime =
    ggplot(data=data, aes(x=date, y=value, shape=variable, color=variable, fill=variable)) +
    geom_point(size=4) +
    geom_line(size=1) +
    geom_hline(yintercept=0, linetype='dashed') +
    theme(plot.title = element_text(hjust = 0.5, size = 24),
          axis.title = element_text(size = 22, face = "bold"),
          axis.text = element_text(size = 20, colour = "black"),
          panel.border = element_rect(size = 1.1, fill = NA, colour = 'black'),
          panel.background = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          aspect.ratio = 0.8,
          legend.position = "none",
          text = element_text(family='Arial')) +
    labs(x = 'Time', y = 'S-map coefficients') +
    ggtitle(bquote(paste(italic(.(species)), ", ", rho, " = ", .(rho), " ",
                         italic("N"), " = ", .(N)))) +
    scale_y_continuous(labels=scaleFUN, limits=c(-max_value, max_value)) +
    scale_shape_manual(values=unique(sh)) +
    scale_color_manual(values=unique(cl)) +
    scale_fill_manual(values=unique(cl))
  return(smaptime)
}
# legend for time series plot
# Dummy plot that only carries the legend. NOTE(review): uses the global
# `variables`, defined elsewhere.
smap_timeseries_legend = function(lib_var, colors, shapes){
  data = data.frame(cbind(colors, shapes))
  data$x = 0
  data$y = 0
  data$variable = factor(c(1:dim(data)[1]))
  legend_labels = paste0(variables, " effect on ", lib_var)
  smaptime =
    ggplot(data=data, aes(x=x, y=y, shape=variable, color=variable, fill=variable)) +
    geom_point(size=4) +
    geom_line() +
    theme_bw() +
    theme(legend.background = element_blank(),
          legend.text = element_text(size = 16),
          legend.key.size = unit(1, 'cm')) +
    scale_shape_manual(labels=legend_labels, values=shapes) +
    scale_color_manual(labels=legend_labels, values=colors) +
    scale_fill_manual(labels=legend_labels, values=colors)
  return(smaptime)
}
# plot box plot
# Boxplot of S-map coefficients per variable; x-axis order depends on
# whether the target variables include SST or SBT temperature.
# NOTE(review): the print() calls look like leftover debug output;
# `n` and `n_var` are unused.
smap_boxplot = function(data, lib_var, cl, sh, n_var, species, rho, N){
  max_value = max(abs(data$value), na.rm = TRUE)
  tar_vars = levels(data$variable)
  if (length(grep("SST", tar_vars)) > 0){
    limits = c("AgeDiversity", "Abundance", "AMO", "SST", "CVofSST")
  } else {
    limits = c("AgeDiversity", "Abundance", "AMO", "SBT", "CVofSBT")
  }
  print(limits)
  print(tar_vars)
  n = length(unique(data$variable))
  scaleFUN = function(x){sprintf("%.2f", x)}
  smapbox =
    ggplot(data=data, aes(x=variable, y=value)) +
    geom_boxplot(aes(color=variable), na.rm=T, lwd=1, width=0.6, outlier.shape=NA) +
    geom_point(aes(color=variable)) +
    geom_hline(yintercept=0, linetype='dashed') +
    theme(plot.title = element_text(hjust = 0.5, size = 24),
          axis.title = element_text(size = 22, face = 'bold'),
          axis.title.x = element_blank(),
          axis.text = element_text(size = 20, colour = 'black'),
          panel.border = element_rect(size = 1.1, fill = NA, colour = 'black'),
          panel.background = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          aspect.ratio = 1.1,
          legend.position = "none",
          text = element_text(family='Arial')) +
    labs(y = 'S-map coefficients') +
    ggtitle(bquote(paste(italic(.(species)), ", ", rho, " = ", .(rho), " ",
                         italic("N"), " = ", .(N)))) +
    scale_y_continuous(labels=scaleFUN, limits=c(-max_value, max_value)) +
    coord_cartesian(xlim = c(1, 5)) +
    scale_x_discrete(breaks=tar_vars, labels=tar_vars, limits=limits) +
    scale_color_manual(values=unique(cl))
  return(smapbox)
}
# legend for box plot
# Dummy plot carrying only the legend. NOTE(review): uses the global
# `variables`, defined elsewhere.
smap_boxplot_legend = function(lib_var, colors){
  data = data.frame(colors)
  data$y = 0
  data$variable = factor(c(1:dim(data)[1]))
  legend_labels = paste0(variables, " effect on ", lib_var)
  smapbox =
    ggplot(data=data, aes(x=variable, y=y, color=variable)) +
    geom_boxplot(na.rm=T, lwd=1) +
    theme_bw() +
    theme(legend.background = element_blank(),
          legend.text = element_text(size = 16),
          legend.key.size = unit(1, 'cm')) +
    scale_color_manual(labels=legend_labels, values=colors)
  return(smapbox)
}
|
#' Access files in the current app
#'
#' NOTE: If you manually change your package name in the DESCRIPTION,
#' don't forget to change it here too, and in the config file.
#' For a safer name change mechanism, use the `golem::set_golem_name()` function.
#'
#' @param ... character vectors, specifying subdirectory and file(s)
#' within your package. The default, none, returns the root of the app.
#'
#' @noRd
app_sys <- function(...){
  # thin wrapper around system.file() pinned to this package's name
  system.file(..., package = "volcanogolem")
}
#' Read App Config
#'
#' @param value Value to retrieve from the config file.
#' @param config GOLEM_CONFIG_ACTIVE value. If unset, R_CONFIG_ACTIVE.
#' If unset, "default".
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @noRd
get_golem_config <- function(
  value,
  # precedence: GOLEM_CONFIG_ACTIVE, then R_CONFIG_ACTIVE, then "default"
  config = Sys.getenv(
    "GOLEM_CONFIG_ACTIVE",
    Sys.getenv(
      "R_CONFIG_ACTIVE",
      "default"
    )
  ),
  use_parent = TRUE
){
  config::get(
    value = value,
    config = config,
    # Modify this if your config file is somewhere else:
    file = app_sys("golem-config.yml"),
    use_parent = use_parent
  )
}
| /R/app_config.R | no_license | kgilds/shiny_volcano_golem | R | false | false | 1,127 | r | #' Access files in the current app
#'
#' NOTE: If you manually change your package name in the DESCRIPTION,
#' don't forget to change it here too, and in the config file.
#' For a safer name change mechanism, use the `golem::set_golem_name()` function.
#'
#' @param ... character vectors, specifying subdirectory and file(s)
#' within your package. The default, none, returns the root of the app.
#'
#' @noRd
app_sys <- function(...){
  # thin wrapper around system.file() pinned to this package's name
  system.file(..., package = "volcanogolem")
}
#' Read App Config
#'
#' @param value Value to retrieve from the config file.
#' @param config GOLEM_CONFIG_ACTIVE value. If unset, R_CONFIG_ACTIVE.
#' If unset, "default".
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @noRd
get_golem_config <- function(
  value,
  # precedence: GOLEM_CONFIG_ACTIVE, then R_CONFIG_ACTIVE, then "default"
  config = Sys.getenv(
    "GOLEM_CONFIG_ACTIVE",
    Sys.getenv(
      "R_CONFIG_ACTIVE",
      "default"
    )
  ),
  use_parent = TRUE
){
  config::get(
    value = value,
    config = config,
    # Modify this if your config file is somewhere else:
    file = app_sys("golem-config.yml"),
    use_parent = use_parent
  )
}
|
## Build a caching wrapper around a matrix.
## The returned list exposes set/get for the matrix itself and
## setInverse/getInverse for a cached copy of its inverse; all four
## closures share one enclosing environment, which acts as the cache.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(y) {
    x <<- y
    inv_cache <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  setInverse <- function(inverse) inv_cache <<- inverse
  getInverse <- function() inv_cache
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Return the inverse of a makeCacheMatrix() object, computing it at most
## once: a previously cached inverse is reused (announced via message()),
## otherwise solve() runs and the result is stored back into the cache.
## Additional arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # cache miss: compute, memoize, return
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | amandaluniz/ProgrammingAssignment2 | R | false | false | 1,020 | r | ## Put comments here that give an overall description of what your
## functions do
## Creates a special "matrix" object that can cache its inverse
## Returns a list of closures (set/get/setInverse/getInverse) sharing one
## environment; `i` in that environment caches the inverse (NULL until
## computed, reset to NULL whenever set() replaces the matrix).
makeCacheMatrix <- function(x = matrix()) {
        i <- NULL
        set <- function(y) {
                x <<- y
                i <<- NULL
        }
        get <- function() x
        setInverse <- function(inverse) i <<- inverse
        getInverse <- function() i
        list(set = set, get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
        i <- x$getInverse()
        if(!is.null(i)) {
                # cache hit: reuse the stored inverse
                message("getting cached data")
                return(i)
        }
        data <- x$get()
        i <- solve(data, ...)
        x$setInverse(i)  # memoize for later calls
        i
}
|
# Sketch of permutation designs for paired old/new survey samples on 14
# mountains, using the permute package.
library(permute)
n_mountains <- 14
# two samples (old and new survey) per mountain -> 28 samples in total
mountains <- gl(n = n_mountains, k = 2)# must be factor - safer to use real mountain names
old_new <- gl(n = 2, k = 1, length = n_mountains * 2)#should be factor
## blocks
CTRL <- how(blocks = mountains, complete = TRUE, maxperm = Inf)
check(1:length(mountains), control = CTRL) # how many possible permutations
#Some example permutations
set.seed(42)
shuffleSet(1:length(mountains), nset = 10, control = CTRL)
#expected ordination code
# NOTE(review): rda()/anova() come from the vegan package, which is not
# loaded here, and `spp` is not defined in this file -- this chunk shows
# the intended ordination call and is not runnable as-is.
mod <- rda(spp ~ old_new + Condition(mountains))#partial out effect of mountain
anova(mod, permutations = CTRL)
## strata in plots - gives similar permutations
CTRL <- how(plots = Plots(strata = mountains))
check(1:length(mountains), control = CTRL)
set.seed(42)
shuffleSet(1:length(mountains), nset = 10, control = CTRL)
##Patryk's
h<-how(within=Within(type="series", constant=TRUE), plots=Plots(strata=mountains, type="free"))
shuffleSet(1:length(mountains), nset = 10, control = h)#not appropriate
| /Expectations/permutations.R | no_license | amyeycott/KlimaVeg | R | false | false | 984 | r | library(permute)
# Permutation-design sketch: paired old/new samples on 14 mountains
# (requires the permute package, loaded above).
n_mountains <- 14
mountains <- gl(n = n_mountains, k = 2)# must be factor - safer to use real mountain names
old_new <- gl(n = 2, k = 1, length = n_mountains * 2)#should be factor
## blocks
CTRL <- how(blocks = mountains, complete = TRUE, maxperm = Inf)
check(1:length(mountains), control = CTRL) # how many possible permutations
#Some example permutations
set.seed(42)
shuffleSet(1:length(mountains), nset = 10, control = CTRL)
#expected ordination code
# NOTE(review): rda()/anova() are vegan functions (vegan is never loaded)
# and `spp` is undefined here -- illustration only, not runnable as-is.
mod <- rda(spp ~ old_new + Condition(mountains))#partial out effect of mountain
anova(mod, permutations = CTRL)
## strata in plots - gives similar permutations
CTRL <- how(plots = Plots(strata = mountains))
check(1:length(mountains), control = CTRL)
set.seed(42)
shuffleSet(1:length(mountains), nset = 10, control = CTRL)
##Patryk's
h<-how(within=Within(type="series", constant=TRUE), plots=Plots(strata=mountains, type="free"))
shuffleSet(1:length(mountains), nset = 10, control = h)#not appropriate
|
# run-mif.R
# Run iterated filtering (MIF2) from one or more starting points on a pomp
# model, inspect the parameter traces, estimate the log-likelihood of each
# fit by particle filtering, and save the best parameter set.

# Clear the decks ---------------------------------------------------------
# NOTE(review): wiping the global environment inside a script is an
# anti-pattern; kept because the original workflow relies on it.
rm(list = ls(all.names = TRUE))

# Load libraries ----------------------------------------------------------
library(tidyverse)
library(pomp)
library(doParallel)

# Load the pomp object ----------------------------------------------------
# The original read "../output/covid-ga-pomp-object.RDS" and then immediately
# overwrote the result with the line below, so that dead read was removed.
pomp_object <- readRDS("../output2/pomp-model.RDS")

# Set the parameters to estimate (i.e., those to vary) --------------------
# We have to fix several parameters. E.g. it's impossible to estimate
# all beta and the reduction factor, they are fully collinear. So, we
# fix all the betas here.
params_to_estimate <- c("beta_d", "beta_u", "beta_e", "beta_red_factor",
                        "gamma_u", "gamma_d", "detect_frac_0")
params_perts <- rw.sd(beta_d = 0,           # change to let it vary
                      beta_u = 0,           # change to let it vary
                      beta_e = 0,           # change to let it vary
                      beta_red_factor = 0.02,
                      gamma_u = 0.02,
                      gamma_d = 0.02,
                      detect_frac_0 = 0.02)
curr_theta <- coef(pomp_object)

# Define "proposal" function for starting values --------------------------
# Draws a random starting parameter vector: the betas get a normal
# perturbation (sd = 0 keeps them fixed for now), all other parameters a
# log-normal one. Returns a named vector with the same names as `theta`.
prop_func <- function(theta) {
  betas <- theta[c("beta_d", "beta_u", "beta_e")]
  one <- rnorm(n = length(betas), mean = betas, sd = 0)  # update sd if desired
  others <- theta[-(which(names(theta) %in% names(betas)))]
  two <- rlnorm(n = length(others),
                meanlog = log(others),
                sdlog = 1)
  out <- c(one, two)
  names(out) <- names(theta)
  out
}

# Run MIF from different starting points ----------------------------------
num_particles <- 2000
num_mif_iterations <- 50
#num_cores <- parallel::detectCores() - 1 # alter as needed
num_cores <- 1 # alter as needed
# Register a parallel backend so %dopar% actually distributes work;
# without this, foreach warns and falls back to sequential execution.
registerDoParallel(cores = num_cores)

mifs <- foreach(i = 1:num_cores,
                .combine = c,
                .export = c("params_perts", "prop_func", "curr_theta")) %dopar% {
  print(sprintf('starting mif number %d', i))
  theta_guess <- curr_theta
  theta_guess[params_to_estimate] <- prop_func(curr_theta[params_to_estimate])
  mif2(pomp_object, Nmif = num_mif_iterations, params = theta_guess,
       Np = num_particles, cooling.fraction = 0.5, rw.sd = params_perts)
}

# Diagnostic plot: parameter and log-likelihood traces over MIF iterations.
mifs %>%
  traces() %>%
  melt() %>%
  filter(variable %in% c("loglik", params_to_estimate)) %>%
  ggplot(aes(x = iteration, y = value, group = L1, color = L1)) +
  geom_line() +
  facet_wrap(~variable, scales = "free_y") +
  guides(color = "none")  # "none" replaces the deprecated FALSE

# Use particle filter to get the likelihood at the end of MIF run ---------
pf1 <- foreach(mf = mifs, .combine = c) %dopar% {
  pf <- replicate(n = 10, logLik(pfilter(mf, Np = 10000)))
  logmeanexp(pf)
}

# Extract and save best parameter set for MCMC ----------------------------
mf1 <- mifs[[which.max(pf1)]]
theta_mif <- coef(mf1)
# BUG FIX: the original call was saveRDS("../output/mif-mles.RDS"), which
# passes the path as the object and omits the file, so the MLEs were never
# written. Save the parameter vector to the intended path instead.
saveRDS(theta_mif, "../output/mif-mles.RDS")

# Cache -------------------------------------------------------------------
#
# # Question: Are there rules of thumb for specifying Nmif, Np, coooling.fraction and rw.sd? Or ways to diagnose if one is choosing them right?
# # Other question: Is this only estimating those parameters that are specified in rw.sd and all others are assumed fixed?
#
# # pf <- pfilter(covid_ga_pomp, params = coef(covid_ga_pomp), Np = 1000)
#
# test <- mif2(pomp_object, Nmif = 50, params = theta.guess,
#              Np = 2000, cooling.fraction = 1,
#              rw.sd = rw.sd(beta_red_factor = 0.02, gamma_u = 0.02,
#                            gamma_d = 0.02, detect_frac_0 = 0.02))
#
#
# mifs <- foreach (i = 1:10, .combine = c) %dopar% { #Inspect from multiple, randomly chosen starting points
#   theta.guess <- theta.true
#   theta.guess[estpars] <- rlnorm(n = length(estpars),
#                                  meanlog = log(theta.guess[estpars]), sdlog = 1)
# }
#
| /cache/ah-workingfiles/at-run-mif.R | no_license | Proloy2018/COVID-stochastic-fitting | R | false | false | 3,984 | r | # run-mif.R
# Run iterated filtering (MIF2) from one or more starting points on a pomp
# model, inspect the parameter traces, estimate the log-likelihood of each
# fit by particle filtering, and save the best parameter set.

# Clear the decks ---------------------------------------------------------
# NOTE(review): wiping the global environment inside a script is an
# anti-pattern; kept because the original workflow relies on it.
rm(list = ls(all.names = TRUE))

# Load libraries ----------------------------------------------------------
library(tidyverse)
library(pomp)
library(doParallel)

# Load the pomp object ----------------------------------------------------
# The original read "../output/covid-ga-pomp-object.RDS" and then immediately
# overwrote the result with the line below, so that dead read was removed.
pomp_object <- readRDS("../output2/pomp-model.RDS")

# Set the parameters to estimate (i.e., those to vary) --------------------
# We have to fix several parameters. E.g. it's impossible to estimate
# all beta and the reduction factor, they are fully collinear. So, we
# fix all the betas here.
params_to_estimate <- c("beta_d", "beta_u", "beta_e", "beta_red_factor",
                        "gamma_u", "gamma_d", "detect_frac_0")
params_perts <- rw.sd(beta_d = 0,           # change to let it vary
                      beta_u = 0,           # change to let it vary
                      beta_e = 0,           # change to let it vary
                      beta_red_factor = 0.02,
                      gamma_u = 0.02,
                      gamma_d = 0.02,
                      detect_frac_0 = 0.02)
curr_theta <- coef(pomp_object)

# Define "proposal" function for starting values --------------------------
# Draws a random starting parameter vector: the betas get a normal
# perturbation (sd = 0 keeps them fixed for now), all other parameters a
# log-normal one. Returns a named vector with the same names as `theta`.
prop_func <- function(theta) {
  betas <- theta[c("beta_d", "beta_u", "beta_e")]
  one <- rnorm(n = length(betas), mean = betas, sd = 0)  # update sd if desired
  others <- theta[-(which(names(theta) %in% names(betas)))]
  two <- rlnorm(n = length(others),
                meanlog = log(others),
                sdlog = 1)
  out <- c(one, two)
  names(out) <- names(theta)
  out
}

# Run MIF from different starting points ----------------------------------
num_particles <- 2000
num_mif_iterations <- 50
#num_cores <- parallel::detectCores() - 1 # alter as needed
num_cores <- 1 # alter as needed
# Register a parallel backend so %dopar% actually distributes work;
# without this, foreach warns and falls back to sequential execution.
registerDoParallel(cores = num_cores)

mifs <- foreach(i = 1:num_cores,
                .combine = c,
                .export = c("params_perts", "prop_func", "curr_theta")) %dopar% {
  print(sprintf('starting mif number %d', i))
  theta_guess <- curr_theta
  theta_guess[params_to_estimate] <- prop_func(curr_theta[params_to_estimate])
  mif2(pomp_object, Nmif = num_mif_iterations, params = theta_guess,
       Np = num_particles, cooling.fraction = 0.5, rw.sd = params_perts)
}

# Diagnostic plot: parameter and log-likelihood traces over MIF iterations.
mifs %>%
  traces() %>%
  melt() %>%
  filter(variable %in% c("loglik", params_to_estimate)) %>%
  ggplot(aes(x = iteration, y = value, group = L1, color = L1)) +
  geom_line() +
  facet_wrap(~variable, scales = "free_y") +
  guides(color = "none")  # "none" replaces the deprecated FALSE

# Use particle filter to get the likelihood at the end of MIF run ---------
pf1 <- foreach(mf = mifs, .combine = c) %dopar% {
  pf <- replicate(n = 10, logLik(pfilter(mf, Np = 10000)))
  logmeanexp(pf)
}

# Extract and save best parameter set for MCMC ----------------------------
mf1 <- mifs[[which.max(pf1)]]
theta_mif <- coef(mf1)
# BUG FIX: the original call was saveRDS("../output/mif-mles.RDS"), which
# passes the path as the object and omits the file, so the MLEs were never
# written. Save the parameter vector to the intended path instead.
saveRDS(theta_mif, "../output/mif-mles.RDS")

# Cache -------------------------------------------------------------------
#
# # Question: Are there rules of thumb for specifying Nmif, Np, coooling.fraction and rw.sd? Or ways to diagnose if one is choosing them right?
# # Other question: Is this only estimating those parameters that are specified in rw.sd and all others are assumed fixed?
#
# # pf <- pfilter(covid_ga_pomp, params = coef(covid_ga_pomp), Np = 1000)
#
# test <- mif2(pomp_object, Nmif = 50, params = theta.guess,
#              Np = 2000, cooling.fraction = 1,
#              rw.sd = rw.sd(beta_red_factor = 0.02, gamma_u = 0.02,
#                            gamma_d = 0.02, detect_frac_0 = 0.02))
#
#
# mifs <- foreach (i = 1:10, .combine = c) %dopar% { #Inspect from multiple, randomly chosen starting points
#   theta.guess <- theta.true
#   theta.guess[estpars] <- rlnorm(n = length(estpars),
#                                  meanlog = log(theta.guess[estpars]), sdlog = 1)
# }
#
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boot_sample.R
\name{boot_sample}
\alias{boot_sample}
\title{Function to create bootstrap samples of LDA projected means}
\usage{
boot_sample(values, N.b = 100)
}
\arguments{
\item{values}{A matrix of group IDs and the first 2 LDA projections of the data.}
\item{N.b}{The number of bootstrap sample means to be estimated.}
}
\value{
Returns a matrix of LDA-projected means with N.b * (number of groups) rows and 3 columns: group, LDA1, and LDA2.
}
\description{
Bootstrap resampling used for mean CI estimation
}
\examples{
library(MASS)
# simulated data set to give random groups
q=100
data <- as.data.frame(list(x1 = runif(q), x2 = rnorm(q), x3 = rlnorm(q), group = sample(c('s','d','w'),q,replace=TRUE)))
# create LDA projections; not strictly required -- the function works with any bivariate data and a corresponding grouping column
lda <- lda(group ~., data = data)
# format data: groups need to be numeric, and cannot be factors or characters
V1<-as.numeric(as.factor(data$group))
lda.vec<-as.data.frame(lda$scaling)
lda.p <- predict(lda)
v <- as.data.frame(cbind(V1, lda.p$x))
str(v)
# create bootstrap sample means
b <- boot_sample(values = v)
str(b)
}
\author{
James Colee
}
| /man/boot_sample.Rd | permissive | boikin/LDA-Plots | R | false | true | 1,242 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boot_sample.R
\name{boot_sample}
\alias{boot_sample}
\title{Function to create bootstrap samples of LDA projected means}
\usage{
boot_sample(values, N.b = 100)
}
\arguments{
\item{values}{A matrix of group IDs and the first 2 LDA projections of the data.}
\item{N.b}{The number of bootstrap sample means to be estimated.}
}
\value{
Returns a matrix of LDA-projected means with N.b * (number of groups) rows and 3 columns: group, LDA1, and LDA2.
}
\description{
Bootstrap resampling used for mean CI estimation
}
\examples{
library(MASS)
# simulated data set to give random groups
q=100
data <- as.data.frame(list(x1 = runif(q), x2 = rnorm(q), x3 = rlnorm(q), group = sample(c('s','d','w'),q,replace=TRUE)))
# create LDA projections; not strictly required -- the function works with any bivariate data and a corresponding grouping column
lda <- lda(group ~., data = data)
# format data: groups need to be numeric, and cannot be factors or characters
V1<-as.numeric(as.factor(data$group))
lda.vec<-as.data.frame(lda$scaling)
lda.p <- predict(lda)
v <- as.data.frame(cbind(V1, lda.p$x))
str(v)
# create bootstrap sample means
b <- boot_sample(values = v)
str(b)
}
\author{
James Colee
}
|
library(ggplot2)
library(gridExtra)
library(dplyr)
library(tidyr)
library(stargazer)
library(usdm)
library(olsrr)
library(corrplot)
workdir <- "C:/Koma/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/Analysis8/"
# NOTE(review): setwd() in a script hampers portability; kept because the
# rest of the workflow uses paths relative to this directory.
setwd(workdir)

####################################### Import
# The same read/rescale/scale/filter pipeline was copy-pasted three times
# (for the 0.5, 2.5 and 5 m resolutions); it is factored into two helpers.

## Read one resolution of the plot database and rescale total biomass.
## `res` is the resolution suffix in the CSV file name (0.5, 2.5 or 5).
## NOTE(review): total.weight / 10000 presumably converts units -- confirm.
read_plot_db <- function(res) {
  d <- read.csv(paste0("Plot_db_", res, ".csv"))
  d$total.weight <- d$total.weight / 10000
  d
}

## Append z-scored ("Scaled_") copies of the metric columns 3:10 and drop
## the excluded plots (OBJNAME 120, 209 and 163).
finalize_plot_db <- function(d) {
  scaled <- scale(d[, 3:10])
  colnames(scaled) <- paste0("Scaled_", colnames(scaled))
  d_f <- cbind(d, scaled)
  d_f[d_f$OBJNAME != 120 & d_f$OBJNAME != 209 & d_f$OBJNAME != 163, ]
}

plot_data05 <- read_plot_db(0.5)
plot_data05_f <- finalize_plot_db(plot_data05)

plot_data2 <- read_plot_db(2.5)
plot_data2_f <- finalize_plot_db(plot_data2)

plot_data5 <- read_plot_db(5)
plot_data5_f <- finalize_plot_db(plot_data5)
############################################ Height
## Field-measured vegetation height vs. the scaled LiDAR metric V_var:
## one panel per lake per resolution plus an all-lakes panel. The original
## code repeated the same ggplot recipe 12 times; the helper reproduces it
## exactly for each combination (point layer, lm smoother, labels, limits).

## Build one scatter panel of veg_height_m against Scaled_V_var.
## `lake_name` selects a single lake (NULL = all lakes, with the full
## color/shape legend scales); `line_col` colors the lm smoother.
## NOTE(review): `size` in geom_smooth is superseded by `linewidth` in
## ggplot2 >= 3.4; kept for compatibility with older ggplot2.
height_vvar_panel <- function(dat, lake_name, line_col) {
  if (!is.null(lake_name)) {
    dat <- dat[dat$lake == lake_name, ]
  }
  p <- ggplot(dat, aes(x = Scaled_V_var, y = veg_height_m))
  if (identical(lake_name, "Lake Balaton")) {
    # Balaton panels use a fixed red point color (no color mapping).
    p <- p + geom_point(aes(shape = veg_type_2), size = 5, color = "red",
                        show.legend = FALSE)
  } else if (is.null(lake_name)) {
    p <- p +
      geom_point(aes(color = lake, shape = veg_type_2), size = 5,
                 show.legend = FALSE) +
      scale_colour_manual(values = c("Lake Balaton" = "red",
                                     "Lake Ferto" = "darkgreen",
                                     "Lake Tisza" = "blue"),
                          name = "Lakes") +
      scale_shape_manual(values = c("carex" = 16, "phragmites" = 17,
                                    "typha" = 15),
                         name = "Species",
                         labels = c("Carex spec.", "Phragmites australis",
                                    "Typha spec."))
  } else {
    p <- p +
      geom_point(aes(color = lake, shape = veg_type_2), size = 5,
                 show.legend = FALSE) +
      scale_colour_manual(values = c("Lake Ferto" = "darkgreen",
                                     "Lake Tisza" = "blue"),
                          name = "Lakes")
  }
  p +
    geom_smooth(method = "lm", se = TRUE, size = 2, color = line_col) +
    theme_bw(base_size = 20) +
    ylab("Height (field)") +
    geom_text(aes(label = OBJNAME), hjust = 0, vjust = 0, size = 4) +
    ylim(0, 5) +
    xlim(-1.2, 2.2)
}

a5h  <- height_vvar_panel(plot_data5_f,  "Lake Tisza",   "blue")
d5h  <- height_vvar_panel(plot_data5_f,  "Lake Ferto",   "darkgreen")
b5h  <- height_vvar_panel(plot_data5_f,  "Lake Balaton", "red")
c5h  <- height_vvar_panel(plot_data5_f,  NULL,           "black")
a2h  <- height_vvar_panel(plot_data2_f,  "Lake Tisza",   "blue")
d2h  <- height_vvar_panel(plot_data2_f,  "Lake Ferto",   "darkgreen")
b2h  <- height_vvar_panel(plot_data2_f,  "Lake Balaton", "red")
c2h  <- height_vvar_panel(plot_data2_f,  NULL,           "black")
a05h <- height_vvar_panel(plot_data05_f, "Lake Tisza",   "blue")
d05h <- height_vvar_panel(plot_data05_f, "Lake Ferto",   "darkgreen")
b05h <- height_vvar_panel(plot_data05_f, "Lake Balaton", "red")
c05h <- height_vvar_panel(plot_data05_f, NULL,           "black")

## 3x4 grid: rows = resolution (0.5, 2.5, 5 m), columns = Ferto, Tisza,
## Balaton, all lakes.
grid.arrange(d05h, a05h, b05h, c05h,
             d2h,  a2h,  b2h,  c2h,
             d5h,  a5h,  b5h,  c5h,
             nrow = 3,
             ncol = 4)
###############
## Field-measured vegetation height vs. the scaled LiDAR metric A_std,
## same 12-panel layout as above. The repeated ggplot recipe is factored
## into a helper that reproduces it exactly.

## Build one scatter panel of veg_height_m against Scaled_A_std.
## `lake_name` selects a single lake (NULL = all lakes, with the full
## color/shape legend scales); `line_col` colors the lm smoother.
## NOTE(review): `size` in geom_smooth is superseded by `linewidth` in
## ggplot2 >= 3.4; kept for compatibility with older ggplot2.
height_astd_panel <- function(dat, lake_name, line_col) {
  if (!is.null(lake_name)) {
    dat <- dat[dat$lake == lake_name, ]
  }
  p <- ggplot(dat, aes(x = Scaled_A_std, y = veg_height_m))
  if (identical(lake_name, "Lake Balaton")) {
    # Balaton panels use a fixed red point color (no color mapping).
    p <- p + geom_point(aes(shape = veg_type_2), size = 5, color = "red",
                        show.legend = FALSE)
  } else if (is.null(lake_name)) {
    p <- p +
      geom_point(aes(color = lake, shape = veg_type_2), size = 5,
                 show.legend = FALSE) +
      scale_colour_manual(values = c("Lake Balaton" = "red",
                                     "Lake Ferto" = "darkgreen",
                                     "Lake Tisza" = "blue"),
                          name = "Lakes") +
      scale_shape_manual(values = c("carex" = 16, "phragmites" = 17,
                                    "typha" = 15),
                         name = "Species",
                         labels = c("Carex spec.", "Phragmites australis",
                                    "Typha spec."))
  } else {
    p <- p +
      geom_point(aes(color = lake, shape = veg_type_2), size = 5,
                 show.legend = FALSE) +
      scale_colour_manual(values = c("Lake Ferto" = "darkgreen",
                                     "Lake Tisza" = "blue"),
                          name = "Lakes")
  }
  p +
    geom_smooth(method = "lm", se = TRUE, size = 2, color = line_col) +
    theme_bw(base_size = 20) +
    ylab("Height (field)") +
    geom_text(aes(label = OBJNAME), hjust = 0, vjust = 0, size = 4) +
    ylim(0, 5) +
    xlim(-2.5, 2.2)
}

a5h_2  <- height_astd_panel(plot_data5_f,  "Lake Tisza",   "blue")
d5h_2  <- height_astd_panel(plot_data5_f,  "Lake Ferto",   "darkgreen")
b5h_2  <- height_astd_panel(plot_data5_f,  "Lake Balaton", "red")
c5h_2  <- height_astd_panel(plot_data5_f,  NULL,           "black")
a2h_2  <- height_astd_panel(plot_data2_f,  "Lake Tisza",   "blue")
d2h_2  <- height_astd_panel(plot_data2_f,  "Lake Ferto",   "darkgreen")
b2h_2  <- height_astd_panel(plot_data2_f,  "Lake Balaton", "red")
c2h_2  <- height_astd_panel(plot_data2_f,  NULL,           "black")
a05h_2 <- height_astd_panel(plot_data05_f, "Lake Tisza",   "blue")
d05h_2 <- height_astd_panel(plot_data05_f, "Lake Ferto",   "darkgreen")
b05h_2 <- height_astd_panel(plot_data05_f, "Lake Balaton", "red")
c05h_2 <- height_astd_panel(plot_data05_f, NULL,           "black")

## 3x4 grid: rows = resolution (0.5, 2.5, 5 m), columns = Ferto, Tisza,
## Balaton, all lakes.
grid.arrange(d05h_2, a05h_2, b05h_2, c05h_2,
             d2h_2,  a2h_2,  b2h_2,  c2h_2,
             d5h_2,  a5h_2,  b5h_2,  c5h_2,
             nrow = 3,
             ncol = 4)
############################################ Biomass
## Field-measured total biomass vs. the scaled LiDAR metric V_var, same
## 12-panel layout as the height sections. The repeated ggplot recipe is
## factored into a helper that reproduces it exactly.

## Build one scatter panel of total.weight against Scaled_V_var.
## `lake_name` selects a single lake (NULL = all lakes, with the full
## color/shape legend scales); `line_col` colors the lm smoother.
## NOTE(review): `size` in geom_smooth is superseded by `linewidth` in
## ggplot2 >= 3.4; kept for compatibility with older ggplot2.
biomass_vvar_panel <- function(dat, lake_name, line_col) {
  if (!is.null(lake_name)) {
    dat <- dat[dat$lake == lake_name, ]
  }
  p <- ggplot(dat, aes(x = Scaled_V_var, y = total.weight))
  if (identical(lake_name, "Lake Balaton")) {
    # Balaton panels use a fixed red point color (no color mapping).
    p <- p + geom_point(aes(shape = veg_type_2), size = 5, color = "red",
                        show.legend = FALSE)
  } else if (is.null(lake_name)) {
    p <- p +
      geom_point(aes(color = lake, shape = veg_type_2), size = 5,
                 show.legend = FALSE) +
      scale_colour_manual(values = c("Lake Balaton" = "red",
                                     "Lake Ferto" = "darkgreen",
                                     "Lake Tisza" = "blue"),
                          name = "Lakes") +
      scale_shape_manual(values = c("carex" = 16, "phragmites" = 17,
                                    "typha" = 15),
                         name = "Species",
                         labels = c("Carex spec.", "Phragmites australis",
                                    "Typha spec."))
  } else {
    p <- p +
      geom_point(aes(color = lake, shape = veg_type_2), size = 5,
                 show.legend = FALSE) +
      scale_colour_manual(values = c("Lake Ferto" = "darkgreen",
                                     "Lake Tisza" = "blue"),
                          name = "Lakes")
  }
  p +
    geom_smooth(method = "lm", se = TRUE, size = 2, color = line_col) +
    theme_bw(base_size = 20) +
    ylab("Biomass (field)") +
    geom_text(aes(label = OBJNAME), hjust = 0, vjust = 0, size = 4) +
    ylim(0, 2) +
    xlim(-1.2, 2.2)
}

a5  <- biomass_vvar_panel(plot_data5_f,  "Lake Tisza",   "blue")
d5  <- biomass_vvar_panel(plot_data5_f,  "Lake Ferto",   "darkgreen")
b5  <- biomass_vvar_panel(plot_data5_f,  "Lake Balaton", "red")
c5  <- biomass_vvar_panel(plot_data5_f,  NULL,           "black")
a2  <- biomass_vvar_panel(plot_data2_f,  "Lake Tisza",   "blue")
d2  <- biomass_vvar_panel(plot_data2_f,  "Lake Ferto",   "darkgreen")
b2  <- biomass_vvar_panel(plot_data2_f,  "Lake Balaton", "red")
c2  <- biomass_vvar_panel(plot_data2_f,  NULL,           "black")
a05 <- biomass_vvar_panel(plot_data05_f, "Lake Tisza",   "blue")
d05 <- biomass_vvar_panel(plot_data05_f, "Lake Ferto",   "darkgreen")
b05 <- biomass_vvar_panel(plot_data05_f, "Lake Balaton", "red")
c05 <- biomass_vvar_panel(plot_data05_f, NULL,           "black")

## 3x4 grid: rows = resolution (0.5, 2.5, 5 m), columns = Ferto, Tisza,
## Balaton, all lakes.
grid.arrange(d05, a05, b05, c05,
             d2,  a2,  b2,  c2,
             d5,  a5,  b5,  c5,
             nrow = 3,
             ncol = 4)
###############
## Field-measured total biomass vs. the scaled LiDAR metric A_cover.
## The repeated ggplot recipe is factored into a helper that reproduces it
## exactly. (The 0.5 m panels and the grid.arrange for this section continue
## beyond this chunk.)

## Build one scatter panel of total.weight against Scaled_A_cover.
## `lake_name` selects a single lake (NULL = all lakes, with the full
## color/shape legend scales); `line_col` colors the lm smoother.
## NOTE(review): `size` in geom_smooth is superseded by `linewidth` in
## ggplot2 >= 3.4; kept for compatibility with older ggplot2.
biomass_acover_panel <- function(dat, lake_name, line_col) {
  if (!is.null(lake_name)) {
    dat <- dat[dat$lake == lake_name, ]
  }
  p <- ggplot(dat, aes(x = Scaled_A_cover, y = total.weight))
  if (identical(lake_name, "Lake Balaton")) {
    # Balaton panels use a fixed red point color (no color mapping).
    p <- p + geom_point(aes(shape = veg_type_2), size = 5, color = "red",
                        show.legend = FALSE)
  } else if (is.null(lake_name)) {
    p <- p +
      geom_point(aes(color = lake, shape = veg_type_2), size = 5,
                 show.legend = FALSE) +
      scale_colour_manual(values = c("Lake Balaton" = "red",
                                     "Lake Ferto" = "darkgreen",
                                     "Lake Tisza" = "blue"),
                          name = "Lakes") +
      scale_shape_manual(values = c("carex" = 16, "phragmites" = 17,
                                    "typha" = 15),
                         name = "Species",
                         labels = c("Carex spec.", "Phragmites australis",
                                    "Typha spec."))
  } else {
    p <- p +
      geom_point(aes(color = lake, shape = veg_type_2), size = 5,
                 show.legend = FALSE) +
      scale_colour_manual(values = c("Lake Ferto" = "darkgreen",
                                     "Lake Tisza" = "blue"),
                          name = "Lakes")
  }
  p +
    geom_smooth(method = "lm", se = TRUE, size = 2, color = line_col) +
    theme_bw(base_size = 20) +
    ylab("Biomass (field)") +
    geom_text(aes(label = OBJNAME), hjust = 0, vjust = 0, size = 4) +
    ylim(0, 2) +
    xlim(-2, 2)
}

a5_2 <- biomass_acover_panel(plot_data5_f, "Lake Tisza",   "blue")
d5_2 <- biomass_acover_panel(plot_data5_f, "Lake Ferto",   "darkgreen")
b5_2 <- biomass_acover_panel(plot_data5_f, "Lake Balaton", "red")
c5_2 <- biomass_acover_panel(plot_data5_f, NULL,           "black")
a2_2 <- biomass_acover_panel(plot_data2_f, "Lake Tisza",   "blue")
d2_2 <- biomass_acover_panel(plot_data2_f, "Lake Ferto",   "darkgreen")
b2_2 <- biomass_acover_panel(plot_data2_f, "Lake Balaton", "red")
c2_2 <- biomass_acover_panel(plot_data2_f, NULL,           "black")
a05_2=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Tisza"),], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-2,2)
d05_2=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Ferto"),], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-2,2)
b05_2=ggplot(data=plot_data05_f[plot_data05_f$lake=="Lake Balaton",], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-2,2)
c05_2=ggplot(data=plot_data05_f, aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-2,2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
grid.arrange(d05_2,a05_2,b05_2,c05_2,
d2_2,a2_2,b2_2,c2_2,
d5_2,a5_2,b5_2,c5_2,
nrow = 3,
ncol = 4
) | /src/analysis_forpaper/Visualization.R | permissive | komazsofi/PhDPaper3_wetlandstr | R | false | false | 26,641 | r | library(ggplot2)
library(gridExtra)
library(dplyr)
library(tidyr)
library(stargazer)
library(usdm)
library(olsrr)
library(corrplot)
workdir="C:/Koma/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/Analysis8/"
setwd(workdir)
####################################### Import
plot_data05=read.csv(paste("Plot_db_",0.5,".csv",sep=""))
plot_data05$total.weight=plot_data05$total.weight/10000
plot_data05_scaled=scale(plot_data05[,c(3:10)])
colnames(plot_data05_scaled)=paste("Scaled_",colnames(plot_data05_scaled),sep="")
plot_data05_f=cbind(plot_data05,plot_data05_scaled)
plot_data05_f=plot_data05_f[(plot_data05_f$OBJNAME!=120 & plot_data05_f$OBJNAME!=209 & plot_data05_f$OBJNAME!=163),]
plot_data2=read.csv(paste("Plot_db_",2.5,".csv",sep=""))
plot_data2$total.weight=plot_data2$total.weight/10000
plot_data2_scaled=scale(plot_data2[,c(3:10)])
colnames(plot_data2_scaled)=paste("Scaled_",colnames(plot_data2_scaled),sep="")
plot_data2_f=cbind(plot_data2,plot_data2_scaled)
plot_data2_f=plot_data2_f[(plot_data2_f$OBJNAME!=120 & plot_data2_f$OBJNAME!=209 & plot_data2_f$OBJNAME!=163),]
plot_data5=read.csv(paste("Plot_db_",5,".csv",sep=""))
plot_data5$total.weight=plot_data5$total.weight/10000
plot_data5_scaled=scale(plot_data5[,c(3:10)])
colnames(plot_data5_scaled)=paste("Scaled_",colnames(plot_data5_scaled),sep="")
plot_data5_f=cbind(plot_data5,plot_data5_scaled)
plot_data5_f=plot_data5_f[(plot_data5_f$OBJNAME!=120 & plot_data5_f$OBJNAME!=209 & plot_data5_f$OBJNAME!=163),]
############################################ Height
a5h=ggplot(data=plot_data5_f[(plot_data5_f$lake=="Lake Tisza"),], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-1.2,2.2)
d5h=ggplot(data=plot_data5_f[(plot_data5_f$lake=="Lake Ferto"),], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-1.2,2.2)
b5h=ggplot(data=plot_data5_f[plot_data5_f$lake=="Lake Balaton",], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-1.2,2.2)
c5h=ggplot(data=plot_data5_f, aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-1.2,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
a2h=ggplot(data=plot_data2_f[(plot_data2_f$lake=="Lake Tisza"),], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-1.2,2.2)
d2h=ggplot(data=plot_data2_f[(plot_data2_f$lake=="Lake Ferto"),], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-1.2,2.2)
b2h=ggplot(data=plot_data2_f[plot_data2_f$lake=="Lake Balaton",], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-1.2,2.2)
c2h=ggplot(data=plot_data2_f, aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-1.2,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
a05h=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Tisza"),], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-1.2,2.2)
d05h=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Ferto"),], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-1.2,2.2)
b05h=ggplot(data=plot_data05_f[plot_data05_f$lake=="Lake Balaton",], aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-1.2,2.2)
c05h=ggplot(data=plot_data05_f, aes(x=Scaled_V_var , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-1.2,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
grid.arrange(d05h,a05h,b05h,c05h,
d2h,a2h,b2h,c2h,
d5h,a5h,b5h,c5h,
nrow = 3,
ncol = 4
)
###############
a5h_2=ggplot(data=plot_data5_f[(plot_data5_f$lake=="Lake Tisza"),], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-2.5,2.2)
d5h_2=ggplot(data=plot_data5_f[(plot_data5_f$lake=="Lake Ferto"),], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-2.5,2.2)
b5h_2=ggplot(data=plot_data5_f[plot_data5_f$lake=="Lake Balaton",], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-2.5,2.2)
c5h_2=ggplot(data=plot_data5_f, aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-2.5,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
a2h_2=ggplot(data=plot_data2_f[(plot_data2_f$lake=="Lake Tisza"),], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-2.5,2.2)
d2h_2=ggplot(data=plot_data2_f[(plot_data2_f$lake=="Lake Ferto"),], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-2.5,2.2)
b2h_2=ggplot(data=plot_data2_f[plot_data2_f$lake=="Lake Balaton",], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-2.5,2.2)
c2h_2=ggplot(data=plot_data2_f, aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-2.5,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
a05h_2=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Tisza"),], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-2.5,2.2)
d05h_2=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Ferto"),], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,5)+
xlim(-2.5,2.2)
b05h_2=ggplot(data=plot_data05_f[plot_data05_f$lake=="Lake Balaton",], aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-2.5,2.2)
c05h_2=ggplot(data=plot_data05_f, aes(x=Scaled_A_std , y=veg_height_m),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Height (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,5)+
xlim(-2.5,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
grid.arrange(d05h_2,a05h_2,b05h_2,c05h_2,
d2h_2,a2h_2,b2h_2,c2h_2,
d5h_2,a5h_2,b5h_2,c5h_2,
nrow = 3,
ncol = 4
)
############################################ Biomass
a5=ggplot(data=plot_data5_f[(plot_data5_f$lake=="Lake Tisza"),], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-1.2,2.2)
d5=ggplot(data=plot_data5_f[(plot_data5_f$lake=="Lake Ferto"),], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-1.2,2.2)
b5=ggplot(data=plot_data5_f[plot_data5_f$lake=="Lake Balaton",], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-1.2,2.2)
c5=ggplot(data=plot_data5_f, aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-1.2,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
a2=ggplot(data=plot_data2_f[(plot_data2_f$lake=="Lake Tisza"),], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-1.2,2.2)
d2=ggplot(data=plot_data2_f[(plot_data2_f$lake=="Lake Ferto"),], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-1.2,2.2)
b2=ggplot(data=plot_data2_f[plot_data2_f$lake=="Lake Balaton",], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-1.2,2.2)
c2=ggplot(data=plot_data2_f, aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-1.2,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
a05=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Tisza"),], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-1.2,2.2)
d05=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Ferto"),], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-1.2,2.2)
b05=ggplot(data=plot_data05_f[plot_data05_f$lake=="Lake Balaton",], aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-1.2,2.2)
c05=ggplot(data=plot_data05_f, aes(x=Scaled_V_var , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-1.2,2.2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
grid.arrange(d05,a05,b05,c05,
d2,a2,b2,c2,
d5,a5,b5,c5,
nrow = 3,
ncol = 4
)
###############
a5_2=ggplot(data=plot_data5_f[(plot_data5_f$lake=="Lake Tisza"),], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-2,2)
d5_2=ggplot(data=plot_data5_f[(plot_data5_f$lake=="Lake Ferto"),], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-2,2)
b5_2=ggplot(data=plot_data5_f[plot_data5_f$lake=="Lake Balaton",], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-2,2)
c5_2=ggplot(data=plot_data5_f, aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-2,2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
a2_2=ggplot(data=plot_data2_f[(plot_data2_f$lake=="Lake Tisza"),], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-2,2)
d2_2=ggplot(data=plot_data2_f[(plot_data2_f$lake=="Lake Ferto"),], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-2,2)
b2_2=ggplot(data=plot_data2_f[plot_data2_f$lake=="Lake Balaton",], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-2,2)
c2_2=ggplot(data=plot_data2_f, aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-2,2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
a05_2=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Tisza"),], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="blue")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-2,2)
d05_2=ggplot(data=plot_data05_f[(plot_data05_f$lake=="Lake Ferto"),], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="darkgreen")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
scale_colour_manual(values=c("Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
ylim(0,2)+
xlim(-2,2)
b05_2=ggplot(data=plot_data05_f[plot_data05_f$lake=="Lake Balaton",], aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(shape=veg_type_2),size=5,color="red",show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,size=2,color="red")+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-2,2)
c05_2=ggplot(data=plot_data05_f, aes(x=Scaled_A_cover , y=total.weight),show.legend = TRUE) +
geom_point(aes(color=lake,shape=veg_type_2),size=5,show.legend = FALSE) +
geom_smooth(method="lm",se=TRUE,color="black",size=2)+
theme_bw(base_size = 20) +
ylab("Biomass (field)") +
geom_text(aes(label=OBJNAME),hjust=0, vjust=0,size=4)+
ylim(0,2)+
xlim(-2,2)+
scale_colour_manual(values=c("Lake Balaton"="red", "Lake Ferto"="darkgreen","Lake Tisza"="blue"),name="Lakes")+
scale_shape_manual(values=c("carex"=16,"phragmites"=17,"typha"=15),name="Species",labels=c("Carex spec.","Phragmites australis","Typha spec."))
grid.arrange(d05_2,a05_2,b05_2,c05_2,
d2_2,a2_2,b2_2,c2_2,
d5_2,a5_2,b5_2,c5_2,
nrow = 3,
ncol = 4
) |
const.client.id <- "FzOYqDgb"
const.client.secret <-"SPrvmY8eGRcGA"
test_knoema_exp <- function(expr){
tryCatch(
{
return(expr)
},
error = function(e) {
return(e$message)
}
)}
context("search by mnememonics - annual - MetaDataframe - one dataset")
test_that("search by mnememonics - annual - MetaDataframe one dataset",{
data_frame = test_knoema_exp(Knoema("eqohmpb", mnemonics="512NGDP_A_in_test_dataset", type = "MetaDataFrame", client.id = const.client.id, client.secret = const.client.secret))
if (class(data_frame)=="data.frame") {
sname = "512NGDP_A_in_test_dataset"
expect_equal(nrow(data_frame),5)
expect_equal(data_frame[['Mnemonics',sname]], '512NGDP_A_in_test_dataset')
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - semiannual, daily - DataFrame one dataset")
test_that("search by mnememonics - semiannual, daily - DataFrame one dataset",{
data_frame = test_knoema_exp(Knoema("eqohmpb", mnemonics="512NGDP_S_in_test_dataset;512NGDP_D_in_test_dataset", type = "DataFrame", client.id = const.client.id, client.secret = const.client.secret))
if (is.list(data_frame)) {
expect_equal(length(data_frame),2)
sname = "512NGDP_S_in_test_dataset"
expect_equal(data_frame[['2003-07-01',sname]], 2)
sname = "512NGDP_D_in_test_dataset"
expect_equal(data_frame[['2004-10-03',sname]], 17)
expect_equal(data_frame[['2004-12-02',sname]], 16)
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - quarterly, monthly - ts one dataset")
test_that("search by mnememonics - quarterly, monthly - ts one dataset",{
data_frame = test_knoema_exp(Knoema("eqohmpb", mnemonics="512NGDP_Q_in_test_dataset;512NGDP_M_in_test_dataset", client.id = const.client.id, client.secret = const.client.secret))
if (is.list(data_frame)) {
expect_equal(length(data_frame),2)
sname = "512NGDP_Q_in_test_dataset"
time_ser = data_frame[[sname]]
value = window(time_ser, start=c(2003,2),frequency=4)[[1]]
expect_equal(value, 5)
sname = "512NGDP_M_in_test_dataset"
time_ser = data_frame[[sname]]
value = window(time_ser, start=c(2003,2),frequency=12)[[1]]
expect_equal(value, 80.7144, tolerance=0.001)
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - annual - MetaDataframe all datasets")
test_that("search by mnememonics - annual - MetaDataframe all datasets",{
data_frame = test_knoema_exp(Knoema(NULL, mnemonics="512NGDP_A_in_test_dataset", type = "MetaDataFrame", client.id = const.client.id, client.secret = const.client.secret))
if (class(data_frame)=="data.frame") {
sname = "512NGDP_A_in_test_dataset"
expect_equal(nrow(data_frame),5)
expect_equal(data_frame[['Mnemonics',sname]], '512NGDP_A_in_test_dataset')
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - semiannual, daily - DataFrame all datasets")
test_that("search by mnememonics - semiannual, daily - DataFrame all datasets",{
data_frame = test_knoema_exp(Knoema(dataset = NULL, mnemonics="512NGDP_S_in_test_dataset;512NGDP_D_in_test_dataset", type = "DataFrame", client.id = const.client.id, client.secret = const.client.secret))
if (class(data_frame)=="data.frame") {
expect_equal(length(data_frame),2)
sname = "512NGDP_S_in_test_dataset"
expect_equal(data_frame[['2003-07-01',sname]], 2)
sname = "512NGDP_D_in_test_dataset"
expect_equal(data_frame[['2004-10-03',sname]], 17)
expect_equal(data_frame[['2004-12-02',sname]], 16)
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - quarterly, monthly - ts all datasets")
test_that("search by mnememonics - quarterly, monthly - ts all datasets",{
data_frame = test_knoema_exp(Knoema(mnemonics="512NGDP_Q_in_test_dataset;512NGDP_M_in_test_dataset", client.id = const.client.id, client.secret = const.client.secret))
if (is.list(data_frame)) {
expect_equal(length(data_frame),2)
sname = "512NGDP_Q_in_test_dataset"
time_ser = data_frame[[sname]]
value = window(time_ser, start=c(2003,2),frequency=4)[[1]]
expect_equal(value, 5)
sname = "512NGDP_M_in_test_dataset"
time_ser = data_frame[[sname]]
value = window(time_ser, start=c(2003,2),frequency=12)[[1]]
expect_equal(value, 80.7144, tolerance=0.001)
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
| /data/genthat_extracted_code/Knoema/tests/test_search_by_mnemonics.R | no_license | surayaaramli/typeRrh | R | false | false | 4,664 | r | const.client.id <- "FzOYqDgb"
# API secret paired with const.client.id for all tests in this file.
const.client.secret <- "SPrvmY8eGRcGA"
# Evaluate `expr`; if it signals an error, return the error message string
# instead of stopping. Lets each test accept either a real API result or the
# "Client error: (403) Forbidden" message from unauthorized credentials.
test_knoema_exp <- function(expr) {
  tryCatch(expr, error = function(e) e$message)
}
context("search by mnememonics - annual - MetaDataframe - one dataset")
test_that("search by mnememonics - annual - MetaDataframe one dataset",{
data_frame = test_knoema_exp(Knoema("eqohmpb", mnemonics="512NGDP_A_in_test_dataset", type = "MetaDataFrame", client.id = const.client.id, client.secret = const.client.secret))
if (class(data_frame)=="data.frame") {
sname = "512NGDP_A_in_test_dataset"
expect_equal(nrow(data_frame),5)
expect_equal(data_frame[['Mnemonics',sname]], '512NGDP_A_in_test_dataset')
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - semiannual, daily - DataFrame one dataset")
test_that("search by mnememonics - semiannual, daily - DataFrame one dataset",{
data_frame = test_knoema_exp(Knoema("eqohmpb", mnemonics="512NGDP_S_in_test_dataset;512NGDP_D_in_test_dataset", type = "DataFrame", client.id = const.client.id, client.secret = const.client.secret))
if (is.list(data_frame)) {
expect_equal(length(data_frame),2)
sname = "512NGDP_S_in_test_dataset"
expect_equal(data_frame[['2003-07-01',sname]], 2)
sname = "512NGDP_D_in_test_dataset"
expect_equal(data_frame[['2004-10-03',sname]], 17)
expect_equal(data_frame[['2004-12-02',sname]], 16)
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - quarterly, monthly - ts one dataset")
test_that("search by mnememonics - quarterly, monthly - ts one dataset",{
data_frame = test_knoema_exp(Knoema("eqohmpb", mnemonics="512NGDP_Q_in_test_dataset;512NGDP_M_in_test_dataset", client.id = const.client.id, client.secret = const.client.secret))
if (is.list(data_frame)) {
expect_equal(length(data_frame),2)
sname = "512NGDP_Q_in_test_dataset"
time_ser = data_frame[[sname]]
value = window(time_ser, start=c(2003,2),frequency=4)[[1]]
expect_equal(value, 5)
sname = "512NGDP_M_in_test_dataset"
time_ser = data_frame[[sname]]
value = window(time_ser, start=c(2003,2),frequency=12)[[1]]
expect_equal(value, 80.7144, tolerance=0.001)
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - annual - MetaDataframe all datasets")
test_that("search by mnememonics - annual - MetaDataframe all datasets",{
data_frame = test_knoema_exp(Knoema(NULL, mnemonics="512NGDP_A_in_test_dataset", type = "MetaDataFrame", client.id = const.client.id, client.secret = const.client.secret))
if (class(data_frame)=="data.frame") {
sname = "512NGDP_A_in_test_dataset"
expect_equal(nrow(data_frame),5)
expect_equal(data_frame[['Mnemonics',sname]], '512NGDP_A_in_test_dataset')
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - semiannual, daily - DataFrame all datasets")
test_that("search by mnememonics - semiannual, daily - DataFrame all datasets",{
data_frame = test_knoema_exp(Knoema(dataset = NULL, mnemonics="512NGDP_S_in_test_dataset;512NGDP_D_in_test_dataset", type = "DataFrame", client.id = const.client.id, client.secret = const.client.secret))
if (class(data_frame)=="data.frame") {
expect_equal(length(data_frame),2)
sname = "512NGDP_S_in_test_dataset"
expect_equal(data_frame[['2003-07-01',sname]], 2)
sname = "512NGDP_D_in_test_dataset"
expect_equal(data_frame[['2004-10-03',sname]], 17)
expect_equal(data_frame[['2004-12-02',sname]], 16)
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
context("search by mnememonics - quarterly, monthly - ts all datasets")
test_that("search by mnememonics - quarterly, monthly - ts all datasets",{
data_frame = test_knoema_exp(Knoema(mnemonics="512NGDP_Q_in_test_dataset;512NGDP_M_in_test_dataset", client.id = const.client.id, client.secret = const.client.secret))
if (is.list(data_frame)) {
expect_equal(length(data_frame),2)
sname = "512NGDP_Q_in_test_dataset"
time_ser = data_frame[[sname]]
value = window(time_ser, start=c(2003,2),frequency=4)[[1]]
expect_equal(value, 5)
sname = "512NGDP_M_in_test_dataset"
time_ser = data_frame[[sname]]
value = window(time_ser, start=c(2003,2),frequency=12)[[1]]
expect_equal(value, 80.7144, tolerance=0.001)
} else {
expect_equal(data_frame,"Client error: (403) Forbidden")
}
})
|
### RNetlogo Package ###
# --Linux Version--#
library(RNetLogo)
library(doParallel)
# Path to the headless NetLogo 6.0.4 installation on the cluster.
nl.path <- "/usr/local/Cluster-Apps/netlogo/6.0.4/app"
NLStart(nl.path, gui=F, nl.obj=NULL, is3d=FALSE, nl.jarname='netlogo-6.0.4.jar')
# NetLogo model to run (apparently a Gangnam-district PM10 scenario model).
model.path <- "/home/hs621/github/jasss/Gangnam_v6_macro.nlogo"
NLLoadModel(model.path)
# Column names for the per-tick report: risk population, one column per
# d_* district reporter, a_* age bands, and e_* education levels.
new.col.names <- c( "riskpop", "d_sinsa", "d_nonhyun1", "d_nonhyun2",
"d_samsung1", "d_samsung2","d_daechi1","d_daechi4","d_yeoksam1",
"d_yeoksam2","d_dogok1","d_dogok2","d_gaepo1","d_gaepo4",
"d_ilwon","d_ilwon1","d_ilwon2","d_suseo", "d_ap","d_chungdam",
"d_daechi2","d_gaepo2","d_segok",
"a_u15","a_btw1564","a_ov65","e_high","e_low")
# Wall-clock timer for the first batch of runs.
init <- Sys.time()
# Batch 1 of 3: 50 replicate simulations with AC = 100.
# NOTE(review): %dopar% is used but no parallel backend is registered in this
# script (no makeCluster()/registerDoParallel() visible), so foreach should
# fall back to sequential execution with a warning -- confirm intent.
# NOTE(review): assign() inside a %dopar% body creates the variable in the
# worker's environment; with a real parallel backend those objects would not
# be visible on the master -- verify results are actually collected.
foreach (i = 1:50) %dopar% {
NLCommand("setup")
NLCommand (paste('set AC', 100))
NLCommand (paste('set Scenario', '"BAU"'))
NLCommand (paste('set scenario-percent', '"inc-sce"'))
NLCommand (paste('set PM10-parameters', 100))
# Per-replicate result object name, e.g. "model100.7".
simulation <- paste("model100",i, sep = ".")
assign(simulation, NLDoReportWhile("ticks < 8764" , "go",
c("%riskpop", "d_sinsa", "d_nonhyun1", "d_nonhyun2",
"d_samsung1", "d_samsung2","d_daechi1","d_daechi4","d_yeoksam1",
"d_yeoksam2","d_dogok1","d_dogok2","d_gaepo1","d_gaepo4",
"d_ilwon","d_ilwon1","d_ilwon2","d_suseo", "d_ap","d_chungdam",
"d_daechi2","d_gaepo2","d_segok",
"a_u15","a_btw1564","a_ov65","e_high","e_low"), df.col.names= new.col.names,as.data.frame = T, max.minutes=150)
)
# Per-replicate agent snapshot, e.g. "health100.7".
h <- paste("health100",i, sep = ".")
assign(h, NLGetAgentSet(c("who", "homename", "destinationName", "age", "health"), "people"))
}
# Elapsed wall-clock time for this batch.
Sys.time() - init
init <- Sys.time()
# Batch 2 of 3: 50 replicate simulations with AC = 150.
foreach (i = 1:50) %dopar% {
NLCommand("setup")
NLCommand (paste('set AC', 150))
NLCommand (paste('set Scenario', '"BAU"'))
NLCommand (paste('set scenario-percent', '"inc-sce"'))
NLCommand (paste('set PM10-parameters', 100))
# Fix: this batch previously stored results as "model100.i"/"health100.i",
# silently overwriting the AC = 100 batch; name them after this AC level.
simulation <- paste("model150",i, sep = ".")
assign(simulation, NLDoReportWhile("ticks < 8764" , "go",
c("%riskpop", "d_sinsa", "d_nonhyun1", "d_nonhyun2",
"d_samsung1", "d_samsung2","d_daechi1","d_daechi4","d_yeoksam1",
"d_yeoksam2","d_dogok1","d_dogok2","d_gaepo1","d_gaepo4",
"d_ilwon","d_ilwon1","d_ilwon2","d_suseo", "d_ap","d_chungdam",
"d_daechi2","d_gaepo2","d_segok",
"a_u15","a_btw1564","a_ov65","e_high","e_low"), df.col.names= new.col.names,as.data.frame = TRUE, max.minutes=150)
)
h <- paste("health150",i, sep = ".")
assign(h, NLGetAgentSet(c("who", "homename", "destinationName", "age", "health"), "people"))
}
# Elapsed wall-clock time for this batch.
Sys.time() - init
init <- Sys.time()
# Batch 3 of 3: 50 replicate simulations with AC = 200.
foreach (i = 1:50) %dopar% {
NLCommand("setup")
NLCommand (paste('set AC', 200))
NLCommand (paste('set Scenario', '"BAU"'))
NLCommand (paste('set scenario-percent', '"inc-sce"'))
NLCommand (paste('set PM10-parameters', 100))
# Fix: this batch previously stored results as "model100.i"/"health100.i",
# silently overwriting the earlier batches; name them after this AC level.
simulation <- paste("model200",i, sep = ".")
assign(simulation, NLDoReportWhile("ticks < 8764" , "go",
c("%riskpop", "d_sinsa", "d_nonhyun1", "d_nonhyun2",
"d_samsung1", "d_samsung2","d_daechi1","d_daechi4","d_yeoksam1",
"d_yeoksam2","d_dogok1","d_dogok2","d_gaepo1","d_gaepo4",
"d_ilwon","d_ilwon1","d_ilwon2","d_suseo", "d_ap","d_chungdam",
"d_daechi2","d_gaepo2","d_segok",
"a_u15","a_btw1564","a_ov65","e_high","e_low"), df.col.names= new.col.names,as.data.frame = TRUE, max.minutes=150)
)
h <- paste("health200",i, sep = ".")
assign(h, NLGetAgentSet(c("who", "homename", "destinationName", "age", "health"), "people"))
}
# Elapsed wall-clock time for this batch.
Sys.time() - init
#######################################
#
# Persist the entire workspace (all assigned model/health objects).
save.image(file = "/home/hs621/github/jasss/cluster.Rdata")
# NOTE(review): the next line calls stopCluster(cl), but no cluster object
# `cl` is ever created in this script, so that call will error -- confirm.
stopCluster(cl) | /Rnetlogo_Ubuntu_cluster.R | no_license | dataandcrowd/jasss | R | false | false | 4,328 | r | ### RNetlogo Package ###
# --Linux Version--#
library(RNetLogo)
library(doParallel)
# Path to the headless NetLogo 6.0.4 installation on the cluster.
nl.path <- "/usr/local/Cluster-Apps/netlogo/6.0.4/app"
NLStart(nl.path, gui=F, nl.obj=NULL, is3d=FALSE, nl.jarname='netlogo-6.0.4.jar')
# NetLogo model to run (apparently a Gangnam-district PM10 scenario model).
model.path <- "/home/hs621/github/jasss/Gangnam_v6_macro.nlogo"
NLLoadModel(model.path)
# Column names for the per-tick report: risk population, one column per
# d_* district reporter, a_* age bands, and e_* education levels.
new.col.names <- c( "riskpop", "d_sinsa", "d_nonhyun1", "d_nonhyun2",
"d_samsung1", "d_samsung2","d_daechi1","d_daechi4","d_yeoksam1",
"d_yeoksam2","d_dogok1","d_dogok2","d_gaepo1","d_gaepo4",
"d_ilwon","d_ilwon1","d_ilwon2","d_suseo", "d_ap","d_chungdam",
"d_daechi2","d_gaepo2","d_segok",
"a_u15","a_btw1564","a_ov65","e_high","e_low")
# Wall-clock timer for the first batch of runs.
init <- Sys.time()
# Batch 1 of 3: 50 replicate simulations with AC = 100.
# NOTE(review): %dopar% is used but no parallel backend is registered in this
# script (no makeCluster()/registerDoParallel() visible), so foreach should
# fall back to sequential execution with a warning -- confirm intent.
# NOTE(review): assign() inside a %dopar% body creates the variable in the
# worker's environment; with a real parallel backend those objects would not
# be visible on the master -- verify results are actually collected.
foreach (i = 1:50) %dopar% {
NLCommand("setup")
NLCommand (paste('set AC', 100))
NLCommand (paste('set Scenario', '"BAU"'))
NLCommand (paste('set scenario-percent', '"inc-sce"'))
NLCommand (paste('set PM10-parameters', 100))
# Per-replicate result object name, e.g. "model100.7".
simulation <- paste("model100",i, sep = ".")
assign(simulation, NLDoReportWhile("ticks < 8764" , "go",
c("%riskpop", "d_sinsa", "d_nonhyun1", "d_nonhyun2",
"d_samsung1", "d_samsung2","d_daechi1","d_daechi4","d_yeoksam1",
"d_yeoksam2","d_dogok1","d_dogok2","d_gaepo1","d_gaepo4",
"d_ilwon","d_ilwon1","d_ilwon2","d_suseo", "d_ap","d_chungdam",
"d_daechi2","d_gaepo2","d_segok",
"a_u15","a_btw1564","a_ov65","e_high","e_low"), df.col.names= new.col.names,as.data.frame = T, max.minutes=150)
)
# Per-replicate agent snapshot, e.g. "health100.7".
h <- paste("health100",i, sep = ".")
assign(h, NLGetAgentSet(c("who", "homename", "destinationName", "age", "health"), "people"))
}
# Elapsed wall-clock time for this batch.
Sys.time() - init
init <- Sys.time()
# Batch 2 of 3: 50 replicate simulations with AC = 150.
foreach (i = 1:50) %dopar% {
NLCommand("setup")
NLCommand (paste('set AC', 150))
NLCommand (paste('set Scenario', '"BAU"'))
NLCommand (paste('set scenario-percent', '"inc-sce"'))
NLCommand (paste('set PM10-parameters', 100))
# Fix: this batch previously stored results as "model100.i"/"health100.i",
# silently overwriting the AC = 100 batch; name them after this AC level.
simulation <- paste("model150",i, sep = ".")
assign(simulation, NLDoReportWhile("ticks < 8764" , "go",
c("%riskpop", "d_sinsa", "d_nonhyun1", "d_nonhyun2",
"d_samsung1", "d_samsung2","d_daechi1","d_daechi4","d_yeoksam1",
"d_yeoksam2","d_dogok1","d_dogok2","d_gaepo1","d_gaepo4",
"d_ilwon","d_ilwon1","d_ilwon2","d_suseo", "d_ap","d_chungdam",
"d_daechi2","d_gaepo2","d_segok",
"a_u15","a_btw1564","a_ov65","e_high","e_low"), df.col.names= new.col.names,as.data.frame = TRUE, max.minutes=150)
)
h <- paste("health150",i, sep = ".")
assign(h, NLGetAgentSet(c("who", "homename", "destinationName", "age", "health"), "people"))
}
# Elapsed wall-clock time for this batch.
Sys.time() - init
init <- Sys.time()
# Batch 3 of 3: 50 replicate simulations with AC = 200.
foreach (i = 1:50) %dopar% {
NLCommand("setup")
NLCommand (paste('set AC', 200))
NLCommand (paste('set Scenario', '"BAU"'))
NLCommand (paste('set scenario-percent', '"inc-sce"'))
NLCommand (paste('set PM10-parameters', 100))
# Fix: this batch previously stored results as "model100.i"/"health100.i",
# silently overwriting the earlier batches; name them after this AC level.
simulation <- paste("model200",i, sep = ".")
assign(simulation, NLDoReportWhile("ticks < 8764" , "go",
c("%riskpop", "d_sinsa", "d_nonhyun1", "d_nonhyun2",
"d_samsung1", "d_samsung2","d_daechi1","d_daechi4","d_yeoksam1",
"d_yeoksam2","d_dogok1","d_dogok2","d_gaepo1","d_gaepo4",
"d_ilwon","d_ilwon1","d_ilwon2","d_suseo", "d_ap","d_chungdam",
"d_daechi2","d_gaepo2","d_segok",
"a_u15","a_btw1564","a_ov65","e_high","e_low"), df.col.names= new.col.names,as.data.frame = TRUE, max.minutes=150)
)
h <- paste("health200",i, sep = ".")
assign(h, NLGetAgentSet(c("who", "homename", "destinationName", "age", "health"), "people"))
}
# Elapsed wall-clock time for this batch.
Sys.time() - init
#######################################
#
# Persist the entire workspace (all assigned model/health objects).
save.image(file = "/home/hs621/github/jasss/cluster.Rdata")
# NOTE(review): the next line calls stopCluster(cl), but no cluster object
# `cl` is ever created in this script, so that call will error -- confirm.
stopCluster(cl) |
# Scrape a Safari Books Online reading-log table and total the listed times.
# library() replaces require(): require() returns FALSE instead of erroring
# when a mandatory dependency is missing, hiding the failure until later.
library(XML) #Loading XML package
library(stringr)
theURL<-"http://techbus.safaribooksonline.com/9780133578867/35-2013-12-05?percentage=&reader=pf"
Hrs <-readHTMLTable(theURL, which = 1,header = FALSE,StringsAsFactors= FALSE) #imported data in Hrs DF
class(Hrs) #checked class of DF
Calc <- Hrs[60:115,3,drop=FALSE] #Copied records 60 to 115 and column 3 as a DF
names(Calc) <- "time" #named column
Calc2<-str_split_fixed(Calc$time, ":", 3) #used : delimiter to store time into 3 separate columns
colnames(Calc2) <- c("Hours","Minutes","Sec") #since Calc2 is a matrix, renamed the columns
Calc2<-as.data.frame(Calc2,stringsAsFactors = FALSE) #Converted matrix to data frame
is.null(Calc2[9,"Hours"]) #just checked if blank values were actually NULL
Calc2$Hours <- as.character(Calc2$Hours) #Converting columns to character to find empty fields and change to NA
Calc2$Hours[Calc2$Hours == ""] <- NA
Calc2$Minutes <- as.character(Calc2$Minutes) #Converting columns to character to find empty fields and change to NA
Calc2$Minutes[Calc2$Minutes == ""] <- NA
Calc2$Sec <- as.character(Calc2$Sec) #Converting columns to character to find empty fields and change to NA
Calc2$Sec[Calc2$Sec == ""] <- NA
Calc2<-Calc2[complete.cases(Calc2),] #updating DF by taking everything excluding NA rows
Calc2$Hours <- as.numeric(Calc2$Hours) #converting all columns back to numeric
Calc2$Minutes <- as.numeric(Calc2$Minutes) #converting all columns back to numeric
Calc2$Sec <- as.numeric(Calc2$Sec) #converting all columns back to numeric
Time<-(sum(Calc2$Minutes)+(sum(Calc2$Sec)/60))/60 #calculating total time in Hrs
TimeF<- paste(floor(Time), round((Time-floor(Time))*60), sep=":") #formatting time to show hours:minutes
| /XMLInternetImport.r | no_license | vpranavanshu91/Intro-to-R | R | false | false | 1,836 | r | require(XML) #Loading XML package
# Scrape a Safari Books Online reading-log table and total the listed times.
require(stringr)
theURL<-"http://techbus.safaribooksonline.com/9780133578867/35-2013-12-05?percentage=&reader=pf"
Hrs <-readHTMLTable(theURL, which = 1,header = FALSE,StringsAsFactors= FALSE) #imported data in Hrs DF
class(Hrs) #checked class of DF
Calc <- Hrs[60:115,3,drop=FALSE] #Copied records 60 to 115 and column 3 as a DF
names(Calc) <- "time" #named column
Calc2<-str_split_fixed(Calc$time, ":", 3) #used : delimiter to store time into 3 separate columns
colnames(Calc2) <- c("Hours","Minutes","Sec") #since Calc2 is a matrix, renamed the columns
Calc2<-as.data.frame(Calc2,stringsAsFactors = FALSE) #Converted matrix to data frame
is.null(Calc2[9,"Hours"]) #just checked if blank values were actually NULL
Calc2$Hours <- as.character(Calc2$Hours) #Converting columns to character to find empty fields and change to NA
Calc2$Hours[Calc2$Hours == ""] <- NA
Calc2$Minutes <- as.character(Calc2$Minutes) #Converting columns to character to find empty fields and change to NA
Calc2$Minutes[Calc2$Minutes == ""] <- NA
Calc2$Sec <- as.character(Calc2$Sec) #Converting columns to character to find empty fields and change to NA
Calc2$Sec[Calc2$Sec == ""] <- NA
Calc2<-Calc2[complete.cases(Calc2),] #updating DF by taking everything excluding NA rows
Calc2$Hours <- as.numeric(Calc2$Hours) #converting all columns back to numeric
Calc2$Minutes <- as.numeric(Calc2$Minutes) #converting all columns back to numeric
Calc2$Sec <- as.numeric(Calc2$Sec) #converting all columns back to numeric
Time<-(sum(Calc2$Minutes)+(sum(Calc2$Sec)/60))/60 #calculating total time in Hrs
TimeF<- paste(floor(Time), round((Time-floor(Time))*60), sep=":") #formatting time to show hours:minutes
|
# Log-likelihood of observing k distinct socks drawn from p pairs and s
# singletons (Broman sock problem). Returns -Inf for impossible k.
socks_ll <- function(p, s, k) {
# it is not possible to choose more than p+s distinct socks
if (k > p + s) return(-Inf)
# j = number of singleton socks among the k distinct socks; each of the
# k-j paired socks can come from either member of its pair (factor 2^(k-j)).
j <- 0:k
f <- (k - j) * log(2) + lchoose(s, j) + lchoose(p, k - j) - lchoose(2 * p + s, k)
# Log-sum-exp via the max-shift trick in base R. This replaces
# purrr::map + matrixStats::logSumExp: map() returns a *list*, while
# matrixStats::logSumExp requires a numeric vector, and the base-R form
# also drops both third-party dependencies.
m <- max(f)
if (!is.finite(m)) return(-Inf)  # every term is -Inf (zero likelihood)
m + log(sum(exp(f - m)))
}
# Example: socks_ll(p = 3, s = 4, k = 4)
# Evaluate the sock log-likelihood over a grid of candidate (p, s) values.
# p_max/s_max: upper bounds for pairs and singletons; k: observed number of
# distinct socks. `prior` is accepted but never used in the body -- TODO
# confirm whether it was meant to weight the grid.
# NOTE(review): relies on tidyr::crossing and dplyr (rowwise/mutate) being
# attached by the caller; neither is loaded here.
socks_likelihood_grid <- function(p_max,s_max,k, prior = NULL){
grid <- crossing(p = 0:p_max, s = 0:s_max, k = k) %>%
rowwise() %>%
mutate(
# per-row log likelihood for this (p, s) candidate
ll = socks_ll(p,s,k)
)
return(grid)
}
| /R/socks_ll.R | no_license | odaniel1/broman-socks | R | false | false | 607 | r | socks_ll <- function(p,s,k){
# it is not possible to choose more than p+s distinct socks
if(k > p + s) return(-Inf)
# log likelihood terms for the log-sum-exp trick.
f <- purrr::map(0:k, function(j){
(k-j)*log(2) + lchoose(s,j) +lchoose(p,k-j) - lchoose(2*p + s,k)
})
# the log likelihood
ll <- matrixStats::logSumExp(f)
return(ll)
}
# Example: socks_ll(p = 3, s = 4, k = 4)
# Evaluate the sock log-likelihood over a grid of candidate (p, s) values.
# p_max/s_max: upper bounds for pairs and singletons; k: observed number of
# distinct socks. `prior` is accepted but never used in the body -- TODO
# confirm whether it was meant to weight the grid.
# NOTE(review): relies on tidyr::crossing and dplyr (rowwise/mutate) being
# attached by the caller; neither is loaded here.
socks_likelihood_grid <- function(p_max,s_max,k, prior = NULL){
grid <- crossing(p = 0:p_max, s = 0:s_max, k = k) %>%
rowwise() %>%
mutate(
# per-row log likelihood for this (p, s) candidate
ll = socks_ll(p,s,k)
)
return(grid)
}
|
/cachematrix.R | no_license | mhelmasim/ProgrammingAssignment2 | R | false | false | 1,378 | r | ||
#========
#module.R
#========
#This script defines functions related to testing modules and other
#parts of the system
#TEST MODULE
#===========
#' Test module
#'
#' \code{testModule} is a visioneval framework module developer function that sets
#' up a test environment and tests a module.
#'
#' This function is used to set up a test environment and test a module to check
#' that it can run successfully in the VisionEval model system. The function
#' sets up the test environment by switching to the tests directory and
#' initializing a model state list, a log file, and a datastore. The user may
#' use an existing datastore rather than initialize a new datastore. The use
#' case for loading an existing datastore is where a package contains several
#' modules that run in sequence. The first module would initialize a datastore
#' and then subsequent modules use the datastore that is modified by testing the
#' previous module. When run this way, it is also necessary to set the
#' SaveDatastore argument equal to TRUE so that the module outputs will be
#' saved to the datastore. The function performs several tests including
#' checking whether the module specifications are written properly, whether
#' the test inputs are correct and complete and can be loaded into the
#' datastore, whether the datastore contains all the module inputs identified in
#' the Get specifications, whether the module will run, and whether all of the
#' outputs meet the module's Set specifications. The latter check is carried out
#' in large part by the checkModuleOutputs function that is called.
#'
#' @param ModuleName A string identifying the module name.
#' @param Param_ls Parameter configuration (list)
#' @param ... Other parameters (see comments)
#' @return If DoRun is FALSE, the return value is a list containing the module
#' specifications. If DoRun is TRUE, there is no return value. The function
#' writes out messages to the console and to the log as the testing proceeds.
#' These messages include the time when each test starts and when it ends.
#' When a key test fails, requiring a fix before other tests can be run,
#' execution stops and an error message is written to the console. Detailed
#' error messages are also written to the log.
#' @export
testModule <-
function(ModuleName,Param_ls=NULL,...) {
# ParamDir = "defs",
# RunParamFile = "run_parameters.json",
# GeoFile = "geo.csv",
# ModelParamFile = "model_parameters.json",
# LoadDatastore = FALSE,
# SaveDatastore = TRUE,
# DoRun = TRUE,
# RunFor = "AllYears",
# StopOnErr = TRUE,
# RequiredPackages = NULL,
# TestGeoName = NULL)
# TODO: make this work with the new parameter setup
# the entire thing needs to be rethought...
#Set working directory to tests and return to main module directory on exit
#--------------------------------------------------------------------------
setwd("tests")
on.exit(setwd("../"))
if ( ! is.list(Param_ls) ) {
model.env <- modelEnvironment()
if ( "RunParam_ls" %in% ls(model.env) ) {
Param_ls <- model.env$RunParam_ls
} else {
Param_ls <- list()
}
}
ParamDir = "defs"
RunParamFile = "run_parameters.json"
GeoFile = "geo.csv"
ModelParamFile = "model_parameters.json"
LoadDatastore = FALSE
SaveDatastore = TRUE
DoRun = TRUE
RunFor = "AllYears"
StopOnErr = TRUE
RequiredPackages = NULL
TestGeoName = NULL
defParam_ls <- list(
ParamDir = "defs",
RunParamFile = "run_parameters.json",
GeoFile = "geo.csv",
ModelParamFile = "model_parameters.json",
LoadDatastore = FALSE,
SaveDatastore = TRUE,
DoRun = TRUE,
RunFor = "AllYears",
StopOnErr = TRUE,
RequiredPackages = NULL,
TestGeoName = NULL
)
missing <- ! names(defParam_ls) %in% names(Param_ls)
Param_ls[missing] <- defParam_ls[missing]
f.env <- environment()
for ( p in names(Param_ls) ) assign(p,Param_ls[p],envir=f.env)
#Initialize model state and log files
#------------------------------------
Msg <- paste0("Testing ", ModuleName, ".")
initLog(Save=TRUE,Threshold="info")
initModelState(Save=TRUE,Param_ls=NULL)
writeLog(Msg,Level="warn")
rm(Msg)
#Assign the correct datastore interaction functions
#--------------------------------------------------
assignDatastoreFunctions(readModelState()$DatastoreType)
#Make correspondence tables of modules and datasets to packages
#--------------------------------------------------------------
#This supports soft call and dataset references in modules
RequiredPkg_ <- RequiredPackages
#If RequiredPkg_ is not NULL make a list of modules and datasets in packages
if (!is.null(RequiredPkg_)) {
#Make sure all required packages are present
InstalledPkgs_ <- rownames(installed.packages())
MissingPkg_ <- RequiredPkg_[!(RequiredPkg_ %in% InstalledPkgs_)];
if (length(MissingPkg_ != 0)) {
Msg <-
paste0("One or more required packages need to be installed in order ",
"to run the model. Following are the missing package(s): ",
paste(MissingPkg_, collapse = ", "), ".")
stop(Msg)
}
#Identify all modules and datasets in required packages
Datasets_df <-
data.frame(
do.call(
rbind,
lapply(RequiredPkg_, function(x) {
data(package = x)$results[,c("Package", "Item")]
})
), stringsAsFactors = FALSE
)
WhichAreModules_ <- grep("Specifications", Datasets_df$Item)
ModulesByPackage_df <- Datasets_df[WhichAreModules_,]
ModulesByPackage_df$Module <-
gsub("Specifications", "", ModulesByPackage_df$Item)
ModulesByPackage_df$Item <- NULL
DatasetsByPackage_df <- Datasets_df[-WhichAreModules_,]
names(DatasetsByPackage_df) <- c("Package", "Dataset")
#Save the modules and datasets lists in the model state
setModelState(list(ModulesByPackage_df = ModulesByPackage_df,
DatasetsByPackage_df = DatasetsByPackage_df))
rm(Datasets_df, WhichAreModules_)
}
#Load datastore if specified or initialize new datastore
#-------------------------------------------------------
if (LoadDatastore) {
writeLog("Attempting to load datastore.", Level="warn")
DatastoreName <- getModelState()$DatastoreName
if (!file.exists(DatastoreName)) {
Msg <-
paste0("LoadDatastore argument is TRUE but the datastore file ",
"specified in the RunParamFile doesn't exist in the tests ",
"directory.")
stop(Msg)
rm(Msg)
}
loadDatastore(
FileToLoad = DatastoreName,
SaveDatastore = FALSE
)
writeLog("Datastore loaded.", Level="warn")
} else {
writeLog("Attempting to initialize datastore.", Level="warn")
initDatastore()
readGeography()
initDatastoreGeography()
loadModelParameters()
writeLog("Datastore initialized.", Level="warn")
}
#Load module specifications and check whether they are proper
#------------------------------------------------------------
loadSpec <- function() {
SpecsName <- paste0(ModuleName, "Specifications")
SpecsFileName <- paste0("../data/", SpecsName, ".rda")
load(SpecsFileName)
return(processModuleSpecs(get(SpecsName)))
}
writeLog("Attempting to load and check specifications.", Level="warn")
Specs_ls <- loadSpec()
#Check for errors
Errors_ <- checkModuleSpecs(Specs_ls, ModuleName)
if (length(Errors_) != 0) {
Msg <-
paste0("Specifications for module '", ModuleName,
"' have the following errors.")
writeLog(Msg,Level="error")
writeLog(Errors_,Level="error")
Msg <- paste0("Specifications for module '", ModuleName,
"' have errors. Check the log for details.")
stop(Msg)
rm(Msg)
}
rm(Errors_)
writeLog("Module specifications successfully loaded and checked for errors.",
Level="warn")
#Check for developer warnings
DeveloperWarnings_ls <-
lapply(c(Specs_ls$Inp, Specs_ls$Get, Specs_ls$Set), function(x) {
attributes(x)$WARN
})
DeveloperWarnings_ <-
unique(unlist(lapply(DeveloperWarnings_ls, function(x) x[!is.null(x)])))
if (length(DeveloperWarnings_) != 0) {
writeLog(DeveloperWarnings_,Level="warn")
Msg <- paste0(
"Specifications check for module '", ModuleName,
"' generated warnings. Check log for details."
)
warning(Msg)
rm(DeveloperWarnings_ls, DeveloperWarnings_, Msg)
}
#Process, check, and load module inputs
#--------------------------------------
if (is.null(Specs_ls$Inp)) {
writeLog("No inputs to process.", Level="warn")
# If no inputs and the module is "Initialize", we're done
# i.e. all inputs are optional and none are provided
if (ModuleName == "Initialize") return()
} else {
writeLog("Attempting to process, check and load module inputs.",
Level="warn")
# Process module inputs
ProcessedInputs_ls <- processModuleInputs(Specs_ls, ModuleName)
# Write warnings to log if any
if (length(ProcessedInputs_ls$Warnings != 0)) {
writeLog(ProcessedInputs_ls$Warnings,Level="warn")
}
# Write errors to log and stop if any errors
if (length(ProcessedInputs_ls$Errors) != 0) {
Msg <- paste0(
"Input files for module ", ModuleName,
" have errors. Check the log for details."
)
writeLog(ProcessedInputs_ls$Errors,Level="error")
stop(Msg)
}
# If module is NOT Initialize, save the inputs in the datastore
if (ModuleName != "Initialize") {
inputsToDatastore(ProcessedInputs_ls, Specs_ls, ModuleName)
writeLog("Module inputs successfully checked and loaded into datastore.",
Level="warn")
} else {
if (DoRun) {
# If module IS Initialize, apply the Initialize function
initFunc <- get("Initialize")
InitializedInputs_ls <- initFunc(ProcessedInputs_ls)
# Write warnings to log if any
if (length(InitializedInputs_ls$Warnings != 0)) {
writeLog(InitializedInputs_ls$Warnings,Level="warn")
}
# Write errors to log and stop if any errors
if (length(InitializedInputs_ls$Errors) != 0) {
writeLog(InitializedInputs_ls$Errors,Level="error")
stop("Errors in Initialize module inputs. Check log for details.")
}
# Save inputs to datastore
inputsToDatastore(InitializedInputs_ls, Specs_ls, ModuleName)
writeLog("Module inputs successfully checked and loaded into datastore.",
Level="warn")
return() # Break out of function because purpose of Initialize is to process inputs.
} else {
return(ProcessedInputs_ls)
}
}
}
#Check whether datastore contains all data items in Get specifications
#---------------------------------------------------------------------
writeLog(
"Checking whether datastore contains all datasets in Get specifications.",
Level="warn")
G <- getModelState()
Get_ls <- Specs_ls$Get
#Vector to keep track of missing datasets that are specified
Missing_ <- character(0)
#Function to check whether dataset is optional
isOptional <- function(Spec_ls) {
if (!is.null(Spec_ls$OPTIONAL)) {
Spec_ls$OPTIONAL
} else {
FALSE
}
}
#Vector to keep track of Get specs that need to be removed from list because
#they are optional and the datasets are not present
OptSpecToRemove_ <- numeric(0)
#Check each specification
for (i in 1:length(Get_ls)) {
Spec_ls <- Get_ls[[i]]
if (Spec_ls$GROUP == "Year") {
for (Year in G$Years) {
if (RunFor == "NotBaseYear"){
if(!Year %in% G$BaseYear){
Present <-
checkDataset(Spec_ls$NAME, Spec_ls$TABLE, Year, G$Datastore)
if (!Present) {
if(isOptional(Spec_ls)) {
#Identify for removal because optional and not present
OptSpecToRemove_ <- c(OptSpecToRemove_, i)
} else {
#Identify as missing because not optional and not present
Missing_ <- c(Missing_, attributes(Present))
}
}
}
} else {
Present <-
checkDataset(Spec_ls$NAME, Spec_ls$TABLE, Year, G$Datastore)
if (!Present) {
if(isOptional(Spec_ls)) {
#Identify for removal because optional and not present
OptSpecToRemove_ <- c(OptSpecToRemove_, i)
} else {
#Identify as missing because not optional and not present
Missing_ <- c(Missing_, attributes(Present))
}
}
}
}
}
if (Spec_ls$GROUP == "BaseYear") {
Present <-
checkDataset(Spec_ls$NAME, Spec_ls$TABLE, G$BaseYear, G$Datastore)
if (!Present) {
if (isOptional(Spec_ls)) {
#Identify for removal because optional and not present
OptSpecToRemove_ <- c(OptSpecToRemove_, i)
} else {
#Identify as missing because not optional and not present
Missing_ <- c(Missing_, attributes(Present))
}
}
}
if (Spec_ls$GROUP == "Global") {
Present <-
checkDataset(Spec_ls$NAME, Spec_ls$TABLE, "Global", G$Datastore)
if (!Present) {
if (isOptional(Spec_ls)) {
#Identify for removal because optional and not present
OptSpecToRemove_ <- c(OptSpecToRemove_, i)
} else {
#Identify as missing because not optional and not present
Missing_ <- c(Missing_, attributes(Present))
}
}
}
}
#If any non-optional datasets are missing, write out error messages and
#stop execution
if (length(Missing_) != 0) {
Msg <-
paste0("The following datasets identified in the Get specifications ",
"for module ", ModuleName, " are missing from the datastore.")
Msg <- paste(c(Msg, Missing_), collapse = "\n")
writeLog(Msg,Level="error")
stop(
paste0("Datastore is missing one or more datasets specified in the ",
"Get specifications for module ", ModuleName, ". Check the log ",
"for details.")
)
rm(Msg)
}
#If any optional datasets are missing, remove the specifications for them so
#that there will be no errors when data are retrieved from the datastore
if (length(OptSpecToRemove_) != 0) {
Specs_ls$Get <- Specs_ls$Get[-OptSpecToRemove_]
}
writeLog(
"Datastore contains all datasets identified in module Get specifications.",
Level="warn")
#Run the module and check that results meet specifications
#---------------------------------------------------------
#The module is run only if the DoRun argument is TRUE. Otherwise the
#datastore is initialized, specifications are checked, and a list is
#returned which contains the specifications list, the data list from the
#datastore meeting specifications, and a functions list containing any
#called module functions.
#Run the module if DoRun is TRUE
if (DoRun) {
writeLog(
"Running module and checking whether outputs meet Set specifications.",
Level="warn"
)
if (SaveDatastore) {
writeLog("Also saving module outputs to datastore.", Level="warn")
}
#Load the module function
Func <- get(ModuleName)
#Load any modules identified by 'Call' spec if any
if (is.list(Specs_ls$Call)) {
Call <- list(
Func = list(),
Specs = list()
)
for (Alias in names(Specs_ls$Call)) {
#Called module function when specified as package::module
Function <- Specs_ls$Call[[Alias]]
#Called module function when only module is specified
if (length(unlist(strsplit(Function, "::"))) == 1) {
Pkg_df <- getModelState()$ModulesByPackage_df
Function <-
paste(Pkg_df$Package[Pkg_df$Module == Function], Function, sep = "::")
rm(Pkg_df)
}
#Called module specifications
Specs <- paste0(Function, "Specifications")
#Assign called module function and specifications for the alias
Call$Func[[Alias]] <- eval(parse(text = Function))
Call$Specs[[Alias]] <- processModuleSpecs(eval(parse(text = Specs)))
Call$Specs[[Alias]]$RunBy <- Specs_ls$RunBy
}
}
#Run module for each year
if (RunFor == "AllYears") Years <- getYears()
if (RunFor == "BaseYear") Years <- G$BaseYear
if (RunFor == "NotBaseYear") Years <- getYears()[!getYears() %in% G$BaseYear]
for (Year in Years) {
ResultsCheck_ <- character(0)
#If RunBy is 'Region', this code is run
if (Specs_ls$RunBy == "Region") {
#Get data from datastore
L <- getFromDatastore(Specs_ls, RunYear = Year)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
L[[Alias]] <-
getFromDatastore(Call$Specs[[Alias]], RunYear = Year)
}
}
#Run module
if (exists("Call")) {
R <- Func(L, Call$Func)
} else {
R <- Func(L)
}
#Check for errors and warnings in module return list
#Save results in datastore if no errors from module
if (is.null(R$Errors)) {
#Check results
Check_ <-
checkModuleOutputs(
Data_ls = R,
ModuleSpec_ls = Specs_ls,
ModuleName = ModuleName)
ResultsCheck_ <- Check_
#Save results if SaveDatastore and no errors found
if (SaveDatastore & length(Check_) == 0) {
setInDatastore(R, Specs_ls, ModuleName, Year, Geo = NULL)
}
}
#Handle warnings
if (!is.null(R$Warnings)) {
writeLog(R$Warnings,Level="warn")
Msg <-
paste0("Module ", ModuleName, " has reported one or more warnings. ",
"Check log for details.")
warning(Msg)
}
#Handle errors
if (!is.null(R$Errors) & StopOnErr) {
writeLog(R$Errors,Level="warn")
Msg <-
paste0("Module ", ModuleName, " has reported one or more errors. ",
"Check log for details.")
stop(Msg)
}
#Otherwise the following code is run
} else {
#Initialize vectors to store module errors and warnings
Errors_ <- character(0)
Warnings_ <- character(0)
#Identify the units of geography to iterate over
GeoCategory <- Specs_ls$RunBy
#Create the geographic index list
GeoIndex_ls <- createGeoIndexList(c(Specs_ls$Get, Specs_ls$Set), GeoCategory, Year)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
GeoIndex_ls[[Alias]] <-
createGeoIndexList(Call$Specs[[Alias]]$Get, GeoCategory, Year)
}
}
#Run module for each geographic area
Geo_ <- readFromTable(GeoCategory, GeoCategory, Year)
for (Geo in Geo_) {
#Get data from datastore for geographic area
L <-
getFromDatastore(Specs_ls, RunYear = Year, Geo = Geo, GeoIndex_ls = GeoIndex_ls)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
L[[Alias]] <-
getFromDatastore(Call$Specs[[Alias]], RunYear = Year, Geo = Geo, GeoIndex_ls = GeoIndex_ls[[Alias]])
}
}
#Run model for geographic area
if (exists("Call")) {
R <- Func(L, Call$Func)
} else {
R <- Func(L)
}
#Check for errors and warnings in module return list
#Save results in datastore if no errors from module
if (is.null(R$Errors)) {
#Check results
Check_ <-
checkModuleOutputs(
Data_ls = R,
ModuleSpec_ls = Specs_ls,
ModuleName = ModuleName)
ResultsCheck_ <- c(ResultsCheck_, Check_)
#Save results if SaveDatastore and no errors found
if (SaveDatastore & length(Check_) == 0) {
setInDatastore(R, Specs_ls, ModuleName, Year, Geo = Geo, GeoIndex_ls = GeoIndex_ls)
}
}
#Handle warnings
if (!is.null(R$Warnings)) {
writeLog(R$Warnings,Level="warn")
Msg <-
paste0("Module ", ModuleName, " has reported one or more warnings. ",
"Check log for details.")
warning(Msg)
}
#Handle errors
if (!is.null(R$Errors) & StopOnErr) {
writeLog(R$Errors,Level="error")
Msg <-
paste0("Module ", ModuleName, " has reported one or more errors. ",
"Check log for details.")
stop(Msg)
}
}
}
if (length(ResultsCheck_) != 0) {
Msg <-
paste0("Following are inconsistencies between module outputs and the ",
"module Set specifications:")
Msg <- paste(c(Msg, ResultsCheck_), collapse = "\n")
writeLog(Msg,Level="error")
rm(Msg)
stop(
paste0("The outputs for module ", ModuleName, " are inconsistent ",
"with one or more of the module's Set specifications. ",
"Check the log for details."))
}
}
writeLog("Module run successfully and outputs meet Set specifications.",
Level="warn")
if (SaveDatastore) {
writeLog("Module outputs saved to datastore.", Level="warn")
}
#Print success message if no errors found
Msg <- paste0("Congratulations. Module ", ModuleName, " passed all tests.")
writeLog(Msg, Level="warn")
rm(Msg)
#Return the specifications, data list, and functions list if DoRun is FALSE
} else {
#Load any modules identified by 'Call' spec if any
if (!is.null(Specs_ls$Call)) {
Call <- list(
Func = list(),
Specs = list()
)
for (Alias in names(Specs_ls$Call)) {
Function <- Specs_ls$Call[[Alias]]
#Called module function when only module is specified
if (length(unlist(strsplit(Function, "::"))) == 1) {
Pkg_df <- getModelState()$ModulesByPackage_df
Function <-
paste(Pkg_df$Package[Pkg_df$Module == Function], Function, sep = "::")
rm(Pkg_df)
}
#Called module specifications
Specs <- paste0(Function, "Specifications")
Call$Func[[Alias]] <- eval(parse(text = Function))
Call$Specs[[Alias]] <- processModuleSpecs(eval(parse(text = Specs)))
}
}
#Get data from datastore
if (RunFor == "AllYears") Year <- getYears()[1]
if (RunFor == "BaseYear") Year <- G$BaseYear
if (RunFor == "NotBaseYear") Year <- getYears()[!getYears() %in% G$BaseYear][1]
#Identify the units of geography to iterate over
GeoCategory <- Specs_ls$RunBy
#Create the geographic index list
GeoIndex_ls <- createGeoIndexList(Specs_ls$Get, GeoCategory, Year)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
GeoIndex_ls[[Alias]] <-
createGeoIndexList(Call$Specs[[Alias]]$Get, GeoCategory, Year)
}
}
#Get the data required
if (GeoCategory == "Region") {
L <- getFromDatastore(Specs_ls, RunYear = Year, Geo = NULL)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
L[[Alias]] <-
getFromDatastore(Call$Specs[[Alias]], RunYear = Year, Geo = NULL)
}
}
} else {
Geo_ <- readFromTable(GeoCategory, GeoCategory, Year)
#Check whether the TestGeoName is proper
if (!is.null(TestGeoName)) {
if (!(TestGeoName %in% Geo_)) {
stop(paste0(
"The 'TestGeoName' value - ", TestGeoName,
" - is not a recognized name for the ",
GeoCategory, " geography that this module is specified to be run ",
"for."
))
}
}
#If TestGeoName is NULL get the data for the first name in the list
if (is.null(TestGeoName)) TestGeoName <- Geo_[1]
#Get the data
L <- getFromDatastore(Specs_ls, RunYear = Year, Geo = TestGeoName, GeoIndex_ls = GeoIndex_ls)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
L[[Alias]] <-
getFromDatastore(Call$Specs[[Alias]], RunYear = Year, Geo = TestGeoName, GeoIndex_ls = GeoIndex_ls)
}
}
}
#Return the specifications, data list, and called functions
if (exists("Call")) {
return(list(Specs_ls = Specs_ls, L = L, M = Call$Func))
} else {
return(list(Specs_ls = Specs_ls, L = L))
}
}
}
#LOAD SAVED DATASTORE
#====================
#' Load saved datastore for testing
#'
#' \code{loadDatastore} a visioneval framework control function that copies an
#' existing saved datastore and writes information to run environment.
#'
#' This function copies a saved datastore to serve as the working datastore and
#' updates the model state list with related geographic information. This
#' function enables scenario variants to be built from a constant set of
#' starting conditions.
#'
#' @param FileToLoad A string identifying the full path name to the saved
#' datastore. Path name can either be relative to the working directory or
#' absolute.
#' @param SaveDatastore A logical identifying whether an existing datastore
#' will be saved. It is renamed by appending the system time to the name. The
#' default value is TRUE.
#' @return TRUE if the datastore is loaded. It copies the saved datastore to
#' the working directory as the working datastore (e.g. 'datastore.h5'). If a
#' working datastore file already exists, it first copies that file to an
#' archive file whose name includes the current system time. The function
#' updates information in the model state file regarding the model geography
#' and the contents of the loaded datastore. If the stored file does not exist
#' an error is thrown.
#' @export
loadDatastore <- function(FileToLoad, SaveDatastore = TRUE) {
  # TODO: This function is apparently only used when testing a module
  # (and it is mighty invasive for that!)
  G <- getModelState()
  #If a working datastore exists and SaveDatastore is TRUE, archive it under a
  #timestamped name before it is overwritten by the loaded datastore
  DatastoreName <- G$DatastoreName
  if (file.exists(DatastoreName) && SaveDatastore) {
    # TODO: blow away the existing one if SaveDatastore is not TRUE
    TimeString <- gsub(" ", "_", as.character(Sys.time()))
    #Insert the timestamp between the file name and its extension. Using
    #tools::file_path_sans_ext()/file_ext() is robust to paths that contain
    #'.' characters in directory components (e.g. './datastore.h5'), which
    #naive splitting on '.' is not.
    ArchiveDatastoreName <-
      paste0(tools::file_path_sans_ext(DatastoreName),
             "_", TimeString, ".",
             tools::file_ext(DatastoreName))
    #Colons (from the time string) are illegal in file names on some platforms
    ArchiveDatastoreName <- gsub(":", "-", ArchiveDatastoreName)
    file.copy(DatastoreName, ArchiveDatastoreName)
  }
  #Copy the saved datastore into place and rebuild the datastore index;
  #stop with an error if the file to load does not exist
  if (file.exists(FileToLoad)) {
    file.copy(FileToLoad, DatastoreName)
    # Note: already checked geography consistency
    # GeoFile <- file.path(Dir, GeoFile)
    # Geo_df <- read.csv(GeoFile, colClasses = "character")
    # Update_ls <- list()
    # Update_ls$BzoneSpecified <- !all(is.na(Geo_df$Bzone))
    # Update_ls$CzoneSpecified <- !all(is.na(Geo_df$Czone))
    # Update_ls$Geo_df <- Geo_df
    # setModelState(Update_ls)
    listDatastore() # Rebuild datastore index
  } else {
    Message <- paste("File", FileToLoad, "not found.")
    writeLog(Message, Level = "error")
    stop(Message)
  }
  TRUE
}
#SIMULATE DATA STORE TRANSACTIONS
#================================
#' Create simulation of datastore transactions.
#'
#' \code{simDataTransactions} a visioneval framework control function that loads
#' all module specifications in order (by run year) and creates a simulated
#' listing of the data which is in the datastore and the requests for data from
#' the datastore, and checks that the tables that datasets are to be put in
#' and the datasets that data is to be retrieved from will be present.
#'
#' This function creates a list of the datastore listings for the working
#' datastore and for all datastore references. The list includes a 'Global'
#' component, in which 'Global' references are simulated, components for each
#' model run year, in which 'Year' references are simulated, and if the base
#' year is not one of the run years, a base year component, in which base year
#' references are simulated. For each model run year the function steps through
#' a data frame of module calls as produced by 'parseModelScript', and loads and
#' processes the module specifications in order: adds 'NewInpTable' references,
#' adds 'Inp' dataset references, checks whether references to datasets
#' identified in 'Get' specifications are present, adds 'NewSetTable' references,
#' and adds 'Set' dataset references. The function compiles a vector of error
#' and warning messages. Error messages are made if: 1) a 'NewInpTable' or
#' 'NewSetTable' specification of a module would create a new table for a table
#' that already exists; 2) a dataset identified by a 'Get' specification would
#' not be present in the working datastore or any referenced datastores; 3) the
#' 'Get' specifications for a dataset would not be consistent with the
#' specifications for the dataset in the datastore. The function compiles
#' warnings if a 'Set' specification will cause existing data in the working
#' datastore to be overwritten. The function writes warning and error messages
#' to the log and stops program execution if there are any errors.
#'
#' @param AllSpecs_ls A list containing the processed specifications of all of
#' the modules run by model script in the order that the modules are called with
#' duplicated module calls removed. Information about each module call is a
#' component of the list in the order of the module calls. Each component is
#' composed of 3 components: 'ModuleName' contains the name of the module,
#' 'PackageName' contains the name of the package the module is in, and
#' 'Specs' contains the processed specifications of the module. The 'Get'
#' specification component includes the 'Get' specifications of all modules
#' that are called by the module. See \code{parseModuleCalls}.
#'
#' @return There is no return value. The function has the side effect of
#' writing messages to the log and stops program execution if there are any
#' errors.
#' @export
simDataTransactions <- function(AllSpecs_ls) {
  G <- getModelState()
  #Initialize errors and warnings vectors
  #--------------------------------------
  #Messages are accumulated in these vectors as the simulation proceeds and
  #are written to the log (and errors stop execution) at the end
  Errors_ <- character(0)
  addError <- function(Msg) {
    Errors_ <<- c(Errors_, Msg)
  }
  Warnings_ <- character(0)
  addWarning <- function(Msg) {
    Warnings_ <<- c(Warnings_, Msg)
  }
  #Make a list to store the working datastore and all referenced datastores
  #------------------------------------------------------------------------
  #If the base year is not a run year, it is added so that 'BaseYear'
  #references can be simulated as well
  RunYears_ <- getYears()
  BaseYear <- G$BaseYear
  if (BaseYear %in% RunYears_) {
    Years_ <- RunYears_
  } else {
    Years_ <- c(BaseYear, RunYears_)
  }
  Dstores_ls <-
    list(
      Global = list()
    )
  for (Year in Years_) Dstores_ls[[Year]] <- list()
  #Add the working datastore inventory to the datastores list
  #----------------------------------------------------------
  Dstores_ls[["Global"]] <-
    G$Datastore[grep("Global", G$Datastore$group),]
  # for (Year in RunYears_) {
  #   Dstores_ls[[Year]][[G$DatastoreName]] <-
  #     G$Datastore[grep(Year, G$Datastore$group),]
  # }
  #Identify the year groups (i.e. top-level groups other than 'Global') that
  #are present in the working datastore inventory
  getDatastoreYears <- function() {
    DstoreGroups_ls <- strsplit(G$Datastore$group, "/")
    #Top-level groups split into exactly 2 components (leading '/' yields "")
    ToKeep_ <- unlist(lapply(DstoreGroups_ls, function(x) length(x) == 2))
    DstoreGroups_ls <- DstoreGroups_ls[ToKeep_]
    DstoreGroups_ <- unique(unlist(lapply(DstoreGroups_ls, function(x) x[2])))
    DstoreGroups_[!(DstoreGroups_ %in% "Global")]
  }
  for (Year in getDatastoreYears()) {
    Dstores_ls[[Year]] <-
      G$Datastore[grep(Year, G$Datastore$group),]
  }
  # #Function to get datastore inventory corresponding to datastore reference
  # #------------------------------------------------------------------------
  # getInventoryRef <- function(DstoreRef) {
  #   SplitRef_ <- unlist(strsplit(DstoreRef, "/"))
  #   RefHead <- paste(SplitRef_[-length(SplitRef_)], collapse = "/")
  #   paste(RefHead, getModelStateFileName(), sep = "/")
  # }
  #
  # #Get datastore inventories for datastore references
  # #--------------------------------------------------
  # if (!is.null(G$DatastoreReferences)) {
  #   RefNames_ <- names(G$DatastoreReferences)
  #   for (Name in RefNames_) {
  #     Refs_ <- G$DatastoreReferences[[Name]]
  #     for (Ref in Refs_) {
  #       if (file.exists(Ref)) {
  #         RefDstore_df <-
  #           readModelState(FileName = getInventoryRef(Ref))$Datastore
  #         RefDstore_df <- RefDstore_df[grep(Name, RefDstore_df$group),]
  #         Dstores_ls[[Name]][[Ref]] <- RefDstore_df
  #         rm(RefDstore_df)
  #
  #       } else {
  #         Msg <-
  #           paste0("The file '", Ref,
  #                  "' included in the 'DatastoreReferences' in the ",
  #                  "'run_parameters.json' file is not present.")
  #         addError(Msg)
  #       }
  #     }
  #   }
  # }
  #Define function to add table reference to datastore inventory
  #-------------------------------------------------------------
  #Dstore_df is the simulated datastore inventory, TableSpec_ is a processed
  #'NewInpTable' or 'NewSetTable' specification (with MODULE added),
  #IsBaseYear identifies whether the simulated year is the base year, and
  #MakeTableType is "Inp" or "Set". Registers an error if the table would
  #already exist. Returns the updated inventory. Note: relies on 'Year' from
  #the enclosing simulation loop.
  addTableRef <- function(Dstore_df, TableSpec_, IsBaseYear, MakeTableType) {
    Group <- TableSpec_$GROUP
    if (Group == "Year") Group <- Year
    Table <- TableSpec_$TABLE
    #Check if table already exists
    HasTable <- checkTableExistence(Table, Group, Dstore_df)
    #If table exists then possible error, otherwise add reference to table
    if (HasTable) {
      #Is not an error if the group is 'Global' and year is not the base year
      #because the same 'Global' table is revisited in each simulated year
      if (Group == "Global" & !IsBaseYear) {
        NewDstore_df <- Dstore_df
      #Otherwise is an error
      } else {
        #Use the specification names actually used by modules
        #(NewInpTable/NewSetTable) in the error message
        if (MakeTableType == "Inp") {
          MakeTableSpecName <- "NewInpTable"
        } else {
          MakeTableSpecName <- "NewSetTable"
        }
        Msg <-
          paste0("Error: ", MakeTableSpecName, " specification for module '",
                 TableSpec_$MODULE, "' will create a table '", Table,
                 "' that already exists in the working datastore.")
        addError(Msg)
        NewDstore_df <- Dstore_df
      }
    } else {
      NewDstore_df <- data.frame(
        group = c(Dstore_df$group, paste0("/", Group)),
        name = c(Dstore_df$name, Table),
        groupname = c(Dstore_df$groupname, paste0(Group, "/", Table)),
        stringsAsFactors = FALSE
      )
      NewDstore_df$attributes <- c(Dstore_df$attributes, list(TableSpec_))
    }
    NewDstore_df
  }
  #Define function to add dataset reference to datastore inventory
  #---------------------------------------------------------------
  #Registers a warning if an existing dataset would be overwritten and an
  #error if the overwriting specifications are inconsistent with the dataset
  #attributes. Returns the updated inventory. Note: relies on 'Year' and
  #'Module' from the enclosing simulation loop.
  addDatasetRef <- function(Dstore_df, DatasetSpec_, IsBaseYear) {
    Group <- DatasetSpec_$GROUP
    if (Group == "Year") Group <- Year
    Table <- DatasetSpec_$TABLE
    Name <- DatasetSpec_$NAME
    #Check if dataset already exists
    HasDataset <- checkDataset(Name, Table, Group, Dstore_df)
    #If dataset exists then warn and check consistency of specifications
    if (HasDataset) {
      #No need to check if the group is 'Global' and year is not the base year
      #because the same 'Global' dataset is revisited in each simulated year
      if (Group == "Global" & !IsBaseYear) {
        NewDstore_df <- Dstore_df
      #Otherwise issue a warning and check for consistent data specifications
      } else {
        #Add warning that existing dataset will be overwritten
        Msg <-
          paste0("Module '", Module, "' will overwrite dataset '", Name,
                 "' in table '", Table, "'.")
        addWarning(Msg)
        #Check attributes are consistent
        DstoreDatasetAttr_ls <-
          getDatasetAttr(Name, Table, Group, Dstore_df)
        AttrConsistency_ls <-
          checkSpecConsistency(DatasetSpec_, DstoreDatasetAttr_ls)
        if (length(AttrConsistency_ls$Errors) != 0) {
          addError(AttrConsistency_ls$Errors)
        }
        NewDstore_df <- Dstore_df
      }
    } else {
      NewDstore_df <- data.frame(
        group = c(Dstore_df$group, paste0("/", Group)),
        name = c(Dstore_df$name, Name),
        groupname = c(Dstore_df$groupname, paste0(Group, "/", Table, "/", Name)),
        stringsAsFactors = FALSE
      )
      NewDstore_df$attributes <-
        c(Dstore_df$attributes,
          list(DatasetSpec_[c("NAVALUE", "SIZE", "TYPE", "UNITS")]))
    }
    NewDstore_df
  }
  #Define function to check whether dataset is optional
  #----------------------------------------------------
  #A specification is optional only if it has an OPTIONAL component that is TRUE
  isOptional <- function(Spec_ls) {
    if (!is.null(Spec_ls$OPTIONAL)) {
      Spec_ls$OPTIONAL
    } else {
      FALSE
    }
  }
  #Iterate through run years and modules to simulate model run
  #-----------------------------------------------------------
  writeLog("Simulating model run.", Level = "warn")
  for (Year in RunYears_) {
    #Iterate through module calls
    for (i in seq_along(AllSpecs_ls)) {
      Module <- AllSpecs_ls[[i]]$ModuleName
      Package <- AllSpecs_ls[[i]]$PackageName
      RunFor <- AllSpecs_ls[[i]]$RunFor
      #Skip this module (not the remaining modules) if it is not specified to
      #run for the year being simulated. Compare against the base year value,
      #not the literal string "BaseYear", which never matches an actual year.
      if (RunFor == "BaseYear" && Year != BaseYear) next
      if (RunFor == "NotBaseYear" && Year == BaseYear) next
      ModuleSpecs_ls <-
        processModuleSpecs(getModuleSpecs(Module, Package))
      #Add 'Inp' table references to the working datastore inventory
      #-------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$NewInpTable)) {
        for (j in seq_along(ModuleSpecs_ls$NewInpTable)) {
          Spec_ls <- ModuleSpecs_ls$NewInpTable[[j]]
          Spec_ls$MODULE <- Module
          if (Spec_ls[["GROUP"]] == "Global") {
            RefGroup <- "Global"
          } else {
            RefGroup <- Year
          }
          #Get the datastore inventory for the group
          Dstore_df <- Dstores_ls[[RefGroup]]
          #Add the table reference and check for table add error
          Dstores_ls[[RefGroup]] <-
            addTableRef(Dstore_df, Spec_ls, Year == BaseYear, "Inp")
          rm(Spec_ls, RefGroup, Dstore_df)
        }
        rm(j)
      }
      #Add 'Inp' dataset references to the working datastore inventory
      #---------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$Inp)) {
        for (j in seq_along(ModuleSpecs_ls$Inp)) {
          Spec_ls <- ModuleSpecs_ls$Inp[[j]]
          Spec_ls$MODULE <- Module
          if (Spec_ls[["GROUP"]] == "Global") {
            RefGroup <- "Global"
          } else {
            RefGroup <- Year
          }
          #Get the datastore inventory for the group
          Dstore_df <- Dstores_ls[[RefGroup]]
          #Add the dataset reference and check for dataset add error
          Dstores_ls[[RefGroup]] <-
            addDatasetRef(Dstore_df, Spec_ls, Year == BaseYear)
          rm(Spec_ls, RefGroup, Dstore_df)
        }
        rm(j)
      }
      #Check for presence of 'Get' dataset references in datastore inventory
      #---------------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$Get)) {
        for (j in seq_along(ModuleSpecs_ls$Get)) {
          Spec_ls <- ModuleSpecs_ls$Get[[j]]
          Spec_ls$MODULE <- Module
          #Translate the specification group into a datastore group name
          #('Global' requires no translation)
          Group <- Spec_ls[["GROUP"]]
          Table <- Spec_ls[["TABLE"]]
          Name <- Spec_ls[["NAME"]]
          if (Group == "BaseYear") {
            Group <- G$BaseYear
          }
          if (Group == "Year") {
            Group <- Year
          }
          #Check whether the dataset will be present and whether its
          #attributes are consistent with the 'Get' specification
          DatasetFound <- FALSE
          Dstore_df <- Dstores_ls[[Group]]
          DatasetInDstore <- checkDataset(Name, Table, Group, Dstore_df)
          if (DatasetInDstore) {
            DatasetFound <- TRUE
            DstoreAttr_ <- getDatasetAttr(Name, Table, Group, Dstore_df)
            AttrConsistency_ls <-
              checkSpecConsistency(Spec_ls, DstoreAttr_)
            if (length(AttrConsistency_ls$Errors) != 0) {
              addError(AttrConsistency_ls$Errors)
            }
            rm(DstoreAttr_, AttrConsistency_ls)
          }
          rm(Dstore_df, DatasetInDstore)
          #Register an error (rather than stopping immediately) if a required
          #dataset is missing so that all errors are reported together
          if (!DatasetFound && !isOptional(Spec_ls)) {
            Msg <-
              paste0("Module '", Module,
                     "' has a 'Get' specification for dataset '", Name,
                     "' in table '", Table,
                     "' that will not be present in the working datastore or ",
                     "any referenced datastores when it is needed.")
            addError(Msg)
          }
        }
      }
      #Add 'Set' table references to the working datastore inventory
      #-------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$NewSetTable)) {
        for (j in seq_along(ModuleSpecs_ls$NewSetTable)) {
          Spec_ls <- ModuleSpecs_ls$NewSetTable[[j]]
          Spec_ls$MODULE <- Module
          if (Spec_ls[["GROUP"]] == "Global") {
            RefGroup <- "Global"
          } else {
            RefGroup <- Year
          }
          #Get the datastore inventory for the group
          Dstore_df <- Dstores_ls[[RefGroup]]
          #Add the table reference and check for table add error
          Dstores_ls[[RefGroup]] <-
            addTableRef(Dstore_df, Spec_ls, Year == BaseYear, "Set")
          rm(Spec_ls, RefGroup, Dstore_df)
        }
      }
      #Add 'Set' dataset references to the working datastore inventory
      #---------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$Set)) {
        for (j in seq_along(ModuleSpecs_ls$Set)) {
          Spec_ls <- ModuleSpecs_ls$Set[[j]]
          Spec_ls$MODULE <- Module
          if (Spec_ls[["GROUP"]] == "Global") {
            Group <- "Global"
          } else {
            Group <- Year
          }
          #Get the datastore inventory for the group
          Dstore_df <- Dstores_ls[[Group]]
          Dstores_ls[[Group]] <-
            addDatasetRef(Dstore_df, Spec_ls, Year == BaseYear)
          rm(Spec_ls, Group, Dstore_df)
        }
      }
      rm(Module, Package, ModuleSpecs_ls)
    } #End for loop through module calls
  } #End for loop through years
  #Report accumulated warnings and errors
  #--------------------------------------
  if (length(Warnings_) != 0) {
    Msg <-
      paste0("Model run simulation had one or more warnings. ",
             "Datasets will be overwritten when the model runs. ",
             "Check that this is what is intended. ")
    writeLog(Msg, Level = "warn")
    writeLog(Warnings_, Level = "warn")
  }
  if (length(Errors_) == 0) {
    writeLog("Model run simulation completed without identifying any errors.",
             Level = "warn")
  } else {
    Msg <-
      paste0("Model run simulation has found one or more errors. ",
             "The following errors must be corrected before the model may be run.")
    writeLog(Msg, Level = "error")
    writeLog(Errors_, Level = "error")
    stop(Msg, " Check log for details.")
  }
}
| /sources/framework/visioneval/R/tests.R | permissive | VisionEval/VisionEval-Dev | R | false | false | 43,064 | r | #========
#module.R
#========
#This script defines functions related to testing modules and other
#parts of the the system
#TEST MODULE
#===========
#' Test module
#'
#' \code{testModule} a visioneval framework module developer function that sets
#' up a test environment and tests a module.
#'
#' This function is used to set up a test environment and test a module to check
#' that it can run successfully in the VisionEval model system. The function
#' sets up the test environment by switching to the tests directory and
#' initializing a model state list, a log file, and a datastore. The user may
#' use an existing datastore rather than initialize a new datastore. The use
#' case for loading an existing datastore is where a package contains several
#' modules that run in sequence. The first module would initialize a datastore
#' and then subsequent modules use the datastore that is modified by testing the
#' previous module. When run this way, it is also necessary to set the
#' SaveDatastore argument equal to TRUE so that the module outputs will be
#' saved to the datastore. The function performs several tests including
#' checking whether the module specifications are written properly, whether
#' the test inputs are correct and complete and can be loaded into the
#' datastore, whether the datastore contains all the module inputs identified in
#' the Get specifications, whether the module will run, and whether all of the
#' outputs meet the module's Set specifications. The latter check is carried out
#' in large part by the checkModuleOutputs function that is called.
#'
#' @param ModuleName A string identifying the module name.
#' @param Param_ls Parameter configuration (list)
#' @param ... Other parameters (see comments)
#' @return If DoRun is FALSE, the return value is a list containing the module
#' specifications. If DoRun is TRUE, there is no return value. The function
#' writes out messages to the console and to the log as the testing proceeds.
#' These messages include the time when each test starts and when it ends.
#' When a key test fails, requiring a fix before other tests can be run,
#' execution stops and an error message is written to the console. Detailed
#' error messages are also written to the log.
#' @export
testModule <-
function(ModuleName,Param_ls=NULL,...) {
# ParamDir = "defs",
# RunParamFile = "run_parameters.json",
# GeoFile = "geo.csv",
# ModelParamFile = "model_parameters.json",
# LoadDatastore = FALSE,
# SaveDatastore = TRUE,
# DoRun = TRUE,
# RunFor = "AllYears",
# StopOnErr = TRUE,
# RequiredPackages = NULL,
# TestGeoName = NULL)
# TODO: make this work with the new parameter setup
# the entire thing needs to be rethought...
#Set working directory to tests and return to main module directory on exit
#--------------------------------------------------------------------------
setwd("tests")
on.exit(setwd("../"))
if ( ! is.list(Param_ls) ) {
model.env <- modelEnvironment()
if ( "RunParam_ls" %in% ls(model.env) ) {
Param_ls <- model.env$RunParam_ls
} else {
Param_ls <- list()
}
}
ParamDir = "defs"
RunParamFile = "run_parameters.json"
GeoFile = "geo.csv"
ModelParamFile = "model_parameters.json"
LoadDatastore = FALSE
SaveDatastore = TRUE
DoRun = TRUE
RunFor = "AllYears"
StopOnErr = TRUE
RequiredPackages = NULL
TestGeoName = NULL
defParam_ls <- list(
ParamDir = "defs",
RunParamFile = "run_parameters.json",
GeoFile = "geo.csv",
ModelParamFile = "model_parameters.json",
LoadDatastore = FALSE,
SaveDatastore = TRUE,
DoRun = TRUE,
RunFor = "AllYears",
StopOnErr = TRUE,
RequiredPackages = NULL,
TestGeoName = NULL
)
missing <- ! names(defParam_ls) %in% names(Param_ls)
Param_ls[missing] <- defParam_ls[missing]
f.env <- environment()
for ( p in names(Param_ls) ) assign(p,Param_ls[p],envir=f.env)
#Initialize model state and log files
#------------------------------------
Msg <- paste0("Testing ", ModuleName, ".")
initLog(Save=TRUE,Threshold="info")
initModelState(Save=TRUE,Param_ls=NULL)
writeLog(Msg,Level="warn")
rm(Msg)
#Assign the correct datastore interaction functions
#--------------------------------------------------
assignDatastoreFunctions(readModelState()$DatastoreType)
#Make correspondence tables of modules and datasets to packages
#--------------------------------------------------------------
#This supports soft call and dataset references in modules
RequiredPkg_ <- RequiredPackages
#If RequiredPkg_ is not NULL make a list of modules and datasets in packages
if (!is.null(RequiredPkg_)) {
#Make sure all required packages are present
InstalledPkgs_ <- rownames(installed.packages())
MissingPkg_ <- RequiredPkg_[!(RequiredPkg_ %in% InstalledPkgs_)];
if (length(MissingPkg_ != 0)) {
Msg <-
paste0("One or more required packages need to be installed in order ",
"to run the model. Following are the missing package(s): ",
paste(MissingPkg_, collapse = ", "), ".")
stop(Msg)
}
#Identify all modules and datasets in required packages
Datasets_df <-
data.frame(
do.call(
rbind,
lapply(RequiredPkg_, function(x) {
data(package = x)$results[,c("Package", "Item")]
})
), stringsAsFactors = FALSE
)
WhichAreModules_ <- grep("Specifications", Datasets_df$Item)
ModulesByPackage_df <- Datasets_df[WhichAreModules_,]
ModulesByPackage_df$Module <-
gsub("Specifications", "", ModulesByPackage_df$Item)
ModulesByPackage_df$Item <- NULL
DatasetsByPackage_df <- Datasets_df[-WhichAreModules_,]
names(DatasetsByPackage_df) <- c("Package", "Dataset")
#Save the modules and datasets lists in the model state
setModelState(list(ModulesByPackage_df = ModulesByPackage_df,
DatasetsByPackage_df = DatasetsByPackage_df))
rm(Datasets_df, WhichAreModules_)
}
#Load datastore if specified or initialize new datastore
#-------------------------------------------------------
if (LoadDatastore) {
writeLog("Attempting to load datastore.", Level="warn")
DatastoreName <- getModelState()$DatastoreName
if (!file.exists(DatastoreName)) {
Msg <-
paste0("LoadDatastore argument is TRUE but the datastore file ",
"specified in the RunParamFile doesn't exist in the tests ",
"directory.")
stop(Msg)
rm(Msg)
}
loadDatastore(
FileToLoad = DatastoreName,
SaveDatastore = FALSE
)
writeLog("Datastore loaded.", Level="warn")
} else {
writeLog("Attempting to initialize datastore.", Level="warn")
initDatastore()
readGeography()
initDatastoreGeography()
loadModelParameters()
writeLog("Datastore initialized.", Level="warn")
}
#Load module specifications and check whether they are proper
#------------------------------------------------------------
loadSpec <- function() {
SpecsName <- paste0(ModuleName, "Specifications")
SpecsFileName <- paste0("../data/", SpecsName, ".rda")
load(SpecsFileName)
return(processModuleSpecs(get(SpecsName)))
}
writeLog("Attempting to load and check specifications.", Level="warn")
Specs_ls <- loadSpec()
#Check for errors
Errors_ <- checkModuleSpecs(Specs_ls, ModuleName)
if (length(Errors_) != 0) {
Msg <-
paste0("Specifications for module '", ModuleName,
"' have the following errors.")
writeLog(Msg,Level="error")
writeLog(Errors_,Level="error")
Msg <- paste0("Specifications for module '", ModuleName,
"' have errors. Check the log for details.")
stop(Msg)
rm(Msg)
}
rm(Errors_)
writeLog("Module specifications successfully loaded and checked for errors.",
Level="warn")
#Check for developer warnings
DeveloperWarnings_ls <-
lapply(c(Specs_ls$Inp, Specs_ls$Get, Specs_ls$Set), function(x) {
attributes(x)$WARN
})
DeveloperWarnings_ <-
unique(unlist(lapply(DeveloperWarnings_ls, function(x) x[!is.null(x)])))
if (length(DeveloperWarnings_) != 0) {
writeLog(DeveloperWarnings_,Level="warn")
Msg <- paste0(
"Specifications check for module '", ModuleName,
"' generated warnings. Check log for details."
)
warning(Msg)
rm(DeveloperWarnings_ls, DeveloperWarnings_, Msg)
}
#Process, check, and load module inputs
#--------------------------------------
if (is.null(Specs_ls$Inp)) {
writeLog("No inputs to process.", Level="warn")
# If no inputs and the module is "Initialize", we're done
# i.e. all inputs are optional and none are provided
if (ModuleName == "Initialize") return()
} else {
writeLog("Attempting to process, check and load module inputs.",
Level="warn")
# Process module inputs
ProcessedInputs_ls <- processModuleInputs(Specs_ls, ModuleName)
# Write warnings to log if any
if (length(ProcessedInputs_ls$Warnings != 0)) {
writeLog(ProcessedInputs_ls$Warnings,Level="warn")
}
# Write errors to log and stop if any errors
if (length(ProcessedInputs_ls$Errors) != 0) {
Msg <- paste0(
"Input files for module ", ModuleName,
" have errors. Check the log for details."
)
writeLog(ProcessedInputs_ls$Errors,Level="error")
stop(Msg)
}
# If module is NOT Initialize, save the inputs in the datastore
if (ModuleName != "Initialize") {
inputsToDatastore(ProcessedInputs_ls, Specs_ls, ModuleName)
writeLog("Module inputs successfully checked and loaded into datastore.",
Level="warn")
} else {
if (DoRun) {
# If module IS Initialize, apply the Initialize function
initFunc <- get("Initialize")
InitializedInputs_ls <- initFunc(ProcessedInputs_ls)
# Write warnings to log if any
if (length(InitializedInputs_ls$Warnings != 0)) {
writeLog(InitializedInputs_ls$Warnings,Level="warn")
}
# Write errors to log and stop if any errors
if (length(InitializedInputs_ls$Errors) != 0) {
writeLog(InitializedInputs_ls$Errors,Level="error")
stop("Errors in Initialize module inputs. Check log for details.")
}
# Save inputs to datastore
inputsToDatastore(InitializedInputs_ls, Specs_ls, ModuleName)
writeLog("Module inputs successfully checked and loaded into datastore.",
Level="warn")
return() # Break out of function because purpose of Initialize is to process inputs.
} else {
return(ProcessedInputs_ls)
}
}
}
#Check whether datastore contains all data items in Get specifications
#---------------------------------------------------------------------
writeLog(
"Checking whether datastore contains all datasets in Get specifications.",
Level="warn")
G <- getModelState()
Get_ls <- Specs_ls$Get
#Vector to keep track of missing datasets that are specified
Missing_ <- character(0)
#Function to check whether dataset is optional
isOptional <- function(Spec_ls) {
if (!is.null(Spec_ls$OPTIONAL)) {
Spec_ls$OPTIONAL
} else {
FALSE
}
}
#Vector to keep track of Get specs that need to be removed from list because
#they are optional and the datasets are not present
OptSpecToRemove_ <- numeric(0)
#Check each specification
for (i in 1:length(Get_ls)) {
Spec_ls <- Get_ls[[i]]
if (Spec_ls$GROUP == "Year") {
for (Year in G$Years) {
if (RunFor == "NotBaseYear"){
if(!Year %in% G$BaseYear){
Present <-
checkDataset(Spec_ls$NAME, Spec_ls$TABLE, Year, G$Datastore)
if (!Present) {
if(isOptional(Spec_ls)) {
#Identify for removal because optional and not present
OptSpecToRemove_ <- c(OptSpecToRemove_, i)
} else {
#Identify as missing because not optional and not present
Missing_ <- c(Missing_, attributes(Present))
}
}
}
} else {
Present <-
checkDataset(Spec_ls$NAME, Spec_ls$TABLE, Year, G$Datastore)
if (!Present) {
if(isOptional(Spec_ls)) {
#Identify for removal because optional and not present
OptSpecToRemove_ <- c(OptSpecToRemove_, i)
} else {
#Identify as missing because not optional and not present
Missing_ <- c(Missing_, attributes(Present))
}
}
}
}
}
if (Spec_ls$GROUP == "BaseYear") {
Present <-
checkDataset(Spec_ls$NAME, Spec_ls$TABLE, G$BaseYear, G$Datastore)
if (!Present) {
if (isOptional(Spec_ls)) {
#Identify for removal because optional and not present
OptSpecToRemove_ <- c(OptSpecToRemove_, i)
} else {
#Identify as missing because not optional and not present
Missing_ <- c(Missing_, attributes(Present))
}
}
}
if (Spec_ls$GROUP == "Global") {
Present <-
checkDataset(Spec_ls$NAME, Spec_ls$TABLE, "Global", G$Datastore)
if (!Present) {
if (isOptional(Spec_ls)) {
#Identify for removal because optional and not present
OptSpecToRemove_ <- c(OptSpecToRemove_, i)
} else {
#Identify as missing because not optional and not present
Missing_ <- c(Missing_, attributes(Present))
}
}
}
}
#If any non-optional datasets are missing, write out error messages and
#stop execution
if (length(Missing_) != 0) {
Msg <-
paste0("The following datasets identified in the Get specifications ",
"for module ", ModuleName, " are missing from the datastore.")
Msg <- paste(c(Msg, Missing_), collapse = "\n")
writeLog(Msg,Level="error")
stop(
paste0("Datastore is missing one or more datasets specified in the ",
"Get specifications for module ", ModuleName, ". Check the log ",
"for details.")
)
rm(Msg)
}
#If any optional datasets are missing, remove the specifications for them so
#that there will be no errors when data are retrieved from the datastore
if (length(OptSpecToRemove_) != 0) {
Specs_ls$Get <- Specs_ls$Get[-OptSpecToRemove_]
}
writeLog(
"Datastore contains all datasets identified in module Get specifications.",
Level="warn")
#Run the module and check that results meet specifications
#---------------------------------------------------------
#The module is run only if the DoRun argument is TRUE. Otherwise the
#datastore is initialized, specifications are checked, and a list is
#returned which contains the specifications list, the data list from the
#datastore meeting specifications, and a functions list containing any
#called module functions.
#Run the module if DoRun is TRUE
if (DoRun) {
writeLog(
"Running module and checking whether outputs meet Set specifications.",
Level="warn"
)
if (SaveDatastore) {
writeLog("Also saving module outputs to datastore.", Level="warn")
}
#Load the module function
Func <- get(ModuleName)
#Load any modules identified by 'Call' spec if any
if (is.list(Specs_ls$Call)) {
Call <- list(
Func = list(),
Specs = list()
)
for (Alias in names(Specs_ls$Call)) {
#Called module function when specified as package::module
Function <- Specs_ls$Call[[Alias]]
#Called module function when only module is specified
if (length(unlist(strsplit(Function, "::"))) == 1) {
Pkg_df <- getModelState()$ModulesByPackage_df
Function <-
paste(Pkg_df$Package[Pkg_df$Module == Function], Function, sep = "::")
rm(Pkg_df)
}
#Called module specifications
Specs <- paste0(Function, "Specifications")
#Assign called module function and specifications for the alias
Call$Func[[Alias]] <- eval(parse(text = Function))
Call$Specs[[Alias]] <- processModuleSpecs(eval(parse(text = Specs)))
Call$Specs[[Alias]]$RunBy <- Specs_ls$RunBy
}
}
#Run module for each year
if (RunFor == "AllYears") Years <- getYears()
if (RunFor == "BaseYear") Years <- G$BaseYear
if (RunFor == "NotBaseYear") Years <- getYears()[!getYears() %in% G$BaseYear]
for (Year in Years) {
ResultsCheck_ <- character(0)
#If RunBy is 'Region', this code is run
if (Specs_ls$RunBy == "Region") {
#Get data from datastore
L <- getFromDatastore(Specs_ls, RunYear = Year)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
L[[Alias]] <-
getFromDatastore(Call$Specs[[Alias]], RunYear = Year)
}
}
#Run module
if (exists("Call")) {
R <- Func(L, Call$Func)
} else {
R <- Func(L)
}
#Check for errors and warnings in module return list
#Save results in datastore if no errors from module
if (is.null(R$Errors)) {
#Check results
Check_ <-
checkModuleOutputs(
Data_ls = R,
ModuleSpec_ls = Specs_ls,
ModuleName = ModuleName)
ResultsCheck_ <- Check_
#Save results if SaveDatastore and no errors found
if (SaveDatastore & length(Check_) == 0) {
setInDatastore(R, Specs_ls, ModuleName, Year, Geo = NULL)
}
}
#Handle warnings
if (!is.null(R$Warnings)) {
writeLog(R$Warnings,Level="warn")
Msg <-
paste0("Module ", ModuleName, " has reported one or more warnings. ",
"Check log for details.")
warning(Msg)
}
#Handle errors
if (!is.null(R$Errors) & StopOnErr) {
writeLog(R$Errors,Level="warn")
Msg <-
paste0("Module ", ModuleName, " has reported one or more errors. ",
"Check log for details.")
stop(Msg)
}
#Otherwise the following code is run
} else {
#Initialize vectors to store module errors and warnings
Errors_ <- character(0)
Warnings_ <- character(0)
#Identify the units of geography to iterate over
GeoCategory <- Specs_ls$RunBy
#Create the geographic index list
GeoIndex_ls <- createGeoIndexList(c(Specs_ls$Get, Specs_ls$Set), GeoCategory, Year)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
GeoIndex_ls[[Alias]] <-
createGeoIndexList(Call$Specs[[Alias]]$Get, GeoCategory, Year)
}
}
#Run module for each geographic area
Geo_ <- readFromTable(GeoCategory, GeoCategory, Year)
for (Geo in Geo_) {
#Get data from datastore for geographic area
L <-
getFromDatastore(Specs_ls, RunYear = Year, Geo = Geo, GeoIndex_ls = GeoIndex_ls)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
L[[Alias]] <-
getFromDatastore(Call$Specs[[Alias]], RunYear = Year, Geo = Geo, GeoIndex_ls = GeoIndex_ls[[Alias]])
}
}
#Run model for geographic area
if (exists("Call")) {
R <- Func(L, Call$Func)
} else {
R <- Func(L)
}
#Check for errors and warnings in module return list
#Save results in datastore if no errors from module
if (is.null(R$Errors)) {
#Check results
Check_ <-
checkModuleOutputs(
Data_ls = R,
ModuleSpec_ls = Specs_ls,
ModuleName = ModuleName)
ResultsCheck_ <- c(ResultsCheck_, Check_)
#Save results if SaveDatastore and no errors found
if (SaveDatastore & length(Check_) == 0) {
setInDatastore(R, Specs_ls, ModuleName, Year, Geo = Geo, GeoIndex_ls = GeoIndex_ls)
}
}
#Handle warnings
if (!is.null(R$Warnings)) {
writeLog(R$Warnings,Level="warn")
Msg <-
paste0("Module ", ModuleName, " has reported one or more warnings. ",
"Check log for details.")
warning(Msg)
}
#Handle errors
if (!is.null(R$Errors) & StopOnErr) {
writeLog(R$Errors,Level="error")
Msg <-
paste0("Module ", ModuleName, " has reported one or more errors. ",
"Check log for details.")
stop(Msg)
}
}
}
if (length(ResultsCheck_) != 0) {
Msg <-
paste0("Following are inconsistencies between module outputs and the ",
"module Set specifications:")
Msg <- paste(c(Msg, ResultsCheck_), collapse = "\n")
writeLog(Msg,Level="error")
rm(Msg)
stop(
paste0("The outputs for module ", ModuleName, " are inconsistent ",
"with one or more of the module's Set specifications. ",
"Check the log for details."))
}
}
writeLog("Module run successfully and outputs meet Set specifications.",
Level="warn")
if (SaveDatastore) {
writeLog("Module outputs saved to datastore.", Level="warn")
}
#Print success message if no errors found
Msg <- paste0("Congratulations. Module ", ModuleName, " passed all tests.")
writeLog(Msg, Level="warn")
rm(Msg)
#Return the specifications, data list, and functions list if DoRun is FALSE
} else {
#Load any modules identified by 'Call' spec if any
if (!is.null(Specs_ls$Call)) {
Call <- list(
Func = list(),
Specs = list()
)
for (Alias in names(Specs_ls$Call)) {
Function <- Specs_ls$Call[[Alias]]
#Called module function when only module is specified
if (length(unlist(strsplit(Function, "::"))) == 1) {
Pkg_df <- getModelState()$ModulesByPackage_df
Function <-
paste(Pkg_df$Package[Pkg_df$Module == Function], Function, sep = "::")
rm(Pkg_df)
}
#Called module specifications
Specs <- paste0(Function, "Specifications")
Call$Func[[Alias]] <- eval(parse(text = Function))
Call$Specs[[Alias]] <- processModuleSpecs(eval(parse(text = Specs)))
}
}
#Get data from datastore
if (RunFor == "AllYears") Year <- getYears()[1]
if (RunFor == "BaseYear") Year <- G$BaseYear
if (RunFor == "NotBaseYear") Year <- getYears()[!getYears() %in% G$BaseYear][1]
#Identify the units of geography to iterate over
GeoCategory <- Specs_ls$RunBy
#Create the geographic index list
GeoIndex_ls <- createGeoIndexList(Specs_ls$Get, GeoCategory, Year)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
GeoIndex_ls[[Alias]] <-
createGeoIndexList(Call$Specs[[Alias]]$Get, GeoCategory, Year)
}
}
#Get the data required
if (GeoCategory == "Region") {
L <- getFromDatastore(Specs_ls, RunYear = Year, Geo = NULL)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
L[[Alias]] <-
getFromDatastore(Call$Specs[[Alias]], RunYear = Year, Geo = NULL)
}
}
} else {
Geo_ <- readFromTable(GeoCategory, GeoCategory, Year)
#Check whether the TestGeoName is proper
if (!is.null(TestGeoName)) {
if (!(TestGeoName %in% Geo_)) {
stop(paste0(
"The 'TestGeoName' value - ", TestGeoName,
" - is not a recognized name for the ",
GeoCategory, " geography that this module is specified to be run ",
"for."
))
}
}
#If TestGeoName is NULL get the data for the first name in the list
if (is.null(TestGeoName)) TestGeoName <- Geo_[1]
#Get the data
L <- getFromDatastore(Specs_ls, RunYear = Year, Geo = TestGeoName, GeoIndex_ls = GeoIndex_ls)
if (exists("Call")) {
for (Alias in names(Call$Specs)) {
L[[Alias]] <-
getFromDatastore(Call$Specs[[Alias]], RunYear = Year, Geo = TestGeoName, GeoIndex_ls = GeoIndex_ls)
}
}
}
#Return the specifications, data list, and called functions
if (exists("Call")) {
return(list(Specs_ls = Specs_ls, L = L, M = Call$Func))
} else {
return(list(Specs_ls = Specs_ls, L = L))
}
}
}
#LOAD SAVED DATASTORE
#====================
#' Load saved datastore for testing
#'
#' \code{loadDatastore} a visioneval framework control function that copies an
#' existing saved datastore and writes information to run environment.
#'
#' This function copies a saved datastore to serve as the working datastore and
#' updates the global model state list with related geographic information.
#' This function enables scenario variants to be built from a constant set of
#' starting conditions.
#'
#' @param FileToLoad A string identifying the full path name to the saved
#' datastore. Path name can either be relative to the working directory or
#' absolute.
#' @param SaveDatastore A logical identifying whether an existing datastore
#' will be saved. It is renamed by appending the system time to the name. The
#' default value is TRUE.
#' @return TRUE if the datastore is loaded. It copies the saved datastore to
#' working directory as 'datastore.h5'. If a 'datastore.h5' file already
#' exists, it first renames that file as 'archive-datastore.h5'. The function
#' updates information in the model state file regarding the model geography
#' and the contents of the loaded datastore. If the stored file does not exist
#' an error is thrown.
#' @export
loadDatastore <- function(FileToLoad, SaveDatastore = TRUE) {
  # TODO: This function is apparently only used when testing a module
  # (and it is mighty invasive for that!)
  G <- getModelState()
  DatastoreName <- G$DatastoreName
  #If a working datastore exists and SaveDatastore is TRUE, archive it under a
  #timestamped name before it is overwritten
  #BUG FIX: scalar condition uses && (not elementwise &); the archive name is
  #built with tools::file_path_sans_ext()/file_ext() instead of splitting on
  #"." (which broke for paths containing extra dots, e.g. "./datastore.h5");
  #and ":" is replaced only in the timestamp, not the whole path (the original
  #would mangle a Windows drive letter such as "C:").
  if (file.exists(DatastoreName) && SaveDatastore) {
    # TODO: blow away the existing one if SaveDatastore is not TRUE
    TimeString <- gsub(" ", "_", as.character(Sys.time()))
    TimeString <- gsub(":", "-", TimeString)
    ArchiveDatastoreName <-
      paste0(tools::file_path_sans_ext(DatastoreName),
             "_", TimeString, ".",
             tools::file_ext(DatastoreName))
    file.copy(DatastoreName, ArchiveDatastoreName)
  }
  #Copy the saved datastore into place and rebuild the datastore index, or
  #log and stop if the file to load does not exist
  if (!file.exists(FileToLoad)) {
    Message <- paste("File", FileToLoad, "not found.")
    writeLog(Message, Level = "error")
    stop(Message)
  }
  file.copy(FileToLoad, DatastoreName)
  # Note: already checked geography consistency
  listDatastore() # Rebuild datastore index in the model state
  TRUE
}
#SIMULATE DATA STORE TRANSACTIONS
#================================
#' Create simulation of datastore transactions.
#'
#' \code{simDataTransactions} a visioneval framework control function that loads
#' all module specifications in order (by run year) and creates a simulated
#' listing of the data which is in the datastore and the requests of data from
#' the datastore and checks whether tables will be present to put datasets in
#' and that datasets will be present that data is to be retrieved from.
#'
#' This function creates a list of the datastore listings for the working
#' datastore and for all datastore references. The list includes a 'Global'
#' component, in which 'Global' references are simulated, components for each
#' model run year, in which 'Year' references are simulated, and if the base
#' year is not one of the run years, a base year component, in which base year
#' references are simulated. For each model run year the function steps through
#' a data frame of module calls as produced by 'parseModelScript', and loads and
#' processes the module specifications in order: adds 'NewInpTable' references,
#' adds 'Inp' dataset references, checks whether references to datasets
#' identified in 'Get' specifications are present, adds 'NewSetTable' references,
#' and adds 'Set' dataset references. The function compiles a vector of error
#' and warning messages. Error messages are made if: 1) a 'NewInpTable' or
#' 'NewSetTable' specification of a module would create a new table for a table
#' that already exists; 2) a dataset identified by a 'Get' specification would
#' not be present in the working datastore or any referenced datastores; 3) the
#' 'Get' specifications for a dataset would not be consistent with the
#' specifications for the dataset in the datastore. The function compiles
#' warnings if a 'Set' specification will cause existing data in the working
#' datastore to be overwritten. The function writes warning and error messages
#' to the log and stops program execution if there are any errors.
#'
#' @param AllSpecs_ls A list containing the processed specifications of all of
#' the modules run by model script in the order that the modules are called with
#' duplicated module calls removed. Information about each module call is a
#' component of the list in the order of the module calls. Each component is
#' composed of 3 components: 'ModuleName' contains the name of the module,
#' 'PackageName' contains the name of the package the module is in, and
#' 'Specs' contains the processed specifications of the module. The 'Get'
#' specification component includes the 'Get' specifications of all modules
#' that are called by the module. See \code{parseModuleCalls}.
#'
#' @return There is no return value. The function has the side effect of
#' writing messages to the log and stops program execution if there are any
#' errors.
#' @export
simDataTransactions <- function(AllSpecs_ls) {
  G <- getModelState()
  #Log at the start so any failure below is attributable to the simulation
  writeLog("Simulating model run.", Level = "warn")
  #Initialize errors and warnings vectors
  #--------------------------------------
  Errors_ <- character(0)
  #Append one or more messages to the error list for end-of-run reporting
  addError <- function(Msg) {
    Errors_ <<- c(Errors_, Msg)
  }
  Warnings_ <- character(0)
  #Append one or more messages to the warning list for end-of-run reporting
  addWarning <- function(Msg) {
    Warnings_ <<- c(Warnings_, Msg)
  }
  #Make a list to store the working datastore and all referenced datastores
  #------------------------------------------------------------------------
  RunYears_ <- getYears()
  BaseYear <- G$BaseYear
  #Simulate a base year component as well if the base year is not a run year
  if (BaseYear %in% RunYears_) {
    Years_ <- RunYears_
  } else {
    Years_ <- c(BaseYear, RunYears_)
  }
  Dstores_ls <- list(Global = list())
  for (Year in Years_) Dstores_ls[[Year]] <- list()
  #Add the working datastore inventory to the datastores list
  #----------------------------------------------------------
  Dstores_ls[["Global"]] <-
    G$Datastore[grep("Global", G$Datastore$group), ]
  #Identify the year groups present in the working datastore, i.e. the
  #top-level groups other than 'Global'
  getDatastoreYears <- function() {
    DstoreGroups_ls <- strsplit(G$Datastore$group, "/")
    ToKeep_ <- unlist(lapply(DstoreGroups_ls, function(x) length(x) == 2))
    DstoreGroups_ls <- DstoreGroups_ls[ToKeep_]
    DstoreGroups_ <- unique(unlist(lapply(DstoreGroups_ls, function(x) x[2])))
    DstoreGroups_[!(DstoreGroups_ %in% "Global")]
  }
  for (Year in getDatastoreYears()) {
    Dstores_ls[[Year]] <-
      G$Datastore[grep(Year, G$Datastore$group), ]
  }
  #Define function to add table reference to datastore inventory
  #-------------------------------------------------------------
  #Note: reads 'Year' from the enclosing run-year loop when GROUP is 'Year'
  addTableRef <- function(Dstore_df, TableSpec_, IsBaseYear, MakeTableType) {
    Group <- TableSpec_$GROUP
    if (Group == "Year") Group <- Year
    Table <- TableSpec_$TABLE
    #Check if table already exists
    HasTable <- checkTableExistence(Table, Group, Dstore_df)
    #If table exists then possible error, otherwise add reference to table
    if (HasTable) {
      #Is not an error if the group is 'Global' and year is not the base year
      #because not a conflict between tables created by different modules
      if (Group == "Global" && !IsBaseYear) {
        NewDstore_df <- Dstore_df
      #Otherwise is an error
      } else {
        #BUG FIX: the spec names are 'NewInpTable'/'NewSetTable' (the original
        #message said 'MakeInpTable'/'MakeSetTable' and lacked a space)
        if (MakeTableType == "Inp") {
          MakeTableSpecName <- "NewInpTable"
        } else {
          MakeTableSpecName <- "NewSetTable"
        }
        Msg <-
          paste0("Error: ", MakeTableSpecName, " specification for module '",
                 TableSpec_$MODULE, "' will create a table '", Table,
                 "' that already exists in the working datastore.")
        addError(Msg)
        NewDstore_df <- Dstore_df
      }
    } else {
      NewDstore_df <- data.frame(
        group = c(Dstore_df$group, paste0("/", Group)),
        name = c(Dstore_df$name, Table),
        groupname = c(Dstore_df$groupname, paste0(Group, "/", Table)),
        stringsAsFactors = FALSE
      )
      NewDstore_df$attributes <- c(Dstore_df$attributes, list(TableSpec_))
    }
    NewDstore_df
  }
  #Define function to add dataset reference to datastore inventory
  #---------------------------------------------------------------
  #Note: reads 'Year' and 'Module' from the enclosing loops
  addDatasetRef <- function(Dstore_df, DatasetSpec_, IsBaseYear) {
    Group <- DatasetSpec_$GROUP
    if (Group == "Year") Group <- Year
    Table <- DatasetSpec_$TABLE
    Name <- DatasetSpec_$NAME
    #Check if dataset already exists
    HasDataset <- checkDataset(Name, Table, Group, Dstore_df)
    #If dataset exists then warn and check consistency of specifications
    if (HasDataset) {
      #No need to check if the group is 'Global' and year is not the base year
      #because not a conflict between datasets created by different modules
      if (Group == "Global" && !IsBaseYear) {
        NewDstore_df <- Dstore_df
      #Otherwise issue a warning and check for consistent data specifications
      } else {
        #Add warning that existing dataset will be overwritten
        Msg <-
          paste0("Module '", Module, "' will overwrite dataset '", Name,
                 "' in table '", Table, "'.")
        addWarning(Msg)
        #Check attributes are consistent
        #BUG FIX: misplaced parenthesis; was length(x != 0)
        DstoreDatasetAttr_ls <-
          getDatasetAttr(Name, Table, Group, Dstore_df)
        AttrConsistency_ls <-
          checkSpecConsistency(DatasetSpec_, DstoreDatasetAttr_ls)
        if (length(AttrConsistency_ls$Errors) != 0) {
          addError(AttrConsistency_ls$Errors)
        }
        NewDstore_df <- Dstore_df
      }
    } else {
      NewDstore_df <- data.frame(
        group = c(Dstore_df$group, paste0("/", Group)),
        name = c(Dstore_df$name, Name),
        groupname = c(Dstore_df$groupname, paste0(Group, "/", Table, "/", Name)),
        stringsAsFactors = FALSE
      )
      NewDstore_df$attributes <-
        c(Dstore_df$attributes,
          list(DatasetSpec_[c("NAVALUE", "SIZE", "TYPE", "UNITS")]))
    }
    NewDstore_df
  }
  #Define function to check whether dataset is optional
  #----------------------------------------------------
  isOptional <- function(Spec_ls) {
    if (!is.null(Spec_ls$OPTIONAL)) {
      Spec_ls$OPTIONAL
    } else {
      FALSE
    }
  }
  #Iterate through run years and modules to simulate model run
  #-----------------------------------------------------------
  for (Year in RunYears_) {
    #Iterate through module calls
    for (i in seq_along(AllSpecs_ls)) {
      Module <- AllSpecs_ls[[i]]$ModuleName
      Package <- AllSpecs_ls[[i]]$PackageName
      RunFor <- AllSpecs_ls[[i]]$RunFor
      #Skip this module call if it is not run for this year
      #BUG FIX: the original compared Year to the literal string "BaseYear"
      #(never true for a real year value) and called break(), which skipped
      #all remaining module calls for the year rather than just this one
      if (RunFor == "BaseYear" && Year != BaseYear) next
      if (RunFor == "NotBaseYear" && Year == BaseYear) next
      ModuleSpecs_ls <-
        processModuleSpecs(getModuleSpecs(Module, Package))
      #Add 'Inp' table references to the working datastore inventory
      #-------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$NewInpTable)) {
        for (j in seq_along(ModuleSpecs_ls$NewInpTable)) {
          Spec_ls <- ModuleSpecs_ls$NewInpTable[[j]]
          Spec_ls$MODULE <- Module
          RefGroup <- if (Spec_ls[["GROUP"]] == "Global") "Global" else Year
          #Add the table reference and check for table add error
          Dstores_ls[[RefGroup]] <-
            addTableRef(Dstores_ls[[RefGroup]], Spec_ls, Year == BaseYear, "Inp")
          rm(Spec_ls, RefGroup)
        }
      }
      #Add 'Inp' dataset references to the working datastore inventory
      #---------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$Inp)) {
        for (j in seq_along(ModuleSpecs_ls$Inp)) {
          Spec_ls <- ModuleSpecs_ls$Inp[[j]]
          Spec_ls$MODULE <- Module
          RefGroup <- if (Spec_ls[["GROUP"]] == "Global") "Global" else Year
          #Add the dataset reference and check for dataset add error
          Dstores_ls[[RefGroup]] <-
            addDatasetRef(Dstores_ls[[RefGroup]], Spec_ls, Year == BaseYear)
          rm(Spec_ls, RefGroup)
        }
      }
      #Check for presence of 'Get' dataset references in datastore inventory
      #---------------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$Get)) {
        for (j in seq_along(ModuleSpecs_ls$Get)) {
          Spec_ls <- ModuleSpecs_ls$Get[[j]]
          Spec_ls$MODULE <- Module
          Group <- Spec_ls[["GROUP"]]
          Table <- Spec_ls[["TABLE"]]
          Name <- Spec_ls[["NAME"]]
          #Resolve the group keyword to the concrete group name
          if (Group == "BaseYear") Group <- G$BaseYear
          if (Group == "Year") Group <- Year
          Dstore_df <- Dstores_ls[[Group]]
          if (checkDataset(Name, Table, Group, Dstore_df)) {
            #Dataset will be present: check that the 'Get' specification is
            #consistent with the stored dataset attributes
            DstoreAttr_ <- getDatasetAttr(Name, Table, Group, Dstore_df)
            AttrConsistency_ls <-
              checkSpecConsistency(Spec_ls, DstoreAttr_)
            #BUG FIX: misplaced parenthesis; was length(x != 0)
            if (length(AttrConsistency_ls$Errors) != 0) {
              addError(AttrConsistency_ls$Errors)
            }
            rm(DstoreAttr_, AttrConsistency_ls)
          } else if (!isOptional(Spec_ls)) {
            #BUG FIX: the original called next() when the dataset was absent,
            #which made this missing-dataset error unreachable, and then used
            #a leftover stop("CheckError") instead of accumulating the error
            #so all problems are reported together at the end of the run
            Msg <-
              paste0("Module '", Module,
                     "' has a 'Get' specification for dataset '", Name,
                     "' in table '", Table,
                     "' that will not be present in the working datastore or ",
                     "any referenced datastores when it is needed.")
            addError(Msg)
          }
          rm(Dstore_df)
        }
      }
      #Add 'Set' table references to the working datastore inventory
      #-------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$NewSetTable)) {
        for (j in seq_along(ModuleSpecs_ls$NewSetTable)) {
          Spec_ls <- ModuleSpecs_ls$NewSetTable[[j]]
          Spec_ls$MODULE <- Module
          RefGroup <- if (Spec_ls[["GROUP"]] == "Global") "Global" else Year
          #Add the table reference and check for table add error
          Dstores_ls[[RefGroup]] <-
            addTableRef(Dstores_ls[[RefGroup]], Spec_ls, Year == BaseYear, "Set")
          rm(Spec_ls, RefGroup)
        }
      }
      #Add 'Set' dataset references to the working datastore inventory
      #---------------------------------------------------------------
      if (!is.null(ModuleSpecs_ls$Set)) {
        for (j in seq_along(ModuleSpecs_ls$Set)) {
          Spec_ls <- ModuleSpecs_ls$Set[[j]]
          Spec_ls$MODULE <- Module
          RefGroup <- if (Spec_ls[["GROUP"]] == "Global") "Global" else Year
          Dstores_ls[[RefGroup]] <-
            addDatasetRef(Dstores_ls[[RefGroup]], Spec_ls, Year == BaseYear)
          rm(Spec_ls, RefGroup)
        }
      }
      rm(Module, Package, ModuleSpecs_ls)
    } #End for loop through module calls
  } #End for loop through years
  #Report accumulated warnings and errors
  #--------------------------------------
  if (length(Warnings_) != 0) {
    Msg <-
      paste0("Model run simulation had one or more warnings. ",
             "Datasets will be overwritten when the model runs. ",
             "Check that this is what is intended.")
    writeLog(Msg, Level = "warn")
    writeLog(Warnings_, Level = "warn")
  }
  if (length(Errors_) == 0) {
    writeLog("Model run simulation completed without identifying any errors.",
             Level = "warn")
  } else {
    Msg <-
      paste0("Model run simulation has found one or more errors. ",
             "The following errors must be corrected before the model may be run.")
    writeLog(Msg, Level = "error")
    writeLog(Errors_, Level = "error")
    stop(Msg, " Check log for details.")
  }
}
|
#Read the name file
#The split_name function splits a file name into its component parts
#-Argument: the file name
#-Return: character vector with the parts of the file name
split_name <- function(filename)
{
  # Break the file name on underscores, then strip punctuation and the
  # "txt" extension remnant from every piece
  parts <- unlist(strsplit(filename, split = "_", fixed = TRUE))
  parts <- gsub("[[:punct:]]", "", parts)
  parts <- gsub("txt", "", parts)
  return(parts)
}
#join_files merges the PDF files into a single one.
#-Argument: name_method, the name of the clustering method
#         : num_cluter, the number of clusters
#The working directory must be the one containing the other folders.
join_files <- function(name_method, num_cluter)
{
  #Build the combined PDF outputs for each data category
  graph_all_station_Curve_TX_TM(name_method)  # temperature curves (TX and TM)
  graph_all_station(name_method)              # ombrothermic charts
  graph_all_texture_clus(name_method)         # soil texture
  graph_all_elevation_complete(name_method)   # elevation
  soil_texture(name_method)                   # soil texture spreadsheet
  #Create one output folder per cluster under Data_Final
  mainDir <- file.path(getwd(), "Data_Final")
  for (i in seq_len(num_cluter))
  {
    dir.create(file.path(mainDir, paste0("Cluster_", i)), showWarnings = FALSE)
  }
  #List the temperature-curve files for each cluster.
  #BUG FIX: the original used pattern = paste0(i, ), whose empty trailing
  #argument is a runtime error ("argument 2 is empty"); as.character(i)
  #preserves the apparent intent of matching files containing the cluster
  #number. NOTE(review): the result is never used -- presumably these files
  #were meant to be merged or copied into the cluster folders; confirm.
  for (i in seq_len(num_cluter))
  {
    list_files_Curve <- list.files("./Temperature_curve", pattern = as.character(i))
  }
}
| /join_files.R | no_license | j-river1/My_First_RASTER | R | false | false | 1,239 | r | #Read the name file
#The split_name function works split name file
#-Argument: name file
#-Return: split of name file
split_name <- function(filename)
{
  # Break the file name on underscores, then strip punctuation and the
  # "txt" extension remnant from every piece
  parts <- unlist(strsplit(filename, split = "_", fixed = TRUE))
  parts <- gsub("[[:punct:]]", "", parts)
  parts <- gsub("txt", "", parts)
  return(parts)
}
#join_files works merging files pdf into only one.
#-Argument: name_method name of method of clustering
# : num_cluter number of cluster
#the workspace must be where are the others folders
join_files <- function(name_method, num_cluter)
{
  #Build the combined PDF outputs for each data category
  graph_all_station_Curve_TX_TM(name_method)  # temperature curves (TX and TM)
  graph_all_station(name_method)              # ombrothermic charts
  graph_all_texture_clus(name_method)         # soil texture
  graph_all_elevation_complete(name_method)   # elevation
  soil_texture(name_method)                   # soil texture spreadsheet
  #Create one output folder per cluster under Data_Final
  mainDir <- file.path(getwd(), "Data_Final")
  for (i in seq_len(num_cluter))
  {
    dir.create(file.path(mainDir, paste0("Cluster_", i)), showWarnings = FALSE)
  }
  #List the temperature-curve files for each cluster.
  #BUG FIX: the original used pattern = paste0(i, ), whose empty trailing
  #argument is a runtime error ("argument 2 is empty"); as.character(i)
  #preserves the apparent intent of matching files containing the cluster
  #number. NOTE(review): the result is never used -- presumably these files
  #were meant to be merged or copied into the cluster folders; confirm.
  for (i in seq_len(num_cluter))
  {
    list_files_Curve <- list.files("./Temperature_curve", pattern = as.character(i))
  }
}
|
####################################
### PESCARTE
### GIARS - UFMG
### Script: Neylson Crepalde
### Purpose: process the unified database
####################################
library(readr)
library(dplyr)
library(descr)
library(igraph)
library(reshape2)
library(magrittr)
dados <- read_csv('pescarte_nova.csv')
####################################
# Checking data integrity
names(dados)
# Columns holding the name indications (one per interview round)
rodadas <- c(6, 12, 18, 24, 28, 33, 39, 44, 49, 53, 58, 62, 66, 70, 75, 79)
names(dados)[rodadas]
# Collect every indication into a single character vector.
# Vectorized replacement for the original nested loop that grew the vector
# with c() (O(n^2)); row-major order and the "NA"-string coercion done by
# paste() on NA cells are preserved.
indicacoes <- unlist(lapply(seq_len(nrow(dados)), function(row) {
  vapply(rodadas, function(col) paste(dados[row, col]), character(1))
}))
tabela <- as.matrix(freq(indicacoes, plot = FALSE))
tabela
#View(tabela)
## NOTE: this did not work as hoped
# Frequency table by community
freq(dados$comunidade, plot = FALSE)
comunidades <- levels(as.factor(dados$comunidade))
length(comunidades)
freq(indicacoes[dados$comunidade == comunidades[4]], plot = FALSE)
############################################
# TODO: trace the names in the matrices of respondents and relatives...
############################################
#########################################################
# Build the edge list (respondent -> indicated name) using only the name
el <- dados %>% select(MUNICIPIO, Comunidade, `Respondente Principal`, all_of(rodadas))
el <- melt(el, id.vars = c('MUNICIPIO', 'Comunidade', 'Respondente Principal'))
el <- el %>% filter(!is.na(value)) %>% filter(!is.na(`Respondente Principal`))
el %>% arrange(`Respondente Principal`) %>% View
mat <- el %>% select(`Respondente Principal`, value) %>% as.matrix
which(is.na(mat))  # sanity check: the edge list should contain no NAs
g <- graph_from_edgelist(mat, directed = TRUE)
####################
g
plot(g, vertex.size = 5, vertex.label = NA, edge.arrow.size = .2)
# Extract the main (weak) component
clu <- components(g, "weak")
V(g)$cluster <- clu$membership
strong <- induced_subgraph(g, V(g)[V(g)$cluster == 1])
strong
plot(strong, vertex.size = 5, vertex.label = NA, edge.arrow.size = .2)
| /02processa_base_unica.R | no_license | neylsoncrepalde/giars_consultoria | R | false | false | 2,035 | r | ####################################
### PESCARTE
### GIARS - UFMG
### Script: Neylson Crepalde
### Purpose: process the unified database
####################################
library(readr)
library(dplyr)
library(descr)
library(igraph)
library(reshape2)
library(magrittr)
dados <- read_csv('pescarte_nova.csv')
####################################
# Checking data integrity
names(dados)
# Columns holding the name indications (one per interview round)
rodadas <- c(6, 12, 18, 24, 28, 33, 39, 44, 49, 53, 58, 62, 66, 70, 75, 79)
names(dados)[rodadas]
# Collect every indication into a single character vector.
# Vectorized replacement for the original nested loop that grew the vector
# with c() (O(n^2)); row-major order and the "NA"-string coercion done by
# paste() on NA cells are preserved.
indicacoes <- unlist(lapply(seq_len(nrow(dados)), function(row) {
  vapply(rodadas, function(col) paste(dados[row, col]), character(1))
}))
tabela <- as.matrix(freq(indicacoes, plot = FALSE))
tabela
#View(tabela)
## NOTE: this did not work as hoped
# Frequency table by community
freq(dados$comunidade, plot = FALSE)
comunidades <- levels(as.factor(dados$comunidade))
length(comunidades)
freq(indicacoes[dados$comunidade == comunidades[4]], plot = FALSE)
############################################
# TODO: trace the names in the matrices of respondents and relatives...
############################################
#########################################################
# Build the edge list (respondent -> indicated name) using only the name
el <- dados %>% select(MUNICIPIO, Comunidade, `Respondente Principal`, all_of(rodadas))
el <- melt(el, id.vars = c('MUNICIPIO', 'Comunidade', 'Respondente Principal'))
el <- el %>% filter(!is.na(value)) %>% filter(!is.na(`Respondente Principal`))
el %>% arrange(`Respondente Principal`) %>% View
mat <- el %>% select(`Respondente Principal`, value) %>% as.matrix
which(is.na(mat))  # sanity check: the edge list should contain no NAs
g <- graph_from_edgelist(mat, directed = TRUE)
####################
g
plot(g, vertex.size = 5, vertex.label = NA, edge.arrow.size = .2)
# Extract the main (weak) component
clu <- components(g, "weak")
V(g)$cluster <- clu$membership
strong <- induced_subgraph(g, V(g)[V(g)$cluster == 1])
strong
plot(strong, vertex.size = 5, vertex.label = NA, edge.arrow.size = .2)
|
#' Normal scale bandwidth using ks::Hns function.
#'
#' A simple wrapper for the ks::Hns function.
#'
#' @param x 2d matrix of data values.
#' @return A numeric vector of estimated x and y bandwidths. Must subset your data if you wish to obtain group specific bandwidths.
#' @author Shannon E. Albeke, Wyoming Geographic Information Science Center, University of Wyoming
#' @export
#' @examples
#' data("rodents")
#' # Subset the data for a single species
#' spec1<- rodents[rodents$Species == "Species1", ]
#' # Calculate the bandwidth
#' bw_hns(as.matrix(spec1[, c("Ave_C", "Ave_N")]))
bw_hns <- function(x) {
  # Consolidated validation: the three original sequential checks raised the
  # same message, so a single guard is equivalent and clearer.
  if (!(inherits(x, "matrix") && is.numeric(x) && ncol(x) == 2))
    stop("x must be a 2-d numeric matrix")
  # ks::Hns() returns the full 2x2 bandwidth matrix; elements 1 and 4 are its
  # diagonal, i.e. the x and y bandwidths.
  return(ks::Hns(x)[c(1, 4)])
}
| /R/bw_hns.R | no_license | salbeke/rKIN | R | false | false | 844 | r | #' Normal scale bandwidth using ks::Hns function.
#'
#' A simple wrapper for the ks::Hns function.
#'
#' @param x 2d matrix of data values.
#' @return A numeric vector of estimated x and y bandwidths. Must subset your data if you wish to obtain group specific bandwidths.
#' @author Shannon E. Albeke, Wyoming Geographic Information Science Center, University of Wyoming
#' @export
#' @examples
#' data("rodents")
#' # Subset the data for a single species
#' spec1<- rodents[rodents$Species == "Species1", ]
#' # Calculate the bandwidth
#' bw_hns(as.matrix(spec1[, c("Ave_C", "Ave_N")]))
bw_hns<- function(x){
if(!inherits(x, "matrix"))
stop("x must be a 2-d numeric matrix")
if(!is.numeric(x))
stop("x must be a 2-d numeric matrix")
if(dim(x)[2] != 2)
stop("x must be a 2-d numeric matrix")
return(ks::Hns(x)[c(1, 4)])
}
|
#Jonas Wydler; dendrogram for paper; 23.05.2021
#Careful about the FG names, in order to have a sequential order on the famd+ward plot
#(figure 1 in the manuscript) we used the order assinged here and changed it everywhere else in the text
#according to scheme described in FG_name_change.txt
#
#-----------------------------------------------------------------
wd_trait_dat <- ("wd_data")
Sys.setenv(LANG = "en")
#-----------------------------------------------------------------
library('dendextend')
library("tidyverse")
library("FactoMineR")
library("missMDA")
library("DendSer ")
library("svglite")
library("RColorBrewer")
library("FD")
#-----------------------------------------------------------------
#Read in trait data
setwd(wd_trait_dat)
fct <- read.csv("table_funct_traits_copepods_v2.csv", h = T, sep = ";", dec = ",")
fct <- fct[,c(3:20)]
names <- colnames(fct)[c(7,8,9,10,15)] ; names
fct$na_count <- apply(fct[,names], 1, function(x) sum(is.na(x)))
#-----------------------------------------------------------------
# Drop species with missing body size info
fct <- fct[!is.na(fct$max_body_length),]
#-----------------------------------------------------------------
# Drop species with more than two missing traits
fct <- fct[fct$na_count < 2,]
#-----------------------------------------------------------------
#saving as factors for FAMD
fct$Spawning <- as.factor(fct$Spawning)
fct$Myelination <- as.factor(fct$Myelination)
fct$Omnivore <- as.factor(fct$Omnivore)
fct$Carnivore <- as.factor(fct$Carnivore)
fct$Herbivore <- as.factor(fct$Herbivore)
fct$Detritivore <- as.factor(fct$Detritivore)
fct$Current <- as.factor(fct$Current)
fct$Cruise <- as.factor(fct$Cruise)
fct$Ambush <- as.factor(fct$Ambush)
fct$Trophism <- as.factor(fct$Trophism)
fct$Feeding_mode <- as.factor(fct$Feeding_mode)
#-----------------------------------------------------------------
#FAMD
compfamd <- imputeFAMD(fct[,c(7:9,11:14,16:18)], npc = 4)
FAMD <- FAMD(fct[,c(7:9,11:14,16:18)], tab.disj = compfamd$tab.disj, graph = F)
famd <- data.frame(FAMD$ind$coord[,1:4])
famd_sp <- data.frame(FAMD$ind$coord[,1:4])
colnames(famd_sp) <- c("FAMD1","FAMD2","FAMD3","FAMD4")
famd_all_temp <- rbind(famd_sp)
famd_all_sp <- famd_all_temp[ order(row.names(famd_all_temp)), ]
famd_dist <- dist(famd_all_temp, method = "euclidean")
#-----------------------------------------------------------------
#Clustering
fit_famd_ward <- hclust(famd_dist, method = "ward.D2")
kk <- 11
groups <- cutree(fit_famd_ward, k = kk)
fct$FG <- groups
colnames(fct)
trait_dat <- fct[c("Species", "n", "max_body_length", "Myelination", "Spawning", "Trophism",
"Omnivore", "Carnivore", "Herbivore", "Detritivore", "Feeding_mode",
"Current", "Cruise", "Ambush", "FG")]
colnames(trait_dat)[1] <- "species"
colnames(trait_dat)[3] <- "body_size"
#-----------------------------------------------------------------
###plot dendrogram famd ward
dend <- fit_famd_ward %>% as.dendrogram
fit_famd_ward %>% color_branches(k = 11) %>% set("branches_lwd", 2.5) %>% plot()
# NOTE(review): wd_plots is not defined anywhere in this script -- it must be
# set in the workspace beforehand; confirm before sourcing.
setwd(wd_plots)
colors <- c('#8b4513', '#008000', '#4682b4', '#4b0082',
'#ff0000', '#ffd700', '#00ff00', '#00ffff',
'#0000ff', '#ff1493', '#ffe4b5')
ggsave(plot = plot(dend %>% color_branches(k = 11, col = colors, groupLabels = T) %>% set("branches_lwd", 2.5), horiz = TRUE), filename = paste0("dend_famd_ward.svg"),width = 6, height = 12, dpi = 300)
# NOTE(review): base plot() returns NULL, so this assigns NULL to a variable
# named 'plot', shadowing the plot function in this environment.
plot = plot(dend %>% color_branches(k = 11, col = colors, groupLabels = T) %>% set("branches_lwd", 2.5), horiz = TRUE)
# NOTE(review): trait_dat has no 'cell_id' column (its columns are species,
# body_size, Myelination, ...), so this group_by likely errors -- verify.
table_traits <- as.data.frame(trait_dat %>% group_by(cell_id) %>% summarize())
table_traits_subset <- subset(trait_dat, FG == 6)
table(table_traits_subset$Feeding_mode)
length((table_traits_subset$Feeding_mode))
#-----------------------------------------------------------------
###plot dendrogram famd average
fit_famd_avg <- hclust(famd_dist, method = "average")
kk <- 11
# NOTE(review): this cuts the *ward* tree although this section is about the
# average-linkage fit -- likely a copy-paste bug; presumably fit_famd_avg was
# intended. Confirm before relying on these group assignments.
groups <- cutree(fit_famd_ward, k = kk)
fct$FG <- groups
colnames(fct)
trait_dat <- fct[c("Species", "n", "max_body_length", "Myelination", "Spawning", "Trophism",
"Omnivore", "Carnivore", "Herbivore", "Detritivore", "Feeding_mode",
"Current", "Cruise", "Ambush", "FG")]
colnames(trait_dat)[1] <- "species"
colnames(trait_dat)[3] <- "body_size"
#-----------------------------------------------------------------
###plot dendrogram famd ward
#careful not directly comparable as the groups do not necessarily align with dendrogramgs derived from other methods
dend2 <- fit_famd_avg %>% as.dendrogram
fit_famd_avg %>% color_branches(k = 11) %>% set("branches_lwd", 2.5) %>% plot()
setwd(wd_plots)
colors <- c('#8b4513', '#008000', '#4682b4', '#4b0082',
'#ff0000', '#ffd700', '#00ff00', '#00ffff',
'#0000ff', '#ff1493', '#ffe4b5')
ggsave(plot = plot(dend2 %>% set("branches_lwd", 2.5), horiz = TRUE), filename = paste0("dend_famd_avg.png"),width = 6, height = 12, dpi = 300)
plot = plot(dend2 %>% set("branches_lwd", 2.5), horiz = TRUE)
#-----------------------------------------------------------------
###plot dendrograms for gower distance
#careful not directly comparable as the groups do not necessarily align with dendrogramgs derived from other methods
# Compute Gower's distance matrix, with all species having 0 or just 1 NA and then just 0 NA
gow <- gowdis(fct[,c(7:9,11:14,16:18)])# maybe we dont need to check for another na
fit_gow_ward <- hclust(gow, method = "ward.D2")
dend3 <- fit_gow_ward %>% as.dendrogram
plot = plot(dend3 %>% set("branches_lwd", 2.5), horiz = TRUE)
ggsave(plot = plot(dend3 %>% set("branches_lwd", 2.5), horiz = TRUE), filename = paste0("dend_gow_ward.png"),width = 6, height = 12, dpi = 300)
# Compute Gower's distance matrix, with all species having 0 or just 1 NA and then just 0 NA
gow <- gowdis(fct[,c(7:9,11:14,16:18)])# maybe we dont need to check for another na
fit_gow_avg <- hclust(gow, method = "average")
dend4 <- fit_gow_avg %>% as.dendrogram
plot = plot(dend4 %>% set("branches_lwd", 2.5), horiz = TRUE)
ggsave(plot = plot(dend4 %>% set("branches_lwd", 2.5), horiz = TRUE), filename = paste0("dend_gow_avg.png"),width = 6, height = 12, dpi = 300)
| /Manuscript_figure_1/functional_dendrogram_plot.R | no_license | jonas-wydler/ma_jonas_wydler_2021 | R | false | false | 6,219 | r | #Jonas Wydler; dendrogram for paper; 23.05.2021
#Careful about the FG names, in order to have a sequential order on the famd+ward plot
#(figure 1 in the manuscript) we used the order assinged here and changed it everywhere else in the text
#according to scheme described in FG_name_change.txt
#
#-----------------------------------------------------------------
wd_trait_dat <- ("wd_data")
Sys.setenv(LANG = "en")
#-----------------------------------------------------------------
library('dendextend')
library("tidyverse")
library("FactoMineR")
library("missMDA")
library("DendSer ")
library("svglite")
library("RColorBrewer")
library("FD")
#-----------------------------------------------------------------
#Read in trait data
setwd(wd_trait_dat)
fct <- read.csv("table_funct_traits_copepods_v2.csv", h = T, sep = ";", dec = ",")
fct <- fct[,c(3:20)]
names <- colnames(fct)[c(7,8,9,10,15)] ; names
fct$na_count <- apply(fct[,names], 1, function(x) sum(is.na(x)))
#-----------------------------------------------------------------
# Drop species with missing body size info
fct <- fct[!is.na(fct$max_body_length),]
#-----------------------------------------------------------------
# Drop species with more than two missing traits
fct <- fct[fct$na_count < 2,]
#-----------------------------------------------------------------
#saving as factors for FAMD
fct$Spawning <- as.factor(fct$Spawning)
fct$Myelination <- as.factor(fct$Myelination)
fct$Omnivore <- as.factor(fct$Omnivore)
fct$Carnivore <- as.factor(fct$Carnivore)
fct$Herbivore <- as.factor(fct$Herbivore)
fct$Detritivore <- as.factor(fct$Detritivore)
fct$Current <- as.factor(fct$Current)
fct$Cruise <- as.factor(fct$Cruise)
fct$Ambush <- as.factor(fct$Ambush)
fct$Trophism <- as.factor(fct$Trophism)
fct$Feeding_mode <- as.factor(fct$Feeding_mode)
#-----------------------------------------------------------------
#FAMD
compfamd <- imputeFAMD(fct[,c(7:9,11:14,16:18)], npc = 4)
FAMD <- FAMD(fct[,c(7:9,11:14,16:18)], tab.disj = compfamd$tab.disj, graph = F)
famd <- data.frame(FAMD$ind$coord[,1:4])
famd_sp <- data.frame(FAMD$ind$coord[,1:4])
colnames(famd_sp) <- c("FAMD1","FAMD2","FAMD3","FAMD4")
famd_all_temp <- rbind(famd_sp)
famd_all_sp <- famd_all_temp[ order(row.names(famd_all_temp)), ]
famd_dist <- dist(famd_all_temp, method = "euclidean")
#-----------------------------------------------------------------
#Clustering
fit_famd_ward <- hclust(famd_dist, method = "ward.D2")
kk <- 11
groups <- cutree(fit_famd_ward, k = kk)
fct$FG <- groups
colnames(fct)
trait_dat <- fct[c("Species", "n", "max_body_length", "Myelination", "Spawning", "Trophism",
"Omnivore", "Carnivore", "Herbivore", "Detritivore", "Feeding_mode",
"Current", "Cruise", "Ambush", "FG")]
colnames(trait_dat)[1] <- "species"
colnames(trait_dat)[3] <- "body_size"
#-----------------------------------------------------------------
###plot dendrogram famd ward
dend <- fit_famd_ward %>% as.dendrogram
fit_famd_ward %>% color_branches(k = 11) %>% set("branches_lwd", 2.5) %>% plot()
setwd(wd_plots)
colors <- c('#8b4513', '#008000', '#4682b4', '#4b0082',
'#ff0000', '#ffd700', '#00ff00', '#00ffff',
'#0000ff', '#ff1493', '#ffe4b5')
ggsave(plot = plot(dend %>% color_branches(k = 11, col = colors, groupLabels = T) %>% set("branches_lwd", 2.5), horiz = TRUE), filename = paste0("dend_famd_ward.svg"),width = 6, height = 12, dpi = 300)
plot = plot(dend %>% color_branches(k = 11, col = colors, groupLabels = T) %>% set("branches_lwd", 2.5), horiz = TRUE)
table_traits <- as.data.frame(trait_dat %>% group_by(cell_id) %>% summarize())
table_traits_subset <- subset(trait_dat, FG == 6)
table(table_traits_subset$Feeding_mode)
length((table_traits_subset$Feeding_mode))
#-----------------------------------------------------------------
###plot dendrogram famd average
fit_famd_avg <- hclust(famd_dist, method = "average")
kk <- 11
groups <- cutree(fit_famd_ward, k = kk)
fct$FG <- groups
colnames(fct)
trait_dat <- fct[c("Species", "n", "max_body_length", "Myelination", "Spawning", "Trophism",
"Omnivore", "Carnivore", "Herbivore", "Detritivore", "Feeding_mode",
"Current", "Cruise", "Ambush", "FG")]
colnames(trait_dat)[1] <- "species"
colnames(trait_dat)[3] <- "body_size"
#-----------------------------------------------------------------
###plot dendrogram famd ward
#careful not directly comparable as the groups do not necessarily align with dendrogramgs derived from other methods
dend2 <- fit_famd_avg %>% as.dendrogram
fit_famd_avg %>% color_branches(k = 11) %>% set("branches_lwd", 2.5) %>% plot()
setwd(wd_plots)
colors <- c('#8b4513', '#008000', '#4682b4', '#4b0082',
'#ff0000', '#ffd700', '#00ff00', '#00ffff',
'#0000ff', '#ff1493', '#ffe4b5')
ggsave(plot = plot(dend2 %>% set("branches_lwd", 2.5), horiz = TRUE), filename = paste0("dend_famd_avg.png"),width = 6, height = 12, dpi = 300)
plot = plot(dend2 %>% set("branches_lwd", 2.5), horiz = TRUE)
#-----------------------------------------------------------------
###plot dendrograms for gower distance
#careful not directly comparable as the groups do not necessarily align with dendrogramgs derived from other methods
# Compute Gower's distance matrix, with all species having 0 or just 1 NA and then just 0 NA
gow <- gowdis(fct[,c(7:9,11:14,16:18)])# maybe we dont need to check for another na
fit_gow_ward <- hclust(gow, method = "ward.D2")
dend3 <- fit_gow_ward %>% as.dendrogram
plot = plot(dend3 %>% set("branches_lwd", 2.5), horiz = TRUE)
ggsave(plot = plot(dend3 %>% set("branches_lwd", 2.5), horiz = TRUE), filename = paste0("dend_gow_ward.png"),width = 6, height = 12, dpi = 300)
# Compute Gower's distance matrix, with all species having 0 or just 1 NA and then just 0 NA
gow <- gowdis(fct[,c(7:9,11:14,16:18)])# maybe we dont need to check for another na
fit_gow_avg <- hclust(gow, method = "average")
dend4 <- fit_gow_avg %>% as.dendrogram
plot = plot(dend4 %>% set("branches_lwd", 2.5), horiz = TRUE)
ggsave(plot = plot(dend4 %>% set("branches_lwd", 2.5), horiz = TRUE), filename = paste0("dend_gow_avg.png"),width = 6, height = 12, dpi = 300)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hr.r
\name{print-hr}
\alias{print-hr}
\alias{print.humanreadable}
\title{Print \code{humanreadable} objects}
\usage{
\method{print}{humanreadable}(x, ...)
}
\arguments{
\item{x}{\code{humanreadable} object}
\item{...}{unused}
}
\description{
Printing for \code{hr()}
}
| /man/print-hr.Rd | permissive | shinra-dev/memuse | R | false | true | 348 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hr.r
\name{print-hr}
\alias{print-hr}
\alias{print.humanreadable}
\title{Print \code{humanreadable} objects}
\usage{
\method{print}{humanreadable}(x, ...)
}
\arguments{
\item{x}{\code{humanreadable} object}
\item{...}{unused}
}
\description{
Printing for \code{hr()}
}
|
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236989L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) | /IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609861285-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 729 | r | testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236989L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_max.R
\name{cequals}
\alias{cequals}
\title{Convenient equals operator}
\usage{
cequals(x, y)
}
\arguments{
\item{x}{numeric vector or scalar}
\item{y}{numeric scalar}
}
\value{
logical vector
}
\description{
Performs x == y, but returns FALSE rather than NA for NA elements of x.
}
\examples{
x <- c(A=1,B=3,C=2,D=3, E=NA)
y <- 3
cequals(x, y)
}
| /autonomics.support/man/cequals.Rd | no_license | bhagwataditya/autonomics0 | R | false | true | 433 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_max.R
\name{cequals}
\alias{cequals}
\title{Convenient equals operator}
\usage{
cequals(x, y)
}
\arguments{
\item{x}{numeric vector or scalar}
\item{y}{numeric scalar}
}
\value{
logical vector
}
\description{
Performs x == y, but returns FALSE rather than NA for NA elements of x.
}
\examples{
x <- c(A=1,B=3,C=2,D=3, E=NA)
y <- 3
equals(x, y)
}
|
#' Look up the gene symbols annotated to a disease.
#'
#' @param ab the disease name to query
#' @param file a table with columns diseaseName and geneSymbol
#' @export
disease2symbol <- function(ab, file) {
  # Keep only the rows whose diseaseName matches the query,
  # then print the associated gene symbols.
  matched <- file[diseaseName == ab, ]
  print(matched$geneSymbol)
}
| /R/disease2symbol1.R | no_license | pwj6/disease2symbol | R | false | false | 108 | r | #' @export
disease2symbol<-function(ab,file)
{
c <- file[diseaseName==ab,]#查询
print(c$geneSymbol)
}
|
library(ethnobotanyR)
### Name: RIs
### Title: #Relative Importance Index (RI)
### Aliases: RIs
### Keywords: ethnobotany, importance quantitative relative
### ** Examples
RIs(ethnobotanydata)
| /data/genthat_extracted_code/ethnobotanyR/examples/RIs.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 202 | r | library(ethnobotanyR)
### Name: RIs
### Title: #Relative Importance Index (RI)
### Aliases: RIs
### Keywords: ethnobotany, importance quantitative relative
### ** Examples
RIs(ethnobotanydata)
|
# Get current marks from SecDb using the "Forward Curve" property
#
# curveName <- "Commod PWX 5x16 Physical"
# contractDate <- seq(as.Date("2009-07-01"), by="1 month", length.out=12)
#
#
# Builds one row per (curveName, contractDate) pair, translates each pair into
# a SecDb contract string, and fetches its "Forward Curve" mark.
# - expand=TRUE crosses every curve with every date (expand.grid);
#   expand=FALSE pairs them elementwise via data.frame recycling.
# - format.dateMYY() and secdb.getValueType() are defined elsewhere in this
#   project; presumably format.dateMYY(date, -1) yields the Reuters month/year
#   code spliced after the curve's first word -- TODO confirm.
secdb.getCurrentMarks <- function( curveName, contractDate, expand=FALSE )
{
if (expand){
df <- expand.grid( toupper(curveName), contractDate)
names(df) <- c("curveName", "contractDate")
} else {
df <- data.frame(curveName=toupper(curveName), contractDate=contractDate)
}
# Reuters-style code for each delivery month
reutersCode <- format.dateMYY(df$contractDate, -1)
# Strip the "COMMOD " prefix, then rebuild the contract name as
# <first word><month code> <rest of name>
contract <- gsub("COMMOD ", "", df$curveName)
splitNames <- strsplit(contract, " ")
prefix <- sapply(splitNames, "[", 1)
suffix <- lapply( splitNames, "[", -1 )
suffixStrings <- sapply( suffix, paste, collapse = " " )
contract <- paste(prefix, reutersCode, " ", suffixStrings, sep="")
df$value <- NA
# Query SecDb once per contract; keep the last value of the returned curve.
for (i in 1:nrow(df)){
aux <- secdb.getValueType( contract[i], "Forward Curve" )
df$value[i] <- aux$value[length(aux$value)] # should I use the first element?
}
return(df)
}
| /R Extension/RMG/Utilities/Interfaces/PM/R/secdb.getCurrentMarks.R | no_license | uhasan1/QLExtension-backup | R | false | false | 1,041 | r | # Get current marks from SecDb using the "Forward Curve" property
#
# curveName <- "Commod PWX 5x16 Physical"
# contractDate <- seq(as.Date("2009-07-01"), by="1 month", length.out=12)
#
#
secdb.getCurrentMarks <- function( curveName, contractDate, expand=FALSE )
{
if (expand){
df <- expand.grid( toupper(curveName), contractDate)
names(df) <- c("curveName", "contractDate")
} else {
df <- data.frame(curveName=toupper(curveName), contractDate=contractDate)
}
reutersCode <- format.dateMYY(df$contractDate, -1)
contract <- gsub("COMMOD ", "", df$curveName)
splitNames <- strsplit(contract, " ")
prefix <- sapply(splitNames, "[", 1)
suffix <- lapply( splitNames, "[", -1 )
suffixStrings <- sapply( suffix, paste, collapse = " " )
contract <- paste(prefix, reutersCode, " ", suffixStrings, sep="")
df$value <- NA
for (i in 1:nrow(df)){
aux <- secdb.getValueType( contract[i], "Forward Curve" )
df$value[i] <- aux$value[length(aux$value)] # should I use the first element?
}
return(df)
}
|
# Model parameters
# Trains a single-hidden-layer neural net (caret method "nnet") with 60 hidden
# units and weight decay 1e-4; linout = TRUE requests linear output
# (regression), and MaxNWts raises nnet's weight-count cap.
model_method = "nnet"
model_grid = expand.grid(size = c(60),
decay = c(0.0001))
#model_grid = NULL
extra_params = list(MaxNWts = 100000, linout = TRUE)
# Cross-validation parameters
do_cv = TRUE
partition_ratio = .8 # for cross-validation
cv_folds = 10 # for cross-validation
verbose_on = TRUE # output cv folds results?
metric = 'MAE' # metric use for evaluating cross-validation
# Misc parameters
subset_ratio = 0.1 # for testing purposes (set to 1 for full data)
create_submission = FALSE # create a submission for Kaggle?
use_log = TRUE # take the log transform of the response?
| /Output/60-decay_0-0.5/26_11_2016_18.49.12_nnet_full/nnet_full.R | no_license | NickTalavera/Kaggle---Nick-Josh-Dina | R | false | false | 629 | r | # Model parameters
model_method = "nnet"
model_grid = expand.grid(size = c(60),
decay = c(0.0001))
#model_grid = NULL
extra_params = list(MaxNWts = 100000, linout = TRUE)
# Cross-validation parameters
do_cv = TRUE
partition_ratio = .8 # for cross-validation
cv_folds = 10 # for cross-validation
verbose_on = TRUE # output cv folds results?
metric = 'MAE' # metric use for evaluating cross-validation
# Misc parameters
subset_ratio = 0.1 # for testing purposes (set to 1 for full data)
create_submission = FALSE # create a submission for Kaggle?
use_log = TRUE # take the log transform of the response?
|
# Daniel, Ryan, James, Michael
# https://www.csee.umbc.edu/~cmarron/cmsc478/labs/lab5/lab05.shtml
# CMSC 478 Fall 2017
options(warn=1)
# output the results of problem 1
# all the other problems are just modified versions of this problem
# Exercise 1/2: logistic regression on the Default data set.
# Fits default status on balance + income using a 50/50 train/test split,
# then prints the model, the held-out confusion table, and the test error.
prob1_2 = function() {
  library(ISLR)
  set.seed(1)
  cat("---------------Exercise 1:---------------\n")
  def = read.csv("data/Default.csv")
  # Encode the response as 0/1.
  # (Replaces the original attach() calls, which leak onto the search path.)
  def["res"] = ifelse(def$default == "Yes", 1, 0)
  train = sample(10000, 5000)
  # glm with family=binomial performs logistic regression
  fit = glm(res ~ balance + income, family = binomial, data = def, subset = train)
  # BUG FIX: def[-train] selects *columns*; rows require def[-train, ]
  fit_probs = predict(fit, def[-train, ], type = 'response')
  fit_pred = ifelse(fit_probs > 0.5, 1, 0) # set threshold
  print(fit)
  cat("###table:\n")
  # BUG FIX: "+" is not string concatenation in R (it errors on characters);
  # use paste() instead.
  print(paste("num rows =", nrow(def[-train, ])))
  print(table(fit_pred, def[-train, ]$res))
  # Test-set error: mean squared 0/1 residual = misclassification rate.
  # (The original indexed fit_pred, which already covers the test set only.)
  print(mean((fit_pred - def[-train, ]$res)^2))
  print(summary(fit))
}
prob3 = function() {
}
prob4 = function() {
}
# do the problems
prob1_2()
#prob3()
#prob4()
| /cmsc478-ML/labs/lab5/lab5.r | no_license | dangbert/college | R | false | false | 1,163 | r | # Daniel, Ryan, James, Michael
# https://www.csee.umbc.edu/~cmarron/cmsc478/labs/lab5/lab05.shtml
# CMSC 478 Fall 2017
options(warn=1)
# output the results of problem 1
# all the other problems are just modified versions of this problem
prob1_2 = function() {
library(ISLR)
set.seed(1)
cat("---------------Exercise 1:---------------\n")
def=read.csv("data/Default.csv")
attach(def)
def["res"] = ifelse(default == "Yes", 1, 0)
attach(def)
train=sample(10000,5000)
# glm without library=binomial is the same as lm
# instead we are doing logistical regression
fit = glm(res ~ balance + income, family=binomial, data=def, subset=train)
fit_probs = predict(fit, def[-train], type='response')
fit_pred = ifelse(fit_probs > 0.5, 1, 0) # set threshold
print(fit)
cat("###table:\n")
print("num rows = " + nrow(def[-train]))
print(table(fit_pred, def[-train]$res))
# get the MSE
#print(mean((res-predict(fit, def))[-train]^2))
print(mean(fit_pred[-train]^2))
print(summary(fit))
}
prob3 = function() {
}
prob4 = function() {
}
# do the problems
prob1_2()
#prob3()
#prob4()
|
library(plyr)

#**** loop variables ****
# Count the deposits (rows of totaux_depots) made on each day of the study
# period, then total the daily counts by weekday.
# NOTE: 'totaux_depots' must exist in the workspace before sourcing.
start <- as.Date("2016-01-01")
end <- as.Date("2017-03-02")

v <- vector()  # daily deposit counts
d <- vector()  # matching dates (iterating a Date range yields day numbers)
for (i in start:end) {
  v <- c(v, nrow(subset(totaux_depots, totaux_depots$DateD == i)))
  d <- c(d, i)
}
temp <- data.frame(d, v)
# BUG FIX: the original built the origin via as.Date.POSIXct(2016-01-01),
# i.e. plain arithmetic on bare numbers (2014). Day numbers produced by a
# Date range count from the Unix epoch, so use that origin explicitly.
temp$d <- as.Date(temp$d, origin = "1970-01-01")
# NOTE: weekdays() output is locale-dependent; the labels below are French
# (lundi = Monday, ..., dimanche = Sunday).
temp$d <- weekdays(temp$d)
sum_lundi <- sum(subset(temp$v, temp$d == "lundi"))
sum_mardi <- sum(subset(temp$v, temp$d == "mardi"))
sum_mercredi <- sum(subset(temp$v, temp$d == "mercredi"))
sum_jeudi <- sum(subset(temp$v, temp$d == "jeudi"))
sum_vendredi <- sum(subset(temp$v, temp$d == "vendredi"))
sum_samedi <- sum(subset(temp$v, temp$d == "samedi"))
sum_dimanche <- sum(subset(temp$v, temp$d == "dimanche"))
somme <- c(sum_lundi, sum_mardi, sum_mercredi, sum_jeudi, sum_vendredi, sum_samedi, sum_dimanche)
| /st.R | no_license | Flibidi42/Pe---Big-Data | R | false | false | 972 | r | library(plyr)
#**** variables pour la boucle ****
start <- as.Date("2016-01-01")
end <- as.Date("2017-03-02")
theDate <- start
selected_data_temp <- data.frame()
selected_data <- data.frame()
vctr_sum_deposits <- vector()
vctr_date <- vector()
selected_data_temp <- data.frame()
v<-vector()
d<-vector()
tmpPosX <- as.Date.POSIXct(2016-01-01)
for(i in start:end){
v <- c(v,nrow(subset(totaux_depots, totaux_depots$DateD == i)))
d <- c(d,i)
}
temp<-data.frame(d,v)
temp$d<-as.Date(temp$d,tmpPosX)
temp$d<-weekdays(temp$d)
sum_lundi<-sum(subset(temp$v,temp$d=="lundi"))
sum_mardi<-sum(subset(temp$v,temp$d=="mardi"))
sum_mercredi<-sum(subset(temp$v,temp$d=="mercredi"))
sum_jeudi<-sum(subset(temp$v,temp$d=="jeudi"))
sum_vendredi<-sum(subset(temp$v,temp$d=="vendredi"))
sum_samedi<-sum(subset(temp$v,temp$d=="samedi"))
sum_dimanche<-sum(subset(temp$v,temp$d=="dimanche"))
somme <- c(sum_lundi,sum_mardi,sum_mercredi,sum_jeudi,sum_vendredi,sum_samedi,sum_dimanche)
|
# My max fft function for finding the most prominent frequency from the given window
source('functions/complex_magnitude.R')
# For each column of data_, returns the index of the largest-magnitude FFT bin
# (a 1-row data frame, one column per input column).
# NOTE(review): 'win' is a global (the window length) -- confirm it is set
# before this is called.
# NOTE(review): fft() on a matrix computes a multidimensional (2-D) FFT, not a
# column-wise one; mvfft() is the per-column transform -- confirm intent.
max.fft <- function(data_) {
temp <- as.data.frame(data_) %>%
#dplyr::select(-Activity,-User) %>%
as.matrix() %>%
fft() %>%
complex_magnitude() %>%
as.data.frame %>%
# slice(2:...) drops the first FFT bin (the zero-frequency/DC term)
slice(2:(ceiling(win/2)))
freq <- apply(temp, 2, which.max) %>% t() %>% as.data.frame()
return(freq)
}
| /functions/max_fft.R | no_license | sl0thower/Activity_Recognition | R | false | false | 457 | r | # My max fft function for finding the most prominent frequency from the given window
source('functions/complex_magnitude.R')
max.fft <- function(data_) {
temp <- as.data.frame(data_) %>%
#dplyr::select(-Activity,-User) %>%
as.matrix() %>%
fft() %>%
complex_magnitude() %>%
as.data.frame %>%
slice(2:(ceiling(win/2)))
freq <- apply(temp, 2, which.max) %>% t() %>% as.data.frame()
return(freq)
}
|
\name{RLIM}
\alias{RLIM}
\alias{get.relation}
\alias{get.perpetual.series}
\alias{get.futures.series}
\alias{get.coms}
\alias{get.ohlc}
\title{Read data from lim}
\description{
  reads any tseries type object from LIM
}
\usage{
get.relation(relname,colnames=NULL,units="days",bars=1)
get.perpetual.series(relname,colnames=c("open","high","low","close","volume","OpenInterest"),
rollDay="open_interest crossover",rollPolicy="Actual Prices",units="days",bars=1)
get.ohlc(relname,colnames=c("open","high","low","close"),units="days",bars=1)
get.futures.series(relname, units="days", bars=1, rollPolicy="open_interest crossover")
}
\arguments{
\item{relname}{ contract, symbol, or ticker}
\item{colnames}{ what cols do you want to read}
\item{rollDay}{string describing when to roll the contract}
\item{rollPolicy}{string describing how to adjust the prices when a
roll occurs}
\item{units}{ minutes or days}
\item{bars}{ how many minutes or days}
}
\value{
an fts object
}
\author{ Whit Armstrong }
\examples{
## load all columns
ibm.all <- get.relation("IBM")
## load only the open/high/low/close columns
ibm.ohlc <- get.ohlc("IBM")
ty <- get.futures.series("TY")
ty.p <- get.perpetual.series("TY")
ty.p <- get.perpetual.series("TY",rollDay="open_interest crossover")
ty.p1 <- get.perpetual.series("TY",rollDay="1 day after open_interest crossover")
ty.adj <- get.perpetual.series("TY",rollDay="open_interest crossover",rollPolicy="backward adjusted prices")
}
\keyword{ts}
| /man/get.relation.Rd | no_license | armstrtw/rlim | R | false | false | 1,495 | rd | \name{RLIM}
\alias{RLIM}
\alias{get.relation}
\alias{get.perpetual.series}
\alias{get.futures.series}
\alias{get.coms}
\alias{get.ohlc}
\title{Read data from lim}
\description{
reads any tseries type object form LIM
}
\usage{
get.relation(relname,colnames=NULL,units="days",bars=1)
get.perpetual.series(relname,colnames=c("open","high","low","close","volume","OpenInterest"),
rollDay="open_interest crossover",rollPolicy="Actual Prices",units="days",bars=1)
get.ohlc(relname,colnames=c("open","high","low","close"),units="days",bars=1)
get.futures.series(relname, units="days", bars=1, rollPolicy="open_interest crossover")
}
\arguments{
\item{relname}{ contract, symbol, or ticker}
\item{colnames}{ what cols do you want to read}
\item{rollDay}{string describing when to roll the contract}
\item{rollPolicy}{string describing how to adjust the prices when a
roll occurs}
\item{units}{ minutes or days}
\item{bars}{ how many minutes or days}
}
\value{
an fts object
}
\author{ Whit Armstrong }
\examples{
## load all columns
ibm.all <- get.relation("IBM")
## load only the open/high/low/close columns
ibm.ohlc <- get.ohlc("IBM")
ty <- get.futures.series("TY")
ty.p <- get.perpetual.series("TY")
ty.p <- get.perpetual.series("TY",rollDay="open_interest crossover")
ty.p1 <- get.perpetual.series("TY",rollDay="1 day after open_interest crossover")
ty.adj <- get.perpetual.series("TY",rollDay="open_interest crossover",rollPolicy="backward adjusted prices")
}
\keyword{ts}
|
# Twitter text mining: pull tweets, clean them into a tm corpus, and build
# hierarchical clusterings of the term-document matrix.
# NOTE: install.packages() in a script re-installs on every run; kept for
# fidelity, but these would normally be run once, interactively.
install.packages("twitteR")
install.packages("ROAuth")
install.packages("tm")
install.packages("ggplot2")
install.packages("wordcloud")
install.packages("plyr")
install.packages("RTextTools")
install.packages("e1071")
library(e1071)
library(twitteR)
library(ROAuth)
library(tm)
library(ggplot2)
library(wordcloud)
library(plyr)
library(RTextTools)
library(e1071)
# SECURITY: hard-coded API credentials checked into source; these should be
# revoked and supplied via environment variables or an untracked config file.
setup_twitter_oauth("JsZqhclFgxd0U1VG1jmeKzLfB","40l9QX8fZOscjgG1UvFmhFOoziedKnw8HJWYO7c5sO7T7fXBcn","861413684467220480-gGYKh6cU87FrKem09cYUvP08iBUvbTv","agaOa07UN9S5xhZUZ7B41tfGdO2qtXl8LHhSTTGpH8ZSn")
tweets <- userTimeline("Banjir", n = 10)
n.tweet <- length(tweets)
# convert tweets to a data frame
tweets.df <- twListToDF(tweets)
myCorpus <- Corpus(VectorSource(tweets.df$text))
# convert to lower case
myCorpus <- tm_map(myCorpus, content_transformer(tolower))
# remove URLs
removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeURL))
# remove anything other than English letters or space
removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*", "", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeNumPunct))
# remove stopwords
myStopwords <- c(setdiff(stopwords('english'), c("r", "big")),"use", "see", "used", "via", "amp")
myCorpus <- tm_map(myCorpus, removeWords, myStopwords)
# remove extra whitespace
myCorpus <- tm_map(myCorpus, stripWhitespace)
# keep a copy for stem completion later
myCorpusCopy <- myCorpus
myCorpus
# BUG FIX: the term-document matrix must exist before term frequencies can be
# computed from it (the original referenced 'tdm' one line too early).
tdm <- TermDocumentMatrix(myCorpus)
term.freq <- rowSums(as.matrix(tdm))
tdmat <- as.matrix(removeSparseTerms(tdm, sparse=0.3))
# compute distances
distMatrix <- dist(scale(tdm))
fit <- hclust(distMatrix, method="ward.D2")
plot(fit)
fit <- hclust(distMatrix, method="single")
plot(fit)
| /klastering.r | no_license | bagasdhika/bahasaR | R | false | false | 1,811 | r | install.packages("twitteR")
install.packages("ROAuth")
install.packages("tm")
install.packages("ggplot2")
install.packages("wordcloud")
install.packages("plyr")
install.packages("RTextTools")
install.packages("e1071")
library(e1071)
library(twitteR)
library(ROAuth)
library(tm)
library(ggplot2)
library(wordcloud)
library(plyr)
library(RTextTools)
library(e1071)
setup_twitter_oauth("JsZqhclFgxd0U1VG1jmeKzLfB","40l9QX8fZOscjgG1UvFmhFOoziedKnw8HJWYO7c5sO7T7fXBcn","861413684467220480-gGYKh6cU87FrKem09cYUvP08iBUvbTv","agaOa07UN9S5xhZUZ7B41tfGdO2qtXl8LHhSTTGpH8ZSn")
tweets <- userTimeline("Banjir", n = 10)
n.tweet <- length(tweets)
# convert tweets to a data frame
tweets.df <- twListToDF(tweets)
myCorpus <- Corpus(VectorSource(tweets.df$text))
# convert to lower case
myCorpus <- tm_map(myCorpus, content_transformer(tolower))
# remove URLs
removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeURL))
# remove anything other than English letters or space
removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*", "", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeNumPunct))
# remove stopwords
myStopwords <- c(setdiff(stopwords('english'), c("r", "big")),"use", "see", "used", "via", "amp")
myCorpus <- tm_map(myCorpus, removeWords, myStopwords)
# remove extra whitespace
myCorpus <- tm_map(myCorpus, stripWhitespace)
# keep a copy for stem completion later
myCorpusCopy <- myCorpus
myCorpus
term.freq <- rowSums(as.matrix(tdm))
tdm <- TermDocumentMatrix(myCorpus)
tdmat <- as.matrix(removeSparseTerms(tdm, sparse=0.3))
# compute distances
distMatrix <- dist(scale(tdm))
fit <- hclust(distMatrix, method="ward.D2")
plot(fit)
fit <- hclust(distMatrix, method="single")
plot(fit)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signalp_parallel.R
\name{split_XStringSet}
\alias{split_XStringSet}
\title{split XStringSet objects}
\usage{
split_XStringSet(string_set, chunk_size)
}
\arguments{
\item{string_set}{input AAStringSet object;}
\item{chunk_size}{the number of sequences in a single chunk;}
}
\value{
list of AAStringSet chunks.
}
\description{
This function splits large XStringSet objects into chunks of given size and
returns a list of AAStringSet objects.
}
\examples{
# Read fasta file:
aa <- readAAStringSet(system.file("extdata", "sample_prot_100.fasta",
package = "SecretSanta"))
# Split it into chunks
# with 10 sequences each:
split_XStringSet(aa,10)
}
| /man/split_XStringSet.Rd | no_license | zhangpan19935/SecretSanta | R | false | true | 722 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signalp_parallel.R
\name{split_XStringSet}
\alias{split_XStringSet}
\title{split XStringSet objects}
\usage{
split_XStringSet(string_set, chunk_size)
}
\arguments{
\item{string_set}{input AAStringSet object;}
\item{chunk_size}{the number of sequences in a single chunk;}
}
\value{
list of AAStringSet chunks.
}
\description{
This function splits large XStringSet objects into chunks of given size and
returns a list of AAStringSet objects.
}
\examples{
# Read fasta file:
aa <- readAAStringSet(system.file("extdata", "sample_prot_100.fasta",
package = "SecretSanta"))
# Split it into chunks
# with 10 sequences each:
split_XStringSet(aa,10)
}
|
#' Calculate the kernel matrix
#'
#' @param Y the n by q confounder matrix, where n is the number of samples, q is the number of confounding factors. Missing values in Y should be labeled as NA.
#' @param kernel the kernel to use: "linear", "gaussian".
#' @param bandwidth bandwidth h for Gaussian kernel. Required when
#'   \code{kernel = "gaussian"}; ignored (and may be omitted) for the
#'   linear kernel.
#' @param scaleY scale the columns in Y (root-mean-square scaling, since
#'   centering is off). Default is FALSE.
#' @return The kernel matrix
#' \item{K}{the n by n kernel matrix for Y}
#' @export
#' @examples
#' Y <- data_tree$ConfounderMat
#' K1 <- calkernel(Y, kernel="linear") ##linear kernel
#' K2 <- calkernel(Y, kernel="gaussian", bandwidth=1) ##Gaussian kernel
calkernel <- function(Y, kernel, bandwidth = NULL, scaleY = FALSE){
  Y <- scale(Y, center = FALSE, scale = scaleY)
  ## impute missing entries with the grand mean of Y
  Y[is.na(Y)] <- mean(Y, na.rm = TRUE)
  if (kernel == "linear"){
    K <- tcrossprod(Y)
  } else if (kernel == "gaussian"){
    ## BUG FIX: bandwidth now defaults to NULL, so omitting it gives the
    ## informative error below instead of R's bare
    ## "argument \"bandwidth\" is missing, with no default"
    if (is.null(bandwidth)){
      stop("For gaussian kernel, please specify the bandwidth")
    } else {
      K <- as.matrix(dist(Y, method = "euclidean"))
      K <- exp(-K^2/2/bandwidth^2)
    }
  } else {
    stop("Please select a valid kernel, linear kernel or gaussian kernel")
  }
  K
}
| /R/calkernel.R | no_license | HongY23/acPCoA | R | false | false | 1,198 | r | #' Calculate the kernel matrix
#' Compute an n-by-n kernel matrix for a confounder matrix
#'
#' @param Y the n by q confounder matrix (n samples, q confounders);
#'   missing values must be coded as NA and are imputed with the grand mean.
#' @param kernel which kernel to compute: "linear" or "gaussian".
#' @param bandwidth bandwidth h for the Gaussian kernel (required for it).
#' @param scaleY scale the columns of Y before computing the kernel.
#'   Default is F.
#' @return The n by n kernel matrix for Y.
#' @export
#' @examples
#' Y <- data_tree$ConfounderMat
#' K1 <- calkernel(Y, kernel="linear") ##linear kernel
#' K2 <- calkernel(Y, kernel="gaussian", bandwidth=1) ##Gaussian kernel
calkernel <- function(Y, kernel, bandwidth, scaleY=F){
  ## optional column scaling (no centering), then grand-mean imputation of NAs
  Y <- scale(Y, center = F, scale = scaleY)
  Y[is.na(Y)] <- mean(Y, na.rm = T)
  if (kernel == "linear") {
    return(Y %*% t(Y))
  }
  if (kernel == "gaussian") {
    if (is.null(bandwidth) == T) {
      stop("For gaussian kernel, please specify the bandwidth")
    }
    D <- as.matrix(dist(Y, method = "euclidean"))
    return(exp(-D^2 / (2 * bandwidth^2)))
  }
  stop("Please select a valid kernel, linear kernel or gaussian kernel")
}
/proyecto.R | no_license | JorgeRamos01/Feature-engineer-para-precios-de-casas | R | false | false | 7,669 | r | ||
\name{write.fasta}
\alias{write.fasta}
\title{
Write fasta format object to file
}
\description{
To save the fasta format object to the specified file.
}
\usage{
write.fasta(sequences, file = NULL)
}
\arguments{
\item{sequences}{
The fasta object to be saved.
}
\item{file}{
A character string naming the file to be saved to.
}
}
\details{
\code{sequences} must be an object of class fasta.
}
\value{
Saved fasta file.
}
\references{
None.
}
\author{
Jinlong Zhang \email{jinlongzhang01@gmail.com}
}
\seealso{
See Also \code{\link{read.fasta}}
}
\examples{
data(fil.fas)
write.fasta(fil.fas, "example.fasta")
## Remove the file.
unlink("example.fasta")
}
\keyword{ fasta }
| /man/write.fasta.Rd | no_license | helixcn/seqRFLP | R | false | false | 720 | rd | \name{write.fasta}
\alias{write.fasta}
\title{
Write fasta format object to file
}
\description{
To save the fasta format object to the specified file.
}
\usage{
write.fasta(sequences, file = NULL)
}
\arguments{
\item{sequences}{
The fasta object to be saved.
}
\item{file}{
A character string naming the file to be saved to.
}
}
\details{
\code{sequences} must be an object of class fasta.
}
\value{
Saved fasta file.
}
\references{
None.
}
\author{
Jinlong Zhang \email{jinlongzhang01@gmail.com}
}
\seealso{
See Also \code{\link{read.fasta}}
}
\examples{
data(fil.fas)
write.fasta(fil.fas, "example.fasta")
## Remove the file.
unlink("example.fasta")
}
\keyword{ fasta }
|
# set the type to fit
estimator <- "Muthen"
# set the working directory: try the cluster path first, then the local
# path; try() swallows the failure of whichever one does not exist
try({
  baseDir <- "/nas/longleaf/home/mgiordan/forumPres"
  setwd(baseDir)
})
try({
  baseDir <- "C:/users/mgiordan/git/mlmcfasimulation/presentationSim"
  setwd(baseDir)
})
# reading in the parameters of the model
simParams <- readRDS("SimParams.rds")
designMatrix <- simParams$designMatrix
iterationsPer <- simParams$iterationsPer
wModelTrue <- simParams$wModelTrue
wModelMis <- simParams$wModelMis
wModelMis1 <- simParams$wModelMis1
wModelMis2 <- simParams$wModelMis2
wModelMis3 <- simParams$wModelMis3
bModelTrue <- simParams$bModelTrue
#----------------------------------------------------------------------------
# Should not need to edit below this line
#----------------------------------------------------------------------------
# load relevant packages (library paths differ between the cluster and a
# local machine, hence the two try() blocks)
try({
  library("lavaan", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
  library("MIIVsem", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
  library("nlme", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
})
try({
  library("lavaan")
  library("MIIVsem")
  library("nlme")
})
# source relevant functions
try({
  source("SimulationFunctions.R") # for longleaf
})
try({
  source("../SimulationFunctions.R") # for my computer
})
# subset just the estimator we want
designMatrix <- designMatrix[which(designMatrix$estimators==estimator),]
# ROBUSTNESS FIX: the replicate range was hard-coded as 5201:5400, which
# indexes past the end of the design matrix when it has fewer rows after
# subsetting; clamp the range to rows that actually exist.
rows <- intersect(5201:5400, seq_len(nrow(designMatrix)))
for (i in rows) {
  print(i)
  # if the current row is the FIML estimator move to next bc fiml is all Mplus
  if (designMatrix$estimators[[i]]=="FIML") {
    next
  }
  # set the model spec. BUG FIX: the old if-chain silently carried the
  # previous iteration's wModel forward when no label matched; switch()
  # now stops on an unknown modelSpec label instead.
  spec <- designMatrix$modelSpec[[i]]
  wModel <- switch(spec,
                   trueModel = wModelTrue,
                   misSpec   = wModelMis,
                   misSpec1  = wModelMis1,
                   misSpec2  = wModelMis2,
                   misSpec3  = wModelMis3,
                   stop("unknown modelSpec: ", spec))
  bModel <- bModelTrue
  # read in data
  df <- read.table(designMatrix$dfName[[i]])
  names(df) <- c(paste0("y", 1:6), "cluster")
  df$id <- 1:nrow(df)
  # fit the model; any warning or error is recorded as a sentinel string
  # so the batch run keeps going
  fit <- tryCatch({
    mlcfaMIIV(withinModel = wModel,
              betweenModel = bModel,
              estimator = designMatrix$estimators[[i]],
              allIndicators = paste0("y", 1:6),
              l1Var = "id",
              l2Var = "cluster",
              df = df)
  }, warning = function(e) {
    message(e)
    return("model did not fit properly")
  }, error = function(e) {
    message(e)
    return("model did not fit properly")
  })
  #save as RDS
  saveRDS(fit, file = designMatrix$rdsName[[i]])
}
| /presentationSim/ZsimRun_muthen27.R | no_license | mlgiordano1/mlmCFASimulation | R | false | false | 2,841 | r | # set the type to fit
estimator <- "Muthen"
# set the working director
try({
baseDir <- "/nas/longleaf/home/mgiordan/forumPres"
setwd(baseDir)
})
try({
baseDir <- "C:/users/mgiordan/git/mlmcfasimulation/presentationSim"
setwd(baseDir)
})
# reading in the parameters of the model
simParams <- readRDS("SimParams.rds")
designMatrix <- simParams$designMatrix
iterationsPer <- simParams$iterationsPer
wModelTrue <- simParams$wModelTrue
wModelMis <- simParams$wModelMis
wModelMis1 <- simParams$wModelMis1
wModelMis2 <- simParams$wModelMis2
wModelMis3 <- simParams$wModelMis3
bModelTrue <- simParams$bModelTrue
#----------------------------------------------------------------------------
# Should not need to edit below this line
#----------------------------------------------------------------------------
# load relevant packages
try({
library("lavaan", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
library("MIIVsem", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
library("nlme", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
})
try({
library("lavaan")
library("MIIVsem")
library("nlme")
})
# source relevant functions
try({
source("SimulationFunctions.R") # for longleaf
})
try({
source("../SimulationFunctions.R") # for my computer
})
# subset just the estimator we want
designMatrix <- designMatrix[which(designMatrix$estimators==estimator),]
for (i in 5201:5400) {
print(i)
# if the current row is the FIML estimator move to next bc fiml is all Mplus
if (designMatrix$estimators[[i]]=="FIML") {
next
}
# set the model spec
if (designMatrix$modelSpec[[i]]=="trueModel") {
wModel <- wModelTrue
bModel <- bModelTrue
}
if (designMatrix$modelSpec[[i]]=="misSpec") {
wModel <- wModelMis
bModel <- bModelTrue
}
if (designMatrix$modelSpec[[i]]=="misSpec1") {
wModel <- wModelMis1
bModel <- bModelTrue
}
if (designMatrix$modelSpec[[i]]=="misSpec2") {
wModel <- wModelMis2
bModel <- bModelTrue
}
if (designMatrix$modelSpec[[i]]=="misSpec3") {
wModel <- wModelMis3
bModel <- bModelTrue
}
# read in data
df <- read.table(designMatrix$dfName[[i]])
names(df) <- c(paste0("y", 1:6), "cluster")
df$id <- 1:nrow(df)
fit <- tryCatch({
mlcfaMIIV(withinModel = wModel,
betweenModel = bModel,
estimator = designMatrix$estimators[[i]],
allIndicators = paste0("y", 1:6),
l1Var = "id",
l2Var = "cluster",
df = df)
}, warning = function(e) {
message(e)
return("model did not fit properly")
}, error = function(e) {
message(e)
return("model did not fit properly")
})
#save as RDS
saveRDS(fit, file = designMatrix$rdsName[[i]])
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated thin wrapper around the compiled C++ routine; the object
# identification semantics live in src/.  NOTE(review): this file is
# produced by Rcpp::compileAttributes() -- regenerate rather than edit.
#' @export
sal_identify_objects <- function(indat, threshold, maxobj) {
    .Call('_harpSpatial_sal_identify_objects', PACKAGE = 'harpSpatial', indat, threshold, maxobj)
}
# Auto-generated wrapper for the compiled 2-d cumulative-sum routine (src/).
cumsum2d <- function(indat) {
    .Call('_harpSpatial_cumsum2d', PACKAGE = 'harpSpatial', indat)
}
# Auto-generated wrapper: window mean computed from a precomputed
# cumulative-sum field (compiled implementation in src/).
windowMeanFromCumsum <- function(indat, radius) {
    .Call('_harpSpatial_windowMeanFromCumsum', PACKAGE = 'harpSpatial', indat, radius)
}
# Auto-generated wrapper: moving-window mean of 'indat' (compiled
# implementation in src/).
windowMean <- function(indat, radius) {
    .Call('_harpSpatial_windowMean', PACKAGE = 'harpSpatial', indat, radius)
}
| /R/RcppExports.R | permissive | roman7011/harpSpatial | R | false | false | 660 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @export
sal_identify_objects <- function(indat, threshold, maxobj) {
.Call('_harpSpatial_sal_identify_objects', PACKAGE = 'harpSpatial', indat, threshold, maxobj)
}
cumsum2d <- function(indat) {
.Call('_harpSpatial_cumsum2d', PACKAGE = 'harpSpatial', indat)
}
windowMeanFromCumsum <- function(indat, radius) {
.Call('_harpSpatial_windowMeanFromCumsum', PACKAGE = 'harpSpatial', indat, radius)
}
windowMean <- function(indat, radius) {
.Call('_harpSpatial_windowMean', PACKAGE = 'harpSpatial', indat, radius)
}
|
library(tidyverse)
library(ape)
library(phytools)
# read in metadata table
meta <- read.table("../../../metadata/metadata.txt",header=TRUE,sep="\t",comment.char="",quote="",stringsAsFactors=FALSE)
rownames(meta) <- meta[,1]
# get vcf header line
# OS X line:
f <- pipe("gzcat ../results/freebayes/vvinifera_fb.vcf.gz | grep CHROM")
# LINUX line (BUG FIX: this fallback previously duplicated the OS X
# 'gzcat' command; GNU coreutils ships 'zcat'):
# f <- pipe("zcat ../results/freebayes/vvinifera_fb.vcf.gz | grep CHROM")
h <- scan(f,what="character")
vcf <- read.table("../results/freebayes/vvinifera_fb.vcf.gz",stringsAsFactors=FALSE)
hcf <- read.table("../results/freebayes/vvinifera_fb_hap.vcf.gz",stringsAsFactors=FALSE)
colnames(vcf) <- h
colnames(hcf) <- h
keepi <- read.table("../../../metadata/retained_samples.txt",stringsAsFactors=FALSE) %>% unlist()
# keep high quality (column 6 = QUAL > 50) & biallelic variants
# (no comma in column 5 = ALT)
hq <- vcf[,6] > 50 & !grepl(",",vcf[,5])
vcf <- vcf[hq,]
hcf <- hcf[hq,]
# haploidized genotypes from filtered individuals only;
# class<- coerces the character matrix to numeric in place
hcfk <- hcf[,keepi] %>% as.matrix()
class(hcfk) <- "numeric"
# missing genotypes by site
rowSums(!is.na(hcfk)) %>% table() %>% plot()
# keep sites with more than 5 non-missing genotypes and at least two
# copies of each allele
keepl <- rowSums(!is.na(hcfk)) > 5 &
  rowSums(hcfk,na.rm=TRUE) > 1 &
  rowSums(-1 * (hcfk - 1),na.rm=TRUE) > 1
vcf <- vcf[keepl,]
hcf <- hcf[keepl,]
hcfk <- hcfk[keepl,]
# pairwise distances among samples, an MDS plot colored by 5-character
# sample prefix, and a midpoint-rooted neighbor-joining tree
dd <- t(hcfk) %>% dist()
mds <- cmdscale(dd)
plot(mds,pch=20,col=factor(substring(rownames(mds),1,5)))
nj(dd) %>% midpoint.root() %>% plot()
| /analysis/stacks/freebayes/Step2b_explorevariants.R | no_license | nreid/grapes | R | false | false | 1,433 | r | library(tidyverse)
library(ape)
library(phytools)
# read in metadata table
meta <- read.table("../../../metadata/metadata.txt",header=TRUE,sep="\t",comment.char="",quote="",stringsAsFactors=FALSE)
rownames(meta) <- meta[,1]
# get vcf header line
# OS X line:
f <- pipe("gzcat ../results/freebayes/vvinifera_fb.vcf.gz | grep CHROM")
# LINUX line:
# f <- pipe("gzcat ../results/freebayes/vvinifera_fb.vcf.gz | grep CHROM")
h <- scan(f,what="character")
vcf <- read.table("../results/freebayes/vvinifera_fb.vcf.gz",stringsAsFactors=FALSE)
hcf <- read.table("../results/freebayes/vvinifera_fb_hap.vcf.gz",stringsAsFactors=FALSE)
colnames(vcf) <- h
colnames(hcf) <- h
keepi <- read.table("../../../metadata/retained_samples.txt",stringsAsFactors=FALSE) %>% unlist()
# keep high quality & biallelic variants
hq <- vcf[,6] > 50 & !grepl(",",vcf[,5])
vcf <- vcf[hq,]
hcf <- hcf[hq,]
# haploidized genotypes from filtered individuals only
hcfk <- hcf[,keepi] %>% as.matrix()
class(hcfk) <- "numeric"
# missing genotypes by site
rowSums(!is.na(hcfk)) %>% table() %>% plot()
# keep sites with more than X genotypes
keepl <- rowSums(!is.na(hcfk)) > 5 &
rowSums(hcfk,na.rm=TRUE) > 1 &
rowSums(-1 * (hcfk - 1),na.rm=TRUE) > 1
vcf <- vcf[keepl,]
hcf <- hcf[keepl,]
hcfk <- hcfk[keepl,]
dd <- t(hcfk) %>% dist()
mds <- cmdscale(dd)
plot(mds,pch=20,col=factor(substring(rownames(mds),1,5)))
nj(dd) %>% midpoint.root() %>% plot()
|
# function performs least-squares phylogeny inference by nni
# written by Liam J. Revell 2011, 2013, 2015, 2019
optim.phylo.ls<-function(D,stree=NULL,set.neg.to.zero=TRUE,fixed=FALSE,tol=1e-10,collapse=TRUE){
	# change D to a matrix (if actually an object of class "dist")
	# BUG FIX: use inherits() instead of class(D)=="dist"; class() can
	# return a vector of length > 1, which makes the comparison a vector
	# condition (an error inside if() since R 4.2)
	if(inherits(D,"dist")) D<-as.matrix(D)
	# compute the number of species
	n<-nrow(D)
	if(is.null(stree))
		stree<-rtree(n=n,tip.label=rownames(D),br=NULL,rooted=FALSE) # random starting tree
	else if(!inherits(stree,"phylo")){
		cat("starting tree must be an object of class \"phylo.\" using random starting tree.\n")
		stree<-rtree(n=n,tip.label=rownames(D),br=NULL,rooted=FALSE) # random starting tree
	}
	if(!is.binary(stree)) stree<-multi2di(stree)
	if(is.rooted(stree)) stree<-unroot(stree)
	# get ls branch lengths for stree
	best.tree<-ls.tree(stree,D)
	Q<-attr(best.tree,"Q-score")
	bestQ<-0 # to start the loop
	# for search
	Nnni<-0
	# loop while Q is not improved by nni (skipped entirely when fixed=TRUE)
	while(bestQ-Q<tol&&!fixed){
		nni.trees<-lapply(nni(best.tree),ls.tree,D=D)
		nniQ<-sapply(nni.trees,function(x) attr(x,"Q-score"))
		# BUG FIX: break ties by taking the first minimizer; a length>1
		# index previously broke nni.trees[[ii]] and the if() below
		ii<-which(nniQ==min(nniQ))[1]
		bestQ<-nniQ[ii]
		if(bestQ<Q){
			best.tree<-nni.trees[[ii]]
			Nnni<-Nnni+1
			Q<-attr(best.tree,"Q-score")
			cat(paste(Nnni,"set(s) of nearest neighbor interchanges. best Q so far =",round(Q,10),"\n",collapse=""))
			flush.console()
		} else bestQ<-Inf
	}
	cat(paste("best Q score of",round(Q,10),"found after",Nnni,"nearest neighbor interchange(s).\n",collapse=""))
	if(set.neg.to.zero) best.tree$edge.length[best.tree$edge.length<0]<-0
	attr(best.tree,"Q-score")<-Q
	if(collapse) best.tree<-di2multi(best.tree)
	best.tree
}
# function computes the ls branch lengths and Q score for a tree
# written by Liam J. Revell 2011
ls.tree<-function(tree,D){
	# design matrix mapping edges to tip-pair path membership
	X<-phyloDesign(tree)
	# sort D to match the tree's tip order and columnarize the lower triangle
	D<-D[tree$tip.label,tree$tip.label]
	colD<-D[lower.tri(D)]
	# least-squares edge lengths via the normal equations:
	# solve(crossprod(X), crossprod(X, colD)) avoids forming an explicit
	# inverse (solve(t(X)%*%X)%*%t(X)%*%colD), which is both slower and
	# numerically less stable
	v<-solve(crossprod(X),crossprod(X,colD))
	# give the tree its estimated branch lengths
	tree$edge.length<-v
	# compute the fitted distances and the residual sum of squares Q
	d<-X%*%v
	Q<-sum((colD-d)^2)
	# assign attribute to tree
	attr(tree,"Q-score")<-Q
	tree
}
# function computes design matrix for least squares given a topology
# written by Liam J. Revell 2011, totally re-written 2015
# Rows index unordered tip pairs (i,j); columns index edges; an entry is 1
# when that edge lies on the path between tips i and j.
phyloDesign<-function(tree){
	N<-Ntip(tree)
	# ancestor chain for every tip (via phytools' getAncestors), with the
	# tip itself appended -- assumed root-to-tip order; confirm against
	# getAncestors() if this file is revised
	A<-lapply(1:N,function(n,t) c(getAncestors(t,n),n),t=tree)
	X<-matrix(0,N*(N-1)/2,nrow(tree$edge))
	colnames(X)<-apply(tree$edge,1,paste,collapse=",")
	rn<-sapply(1:N,function(x,y) sapply(y,paste,x=x,sep=","),y=1:N)
	rownames(X)<-rn[upper.tri(rn)]
	ii<-1
	for(i in 1:(N-1)) for(j in (i+1):N){
		# symmetric difference of the two ancestor chains = nodes on the
		# path between tips i and j; map node numbers to edge rows
		e<-c(setdiff(A[[i]],A[[j]]),setdiff(A[[j]],A[[i]]))
		e<-sapply(e,function(x,y) which(y==x),y=tree$edge[,2])
		X[ii,e]<-1
		ii<-ii+1
	}
	X
}
# function computes the ancestor node numbers for each tip number
# written by Liam J. Revell 2011
# Returns an n x (n+m) 0/1 matrix whose row i marks tip i, every internal
# node on the path from tip i to the root, and the root itself.
compute.ancestor.nodes<-function(tree){
	n<-length(tree$tip)
	m<-tree$Nnode
	X<-matrix(0,n,n+m,dimnames=list(1:n,1:(n+m)))
	root<-n+1
	# parent of a node = first column of the edge row whose child matches
	parent.of<-function(node) tree$edge[match(node,tree$edge[,2]),1]
	for(tip in seq_len(n)){
		node<-tip
		repeat{
			X[tip,node]<-1
			if(node==root) break
			node<-parent.of(node)
		}
	}
	X
}
| /R/optim.phylo.ls.R | no_license | phamasaur/phytools | R | false | false | 3,215 | r | # function performs least-squares phylogeny inference by nni
# written by Liam J. Revell 2011, 2013, 2015, 2019
optim.phylo.ls<-function(D,stree=NULL,set.neg.to.zero=TRUE,fixed=FALSE,tol=1e-10,collapse=TRUE){
# change D to a matrix (if actually an object of class "dist")
if(class(D)=="dist") D<-as.matrix(D)
# compute the number of species
n<-nrow(D)
if(is.null(stree))
stree<-rtree(n=n,tip.label=rownames(D),br=NULL,rooted=F) # random starting tree
else if(!inherits(stree,"phylo")){
cat("starting tree must be an object of class \"phylo.\" using random starting tree.\n")
stree<-rtree(n=n,tip.label=rownames(D),br=NULL,rooted=F) # random starting tree
}
if(!is.binary(stree)) stree<-multi2di(stree)
if(is.rooted(stree)) stree<-unroot(stree)
# get ls branch lengths for stree
best.tree<-ls.tree(stree,D)
Q<-attr(best.tree,"Q-score")
bestQ<-0 # to start the loop
# for search
Nnni<-0
# loop while Q is not improved by nni
while(bestQ-Q<tol&&fixed==FALSE){
nni.trees<-lapply(nni(best.tree),ls.tree,D=D)
nniQ<-sapply(nni.trees,function(x) attr(x,"Q-score"))
ii<-which(nniQ==min(nniQ))
bestQ<-nniQ[ii]
if(bestQ<Q){
best.tree<-nni.trees[[ii]]
Nnni<-Nnni+1
Q<-attr(best.tree,"Q-score")
cat(paste(Nnni,"set(s) of nearest neighbor interchanges. best Q so far =",round(Q,10),"\n",collapse=""))
flush.console()
} else bestQ<-Inf
}
cat(paste("best Q score of",round(Q,10),"found after",Nnni,"nearest neighbor interchange(s).\n",collapse=""))
if(set.neg.to.zero) best.tree$edge.length[best.tree$edge.length<0]<-0
attr(best.tree,"Q-score")<-Q
if(collapse) best.tree<-di2multi(best.tree)
best.tree
}
# function computes the ls branch lengths and Q score for a tree
# written by Liam J. Revell 2011
ls.tree<-function(tree,D){
# compute design matrix for tree i
X<-phyloDesign(tree)
# sort and columnarize D
D<-D[tree$tip.label,tree$tip.label]
colD<-D[lower.tri(D)]
# compute the least squares branches conditioned on tree i
v<-solve(t(X)%*%X)%*%t(X)%*%colD
# give the tree its estimated branch lengths
tree$edge.length<-v
# compute the distances for this tree
d<-X%*%v
# compute Q
Q<-sum((colD-d)^2)
# assign attribute to tree
attr(tree,"Q-score")<-Q
tree
}
# function computes design matrix for least squares given a topology
# written by Liam J. Revell 2011, totally re-written 2015
phyloDesign<-function(tree){
N<-Ntip(tree)
A<-lapply(1:N,function(n,t) c(getAncestors(t,n),n),t=tree)
X<-matrix(0,N*(N-1)/2,nrow(tree$edge))
colnames(X)<-apply(tree$edge,1,paste,collapse=",")
rn<-sapply(1:N,function(x,y) sapply(y,paste,x=x,sep=","),y=1:N)
rownames(X)<-rn[upper.tri(rn)]
ii<-1
for(i in 1:(N-1)) for(j in (i+1):N){
e<-c(setdiff(A[[i]],A[[j]]),setdiff(A[[j]],A[[i]]))
e<-sapply(e,function(x,y) which(y==x),y=tree$edge[,2])
X[ii,e]<-1
ii<-ii+1
}
X
}
# function computes the ancestor node numbers for each tip number
# written by Liam J. Revell 2011
compute.ancestor.nodes<-function(tree){
n<-length(tree$tip)
m<-tree$Nnode
X<-matrix(0,n,n+m,dimnames=list(1:n,1:(n+m)))
for(i in 1:n){
currnode<-i
while(currnode!=(n+1)){
X[i,currnode]<-1
currnode<-tree$edge[match(currnode,tree$edge[,2]),1]
}
X[i,currnode]<-1
}
X
}
|
# Boot: A reimplementation of bootCase using the 'boot' package to do the
# work. The main function 'Boot' creates the 'statistic' argument to
# 'boot', and passes this function to 'boot'
# For the call b1 <- Boot(m1) and b2 <- bootCase(m1),
# b2 was the returned bootstaps; this is in b1$t
# b1 is of class c("Boot", "boot", so ALL the 'boot' generic methods work
# 'Boot' has new generic methods 'summary', 'confint' and 'hist'
# notes: See Davison and Hinkley Chapters 6 and 7.
# Boot.lm, method="case" is the simple case resampling
# method="residual" uses algorithm 6.3, p. 271
# The use of weights comes from using 'pearson' residuals
# This is equivalent to alg. 6.1, p262, unweighted
# Boot.glm method="case" as for lm
# method="residual" not implemented. Too problematic.
# May 23, 2012 Sanford Weisberg sandy@umn.edu
# June 1, 2012: changed from class c("Boot", "boot") to just class "boot"
# 2012-12-10 replaced .GlobalEnv with .carEnv to avoid warnings
# 2013-07-08 changed .carEnv to car:::.carEnv so 'boot' could find the environment
# 4014-08-17: added calls to requireNamespace() and :: where necessary. J. Fox
# 2015-01-27 .carEnv now in global environment. John
# 2015-02-20: fixed coding error in Boot.nls(). John
# 2017-06-12: added a default for f in the generic method to suppress an error generated by Rstudio
# 2017-06-22: added a vcov.boot method that simply returns cov(object$t)
# 2017-06-22: fixed args to hist.boot as suggested by Achim Zeileis
# 2017-06-22: Fixed bugs in Boot.default; updated .Rd file as suggested by Achim Zeileis
# 2017-06-24: (Z) added '...' argument to generic and all methods
# set labels=names(f(object)) with f() rather than coef()
# simplified and avoid bug in computation of 'out' and check for $qr in Boot.default
# do not rely on $model to be available
# instead set up empty dummy data with right number of rows (either via nobs or
# NROW(residuals(...)))
# optionally use original estimates as starting values in update(object, ...)
# within Boot.default
# 2017-06-25: modified bca confidence intervals to default to 'perc' if adjustment is out of range
# 2017-06-26: consistently use inherits(..., "try-error") rather than class(...) == "try-error"
# 2017-09-16: Changed to vcov.boot method to pass arguments to cov. In
# particular, if some of the bootstrap reps are NA, then the argument
# use="complete.obs" may be desirable.
# 2017-10-06: Corrected bug that put the wrong estimates in t0 if missing values were
# present with case resampling.
# 2017-10-19: Added "norm" as an option on histograms
# 2017-11-30: Use carPalette() for colors in hist.boot()
# 2017-12-24: Removed parallel argument that was added. If ncores<=1, no parallel processing is used. If ncores>1
# selects the correct parallel environment, and implements with that number of cores.
# 2018-01-28: Changed print.summary.boot to print R once only if it is constant
Boot <- function(object, f=coef, labels=names(f(object)), R=999, method=c("case", "residual"), ncores=1, ...){UseMethod("Boot")}
## Workhorse behind Boot(): builds the 'statistic' function that
## boot::boot() requires (either case or residual resampling) and returns
## an object of class "boot".  Resampling indices / resampled responses
## are handed to update() through the package environment .carEnv because
## boot() evaluates the statistic in its own frame.
Boot.default <- function(object, f=coef, labels=names(f(object)),
                 R=999, method=c("case", "residual"), ncores=1, start=FALSE,...) {
  if(!(requireNamespace("boot"))) stop("The 'boot' package is missing")
  ## original statistic
  f0 <- f(object)
  if(length(labels) != length(f0)) labels <- paste0("V", seq_along(f0))
  ## process starting values (if any); start=TRUE reuses the original
  ## estimates as starting values in each bootstrap refit
  if(isTRUE(start)) start <- f0
  ## set up bootstrap handling for case vs. residual bootstrapping
  method <- match.arg(method, c("case", "residual"))
  if(method=="case") {
    boot.f <- function(data, indices, .fn) {
      assign(".boot.indices", indices, envir=.carEnv)
      mod <- if(identical(start, FALSE)) {
        update(object, subset=get(".boot.indices", envir=.carEnv))
      } else {
        update(object, subset=get(".boot.indices", envir=.carEnv), start=start)
      }
      ## a rank change means aliasing differs from the original fit:
      ## return a row of NAs rather than misaligned coefficients
      out <- if(!is.null(object$qr) && (mod$qr$rank != object$qr$rank)) f0 * NA else .fn(mod)
      out
    }
  } else {
    ## residual bootstrap (Davison & Hinkley alg. 6.3): resample scaled
    ## pearson residuals and add them to the fitted values
    boot.f <- function(data, indices, .fn) {
      first <- all(indices == seq(length(indices)))
      res <- if(first) residuals(object, type="pearson") else
          residuals(object, type="pearson")/sqrt(1 - hatvalues(object))
      res <- if(!first) (res - mean(res)) else res
      val <- fitted(object) + res[indices]
      if (!is.null(object$na.action)){
        pad <- object$na.action
        attr(pad, "class") <- "exclude"
        val <- naresid(pad, val)
      }
      assign(".y.boot", val, envir=.carEnv)
      mod <- if(identical(start, FALSE)) {
        update(object, get(".y.boot", envir=.carEnv) ~ .)
      } else {
        update(object, get(".y.boot", envir=.carEnv) ~ ., start=start)
      }
      out <- if(!is.null(object$qr) && (mod$qr$rank != object$qr$rank)) f0 * NA else .fn(mod)
      out
    }
  }
  ## try to determine number of observations and set up empty dummy data
  nobs0 <- function(x, ...) {
    rval <- try(stats::nobs(x, ...), silent = TRUE)
    if(inherits(rval, "try-error") | is.null(rval)) rval <- NROW(residuals(x, ...))
    return(rval)
  }
  n <- nobs0(object)
  dd <- data.frame(.zero = rep.int(0L, n))
  ## choose the parallel backend for boot(): none, forked (unix), or snow
  if(ncores<=1){
    parallel_env="no"
    ncores=getOption("boot.ncpus",1L)
  }else{
    if(.Platform$OS.type=="unix"){
      parallel_env="multicore"
    }else{
      parallel_env="snow"
    }
  }
  ## call boot() but set nice labels
  b <- boot::boot(dd, boot.f, R, .fn=f,parallel=parallel_env,ncpus=ncores, ...)
  colnames(b$t) <- labels
  ## clean up and return
  if(exists(".y.boot", envir=.carEnv))
    remove(".y.boot", envir=.carEnv)
  if(exists(".boot.indices", envir=.carEnv))
    remove(".boot.indices", envir=.carEnv)
  b
}
Boot.lm <- function(object, f=coef, labels=names(f(object)),
                    R=999, method=c("case", "residual"), ncores=1, ...){
  ## Refit on the model frame so rows dropped by na.action are excluded
  ## before resampling, then delegate to the default method.
  refit <- update(object, data = model.frame(object))
  Boot.default(refit, f, labels, R, method, ncores, ...)
}
Boot.glm <- function(object, f=coef, labels=names(f(object)),
                    R=999, method=c("case", "residual"), ncores=1, ...) {
  ## Only case resampling is supported for glms; reject residual
  ## resampling up front (guard clause), then refit on the model frame
  ## and delegate to the default method.
  method <- match.arg(method, c("case", "residual"))
  if (method != "case") {
  stop("Residual bootstrap not implemented in the 'car' function 'Boot'.
  Use the 'boot' function in the 'boot' package to write
  your own version of residual bootstrap for a glm.")
  }
  refit <- update(object, data = model.frame(object))
  Boot.default(refit, f, labels, R, method, ncores, ...)
}
## Boot method for nls fits.  Case resampling refits on resampled rows;
## residual resampling adds resampled raw residuals to the fitted values.
## Bootstrap refits that fail to converge yield a row of NAs.
Boot.nls <- function(object, f=coef, labels=names(f(object)),
                    R=999, method=c("case", "residual"),ncores=1, ...) {
  ### Remove rows with missing data from the data object
  all.names <- all.vars(object$m$formula())
  param.names <- names(object$m$getPars())
  vars <- all.names[!(all.names %in% param.names)]
  obj <- update(object, data = na.omit(eval(object$data)[,vars]), start=coef(object))
  ## BUG FIX: f0 was previously computed as f(obj) *before* obj was
  ## created above, which raised "object 'obj' not found" on every call
  f0 <- f(obj)
  if(!(requireNamespace("boot"))) stop("The 'boot' package is missing")
  if(length(labels) != length(f0)) labels <- paste("V", seq(length(f0)), sep="")
  method <- match.arg(method)
  opt<-options(show.error.messages = FALSE)
  ## ROBUSTNESS FIX: restore the option even if an error escapes below
  on.exit(options(opt), add = TRUE)
  if(method=="case") {
    boot.f <- function(data, indices, .fn) {
      assign(".boot.indices", indices, envir=.carEnv)
      mod <- try(update(obj, subset=get(".boot.indices", envir=.carEnv),
                  start=coef(obj)))
      if(inherits(mod, "try-error")){
        out <- .fn(obj)
        out <- rep(NA, length(out)) } else {out <- .fn(mod)}
      out
    }
  } else {
    boot.f <- function(data, indices, .fn) {
      first <- all(indices == seq(length(indices)))
      res <- residuals(object)
      val <- fitted(object) + res[indices]
      if (!is.null(object$na.action)){
        pad <- object$na.action
        attr(pad, "class") <- "exclude"
        val <- naresid(pad, val)
      }
      assign(".y.boot", val, envir=.carEnv)
      mod <- try(update(object, get(".y.boot", envir=.carEnv) ~ .,
                        start=coef(object)))
      if(inherits(mod, "try-error")){
        out <- .fn(object)
        out <- rep(NA, length(out)) } else {out <- .fn(mod)}
      out
    }
  }
  ## choose the parallel backend for boot()
  if(ncores<=1){
    parallel_env="no"
    ncores=getOption("boot.ncpus",1L)
  }else{
    if(.Platform$OS.type=="unix"){
      parallel_env="multicore"
    }else{
      parallel_env="snow"
    }
  }
  b <- boot::boot(data.frame(update(object, model=TRUE)$model), boot.f, R, .fn=f,parallel = parallel_env,ncpus = ncores, ...)
  colnames(b$t) <- labels
  if(exists(".y.boot", envir=.carEnv))
    remove(".y.boot", envir=.carEnv)
  if(exists(".boot.indices", envir=.carEnv))
    remove(".boot.indices", envir=.carEnv)
  options(opt)
  ## warn when some bootstrap refits failed (NA rows in b$t)
  d <- dim(na.omit(b$t))[1]
  if(d != R)
    cat( paste("\n","Number of bootstraps was", d, "out of", R, "attempted", "\n"))
  b
}
Confint.boot <- function(object, parm, level = 0.95,
         type = c("bca", "norm", "basic", "perc"), ...){
  ## Confidence intervals with the original point estimates prepended
  ## as an "Estimate" column.
  ci <- confint(object, parm, level, type, ...)
  est <- object$t0
  keep <- names(est) %in% rownames(ci)
  cbind(Estimate = est[keep], ci)
}
## confint() method for "boot" objects: per-coefficient confidence
## intervals computed with boot::boot.ci().  When type="bca" and the BCa
## adjustment fails (e.g. out of range), falls back to percentile
## intervals with a warning.
confint.boot <- function(object, parm, level = 0.95,
         type = c("bca", "norm", "basic", "perc"), ...){
  ## BUG FIX: the missing-package guard previously evaluated a bare
  ## string instead of calling stop(), so execution continued anyway
  if (!requireNamespace("boot")) stop("boot package is missing")
  type <- match.arg(type)
  types <- c("bca", "norm", "basic", "perc")
  typelab <- c("bca", "normal", "basic", "percent")[match(type, types)]
  nn <- colnames(object$t)
  names(nn) <- nn
  ## default to all non-aliased statistics
  parm <- if(missing(parm)) which(!is.na(object$t0)) else parm
  out <- list()
  for (j in seq_along(parm)){
    out[[j]] <- try(boot::boot.ci(object, conf=level, type=type, index=parm[j], ...), silent=TRUE)
    if(inherits(out[[j]], "try-error") && type=="bca"){
      warning("BCa method fails for this problem. Using 'perc' instead")
      ## BUG FIX: the fallback previously hard-coded level = 0.95 instead
      ## of forwarding the caller's 'level'
      return(confint(object, parm, level = level, type = "perc", ...))}
  }
  ## lower/upper tail probabilities for each requested level, sorted
  levs <- unlist(lapply(level, function(x) c( (1-x)/2, 1 - (1-x)/2)))
  ints <- matrix(0, nrow=length(parm), ncol=length(levs))
  rownames(ints) <- nn[parm]
  for (j in seq_along(parm)){
    ## boot.ci stores the bounds in columns 2:3 (normal) or 4:5 (others)
    which <- if(typelab=="normal") 2:3 else 4:5
    ints[j, ] <- as.vector(t(out[[j]][[typelab]][, which]))
  }
  or <- order(levs)
  levs <- levs[or]
  ints <- ints[, or, drop=FALSE]
  colnames(ints) <- paste(round(100*levs, 1), " %",sep="")
  attr(ints,"type") <- typelab
  class(ints) <- c("confint.boot", class(ints))
  ints
}
print.confint.boot <- function(x, ...) {
  ## Header names the interval type, then the intervals as a data frame.
  interval.type <- attr(x, "type")
  cat("Bootstrap quantiles, type = ", interval.type, "\n\n")
  df <- as.data.frame(x)
  print(df, ...)
}
## summary() method for "boot" objects: one row per bootstrapped quantity
## with the number of completed replicates, the original estimate,
## bootstrap bias/SE/median, and optionally the extremes and higher
## moments (hidden by default).
summary.boot <- function (object, parm, high.moments = FALSE,
   extremes=FALSE, ...)
    {
    cl <- match.call()
    ## sample skewness: normalized third central moment
    skew1 <- function(x){
      x <- x[!is.na(x)]
      xbar <- mean(x)
      sum((x-xbar)^3)/(length(x) * sd(x)^3)
      }
    ## excess kurtosis: normalized fourth central moment minus 3
    kurtosis1 <- function (x) {
      x <- x[!is.na(x)]
      xbar <- mean(x)
      sum((x - xbar)^4)/(length(x) * sd(x)^4) - 3
      }
    ## drop statistics that were aliased (NA) in the original fit
    not.aliased <- !is.na(object$t0)
    boots <- object$t[ , not.aliased, drop=FALSE ]
    nc <- if(is.matrix(boots)) ncol(boots) else 1
    stats <- matrix(rep(NA, nc * 10), ncol = 10)
    rownames(stats) <- colnames(boots)
    stats[, 1] <- apply(boots, 2, function(x) sum(!is.na(x))) # num. obs
    stats[, 2] <- object$t0[not.aliased] # point estimate
    ## bias = bootstrap mean minus the original estimate
    stats[, 3] <- apply(boots, 2, function(x) mean(x, na.rm=TRUE)) - stats[, 2]
    stats[, 5] <- apply(boots, 2, function(x) median(x, na.rm=TRUE))
    stats[, 4] <- apply(boots, 2, function(x) sd(x, na.rm=TRUE))
    stats[, 6] <- apply(boots, 2, function(x) min(x, na.rm=TRUE))
    stats[, 7] <- apply(boots, 2, function(x) max(x, na.rm=TRUE))
    stats[, 8] <- stats[, 7] - stats[, 6]
    stats[, 9] <- apply(boots, 2, skew1)
    stats[, 10] <- apply(boots, 2, kurtosis1)
    colnames(stats) <- c(
       "R", "original", "bootBias", "bootSE", "bootMed", "bootMin",
       "bootMax", "bootRange", "bootSkew", "bootKurtosis")
    stats <- as.data.frame(stats)
    class(stats) <- c("summary.boot", "data.frame")
    ## hide the optional columns unless explicitly requested
    use <- rep(TRUE, 10)
    if (high.moments == FALSE) use[9:10] <- FALSE
    if (extremes==FALSE) use[6:8] <- FALSE
    parm <- if(missing(parm)) 1:dim(stats)[1] else parm
    return(stats[parm , use])
}
# Print method for "summary.boot" objects.  When the table has several rows
# and the number of bootstrap replications R is (numerically) constant
# across them, R is reported once in a header line and the redundant R
# column is dropped; otherwise the full table is printed.
print.summary.boot <- function(x, digits = max(getOption("digits") - 2, 3), ...) {
  if (dim(x)[1] == 1L) {
    print.data.frame(x, digits = digits, ...)
  } else if (sd(x[, 1]) < 1.e-8) {
    # R is constant: report it once, print the table without it
    cat(paste("\nNumber of bootstrap replications R =", x[1, 1], "\n", sep = " "))
    print.data.frame(x[, -1], digits = digits, ...)
  } else {
    print.data.frame(x, digits = digits, ...)
  }
}
# Histogram method for "boot" objects.  Draws one histogram per bootstrapped
# statistic, optionally overlaid with a kernel density estimate, a normal
# density fitted to the replicates, the requested confidence interval (a
# heavy line at the base), and a vertical line at the original estimate.
#
# Fixes: in the "separate" legend, the line-width vector previously ended
# with point.lty instead of point.lwd, and the entry selector paired the
# always-TRUE slot with the CI entry rather than the observed-value entry.
hist.boot <- function(x, parm, layout=NULL, ask, main="", freq=FALSE,
estPoint = TRUE, point.col=carPalette()[1], point.lty=2, point.lwd=2,
estDensity = !freq, den.col=carPalette()[2], den.lty=1, den.lwd=2,
estNormal = !freq, nor.col=carPalette()[3], nor.lty=2, nor.lwd=2,
ci=c("bca", "none", "perc", "norm"), level=0.95,
legend=c("top", "none", "separate"), box=TRUE, ...){
# only statistics with non-NA point estimates can be plotted
not.aliased <- which(!is.na(x$t0))
ci <- match.arg(ci)
legend <- match.arg(legend)
pe <- x$t0[not.aliased]
if(is.null(names(pe))) names(pe) <- colnames(x$t)
if(missing(parm)) parm <- not.aliased
# reserve one extra panel when the legend gets its own panel
nt <- length(parm) + if(legend == "separate") 1 else 0
if (nt > 1 & (is.null(layout) || is.numeric(layout))) {
if(is.null(layout)){
layout <- switch(min(nt, 9), c(1, 1), c(1, 2), c(2, 2), c(2, 2),
c(3, 2), c(3, 2), c(3, 3), c(3, 3), c(3, 3))
}
ask <- if(missing(ask) || is.null(ask)) prod(layout) < nt else ask
oma3 <- if(legend == "top") 0.5 + estPoint + estDensity + estNormal
else 1.5
op <- par(mfrow=layout, ask=ask, no.readonly=TRUE,
oma=c(0, 0, oma3, 0), mar=c(5, 4, 1, 2) + .1)
on.exit(par(op))
}
if(ci != "none") clim <- confint(x, type=ci, level=level)
pn <- colnames(x$t)
names(pn) <- pn
# which top-legend entries apply: normal curve, density, CI, point estimate
what <- c(estNormal & !freq, estDensity & !freq, ci != "none", estPoint)
for (j in parm){
# determine the range of the y-axis
z <- na.omit(x$t[, j])
h <- hist(z, plot=FALSE)
d <- density(z)
# headroom for the fitted normal; NOTE(review): pnorm(0) = 0.5 slightly
# over-allows (the normal peak is dnorm(0)/sd) -- kept for compatibility
n <- pnorm(0)/(sd <- sd(z))
m <- if(freq == FALSE) max(h$density, d$y, n) else max(h$counts)
plot(h, xlab=pn[j], freq=freq,
main=if(length(parm)==1) main else "", ylim=c(0, m), ...)
if(estDensity & !freq){
lines(d, col=den.col, lty=den.lty, lwd=den.lwd)
}
if(estNormal & !freq){
# overlay the normal density with the bootstrap mean and SD
z <- na.omit(x$t[, j])
xx <- seq(-4, 4, length=400)
xbar <- mean(z)
sd <- sd(z)
lines( xbar + sd*xx, dnorm(xx)/sd, col=nor.col, lty=nor.lty,
lwd=nor.lwd)
}
if(ci != "none") lines( clim[j ,], c(0, 0), lwd=4)
if(estPoint) abline(v=pe[j], lty=point.lty, col=point.col, lwd=point.lwd)
if(box) box()
if( j == parm[1] & legend == "top" ) { # add legend above the first panel
usr <- par("usr")
legend.coords <- list(x=usr[1], y=usr[4] + 1.3 * (1 + sum(what)) *strheight("N"))
legend( legend.coords,
c("Normal Density", "Kernel Density",
paste(ci, " ", round(100*level), "% CI", sep=""),
"Obs. Value")[what],
lty=c(nor.lty, den.lty, 1, point.lty)[what],
col=c(nor.col, den.col, "black", point.col)[what],
fill=c(nor.col, den.col, "black", point.col)[what],
lwd=c(2, 2, 4, 2)[what],
border=c(nor.col, den.col, "black", point.col)[what],
bty="n", cex=0.9, xpd=NA)#, #horiz=TRUE, offset=
}
}
mtext(side=3, outer=TRUE, main, cex=1.2)
if(legend == "separate") {
plot(0:1, 0:1, xaxt="n", yaxt="n", xlab="", ylab="", type="n")
# bug fix: the CI entry is conditional and the observed-value entry is
# always shown (these two selectors were previously swapped)
use <- (1:4)[c( estNormal, estDensity, ci != "none", TRUE)]
curves <- c("fitted normal density", "Kernel density est",
paste(100*level, "% ", ci, " confidence interval", sep=""),
"Observed value of statistic")
colors <- c(nor.col, den.col, "black", point.col)
lines <- c(nor.lty, den.lty, 1, point.lty)
widths<- c(nor.lwd, den.lwd, 2, point.lwd)  # bug fix: was point.lty
legend("center", curves[use], lty=lines[use], lwd=widths[use],
col=colors[use], box.col=par()$bg,
title="Bootstrap histograms")
}
invisible(NULL)
}
vcov.boot <- function(object, ...){cov(object$t, ...)}
| /R/Boot.R | no_license | ekatko1/car | R | false | false | 16,654 | r | # Boot: A reimplementation of bootCase using the 'boot' package to do the
# work. The main function 'Boot' creates the 'statistic' argument to
# 'boot', and passes this function to 'boot'
# For the call b1 <- Boot(m1) and b2 <- bootCase(m1),
# b2 was the returned bootstraps; this is in b1$t
# b1 is of class c("Boot", "boot"), so ALL the 'boot' generic methods work
# 'Boot' has new generic methods 'summary', 'confint' and 'hist'
# notes: See Davison and Hinkley Chapters 6 and 7.
# Boot.lm, method="case" is the simple case resampling
# method="residual" uses algorithm 6.3, p. 271
# The use of weights comes from using 'pearson' residuals
# This is equivalent to alg. 6.1, p262, unweighted
# Boot.glm method="case" as for lm
# method="residual" not implemented. Too problematic.
# May 23, 2012 Sanford Weisberg sandy@umn.edu
# June 1, 2012: changed from class c("Boot", "boot") to just class "boot"
# 2012-12-10 replaced .GlobalEnv with .carEnv to avoid warnings
# 2013-07-08 changed .carEnv to car:::.carEnv so 'boot' could find the environment
# 2014-08-17: added calls to requireNamespace() and :: where necessary. J. Fox
# 2015-01-27 .carEnv now in global environment. John
# 2015-02-20: fixed coding error in Boot.nls(). John
# 2017-06-12: added a default for f in the generic method to suppress an error generated by Rstudio
# 2017-06-22: added a vcov.boot method that simply returns cov(object$t)
# 2017-06-22: fixed args to hist.boot as suggested by Achim Zeileis
# 2017-06-22: Fixed bugs in Boot.default; updated .Rd file as suggested by Achim Zeileis
# 2017-06-24: (Z) added '...' argument to generic and all methods
# set labels=names(f(object)) with f() rather than coef()
# simplified and avoid bug in computation of 'out' and check for $qr in Boot.default
# do not rely on $model to be available
# instead set up empty dummy data with right number of rows (either via nobs or
# NROW(residuals(...)))
# optionally use original estimates as starting values in update(object, ...)
# within Boot.default
# 2017-06-25: modified bca confidence intervals to default to 'perc' if adjustment is out of range
# 2017-06-26: consistently use inherits(..., "try-error") rather than class(...) == "try-error"
# 2017-09-16: Changed to vcov.boot method to pass arguments to cov. In
# particular, if some of the bootstrap reps are NA, then the argument
# use="complete.obs" may be desirable.
# 2017-10-06: Corrected bug that put the wrong estimates in t0 if missing values were
# present with case resampling.
# 2017-10-19: Added "norm" as an option on histograms
# 2017-11-30: Use carPalette() for colors in hist.boot()
# 2017-12-24: Removed parallel argument that was added. If ncores<=1, no parallel processing is used. If ncores>1
# selects the correct parallel environment, and implements with that number of cores.
# 2018-01-28: Changed print.summary.boot to print R once only if it is constant
Boot <- function(object, f=coef, labels=names(f(object)), R=999, method=c("case", "residual"), ncores=1, ...){UseMethod("Boot")}
# Workhorse bootstrap method, called by the class-specific Boot() methods.
# Builds the 'statistic' function for boot::boot() that refits 'object' on
# each bootstrap sample and evaluates f() on the refit.
#
# method = "case": resample rows; the indices are communicated to update()
#   through the package environment .carEnv so boot() can find them.
# method = "residual": resample (modified) Pearson residuals and refit to
#   fitted values + resampled residuals (Davison & Hinkley alg. 6.3).
# start = TRUE uses the original estimates as starting values for refits.
Boot.default <- function(object, f=coef, labels=names(f(object)),
R=999, method=c("case", "residual"), ncores=1, start=FALSE,...) {
if(!(requireNamespace("boot"))) stop("The 'boot' package is missing")
## original statistic
f0 <- f(object)
if(length(labels) != length(f0)) labels <- paste0("V", seq_along(f0))
## process starting values (if any)
if(isTRUE(start)) start <- f0
## set up bootstrap handling for case vs. residual bootstrapping
method <- match.arg(method, c("case", "residual"))
if(method=="case") {
boot.f <- function(data, indices, .fn) {
# pass the resampled row indices to update() via .carEnv
assign(".boot.indices", indices, envir=.carEnv)
mod <- if(identical(start, FALSE)) {
update(object, subset=get(".boot.indices", envir=.carEnv))
} else {
update(object, subset=get(".boot.indices", envir=.carEnv), start=start)
}
# a rank-deficient refit is not comparable: return NAs for this sample
out <- if(!is.null(object$qr) && (mod$qr$rank != object$qr$rank)) f0 * NA else .fn(mod)
out
}
} else {
boot.f <- function(data, indices, .fn) {
# boot()'s first call uses the identity permutation; use raw Pearson
# residuals there, leverage-corrected and centered ones otherwise
first <- all(indices == seq(length(indices)))
res <- if(first) residuals(object, type="pearson") else
residuals(object, type="pearson")/sqrt(1 - hatvalues(object))
res <- if(!first) (res - mean(res)) else res
val <- fitted(object) + res[indices]
if (!is.null(object$na.action)){
# re-insert NAs dropped at fit time so lengths line up
pad <- object$na.action
attr(pad, "class") <- "exclude"
val <- naresid(pad, val)
}
assign(".y.boot", val, envir=.carEnv)
mod <- if(identical(start, FALSE)) {
update(object, get(".y.boot", envir=.carEnv) ~ .)
} else {
update(object, get(".y.boot", envir=.carEnv) ~ ., start=start)
}
out <- if(!is.null(object$qr) && (mod$qr$rank != object$qr$rank)) f0 * NA else .fn(mod)
out
}
}
## try to determine number of observations and set up empty dummy data
nobs0 <- function(x, ...) {
rval <- try(stats::nobs(x, ...), silent = TRUE)
if(inherits(rval, "try-error") | is.null(rval)) rval <- NROW(residuals(x, ...))
return(rval)
}
n <- nobs0(object)
# boot() needs a data argument, but resampling happens through .carEnv,
# so an n-row dummy data frame suffices
dd <- data.frame(.zero = rep.int(0L, n))
if(ncores<=1){
parallel_env="no"
ncores=getOption("boot.ncpus",1L)
}else{
# multicore (fork) on unix-alikes, snow (socket cluster) elsewhere
if(.Platform$OS.type=="unix"){
parallel_env="multicore"
}else{
parallel_env="snow"
}
}
## call boot() but set nice labels
b <- boot::boot(dd, boot.f, R, .fn=f,parallel=parallel_env,ncpus=ncores, ...)
colnames(b$t) <- labels
## clean up and return
if(exists(".y.boot", envir=.carEnv))
remove(".y.boot", envir=.carEnv)
if(exists(".boot.indices", envir=.carEnv))
remove(".boot.indices", envir=.carEnv)
b
}
# Boot method for lm objects.  Refits the model on its own model frame so
# that rows with missing data are dropped before resampling, then defers
# to Boot.default.
Boot.lm <- function(object, f=coef, labels=names(f(object)),
R=999, method=c("case", "residual"), ncores=1, ...){
  refit <- update(object, data = model.frame(object))
  Boot.default(refit, f, labels, R, method, ncores, ...)
}
# Boot method for glm objects.  Only case resampling is supported: the
# model is refit on its model frame (dropping rows with missing data) and
# handed to Boot.default.  Residual resampling is rejected with an error.
Boot.glm <- function(object, f=coef, labels=names(f(object)),
R=999, method=c("case", "residual"), ncores=1, ...) {
  method <- match.arg(method, c("case", "residual"))
  if (method != "case") {
    stop("Residual bootstrap not implemented in the 'car' function 'Boot'.
Use the 'boot' function in the 'boot' package to write
your own version of residual bootstrap for a glm.")
  }
  refit <- update(object, data = model.frame(object))
  Boot.default(refit, f, labels, R, method, ncores, ...)
}
# Boot method for nls objects.
#
# Rows with missing values in any variable appearing in the model formula
# are removed and the model refit before resampling.  Bootstrap refits that
# fail to converge contribute a row of NAs instead of aborting the run.
#
# BUG FIX: the statistic f0 was previously computed as f(obj) *before*
# 'obj' was defined, which is an error on every call; it is now evaluated
# after the complete-case refit is created.
Boot.nls <- function(object, f=coef, labels=names(f(object)),
R=999, method=c("case", "residual"),ncores=1, ...) {
### Remove rows with missing data from the data object, then refit so the
### bootstrap resamples complete cases only
all.names <- all.vars(object$m$formula())
param.names <- names(object$m$getPars())
vars <- all.names[!(all.names %in% param.names)]
obj <- update(object, data = na.omit(eval(object$data)[,vars]), start=coef(object))
# statistic evaluated on the complete-case refit (must follow 'obj')
f0 <- f(obj)
if(!(requireNamespace("boot"))) stop("The 'boot' package is missing")
if(length(labels) != length(f0)) labels <- paste("V", seq(length(f0)), sep="")
method <- match.arg(method)
opt<-options(show.error.messages = FALSE)
if(method=="case") {
# case resampling: refit on each bootstrap sample of rows
boot.f <- function(data, indices, .fn) {
assign(".boot.indices", indices, envir=.carEnv)
mod <- try(update(obj, subset=get(".boot.indices", envir=.carEnv),
start=coef(obj)))
if(inherits(mod, "try-error")){
out <- .fn(obj)
out <- rep(NA, length(out)) } else {out <- .fn(mod)}
out
}
} else {
# residual resampling: refit to fitted values + resampled residuals
boot.f <- function(data, indices, .fn) {
res <- residuals(object)
val <- fitted(object) + res[indices]
if (!is.null(object$na.action)){
# re-insert NAs dropped at fit time so lengths line up
pad <- object$na.action
attr(pad, "class") <- "exclude"
val <- naresid(pad, val)
}
assign(".y.boot", val, envir=.carEnv)
mod <- try(update(object, get(".y.boot", envir=.carEnv) ~ .,
start=coef(object)))
if(inherits(mod, "try-error")){
out <- .fn(object)
out <- rep(NA, length(out)) } else {out <- .fn(mod)}
out
}
}
# choose the parallel backend exactly as in Boot.default
if(ncores<=1){
parallel_env="no"
ncores=getOption("boot.ncpus",1L)
}else{
if(.Platform$OS.type=="unix"){
parallel_env="multicore"
}else{
parallel_env="snow"
}
}
b <- boot::boot(data.frame(update(object, model=TRUE)$model), boot.f, R, .fn=f,parallel = parallel_env,ncpus = ncores, ...)
colnames(b$t) <- labels
if(exists(".y.boot", envir=.carEnv))
remove(".y.boot", envir=.carEnv)
if(exists(".boot.indices", envir=.carEnv))
remove(".boot.indices", envir=.carEnv)
options(opt)
# report if some bootstrap fits failed (rows of NA in b$t)
d <- dim(na.omit(b$t))[1]
if(d != R)
cat( paste("\n","Number of bootstraps was", d, "out of", R, "attempted", "\n"))
b
}
# Confint method for "boot" objects: the bootstrap confidence intervals
# produced by confint() with the original point estimates prepended as an
# "Estimate" column (restricted to the parameters appearing in the table).
Confint.boot <- function(object, parm, level = 0.95,
type = c("bca", "norm", "basic", "perc"), ...){
  intervals <- confint(object, parm, level, type, ...)
  estimates <- object$t0
  estimates <- estimates[names(estimates) %in% rownames(intervals)]
  cbind(Estimate = estimates, intervals)
}
# confint method for "boot" objects: bootstrap confidence intervals of the
# requested type for each (non-aliased) parameter, via boot::boot.ci().
#
# Fixes relative to the original:
#  * a missing 'boot' namespace now raises an error instead of silently
#    evaluating and discarding a character string;
#  * when the BCa computation fails, the percentile fallback honours the
#    requested 'level' (it was previously hard-coded to 0.95);
#  * the unreachable type == "all" check was removed (match.arg() already
#    rejects any type outside the declared choices).
confint.boot <- function(object, parm, level = 0.95,
type = c("bca", "norm", "basic", "perc"), ...){
if (!requireNamespace("boot")) stop("boot package is missing")
cl <- match.call()
type <- match.arg(type)
# map our type codes onto the component names used in boot.ci() results
types <- c("bca", "norm", "basic", "perc")
typelab <- c("bca", "normal", "basic", "percent")[match(type, types)]
nn <- colnames(object$t)
names(nn) <- nn
parm <- if(missing(parm)) which(!is.na(object$t0)) else parm
out <- list()
for (j in 1:length(parm)){
out[[j]] <- try(boot::boot.ci(object, conf=level, type=type, index=parm[j], ...), silent=TRUE)
if(inherits(out[[j]], "try-error") && type=="bca"){
warning("BCa method fails for this problem. Using 'perc' instead")
# fall back to percentile intervals at the *requested* level
return(confint(object, parm, level = level, type = "perc", ...))}
}
# lower/upper tail probabilities for each requested confidence level
levs <- unlist(lapply(level, function(x) c( (1-x)/2, 1 - (1-x)/2)))
ints <- matrix(0, nrow=length(parm), ncol=length(levs))
rownames(ints) <- nn[parm]
for (j in 1:length(parm)){
# normal intervals sit in columns 2:3 of the boot.ci component, the
# other interval types in columns 4:5
which <- if(typelab=="normal") 2:3 else 4:5
ints[j, ] <- as.vector(t(out[[j]][[typelab]][, which]))
}
# order the columns by tail probability
or <- order(levs)
levs <- levs[or]
ints <- ints[, or, drop=FALSE]
colnames(ints) <- paste(round(100*levs, 1), " %",sep="")
attr(ints,"type") <- typelab
class(ints) <- c("confint.boot", class(ints))
ints
}
# Print method for "confint.boot" objects: reports the interval type
# (bca/normal/basic/percent) and then the table of bootstrap quantiles.
print.confint.boot <- function(x, ...) {
cat("Bootstrap quantiles, type = ", attr(x, "type"), "\n\n")
print(as.data.frame(x), ...)
}
# Summary method for "boot" objects: returns a data frame with one row per
# bootstrapped statistic, giving the number of usable replicates (R), the
# original point estimate, bootstrap bias/SE/median and, optionally, the
# extremes (min/max/range) and higher moments (skewness/excess kurtosis).
# 'parm' selects rows; 'high.moments'/'extremes' toggle the optional columns.
summary.boot <- function (object, parm, high.moments = FALSE,
extremes=FALSE, ...)
{
cl <- match.call()
# skewness of the bootstrap replicates, ignoring NAs
skew1 <- function(x){
x <- x[!is.na(x)]
xbar <- mean(x)
sum((x-xbar)^3)/(length(x) * sd(x)^3)
}
# excess kurtosis of the bootstrap replicates, ignoring NAs
kurtosis1 <- function (x) {
x <- x[!is.na(x)]
xbar <- mean(x)
sum((x - xbar)^4)/(length(x) * sd(x)^4) - 3
}
# drop statistics whose original estimate is NA (aliased coefficients)
not.aliased <- !is.na(object$t0)
boots <- object$t[ , not.aliased, drop=FALSE ]
nc <- if(is.matrix(boots)) ncol(boots) else 1
stats <- matrix(rep(NA, nc * 10), ncol = 10)
rownames(stats) <- colnames(boots)
stats[, 1] <- apply(boots, 2, function(x) sum(!is.na(x))) # num. obs
stats[, 2] <- object$t0[not.aliased] # point estimate
# bootstrap bias = mean of the replicates minus the original estimate
stats[, 3] <- apply(boots, 2, function(x) mean(x, na.rm=TRUE)) - stats[, 2]
stats[, 5] <- apply(boots, 2, function(x) median(x, na.rm=TRUE))
stats[, 4] <- apply(boots, 2, function(x) sd(x, na.rm=TRUE))
stats[, 6] <- apply(boots, 2, function(x) min(x, na.rm=TRUE))
stats[, 7] <- apply(boots, 2, function(x) max(x, na.rm=TRUE))
stats[, 8] <- stats[, 7] - stats[, 6]
stats[, 9] <- apply(boots, 2, skew1)
stats[, 10] <- apply(boots, 2, kurtosis1)
colnames(stats) <- c(
"R", "original", "bootBias", "bootSE", "bootMed", "bootMin",
"bootMax", "bootRange", "bootSkew", "bootKurtosis")
stats <- as.data.frame(stats)
class(stats) <- c("summary.boot", "data.frame")
# select which of the 10 columns to report
use <- rep(TRUE, 10)
if (high.moments == FALSE) use[9:10] <- FALSE
if (extremes==FALSE) use[6:8] <- FALSE
parm <- if(missing(parm)) 1:dim(stats)[1] else parm
return(stats[parm , use])
}
# Print method for "summary.boot" objects.  When the number of bootstrap
# replications R is (numerically) constant across several rows, it is
# reported once in a header line and the R column is dropped from the table.
print.summary.boot <-
function(x, digits = max(getOption("digits") - 2, 3), ...) {
if(dim(x)[1] == 1L){print.data.frame(x, digits=digits, ...)} else{
if(sd(x[, 1]) < 1.e-8 ) {
cat(paste("\nNumber of bootstrap replications R =", x[1, 1], "\n", sep=" "))
print.data.frame(x[, -1], digits=digits, ...)} else
print.data.frame(x, digits=digits, ...)
}}
# Histogram method for "boot" objects.  Draws one histogram per bootstrapped
# statistic, optionally overlaid with a kernel density estimate, a normal
# density fitted to the replicates, the requested confidence interval (a
# heavy line at the base), and a vertical line at the original estimate.
#
# Fixes: in the "separate" legend, the line-width vector previously ended
# with point.lty instead of point.lwd, and the entry selector paired the
# always-TRUE slot with the CI entry rather than the observed-value entry.
hist.boot <- function(x, parm, layout=NULL, ask, main="", freq=FALSE,
estPoint = TRUE, point.col=carPalette()[1], point.lty=2, point.lwd=2,
estDensity = !freq, den.col=carPalette()[2], den.lty=1, den.lwd=2,
estNormal = !freq, nor.col=carPalette()[3], nor.lty=2, nor.lwd=2,
ci=c("bca", "none", "perc", "norm"), level=0.95,
legend=c("top", "none", "separate"), box=TRUE, ...){
# only statistics with non-NA point estimates can be plotted
not.aliased <- which(!is.na(x$t0))
ci <- match.arg(ci)
legend <- match.arg(legend)
pe <- x$t0[not.aliased]
if(is.null(names(pe))) names(pe) <- colnames(x$t)
if(missing(parm)) parm <- not.aliased
# reserve one extra panel when the legend gets its own panel
nt <- length(parm) + if(legend == "separate") 1 else 0
if (nt > 1 & (is.null(layout) || is.numeric(layout))) {
if(is.null(layout)){
layout <- switch(min(nt, 9), c(1, 1), c(1, 2), c(2, 2), c(2, 2),
c(3, 2), c(3, 2), c(3, 3), c(3, 3), c(3, 3))
}
ask <- if(missing(ask) || is.null(ask)) prod(layout) < nt else ask
oma3 <- if(legend == "top") 0.5 + estPoint + estDensity + estNormal
else 1.5
op <- par(mfrow=layout, ask=ask, no.readonly=TRUE,
oma=c(0, 0, oma3, 0), mar=c(5, 4, 1, 2) + .1)
on.exit(par(op))
}
if(ci != "none") clim <- confint(x, type=ci, level=level)
pn <- colnames(x$t)
names(pn) <- pn
# which top-legend entries apply: normal curve, density, CI, point estimate
what <- c(estNormal & !freq, estDensity & !freq, ci != "none", estPoint)
for (j in parm){
# determine the range of the y-axis
z <- na.omit(x$t[, j])
h <- hist(z, plot=FALSE)
d <- density(z)
# headroom for the fitted normal; NOTE(review): pnorm(0) = 0.5 slightly
# over-allows (the normal peak is dnorm(0)/sd) -- kept for compatibility
n <- pnorm(0)/(sd <- sd(z))
m <- if(freq == FALSE) max(h$density, d$y, n) else max(h$counts)
plot(h, xlab=pn[j], freq=freq,
main=if(length(parm)==1) main else "", ylim=c(0, m), ...)
if(estDensity & !freq){
lines(d, col=den.col, lty=den.lty, lwd=den.lwd)
}
if(estNormal & !freq){
# overlay the normal density with the bootstrap mean and SD
z <- na.omit(x$t[, j])
xx <- seq(-4, 4, length=400)
xbar <- mean(z)
sd <- sd(z)
lines( xbar + sd*xx, dnorm(xx)/sd, col=nor.col, lty=nor.lty,
lwd=nor.lwd)
}
if(ci != "none") lines( clim[j ,], c(0, 0), lwd=4)
if(estPoint) abline(v=pe[j], lty=point.lty, col=point.col, lwd=point.lwd)
if(box) box()
if( j == parm[1] & legend == "top" ) { # add legend above the first panel
usr <- par("usr")
legend.coords <- list(x=usr[1], y=usr[4] + 1.3 * (1 + sum(what)) *strheight("N"))
legend( legend.coords,
c("Normal Density", "Kernel Density",
paste(ci, " ", round(100*level), "% CI", sep=""),
"Obs. Value")[what],
lty=c(nor.lty, den.lty, 1, point.lty)[what],
col=c(nor.col, den.col, "black", point.col)[what],
fill=c(nor.col, den.col, "black", point.col)[what],
lwd=c(2, 2, 4, 2)[what],
border=c(nor.col, den.col, "black", point.col)[what],
bty="n", cex=0.9, xpd=NA)#, #horiz=TRUE, offset=
}
}
mtext(side=3, outer=TRUE, main, cex=1.2)
if(legend == "separate") {
plot(0:1, 0:1, xaxt="n", yaxt="n", xlab="", ylab="", type="n")
# bug fix: the CI entry is conditional and the observed-value entry is
# always shown (these two selectors were previously swapped)
use <- (1:4)[c( estNormal, estDensity, ci != "none", TRUE)]
curves <- c("fitted normal density", "Kernel density est",
paste(100*level, "% ", ci, " confidence interval", sep=""),
"Observed value of statistic")
colors <- c(nor.col, den.col, "black", point.col)
lines <- c(nor.lty, den.lty, 1, point.lty)
widths<- c(nor.lwd, den.lwd, 2, point.lwd)  # bug fix: was point.lty
legend("center", curves[use], lty=lines[use], lwd=widths[use],
col=colors[use], box.col=par()$bg,
title="Bootstrap histograms")
}
invisible(NULL)
}
vcov.boot <- function(object, ...){cov(object$t, ...)}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jppa.R
\name{jppa}
\alias{jppa}
\title{Joint Potential Path Area of Two Animals}
\usage{
jppa(traj1, traj2, t.int = 0.1 *
as.numeric(names(sort(-table(ld(traj1)$dt)))[1]),
tol = max(ld(traj1)$dt, na.rm = T), dissolve = TRUE,
proj4string = CRS(as.character(NA)), ePoints = 360, ...)
}
\arguments{
\item{traj1}{an object of the class \code{ltraj} which contains the time-stamped
movement fixes of the first object. Note this object must be a \code{type II
ltraj} object. For more information on objects of this type see \code{
help(ltraj)}.}
\item{traj2}{same as \code{traj1}.}
\item{t.int}{(optional) time parameter (in seconds) used to determine the frequency of time slices
used to delineate the joint activity space. Default is 1/10th of the mode of the temporal sampling
interval from \code{traj1}. Smaller values for \code{t.int} will result in smoother output polygons.}
\item{tol}{(optional) parameter used to filter out those segments where the time between fixes is overly
large (often due to irregular sampling or missing fixes); which leads to an overestimation of the
activity space via the PPA method. Default is the maximum sampling interval from \code{traj1}.}
\item{dissolve}{logical parameter indicating whether (\code{=TRUE}; the default) or not (\code{=FALSE})
to return a spatially dissolved polygon of the joint activity space.}
\item{proj4string}{a string object containing the projection information to be passed included in the output
\code{SpatialPolygonsDataFrame} object. For more information see the \code{CRS-class} in the packages
\code{sp} and \code{rgdal}. Default is \code{NA}.}
\item{ePoints}{number of vertices used to construct each PPA ellipse. More points will necessarily provide
a more detailed ellipse shape, but will slow computation; default is 360.}
\item{...}{additional parameters to be passed to the function \code{dynvmax}. For example, should include options for
\code{dynamic} and \code{method}; see the documentation for \code{dynvmax} for more detailed information
on what to include here.}
}
\value{
This function returns a \code{SpatialPolygonsDataFrame} representing the joint accessibility space between
the two animals.
}
\description{
The function \code{jppa} computes the joint accessibility space between two animals. It can be used
to map (as a spatial polygon) the area that could have been jointly accessed by two individual animals
in space and time. The jPPA represents a spatial measure of spatial-temporal interaction.
}
\details{
The function \code{jppa} can be used to map areas of potential interaction between two animals.
Specifically, this represents a measure of spatial overlap that also considers the temporal sequencing
of telemetry points. In this respect it improves significantly over static measures of home range overlap,
often used to measure static interaction, and can be considered as a spatial measure of dynamic interaction.
}
\references{
Long, J.A., Webb, S.L., Nelson, T.A., Gee, K. (2015) Mapping areas of spatial-temporal overlap from wildlife telemetry data. Movement Ecology. 3:38.
}
\seealso{
dynvmax, dynppa
}
| /man/jppa.Rd | no_license | jedalong/wildlifeTG | R | false | true | 3,218 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jppa.R
\name{jppa}
\alias{jppa}
\title{Joint Potential Path Area of Two Animals}
\usage{
jppa(traj1, traj2, t.int = 0.1 *
as.numeric(names(sort(-table(ld(traj1)$dt)))[1]),
tol = max(ld(traj1)$dt, na.rm = T), dissolve = TRUE,
proj4string = CRS(as.character(NA)), ePoints = 360, ...)
}
\arguments{
\item{traj1}{an object of the class \code{ltraj} which contains the time-stamped
movement fixes of the first object. Note this object must be a \code{type II
ltraj} object. For more information on objects of this type see \code{
help(ltraj)}.}
\item{traj2}{same as \code{traj1}.}
\item{t.int}{(optional) time parameter (in seconds) used to determine the frequency of time slices
used to delineate the joint activity space. Default is 1/10th of the mode of the temporal sampling
interval from \code{traj1}. Smaller values for \code{t.int} will result in smoother output polygons.}
\item{tol}{(optional) parameter used to filter out those segments where the time between fixes is overly
large (often due to irregular sampling or missing fixes); which leads to an overestimation of the
activity space via the PPA method. Default is the maximum sampling interval from \code{traj1}.}
\item{dissolve}{logical parameter indicating whether (\code{=TRUE}; the default) or not (\code{=FALSE})
to return a spatially dissolved polygon of the joint activity space.}
\item{proj4string}{a string object containing the projection information to be passed included in the output
\code{SpatialPolygonsDataFrame} object. For more information see the \code{CRS-class} in the packages
\code{sp} and \code{rgdal}. Default is \code{NA}.}
\item{ePoints}{number of vertices used to construct each PPA ellipse. More points will necessarily provide
a more detailed ellipse shape, but will slow computation; default is 360.}
\item{...}{additional parameters to be passed to the function \code{dynvmax}. For example, should include options for
\code{dynamic} and \code{method}; see the documentation for \code{dynvmax} for more detailed information
on what to include here.}
}
\value{
This function returns a \code{SpatialPolygonsDataFrame} representing the joint accessibility space between
the two animals.
}
\description{
The function \code{jppa} computes the joint accessibility space between two animals. It can be used
to map (as a spatial polygon) the area that could have been jointly accessed by two individual animals
in space and time. The jPPA represents a spatial measure of spatial-temporal interaction.
}
\details{
The function \code{jppa} can be used to map areas of potential interaction between two animals.
Specifically, this represents a measure of spatial overlap that also considers the temporal sequencing
of telemetry points. In this respect it improves significantly over static measures of home range overlap,
often used to measure static interaction, and can be considered as a spatial measure of dynamic interaction.
}
\references{
Long, J.A., Webb, S.L., Nelson, T.A., Gee, K. (2015) Mapping areas of spatial-temporal overlap from wildlife telemetry data. Movement Ecology. 3:38.
}
\seealso{
dynvmax, dynppa
}
|
## ---- message=FALSE, warning=FALSE---------------------------------------
# Vignette chunk: interactive scatterplot of Pokemon base stats.
library(plotly)
library(ggplot2)
library(pkmngor)
# Base attack vs. defensive bulk, coloured by type; 'text' supplies the
# pokemonId label shown in the plotly tooltip.
p <- ggplot(pkmn, aes(x=stats.baseAttack, y=stats.baseSqrtDefTimesStam, text=pokemonId)) +
geom_point(aes(col=type)) +
xlab("Base attack stat") +
ylab("Defensive bulk") +
scale_color_manual(values=types$color)
# convert the static ggplot into an interactive plotly widget
p <- ggplotly(p, width=720, height=720, tooltip=c("text","x","y"))
p
| /inst/doc/basestats.R | no_license | chjackson/pkmngor | R | false | false | 418 | r | ## ---- message=FALSE, warning=FALSE---------------------------------------
# Interactive scatterplot of Pokemon base stats (duplicate text column).
library(plotly)
library(ggplot2)
library(pkmngor)
# Base attack vs. defensive bulk, coloured by type; 'text' supplies the
# pokemonId label shown in the plotly tooltip.
p <- ggplot(pkmn, aes(x=stats.baseAttack, y=stats.baseSqrtDefTimesStam, text=pokemonId)) +
geom_point(aes(col=type)) +
xlab("Base attack stat") +
ylab("Defensive bulk") +
scale_color_manual(values=types$color)
# convert the static ggplot into an interactive plotly widget
p <- ggplotly(p, width=720, height=720, tooltip=c("text","x","y"))
p
|
# Environment variables required by the RHadoop stack (hadoop binary,
# streaming jar, HADOOP_HOME and the JVM) -- these must always be set
# before loading the rhbase/rhdfs/rmr2 packages.
Sys.setenv("HADOOP_CMD"="/Users/yuancalvin/hadoop-2.6.0/bin/hadoop")
Sys.setenv("HADOOP_STREAMING"="/Users/yuancalvin/hadoop-2.6.0/share/hadoop/tools/lib/hadoop-streaming-2.6.0.jar")
Sys.setenv(HADOOP_HOME="/Users/yuancalvin/hadoop-2.6.0")
Sys.setenv(JAVA_HOME="/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home")
library(rhbase)
library(rhdfs)
library(rmr2)
# Mapper: split each input line on whitespace and emit one (word, 1)
# key-value pair per token, e.g. ("the", 1), ("mine", 1).
map_word <- function(k, lines) {
  tokens <- unlist(strsplit(lines, '\\s'))
  keyval(tokens, 1)
}
# Reducer: total the counts emitted for a single word.
reduce <- function(word, counts) {
  keyval(word, sum(counts))
}
# Assemble and run the word-count MapReduce job over plain-text input;
# output = NULL lets rmr2 pick a temporary HDFS location.
wordcount <- function(input, output=NULL){
mapreduce(input=input, output = output, input.format = "text", map=map_word, reduce = reduce)
}
# Set up data source from hdfs
hdfs.root <- '/user/hang'
hdfs.data <- file.path(hdfs.root, 'data')
hdfs.out <- file.path(hdfs.root, 'out')
# run the job (timed), then fetch the (word, count) pairs back from HDFS
system.time(out <- wordcount(hdfs.data, hdfs.out))
result <- from.dfs(out)
results.df <- as.data.frame(result, stringsAsFactors = F)
colnames(results.df) <- c('word', 'count') | /scripts/cityExample.R | no_license | angerhang/hadoopAndR | R | false | false | 1,182 | r | # Always need to set the enviroment before running rHadoop
# RHadoop word-count example (duplicate text column).
# Environment variables for the hadoop binary, streaming jar, HADOOP_HOME
# and the JVM must be set before loading the RHadoop packages.
Sys.setenv("HADOOP_CMD"="/Users/yuancalvin/hadoop-2.6.0/bin/hadoop")
Sys.setenv("HADOOP_STREAMING"="/Users/yuancalvin/hadoop-2.6.0/share/hadoop/tools/lib/hadoop-streaming-2.6.0.jar")
Sys.setenv(HADOOP_HOME="/Users/yuancalvin/hadoop-2.6.0")
Sys.setenv(JAVA_HOME="/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home")
library(rhbase)
library(rhdfs)
library(rmr2)
# Mapper: split each line on whitespace and emit one (word, 1) pair per token
map_word <- function(k, lines){
wordsList <- strsplit(lines, '\\s')
words <- unlist(wordsList)
return(keyval(words, 1))
}
# Reducer: total the counts emitted for each word
reduce <- function(word, counts){
keyval(word, sum(counts))
}
# Assemble the MapReduce job; output = NULL uses a temporary HDFS location
wordcount <- function(input, output=NULL){
mapreduce(input=input, output = output, input.format = "text", map=map_word, reduce = reduce)
}
# Set up data source from hdfs
hdfs.root <- '/user/hang'
hdfs.data <- file.path(hdfs.root, 'data')
hdfs.out <- file.path(hdfs.root, 'out')
# run the job (timed), then fetch the results back from HDFS
system.time(out <- wordcount(hdfs.data, hdfs.out))
result <- from.dfs(out)
results.df <- as.data.frame(result, stringsAsFactors = F)
colnames(results.df) <- c('word', 'count') |
# Load the WHO tuberculosis mortality data.
# BUG FIX: the second read.csv() call had an unquoted file path
# (read.csv(/input/...)), which is a parse error in R and prevented the
# whole script from running; the path is now a quoted string.
tb_1 = read.csv('tubercolusis_from 2007_WHO.csv')
#tb_2 = read.csv('tuberculosis_data_WHO.csv')
tb_1 = read.csv('/input/tubercolusis_from 2007_WHO.csv')
#require('stringr')
require('ggplot2')
require('animation')
#require('maptools')
require('grid')
# Load the world map polygons (map_data comes from ggplot2)
s = map_data("world")
# The death-count column contains embedded spaces (thousands separators);
# strip them and convert to numeric.
tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV = as.character(tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV)
tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV = gsub(" ", "", tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV)
tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV = as.numeric(tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV)
# One row per country; drop South Sudan
t = as.data.frame(table(tb_1$Country))
ex = (t$Var1 == 'South Sudan')
t = t[!ex,]
# One column per year (2007-2014), filled in by the loop below
t$y_2007 = 0
t$y_2008 = 0
t$y_2009 = 0
t$y_2010 = 0
t$y_2011 = 0
t$y_2012 = 0
t$y_2013 = 0
t$y_2014 = 0
i=1
# Copy each country's yearly death counts into columns 3..10
while (i<=length(t$Var1)) {
t[i,3] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[i,1] & tb_1$Year == 2007]
t[i,4] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[i,1] & tb_1$Year == 2008]
t[i,5] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[i,1] & tb_1$Year == 2009]
t[i,6] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[i,1] & tb_1$Year == 2010]
t[i,7] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[i,1] & tb_1$Year == 2011]
t[i,8] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[i,1] & tb_1$Year == 2012]
t[i,9] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[i,1] & tb_1$Year == 2013]
t[i,10] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[i,1] & tb_1$Year == 2014]
i=i+1
}
# Keep only countries with at least 1000 deaths in 2007
ex = (t$y_2007 < 1000)
t = t[!ex,]
#Loop through the rows and save the gif...
# Diagnostic pass: for each country, count how many map polygon rows match
# its name (True) vs not (False), to surface naming mismatches with
# map_data("world").
z=1
c_check = data.frame(t$Var1)
c_check$False = 0
c_check$True = 0
while (z <= length(t$Var1)) {
temp = as.data.frame(table(s$region == t[z,1]))
c_check[z,2] = temp[1,2]
c_check[z,3] = temp[2,2]
z=z+1
}
# Rename WHO country labels to the names used by the map polygons.
# NOTE(review): 'VietNam' has no space -- verify this matches the region
# name in map_data("world") (commonly "Vietnam").
t$Var1 = as.character(t$Var1)
t$Var1[t$Var1 == 'Congo'] = 'Republic of Congo'
t$Var1[t$Var1 == 'Cote d\'Ivoire'] = 'Ivory Coast'
t$Var1[t$Var1 == 'Democratic People\'s Republic of Korea'] = 'North Korea'
t$Var1[t$Var1 == 'Iran (Islamic Republic of)'] = 'Iran'
t$Var1[t$Var1 == 'Lao People\'s Democratic Republic'] = 'Laos'
t$Var1[t$Var1 == 'Russian Federation'] = 'Russia'
t$Var1[t$Var1 == 'United Republic of Tanzania'] = 'Tanzania'
t$Var1[t$Var1 == 'Viet Nam'] = 'VietNam'
# Apply the same renames to the raw data for consistency
tb_1$Country = as.character(tb_1$Country)
tb_1$Country[tb_1$Country == 'Congo'] = 'Republic of Congo'
tb_1$Country[tb_1$Country == 'Cote d\'Ivoire'] = 'Ivory Coast'
tb_1$Country[tb_1$Country == 'Democratic People\'s Republic of Korea'] = 'North Korea'
tb_1$Country[tb_1$Country == 'Iran (Islamic Republic of)'] = 'Iran'
tb_1$Country[tb_1$Country == 'Lao People\'s Democratic Republic'] = 'Laos'
tb_1$Country[tb_1$Country == 'Russian Federation'] = 'Russia'
tb_1$Country[tb_1$Country == 'United Republic of Tanzania'] = 'Tanzania'
tb_1$Country[tb_1$Country == 'Viet Nam'] = 'VietNam'
i=1
# Convert columns 3..10 in place to percent change relative to 2007;
# the 2007 baseline (column 3) must be overwritten last, hence the
# descending column order.
while (i<=length(t$Var1)) {
t[i,10] = ((t[i,10] - t[i,3]) / t[i,3]) * 100
t[i,9] = ((t[i,9] - t[i,3]) / t[i,3]) * 100
t[i,8] = ((t[i,8] - t[i,3]) / t[i,3]) * 100
t[i,7] = ((t[i,7] - t[i,3]) / t[i,3]) * 100
t[i,6] = ((t[i,6] - t[i,3]) / t[i,3]) * 100
t[i,5] = ((t[i,5] - t[i,3]) / t[i,3]) * 100
t[i,4] = ((t[i,4] - t[i,3]) / t[i,3]) * 100
t[i,3] = ((t[i,3] - t[i,3]) / t[i,3]) * 100
i=i+1
}
# Blue gradient raster used as the plot background
g <- rasterGrob(blues9, width=unit(1,"npc"), height = unit(1,"npc"),
interpolate = TRUE)
i=1
# Render one world map per year (2007..2014, i.e. i = 1..8) and assemble
# them into an animated gif; fill colour is the percent change in TB
# deaths relative to 2007 (columns i+2 of t).
saveGIF(while (i<=8) {
y=1
while (y<=length(t$Var1)) {
s$colour[t[y,1] == s$region] = (t[y,i+2])
y=y+1
}
print(m <- ggplot(s, aes(x=long, y=lat, group=group, fill=colour)) + #Set ggplot2
annotation_custom(g, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
geom_polygon(alpha=1) + #Set transparency
geom_path(data = s, aes(x=long, y=lat, group=group), colour="black") + #Plot the Earth
scale_fill_gradient(low = "green", high = "red", guide = "colourbar", limits=c(-77,77)) + #Set the colours,
theme(plot.title = element_text(size = rel(2)),
panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + #Change the text size,
ggtitle(paste("The Spread of TB: ", 2006+i)))
ani.pause()
i=i+1
}, movie.name = "tb_ani.gif", interval = 1.5, convert = "convert", ani.width = 800,
ani.height = 560)
| /TB/TB.R | no_license | RobHarrand/kaggle | R | false | false | 4,737 | r | tb_1 = read.csv('tubercolusis_from 2007_WHO.csv')
#tb_2 = read.csv('tuberculosis_data_WHO.csv')
#file path must be a quoted string -- the bare path here was a syntax error
tb_1 = read.csv('/input/tubercolusis_from 2007_WHO.csv')
#require('stringr')
#library() stops immediately if a package is missing; require() only warns
library('ggplot2')
library('animation')
#require('maptools')
library('grid')
#Load the map data,
s = map_data("world")
tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV = as.character(tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV)
tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV = gsub(" ", "", tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV)
tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV = as.numeric(tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV)
t = as.data.frame(table(tb_1$Country))
ex = (t$Var1 == 'South Sudan')
t = t[!ex,]
t$y_2007 = 0
t$y_2008 = 0
t$y_2009 = 0
t$y_2010 = 0
t$y_2011 = 0
t$y_2012 = 0
t$y_2013 = 0
t$y_2014 = 0
i=1
#Fill columns 3:10 (years 2007:2014) with each country's TB death count.
#Restyled: for/seq_len over rows, with the year derived from the column offset.
for (row in seq_len(nrow(t))) {
  for (offset in 0:7) {
    t[row, offset + 3] = tb_1$Number.of.deaths.due.to.tuberculosis..excluding.HIV[tb_1$Country == t[row, 1] & tb_1$Year == 2007 + offset]
  }
}
ex = (t$y_2007 < 1000)
t = t[!ex,]
#Loop through the rows and save the gif...
z=1
c_check = data.frame(t$Var1)
c_check$False = 0
c_check$True = 0
while (z <= length(t$Var1)) {
temp = as.data.frame(table(s$region == t[z,1]))
c_check[z,2] = temp[1,2]
c_check[z,3] = temp[2,2]
z=z+1
}
t$Var1 = as.character(t$Var1)
t$Var1[t$Var1 == 'Congo'] = 'Republic of Congo'
t$Var1[t$Var1 == 'Cote d\'Ivoire'] = 'Ivory Coast'
t$Var1[t$Var1 == 'Democratic People\'s Republic of Korea'] = 'North Korea'
t$Var1[t$Var1 == 'Iran (Islamic Republic of)'] = 'Iran'
t$Var1[t$Var1 == 'Lao People\'s Democratic Republic'] = 'Laos'
t$Var1[t$Var1 == 'Russian Federation'] = 'Russia'
t$Var1[t$Var1 == 'United Republic of Tanzania'] = 'Tanzania'
t$Var1[t$Var1 == 'Viet Nam'] = 'VietNam'
tb_1$Country = as.character(tb_1$Country)
tb_1$Country[tb_1$Country == 'Congo'] = 'Republic of Congo'
tb_1$Country[tb_1$Country == 'Cote d\'Ivoire'] = 'Ivory Coast'
tb_1$Country[tb_1$Country == 'Democratic People\'s Republic of Korea'] = 'North Korea'
tb_1$Country[tb_1$Country == 'Iran (Islamic Republic of)'] = 'Iran'
tb_1$Country[tb_1$Country == 'Lao People\'s Democratic Republic'] = 'Laos'
tb_1$Country[tb_1$Country == 'Russian Federation'] = 'Russia'
tb_1$Country[tb_1$Country == 'United Republic of Tanzania'] = 'Tanzania'
tb_1$Country[tb_1$Country == 'Viet Nam'] = 'VietNam'
i=1
#Express every year column (3:10 = 2007:2014) as percent change from the
#2007 baseline held in column 3. Vectorised over all rows at once instead of
#the original row-by-row while loop; the baseline column itself becomes 0.
baseline = t[, 3]
t[, 3:10] = (t[, 3:10] - baseline) / baseline * 100
g <- rasterGrob(blues9, width=unit(1,"npc"), height = unit(1,"npc"),
interpolate = TRUE)
i=1
saveGIF(while (i<=8) {
y=1
while (y<=length(t$Var1)) {
s$colour[t[y,1] == s$region] = (t[y,i+2])
y=y+1
}
print(m <- ggplot(s, aes(x=long, y=lat, group=group, fill=colour)) + #Set ggplot2
annotation_custom(g, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
geom_polygon(alpha=1) + #Set transparency
geom_path(data = s, aes(x=long, y=lat, group=group), colour="black") + #Plot the Earth
scale_fill_gradient(low = "green", high = "red", guide = "colourbar", limits=c(-77,77)) + #Set the colours,
theme(plot.title = element_text(size = rel(2)),
panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + #Change the text size,
ggtitle(paste("The Spread of TB: ", 2006+i)))
ani.pause()
i=i+1
}, movie.name = "tb_ani.gif", interval = 1.5, convert = "convert", ani.width = 800,
ani.height = 560)
|
source("load_data.R")
#Render plot3.png: the three sub-metering series drawn against time.
#Assumes load_data() (from load_data.R) returns a data frame with Time and
#Sub_metering_1..3 columns -- TODO confirm against load_data.R.
plot3 <- function() {
  household <- load_data()
  png("plot3.png", width = 480, height = 480)
  # The first series establishes the axes; the other two are overlaid.
  plot(household$Time, household$Sub_metering_1,
       type = "l", col = "black",
       xlab = "", ylab = "Energy sub metering")
  lines(household$Time, household$Sub_metering_2, col = "red")
  lines(household$Time, household$Sub_metering_3, col = "blue")
  legend("topright", lty = 1,
         col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  dev.off()
}
plot3() | /plot3.R | no_license | thesmashing/ExData_Plotting1 | R | false | false | 542 | r | source("load_data.R")
#Create plot3.png: the three energy sub-metering series drawn against time.
#Relies on load_data() (sourced from load_data.R) returning a data frame with
#Time and Sub_metering_1..3 columns -- TODO confirm against load_data.R.
plot3 <- function() {
data <- load_data()
#480x480 PNG device; closed again by dev.off() below
png("plot3.png", width=480, height=480)
#first series establishes the axes; the other two are overlaid with lines()
plot(data$Time, data$Sub_metering_1,
type="l",
col="black",
xlab="",
ylab="Energy sub metering")
lines(data$Time, data$Sub_metering_2, col="red")
lines(data$Time, data$Sub_metering_3, col="blue")
#the unnamed third argument fills legend's `legend` parameter (the labels)
legend("topright",
col=c("black", "red", "blue"),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=1)
dev.off()
}
plot3() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trackeRdata_summary.R
\name{summary.trackeRdata}
\alias{summary.trackeRdata}
\title{Summary of training sessions.}
\usage{
\method{summary}{trackeRdata}(object, session = NULL,
movingThreshold = NULL, ...)
}
\arguments{
\item{object}{An object of class \code{\link{trackeRdata}}.}
\item{session}{A numeric vector of the sessions to be summarised, defaults to all sessions.}
\item{movingThreshold}{The threshold above which speed an athlete is considered moving (given in the unit of the speed measurements in \code{object}. If \code{NULL}, the default, the threshold corresponds to a slow walking speed (1 m/s, converted to another speed unit, if necessary). For reference, the preferred walking speed for humans is around 1.4 m/s (Bohannon, 1997).}
\item{...}{Currently not used.}
}
\value{
An object of class \code{trackeRdataSummary}.
}
\description{
Summary of training sessions.
}
\examples{
data('runs', package = 'trackeR')
runSummary <- summary(runs, session = 1:2)
## print summary
runSummary
print(runSummary, digits = 3)
## change units
changeUnits(runSummary, variable = 'speed', unit = 'km_per_h')
## plot summary
runSummaryFull <- summary(runs)
plot(runSummaryFull)
plot(runSummaryFull, group = c('total', 'moving'),
what = c('avgSpeed', 'distance', 'duration', 'avgHeartRate'))
}
\references{
Bohannon RW (1997). 'Comfortable and Maximum Walking Speed of Adults Aged 20--79 Years: Reference Values and Determinants.' Age and Ageing, 26(1), 15--19. doi: 10.1093/ageing/26.1.15.
}
\seealso{
\code{\link{plot.trackeRdataSummary}}
}
| /man/summary.trackeRdata.Rd | no_license | DrRoad/trackeR | R | false | true | 1,631 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trackeRdata_summary.R
\name{summary.trackeRdata}
\alias{summary.trackeRdata}
\title{Summary of training sessions.}
\usage{
\method{summary}{trackeRdata}(object, session = NULL,
movingThreshold = NULL, ...)
}
\arguments{
\item{object}{An object of class \code{\link{trackeRdata}}.}
\item{session}{A numeric vector of the sessions to be summarised, defaults to all sessions.}
\item{movingThreshold}{The threshold above which speed an athlete is considered moving (given in the unit of the speed measurements in \code{object}. If \code{NULL}, the default, the threshold corresponds to a slow walking speed (1 m/s, converted to another speed unit, if necessary). For reference, the preferred walking speed for humans is around 1.4 m/s (Bohannon, 1997).}
\item{...}{Currently not used.}
}
\value{
An object of class \code{trackeRdataSummary}.
}
\description{
Summary of training sessions.
}
\examples{
data('runs', package = 'trackeR')
runSummary <- summary(runs, session = 1:2)
## print summary
runSummary
print(runSummary, digits = 3)
## change units
changeUnits(runSummary, variable = 'speed', unit = 'km_per_h')
## plot summary
runSummaryFull <- summary(runs)
plot(runSummaryFull)
plot(runSummaryFull, group = c('total', 'moving'),
what = c('avgSpeed', 'distance', 'duration', 'avgHeartRate'))
}
\references{
Bohannon RW (1997). 'Comfortable and Maximum Walking Speed of Adults Aged 20--79 Years: Reference Values and Determinants.' Age and Ageing, 26(1), 15--19. doi: 10.1093/ageing/26.1.15.
}
\seealso{
\code{\link{plot.trackeRdataSummary}}
}
|
#' Simulate RNA-seq experiment
#'
#' create FASTA files containing RNA-seq reads simulated from provided
#' transcripts, with optional differential expression between two groups
#' (designated via read count matrix)
#' @param fasta path to FASTA file containing transcripts from which to simulate
#' reads. See details.
#' @param gtf path to GTF file containing transcript structures from which reads
#' should be simulated. See details.
#' @param seqpath path to folder containing one FASTA file (\code{.fa}
#' extension) for each chromosome in \code{gtf}. See details.
#' @param readmat matrix with rows representing transcripts and columns
#' representing samples. Entry i,j specifies how many reads to simulate from
#' transcript i for sample j.
#' @param outdir character, path to folder where simulated reads should be
#' written, without a slash at the end of the folder name. By default, reads
#' written to the working directory.
#' @param fraglen Mean RNA fragment length. Sequences will be read off the
#' end(s) of these fragments.
#' @param fragsd Standard deviation of fragment lengths.
#' @param readlen Read length
#' @param error_rate Sequencing error rate. Must be between 0 and 1. A uniform
#' error model is assumed.
#' @param error_model one of \code{'uniform'}, \code{'custom'},
#' \code{'illumina4'}, \code{'illumina5'}, or \code{'roche454'} specifying
#' which sequencing error model to use while generating reads. See
#' \code{?add_platform_error} for more information.
#' @param model_path If using a custom error model, the output folder you
#' provided to \code{build_error_model.py}. Should contain either two files
#' suffixed _mate1 and _mate2, or a file suffixed _single.
#' @param model_prefix If using a custom error model, the prefix argument you
#' provided to \code{build_error_model.py}. This is whatever comes before
#' _mate1 and _mate2 or _single files in \code{model_path}.
#' @param paired If \code{TRUE}, paired-end reads are simulated; else single-end
#' reads are simulated.
#' @param seed Optional seed to set before simulating reads, for
#' reproducibility.
#' @param ... Further arguments to pass to \code{seq_gtf}, if \code{gtf} is not
#' \code{NULL}.
#' @return No return, but simulated reads are written to \code{outdir}.
#' @export
#' @details Reads can either be simulated from a FASTA file of transcripts
#' (provided with the \code{fasta} argument) or from a GTF file plus DNA
#' sequences (provided with the \code{gtf} and \code{seqpath} arguments).
#' Simulating from a GTF file and DNA sequences may be a bit slower: it took
#' about 6 minutes to parse the GTF/sequence files for chromosomes 1-22,
#' X, and Y in hg19.
#' @examples \donttest{
#' fastapath = system.file("extdata", "chr22.fa", package="polyester")
#' numtx = count_transcripts(fastapath)
#' readmat = matrix(20, ncol=10, nrow=numtx)
#' readmat[1:30, 1:5] = 40
#'
#' simulate_experiment_countmat(fasta=fastapath,
#' readmat=readmat, outdir='simulated_reads_2', seed=5)
#'}
simulate_experiment_countmat = function(fasta=NULL, gtf=NULL, seqpath=NULL,
readmat, outdir=".", fraglen=250, fragsd=25, readlen=100, error_rate=0.005,
error_model='uniform', model_path=NULL, model_prefix=NULL, paired=TRUE,
seed=NULL, ...){
if(!is.null(seed)) set.seed(seed)
if(!is.null(fasta) & is.null(gtf) & is.null(seqpath)){
transcripts = readDNAStringSet(fasta)
}else if(is.null(fasta) & !is.null(gtf) & !is.null(seqpath)){
message('parsing gtf and sequences...')
transcripts = seq_gtf(gtf, seqpath, ...)
message('done parsing')
}else{
stop('must provide either fasta or both gtf and seqpath')
}
# is.matrix() is the robust test: in R >= 4.0 class(matrix) is
# c("matrix", "array"), so `class(x) == 'matrix'` yields a length-2 logical
# and stopifnot() would reject every valid matrix.
stopifnot(is.matrix(readmat))
stopifnot(nrow(readmat) == length(transcripts))
# check error model
error_model = match.arg(error_model, c('uniform', 'illumina4', 'illumina5',
'roche454', 'custom'))
if(error_model == 'uniform'){
stopifnot(error_rate >= 0 & error_rate <= 1)
}
if(error_model == 'custom'){
if(is.null(model_path) | is.null(model_prefix)){
stop(.makepretty('with custom error models, you must provide both
the path to the folder that holds your error model
(model_path) and the prefix of your error model (model_prefix),
where the prefix is whatever comes before _mate1 and _mate2
(for paired reads) or _single (for single-end reads). (You
provided prefix when running build_error_models.py)'))
}
if(paired){
if(!file.exists(paste0(model_path, '/', model_prefix, '_mate1')) |
!file.exists(paste0(model_path, '/', model_prefix, '_mate2'))){
stop('could not find error model.')
}
}else if(!file.exists(paste0(model_path, '/', model_prefix, '_single'))){
stop('could not find error model.')
}
path = paste0(model_path, '/', model_prefix)
}
if(error_model == 'roche454' & paired){
stop(.makepretty('The Roche 454 error model is only available for
single-end reads'))
}
sysoutdir = gsub(' ', '\\\\ ', outdir)
if(.Platform$OS.type == 'windows'){
shell(paste('mkdir', sysoutdir))
}else{
system(paste('mkdir -p', sysoutdir))
}
for(i in 1:ncol(readmat)){
tObj = rep(transcripts, times=readmat[,i])
#get fragments
tFrags = generate_fragments(tObj, fraglen=fraglen, fragsd=fragsd)
#reverse_complement some of those fragments
rctFrags = reverse_complement(tFrags)
#get reads from fragments
reads = get_reads(rctFrags, readlen, paired)
#add sequencing error
if(error_model == 'uniform'){
errReads = add_error(reads, error_rate)
}else if(error_model == 'custom'){
errReads = add_platform_error(reads, 'custom', paired, path)
}else{
errReads = add_platform_error(reads, error_model, paired)
}
#write read pairs
write_reads(errReads, readlen=readlen,
fname=paste0(outdir, '/sample_', sprintf('%02d', i)),
paired=paired)
}
} | /R/simulate_experiment_countmat.R | no_license | xguse/polyester | R | false | false | 6,321 | r | #' Simulate RNA-seq experiment
#'
#' create FASTA files containing RNA-seq reads simulated from provided
#' transcripts, with optional differential expression between two groups
#' (designated via read count matrix)
#' @param fasta path to FASTA file containing transcripts from which to simulate
#' reads. See details.
#' @param gtf path to GTF file containing transcript structures from which reads
#' should be simulated. See details.
#' @param seqpath path to folder containing one FASTA file (\code{.fa}
#' extension) for each chromosome in \code{gtf}. See details.
#' @param readmat matrix with rows representing transcripts and columns
#' representing samples. Entry i,j specifies how many reads to simulate from
#' transcript i for sample j.
#' @param outdir character, path to folder where simulated reads should be
#' written, without a slash at the end of the folder name. By default, reads
#' written to the working directory.
#' @param fraglen Mean RNA fragment length. Sequences will be read off the
#' end(s) of these fragments.
#' @param fragsd Standard deviation of fragment lengths.
#' @param readlen Read length
#' @param error_rate Sequencing error rate. Must be between 0 and 1. A uniform
#' error model is assumed.
#' @param error_model one of \code{'uniform'}, \code{'custom'},
#' \code{'illumina4'}, \code{'illumina5'}, or \code{'roche454'} specifying
#' which sequencing error model to use while generating reads. See
#' \code{?add_platform_error} for more information.
#' @param model_path If using a custom error model, the output folder you
#' provided to \code{build_error_model.py}. Should contain either two files
#' suffixed _mate1 and _mate2, or a file suffixed _single.
#' @param model_prefix If using a custom error model, the prefix argument you
#' provided to \code{build_error_model.py}. This is whatever comes before
#' _mate1 and _mate2 or _single files in \code{model_path}.
#' @param paired If \code{TRUE}, paired-end reads are simulated; else single-end
#' reads are simulated.
#' @param seed Optional seed to set before simulating reads, for
#' reproducibility.
#' @param ... Further arguments to pass to \code{seq_gtf}, if \code{gtf} is not
#' \code{NULL}.
#' @return No return, but simulated reads are written to \code{outdir}.
#' @export
#' @details Reads can either be simulated from a FASTA file of transcripts
#' (provided with the \code{fasta} argument) or from a GTF file plus DNA
#' sequences (provided with the \code{gtf} and \code{seqpath} arguments).
#' Simulating from a GTF file and DNA sequences may be a bit slower: it took
#' about 6 minutes to parse the GTF/sequence files for chromosomes 1-22,
#' X, and Y in hg19.
#' @examples \donttest{
#' fastapath = system.file("extdata", "chr22.fa", package="polyester")
#' numtx = count_transcripts(fastapath)
#' readmat = matrix(20, ncol=10, nrow=numtx)
#' readmat[1:30, 1:5] = 40
#'
#' simulate_experiment_countmat(fasta=fastapath,
#' readmat=readmat, outdir='simulated_reads_2', seed=5)
#'}
simulate_experiment_countmat = function(fasta=NULL, gtf=NULL, seqpath=NULL,
    readmat, outdir=".", fraglen=250, fragsd=25, readlen=100, error_rate=0.005,
    error_model='uniform', model_path=NULL, model_prefix=NULL, paired=TRUE,
    seed=NULL, ...){
    # Optional seed makes fragment generation, read selection and error
    # injection reproducible.
    if(!is.null(seed)) set.seed(seed)
    # Transcript sequences come from exactly one of two routes: a transcript
    # FASTA, or a GTF plus a folder of per-chromosome sequences.
    if(!is.null(fasta) & is.null(gtf) & is.null(seqpath)){
        transcripts = readDNAStringSet(fasta)
    }else if(is.null(fasta) & !is.null(gtf) & !is.null(seqpath)){
        message('parsing gtf and sequences...')
        transcripts = seq_gtf(gtf, seqpath, ...)
        message('done parsing')
    }else{
        stop('must provide either fasta or both gtf and seqpath')
    }
    # is.matrix() rather than class(x) == 'matrix': in R >= 4.0 a matrix has
    # class c("matrix", "array"), so the old equality test is a length-2
    # logical and stopifnot() rejected every valid input.
    stopifnot(is.matrix(readmat))
    stopifnot(nrow(readmat) == length(transcripts))
    # check error model
    error_model = match.arg(error_model, c('uniform', 'illumina4', 'illumina5',
        'roche454', 'custom'))
    if(error_model == 'uniform'){
        stopifnot(error_rate >= 0 & error_rate <= 1)
    }
    if(error_model == 'custom'){
        # A custom model needs both the folder and the file prefix produced by
        # build_error_models.py; verify the expected model files exist up front.
        if(is.null(model_path) | is.null(model_prefix)){
            stop(.makepretty('with custom error models, you must provide both
                the path to the folder that holds your error model
                (model_path) and the prefix of your error model (model_prefix),
                where the prefix is whatever comes before _mate1 and _mate2
                (for paired reads) or _single (for single-end reads). (You
                provided prefix when running build_error_models.py)'))
        }
        if(paired){
            if(!file.exists(paste0(model_path, '/', model_prefix, '_mate1')) |
                !file.exists(paste0(model_path, '/', model_prefix, '_mate2'))){
                stop('could not find error model.')
            }
        }else if(!file.exists(paste0(model_path, '/', model_prefix, '_single'))){
            stop('could not find error model.')
        }
        path = paste0(model_path, '/', model_prefix)
    }
    if(error_model == 'roche454' & paired){
        stop(.makepretty('The Roche 454 error model is only available for
            single-end reads'))
    }
    # Escape spaces before handing the path to the shell, then create outdir.
    sysoutdir = gsub(' ', '\\\\ ', outdir)
    if(.Platform$OS.type == 'windows'){
        shell(paste('mkdir', sysoutdir))
    }else{
        system(paste('mkdir -p', sysoutdir))
    }
    # One FASTA output per column/sample: replicate each transcript by its
    # requested count, fragment, flip strands, read off ends, add errors, write.
    for(i in seq_len(ncol(readmat))){
        tObj = rep(transcripts, times=readmat[,i])
        #get fragments
        tFrags = generate_fragments(tObj, fraglen=fraglen, fragsd=fragsd)
        #reverse_complement some of those fragments
        rctFrags = reverse_complement(tFrags)
        #get reads from fragments
        reads = get_reads(rctFrags, readlen, paired)
        #add sequencing error
        if(error_model == 'uniform'){
            errReads = add_error(reads, error_rate)
        }else if(error_model == 'custom'){
            errReads = add_platform_error(reads, 'custom', paired, path)
        }else{
            errReads = add_platform_error(reads, error_model, paired)
        }
        #write read pairs
        write_reads(errReads, readlen=readlen,
            fname=paste0(outdir, '/sample_', sprintf('%02d', i)),
            paired=paired)
    }
}
#Smoke tests: each fairness metric should run without error on the compas
#data when given a vector of predicted values (preds) instead of probabilities.
context("Predicted_vector")
library(fairness)
data("compas")
predvec <- compas$predicted
#expect_error(expr, NA) is testthat's idiom for "expect NO error to be raised"
test_that("no errors in acc_parity", {
expect_error(acc_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in dem_parity", {
expect_error(dem_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec,
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in equal_odds", {
expect_error(equal_odds(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in fnr_parity", {
expect_error(fnr_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in fpr_parity", {
expect_error(fpr_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in mcc_parity", {
expect_error(mcc_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in npv_parity", {
expect_error(npv_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in pred_rate_parity", {
expect_error(pred_rate_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in prop_parity", {
expect_error(prop_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec,
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in spec_parity", {
expect_error(spec_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
| /tests/testthat/test.preds-vec.R | permissive | minghao2016/fairness | R | false | false | 2,940 | r | context("Predicted_vector")
library(fairness)
data("compas")
predvec <- compas$predicted
test_that("no errors in acc_parity", {
expect_error(acc_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in dem_parity", {
expect_error(dem_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec,
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in equal_odds", {
expect_error(equal_odds(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in fnr_parity", {
expect_error(fnr_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in fpr_parity", {
expect_error(fpr_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in mcc_parity", {
expect_error(mcc_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in npv_parity", {
expect_error(npv_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in pred_rate_parity", {
expect_error(pred_rate_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in prop_parity", {
expect_error(prop_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec,
cutoff = 0.5, base = "Caucasian"), NA)})
test_that("no errors in spec_parity", {
expect_error(spec_parity(data = compas, outcome = "Two_yr_Recidivism", group = "ethnicity",
probs = NULL, preds = predvec, preds_levels = c("no", "yes"),
cutoff = 0.5, base = "Caucasian"), NA)})
|
library(gapminder)
library(plyr)
library(dplyr)
library(ggplot2)
#modelling life expectancy as a function of year
#create a new function for the model
#fit a straight line of life expectancy against years elapsed since `offset`
#and return a one-row data frame with named intercept/slope columns
le_lin_fit <- function(dat, offset = 1952) {
  fit <- lm(lifeExp ~ I(year - offset), data = dat)
  out <- as.data.frame(t(coef(fit)))
  setNames(out, c("intercept", "slope"))
}
gapminder %>% filter(country == "Canada") %>%
le_lin_fit()
#conducting this function onto every country in an elegant way
#using dplyr
gcoefs <- gapminder %>%
group_by(country, continent) %>%
do(le_lin_fit(.)) %>%
ungroup()
gcoefs
#using plyr
gcoefs2 <- ddply(gapminder, ~ country + continent, le_lin_fit)
gcoefs2
#learning the factors of the newly created data frame
str(gcoefs, give.attr = FALSE)
levels(gcoefs$country)
head(gcoefs$country)
#the order of factors matter
ggplot(gcoefs, aes(x = slope, y = country)) + geom_point() #data puke; aka not useful to look at
ggplot(gcoefs, aes(x = slope, y = reorder(country, slope))) + geom_point() #easier to understand the data
#ordering numeric class vs. ordering factors
post_arrange <- gcoefs %>% arrange(slope)
post_reorder <- gcoefs %>%
mutate(country = reorder(country, slope))
post_both <- gcoefs %>% mutate(country = reorder(country, slope)) %>%
arrange(country)
ggplot(post_arrange, aes(x = slope, y = country)) + geom_point() #the dataframe was arranged by slope, but the reordering of numeric data does not allow for graphing functions to understand
ggplot(post_reorder, aes(x = slope, y = country)) + geom_point() #the reordering of factors did not make a difference in the visualization of the dataframe, but the graphing functions understood the change in factor order
post_reorder$country %>% levels #show the change in the factor order by slope
ggplot(post_both, aes(x = slope, y = country)) + geom_point() #ordered the dataframe and graphing thus allowing for both visualization methods to show meaningful display of slope ~ country
#dropping unused factors
h_countries <- c("Egypt", "Haiti", "Romania", "Thailand", "Venezuela")
hDat <- gapminder %>% filter(country %in% h_countries)
hDat %>% str
#notice how the country factor still displays all the old factors that are now not present due to the previous filtering step
#this may affect downstream analysis as these factors are still recognized
table(hDat$country)
levels(hDat$country)
nlevels(hDat$country)
#to remove these factors
iDat <- hDat %>% droplevels()
iDat %>% str
table(iDat$country)
levels(iDat$country)
nlevels(iDat$country)
#reordering factor levels revisted
i_le_max <- iDat %>%
group_by(country) %>%
summarize(max_le = max(lifeExp))
i_le_max
ggplot(i_le_max, aes(x = country, y = max_le, group = 1)) +
geom_path() + geom_point()
ggplot(iDat, aes(x = year, y = lifeExp, group = country)) +
geom_line(aes(color = country))
jDat <- iDat %>%
mutate(country = reorder(country, lifeExp, max))
data.frame(before = levels(iDat$country), after = levels(jDat$country))
j_le_max <- jDat %>%
group_by(country) %>%
summarize(max_le = max(lifeExp))
j_le_max
ggplot(j_le_max, aes(x = country, y = max_le, group = 1)) +
geom_path() + geom_point()
ggplot(jDat, aes(x = year, y = lifeExp)) +
geom_line(aes(color = country)) +
guides(color = guide_legend(reverse = TRUE))
#reordering continent
head(gcoefs)
ggplot(gcoefs, aes(x = continent, y = intercept)) +
geom_jitter(width = 0.25)
newgcoefs <- gcoefs %>% mutate(continent = reorder(continent, intercept, mean))
ggplot(newgcoefs, aes(x = continent, y = intercept)) +
geom_jitter(width = 0.25)
#recoding factor values
k_countries <- c("Australia", "Korea, Dem. Rep.", "Korea, Rep.")
kDat <- gapminder %>%
filter(country %in% k_countries, year > 2000) %>%
droplevels()
kDat
levels(kDat$country)
kDat <- kDat %>%
mutate(new_country = revalue(country, c("Australia" = "Oz",
"Korea, Dem. Rep." = "North Korea",
"Korea, Rep." = "South Korea")))
data.frame(levels(kDat$country), levels(kDat$new_country))
#combining tables and growing factors together
#best approach is to use rbind
usa <- gapminder %>%
filter(country == "United States", year > 2000) %>%
droplevels()
mex <- gapminder %>%
filter(country == "Mexico", year > 2000) %>%
droplevels()
str(usa) #only a single level for country
str(mex)
usa_mex <- rbind(usa, mex) #combining the two dataframes into one
str(usa_mex) #now 2 factors in the dataframe
#avoid using the concatenate function c() to combine factors
(nono <- c(usa$country, mex$country)) #not the output we want
#you may want to use this roundabout way
#added the missing closing ")" -- the original line was a syntax error;
#wrapping the assignment in "(...)" makes the result auto-print
(maybe <- factor(c(levels(usa$country)[usa$country], levels(mex$country)[mex$country])))
#if you are combining factors of different levels, first convert to character, combine, and then reconvert to factors
gapminder$continent <- as.character(gapminder$continent)
str(gapminder)
head(gapminder)
gapminder$continent <- factor(gapminder$continent)
str(gapminder)
head(gapminder)
| /factors-practice.R | no_license | louiekenny/learning_450k | R | false | false | 5,195 | r | library(gapminder)
library(plyr)
library(dplyr)
library(ggplot2)
#modelling life expectancy as a function of year
#create a new function for the model
le_lin_fit <- function(dat, offset = 1952) {
the_fit <- lm(lifeExp ~ I(year - offset), dat)
setNames(data.frame(t(coef(the_fit))), c("intercept", "slope"))
} #this function will only work if the dataset inputted has the specified variables (aka for the gapminder dataset)
gapminder %>% filter(country == "Canada") %>%
le_lin_fit()
#conducting this function onto every country in an elegant way
#using dplyr
gcoefs <- gapminder %>%
group_by(country, continent) %>%
do(le_lin_fit(.)) %>%
ungroup()
gcoefs
#using plyr
gcoefs2 <- ddply(gapminder, ~ country + continent, le_lin_fit)
gcoefs2
#learning the factors of the newly created data frame
str(gcoefs, give.attr = FALSE)
levels(gcoefs$country)
head(gcoefs$country)
#the order of factors matter
ggplot(gcoefs, aes(x = slope, y = country)) + geom_point() #data puke; aka not useful to look at
ggplot(gcoefs, aes(x = slope, y = reorder(country, slope))) + geom_point() #easier to understand the data
#ordering numeric class vs. ordering factors
post_arrange <- gcoefs %>% arrange(slope)
post_reorder <- gcoefs %>%
mutate(country = reorder(country, slope))
post_both <- gcoefs %>% mutate(country = reorder(country, slope)) %>%
arrange(country)
ggplot(post_arrange, aes(x = slope, y = country)) + geom_point() #the dataframe was arranged by slope, but the reordering of numeric data does not allow for graphing functions to understand
ggplot(post_reorder, aes(x = slope, y = country)) + geom_point() #the reordering of factors did not make a difference in the visualization of the dataframe, but the graphing functions understood the change in factor order
post_reorder$country %>% levels #show the change in the factor order by slope
ggplot(post_both, aes(x = slope, y = country)) + geom_point() #ordered the dataframe and graphing thus allowing for both visualization methods to show meaningful display of slope ~ country
#dropping unused factors
h_countries <- c("Egypt", "Haiti", "Romania", "Thailand", "Venezuela")
hDat <- gapminder %>% filter(country %in% h_countries)
hDat %>% str
#notice how the country factor still displays all the old factors that are now not present due to the previous filtering step
#this may affect downstream analysis as these factors are still recognized
table(hDat$country)
levels(hDat$country)
nlevels(hDat$country)
#to remove these factors
iDat <- hDat %>% droplevels()
iDat %>% str
table(iDat$country)
levels(iDat$country)
nlevels(iDat$country)
#reordering factor levels revisted
i_le_max <- iDat %>%
group_by(country) %>%
summarize(max_le = max(lifeExp))
i_le_max
ggplot(i_le_max, aes(x = country, y = max_le, group = 1)) +
geom_path() + geom_point()
ggplot(iDat, aes(x = year, y = lifeExp, group = country)) +
geom_line(aes(color = country))
jDat <- iDat %>%
mutate(country = reorder(country, lifeExp, max))
data.frame(before = levels(iDat$country), after = levels(jDat$country))
j_le_max <- jDat %>%
group_by(country) %>%
summarize(max_le = max(lifeExp))
j_le_max
ggplot(j_le_max, aes(x = country, y = max_le, group = 1)) +
geom_path() + geom_point()
ggplot(jDat, aes(x = year, y = lifeExp)) +
geom_line(aes(color = country)) +
guides(color = guide_legend(reverse = TRUE))
#reordering continent
head(gcoefs)
ggplot(gcoefs, aes(x = continent, y = intercept)) +
geom_jitter(width = 0.25)
newgcoefs <- gcoefs %>% mutate(continent = reorder(continent, intercept, mean))
ggplot(newgcoefs, aes(x = continent, y = intercept)) +
geom_jitter(width = 0.25)
#recoding factor values
k_countries <- c("Australia", "Korea, Dem. Rep.", "Korea, Rep.")
kDat <- gapminder %>%
filter(country %in% k_countries, year > 2000) %>%
droplevels()
kDat
levels(kDat$country)
kDat <- kDat %>%
mutate(new_country = revalue(country, c("Australia" = "Oz",
"Korea, Dem. Rep." = "North Korea",
"Korea, Rep." = "South Korea")))
data.frame(levels(kDat$country), levels(kDat$new_country))
#combinind tables and growing factors together
#best approach is to use rbind
usa <- gapminder %>%
filter(country == "United States", year > 2000) %>%
droplevels()
mex <- gapminder %>%
filter(country == "Mexico", year > 2000) %>%
droplevels()
str(usa) #only a single level for country
str(mex)
usa_mex <- rbind(usa, mex) #combining the two dataframes into one
str(usa_mex) #now 2 factors in the dataframe
#avoid using the concatenate function c() to combine factors
(nono <- c(usa$country, mex$country)) #not the output we want
#you may want to use this roundabout way
(maybe <- factor(c(levels(usa$country)[usa$country], levels(mex$country)[mex$country]))) # fixed: original line was missing a closing ")" and did not parse
#if you are combining factors of different levels, first convert to character, combine, and then reconvert to factors
gapminder$continent <- as.character(gapminder$continent)
str(gapminder)
head(gapminder)
gapminder$continent <- factor(gapminder$continent)
str(gapminder)
head(gapminder)
|
#The function to check whether input probability is in correct form.
check_prob <- function(prob) {
  # Validate that `prob` is a single numeric value in [0, 1].
  # Returns TRUE on success, otherwise stops with an informative error.
  if (length(prob) != 1) stop('the length of p should be 1')
  # Guard against non-numeric input, which previously produced an obscure
  # comparison error instead of a clear message.
  if (!is.numeric(prob)) stop('p has to be a number between 0 and 1')
  if (prob >= 0 && prob <= 1) return(TRUE)
  stop('p has to be a number between 0 and 1')
}
#The function to check whether input trials is in correct form.
check_trials <- function(trials) {
  # Validate that `trials` is a single non-negative integer-valued number.
  # Returns TRUE on success, otherwise stops with an informative error.
  if (length(trials) != 1) {
    stop('the length of trials value should be 1')
  }
  if (!is.numeric(trials)) {
    stop('trials value should be a non-negative integer')
  }
  if (round(trials) != trials || trials < 0) {
    stop('trials value should be a non-negative integer')
  }
  TRUE
}
#The function to check whether input success vector is in correct form.
check_success <- function(success, trials) {
  # Validate that `success` is a numeric vector of integer-valued counts,
  # each within [0, trials]. Returns TRUE on success, otherwise stops.
  # Fixed: the non-vector branch previously evaluated the message string
  # without stop(), silently returning it instead of raising an error.
  if (!is.vector(success)) stop('success should be a vector')
  if (!is.numeric(success)) stop('invalid success value')
  if (all(round(success) == success) && all(success >= 0 & success <= trials)) {
    return(TRUE)
  }
  stop('invalid success value')
}
#the mean of the certain binomial distribution
# Mean of a binomial distribution: n * p.
aux_mean <- function(trials, prob) {
  trials * prob
}
#the variance of the certain binomial distribution
# Variance of a binomial distribution: n * p * (1 - p).
aux_variance <- function(trials, prob) {
  trials * prob * (1 - prob)
}
#the mode of the certain binomial distribution
# Mode of a binomial distribution, based on m = (n + 1) * p:
# when m is an integer there are two modes, m and m - 1; otherwise floor(m).
aux_mode <- function(trials, prob) {
  m <- trials * prob + prob  # equals (n + 1) * p
  if (round(m) == m) {
    c(m, m - 1)
  } else {
    floor(m)
  }
}
#the skewness of the certain binomial distribution
# Skewness of a binomial distribution: (1 - 2p) / sqrt(np(1 - p)).
# Degenerate cases p = 0 or p = 1 yield the string 'Undefined'
# (preserved from the original contract; note the mixed return type).
aux_skewness <- function(trials, prob) {
  if (prob == 0 || prob == 1) {
    return('Undefined')
  }
  (1 - 2 * prob) / sqrt(trials * prob * (1 - prob))
}
#the kurtosis of the certain binomial distribution
# Excess kurtosis of a binomial distribution: (1 - 6p(1 - p)) / (np(1 - p)).
# Degenerate cases p = 0 or p = 1 yield the string 'Undefined'
# (preserved from the original contract; note the mixed return type).
aux_kurtosis <- function(trials, prob) {
  if (prob == 0 || prob == 1) {
    return('Undefined')
  }
  (1 - 6 * prob * (1 - prob)) / (trials * prob * (1 - prob))
}
#'@title Binomial Choose
#'@description function to calculate the combinatorial number
#'@param n the number of trials
#'@param k the list of numbers of success
#'@return the number of combinations
#'@export
#'@examples
#'bin_choose(n=5,k=2)
#'bin_choose(5,0)
#'bin_choose(5,1:3)
bin_choose <- function(n, k) {
  # Validate inputs: numeric, integer-valued, 0 <= k <= n (k may be a vector).
  if (!(all(is.numeric(k)) && is.numeric(n))) stop('invalid input')
  if (!(all(k %% 1 == 0) && (n %% 1 == 0))) stop('invalid input')
  if (any(k > n)) stop('invalid input')
  if (n < 0 || any(k < 0)) stop('invalid input')
  # Use base choose(): the previous factorial-based formula overflows to Inf
  # for n > 170 and returned NaN; choose() computes C(n, k) stably.
  choose(n, k)
}
#'@title Binomial Probability
#'@description function to calculate the probability of binomial random variable
#'@param success the list of numbers of success
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the list that contains the probabilities of success given trials and success
#'@export
#'@examples
#'bin_probability(success = 2, trials = 5, prob = 0.5)
#'bin_probability(success = 0:2, trials = 5, prob = 0.5)
#'bin_probability(success = 55, trials = 100, prob = 0.45)
bin_probability <- function(success, trials, prob) {
  # Validate all three inputs before computing; each checker stops on error.
  check_trials(trials)
  check_prob(prob)
  check_success(success, trials)
  # Binomial pmf: C(n, k) * p^k * (1 - p)^(n - k), vectorized over `success`.
  combinations <- bin_choose(trials, success)
  combinations * prob^success * (1 - prob)^(trials - success)
}
#'@title Binomial Distribution
#'@description function to calculate the distribution of binomial random variable
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return a data frame with the probability distribution
#'@export
#'@examples
#'bin_distribution(trials = 5, prob = 0.5)
bin_distribution <- function(trials, prob) {
  # Full pmf over all possible success counts 0..trials.
  successes <- 0:trials
  bd <- data.frame(
    success = successes,
    probability = bin_probability(successes, trials, prob)
  )
  # Tag with 'bindis' so plot.bindis() dispatches on the result.
  class(bd) <- c('bindis', 'data.frame')
  bd
}
#To plot the barplot of the distribution of the certain binomial distribution
#'@export
plot.bindis <- function(bindis, ...) {
  # Fixed: aes() and geom_col() were unqualified, which fails unless ggplot2
  # is attached (the file only guarantees ggplot2:: via namespace). Columns
  # are now mapped by name so ggplot2 evaluates them in the data context,
  # instead of the bindis$... anti-pattern. `...` added for S3 consistency.
  ggplot2::ggplot(bindis, ggplot2::aes(x = success, y = probability)) +
    ggplot2::geom_col()
}
#'@title Binomial Cumulative
#'@description function to calculate the cumulative of binomial random variable
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return data frame with both the probability distribution and the cumulative probabilities
#'@export
#'@examples
#'bin_cumulative(trials = 5, prob = 0.5)
bin_cumulative <- function(trials, prob) {
  # pmf over 0..trials, computed once (the original called bin_probability
  # twice and built the cumulative column with a manual running-sum loop).
  probs <- bin_probability(0:trials, trials, prob)
  bc <- data.frame(
    success = 0:trials,
    probability = probs,
    cumulative = cumsum(probs)  # vectorized replacement for the loop
  )
  # Tag with 'bincum' so plot.bincum() dispatches on the result.
  class(bc) <- c("bincum", "data.frame")
  bc
}
#To plot the barplot of the distribution and cumulative probability of the certain binomial distribution
#'@export
plot.bincum <- function(bincum, ...) {
  # Fixed: aes(), geom_line() and geom_point() were unqualified, which fails
  # unless ggplot2 is attached. Columns are mapped by name instead of the
  # bincum$... anti-pattern. `...` added for S3 consistency.
  ggplot2::ggplot(bincum, ggplot2::aes(x = success, y = cumulative)) +
    ggplot2::geom_line(color = 'blue') +
    ggplot2::geom_point(color = 'blue', size = 1.5)
}
#'@title Binomial Variable
#'@description function to generate a binomial variable with class binvar
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return a binomial random variable object
#'@export
#'@examples
#'bin_variable(trials=5,prob=0.5)
bin_variable <- function(trials, prob) {
  # Validate parameters first; each checker stops on invalid input.
  check_prob(prob)
  check_trials(trials)
  # A 'binvar' object is simply the validated parameter pair.
  structure(list(trials = trials, prob = prob), class = 'binvar')
}
#To print the basic information of the binomial variable
#'@export
# Print method for 'binvar' objects: writes the distribution parameters to
# the console and returns the object invisibly (base R print() convention).
print.binvar<-function(binvar){
# Escaped quotes reproduce a quoted title line in the console output.
cat("\"Binomial Variable\"\n\n")
cat('Parameter\n')
# binvar[[1]] = trials, binvar[[2]] = prob (positions set by bin_variable()).
cat(paste('- number of trials:',binvar[[1]],'\n'))
cat(paste('- prob of success :',binvar[[2]],'\n'))
invisible(binvar)
}
#To summary the certain binomial distribution
#'@export
summary.binvar <- function(binvar) {
  # Collect the descriptive statistics of the binomial variable into a
  # 'summary.binvar' object (printed by print.summary.binvar()).
  trials <- binvar[[1]]
  prob <- binvar[[2]]
  res <- list(
    trials = trials,
    prob = prob,
    mean = aux_mean(trials, prob),
    variance = aux_variance(trials, prob),
    mode = aux_mode(trials, prob),
    skewness = aux_skewness(trials, prob),
    kurtosis = aux_kurtosis(trials, prob)
  )
  structure(res, class = 'summary.binvar')
}
#To print some statisics of the certain binomial distribution
#'@export
# Print method for 'summary.binvar' objects: writes parameters and summary
# statistics to the console; returns the object invisibly.
print.summary.binvar<-function(binvar){
cat("\"Summary Binomial\"\n\n")
cat('Parameter\n')
# NOTE(review): on the next two lines '\n' is a separate cat() argument
# (outside paste()), so cat inserts a space before the newline — slightly
# different spacing than the Measure lines below; confirm if intentional.
cat(paste('- number of trials:',binvar$trials),'\n')
cat(paste('- prob of success :',binvar$prob),'\n\n')
cat('Measure\n')
cat(paste('- mean :',binvar$mean,'\n'))
cat(paste('- variance:',binvar$variance,'\n'))
# If (n+1)p is an integer, binvar$mode has length 2 and paste() vectorizes,
# producing one line per mode value.
cat(paste('- mode :',binvar$mode,'\n'))
cat(paste('- skewness:',binvar$skewness,'\n'))
cat(paste('- kurtosis:',binvar$kurtosis,'\n'))
invisible(binvar)
}
#'@title Binomial Mean
#'@description function to calculate the mean of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the mean of the binomial distribution
#'@export
#'@examples
#'bin_mean(10, 0.3)
bin_mean <- function(trials, prob) {
  # Validate, then delegate to the aux helper.
  check_trials(trials)
  check_prob(prob)
  aux_mean(trials, prob)
}
#'@title Binomial Variance
#'@description function to calculate the variance of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the variance of the binomial distribution
#'@export
#'@examples
#'bin_variance(10, 0.3)
bin_variance <- function(trials, prob) {
  # Validate, then delegate to the aux helper.
  check_trials(trials)
  check_prob(prob)
  aux_variance(trials, prob)
}
#'@title Binomial Mode
#'@description function to calculate the mode of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the mode of the binomial distribution
#'@export
#'@examples
#'bin_mode(10, 0.3)
bin_mode <- function(trials, prob) {
  # Validate, then delegate to the aux helper.
  check_trials(trials)
  check_prob(prob)
  aux_mode(trials, prob)
}
#'@title Binomial Skewness
#'@description function to calculate the skewness of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the skewness of the binomial distribution
#'@export
#'@examples
#'bin_skewness(10, 0.3)
bin_skewness <- function(trials, prob) {
  # Validate, then delegate to the aux helper.
  check_trials(trials)
  check_prob(prob)
  aux_skewness(trials, prob)
}
#'@title Binomial Kurtosis
#'@description function to calculate the kurtosis of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the kurtosis of the binomial distribution
#'@export
#'@examples
#'bin_kurtosis(10, 0.3)
bin_kurtosis <- function(trials, prob) {
  # Validate, then delegate to the aux helper.
  check_trials(trials)
  check_prob(prob)
  aux_kurtosis(trials, prob)
}
| /binomial/binomial/R/workout3_Zimeng_Zheng.R | no_license | stat133-sp19/hw-stat133-zzm1610133 | R | false | false | 8,611 | r | #The function to check whether input probability is in correct form.
check_prob <- function(prob) {
  # Validate that `prob` is a single numeric value in [0, 1].
  # Returns TRUE on success, otherwise stops with an informative error.
  if (length(prob) != 1) stop('the length of p should be 1')
  # Guard against non-numeric input, which previously produced an obscure
  # comparison error instead of a clear message.
  if (!is.numeric(prob)) stop('p has to be a number between 0 and 1')
  if (prob >= 0 && prob <= 1) return(TRUE)
  stop('p has to be a number between 0 and 1')
}
#The function to check whether input trials is in correct form.
check_trials<-function(trials){
if(length(trials)==1){
if(is.numeric(trials)){
if(round(trials)==trials&trials>=0) return(TRUE)
else stop('trials value should be a non-negative integer')
}
else stop('trials value should be a non-negative integer')
}
else stop('the length of trials value should be 1')
}
#The function to check whether input success vector is in correct form.
check_success <- function(success, trials) {
  # Validate that `success` is a numeric vector of integer-valued counts,
  # each within [0, trials]. Returns TRUE on success, otherwise stops.
  # Fixed: the non-vector branch previously evaluated the message string
  # without stop(), silently returning it instead of raising an error.
  if (!is.vector(success)) stop('success should be a vector')
  if (!is.numeric(success)) stop('invalid success value')
  if (all(round(success) == success) && all(success >= 0 & success <= trials)) {
    return(TRUE)
  }
  stop('invalid success value')
}
#the mean of the certain binomial distribution
aux_mean<-function(trials,prob){
return(trials*prob)
}
#the variance of the certain binomial distribution
aux_variance<-function(trials,prob){
return(trials*prob*(1-prob))
}
#the mode of the certain binomial distribution
aux_mode<-function(trials,prob){
if(round(trials*prob+prob)==trials*prob+prob) return(c((trials*prob+prob),(trials*prob+prob)-1))
else return(floor(trials * prob + prob))
}
#the skewness of the certain binomial distribution
aux_skewness<-function(trials,prob){
if(prob==0|prob==1) return('Undefined')
else return((1-2*prob)/sqrt(trials*prob*(1-prob)))
}
#the kurtosis of the certain binomial distribution
aux_kurtosis<-function(trials,prob){
if(prob==0|prob==1) return('Undefined')
else return((1-6*prob*(1-prob))/(trials*prob*(1-prob)))
}
#'@title Binomial Choose
#'@description function to calculate the combinatorial number
#'@param n the number of trials
#'@param k the list of numbers of success
#'@return the number of combinations
#'@export
#'@examples
#'bin_choose(n=5,k=2)
#'bin_choose(5,0)
#'bin_choose(5,1:3)
bin_choose <- function(n, k) {
  # Validate inputs: numeric, integer-valued, 0 <= k <= n (k may be a vector).
  if (!(all(is.numeric(k)) && is.numeric(n))) stop('invalid input')
  if (!(all(k %% 1 == 0) && (n %% 1 == 0))) stop('invalid input')
  if (any(k > n)) stop('invalid input')
  if (n < 0 || any(k < 0)) stop('invalid input')
  # Use base choose(): the previous factorial-based formula overflows to Inf
  # for n > 170 and returned NaN; choose() computes C(n, k) stably.
  choose(n, k)
}
#'@title Binomial Probability
#'@description function to calculate the probability of binomial random variable
#'@param success the list of numbers of success
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the list that contains the probabilities of success given trials and success
#'@export
#'@examples
#'bin_probability(success = 2, trials = 5, prob = 0.5)
#'bin_probability(success = 0:2, trials = 5, prob = 0.5)
#'bin_probability(success = 55, trials = 100, prob = 0.45)
bin_probability<-function(success,trials,prob){
check_trials(trials)
check_prob(prob)
check_success(success, trials)
return(bin_choose(trials,success)*prob^success*(1-prob)^(trials-success))
}
#'@title Binomial Distribution
#'@description function to calculate the distribution of binomial random variable
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return a data frame with the probability distribution
#'@export
#'@examples
#'bin_distribution(trials = 5, prob = 0.5)
bin_distribution<-function(trials,prob){
bd<-data.frame(success=0:trials,probability=bin_probability(0:trials,trials,prob))
bd<-structure(bd,class=c('bindis','data.frame'))
return(bd)
}
#To plot the barplot of the distribution of the certain binomial distribution
#'@export
plot.bindis <- function(bindis, ...) {
  # Fixed: aes() and geom_col() were unqualified, which fails unless ggplot2
  # is attached (the file only guarantees ggplot2:: via namespace). Columns
  # are now mapped by name so ggplot2 evaluates them in the data context,
  # instead of the bindis$... anti-pattern. `...` added for S3 consistency.
  ggplot2::ggplot(bindis, ggplot2::aes(x = success, y = probability)) +
    ggplot2::geom_col()
}
#'@title Binomial Cumulative
#'@description function to calculate the cumulative of binomial random variable
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return data frame with both the probability distribution and the cumulative probabilities
#'@export
#'@examples
#'bin_cumulative(trials = 5, prob = 0.5)
bin_cumulative <- function(trials, prob) {
  # pmf over 0..trials, computed once (the original called bin_probability
  # twice and built the cumulative column with a manual running-sum loop).
  probs <- bin_probability(0:trials, trials, prob)
  bc <- data.frame(
    success = 0:trials,
    probability = probs,
    cumulative = cumsum(probs)  # vectorized replacement for the loop
  )
  # Tag with 'bincum' so plot.bincum() dispatches on the result.
  class(bc) <- c("bincum", "data.frame")
  bc
}
#To plot the barplot of the distribution and cumulative probability of the certain binomial distribution
#'@export
plot.bincum <- function(bincum, ...) {
  # Fixed: aes(), geom_line() and geom_point() were unqualified, which fails
  # unless ggplot2 is attached. Columns are mapped by name instead of the
  # bincum$... anti-pattern. `...` added for S3 consistency.
  ggplot2::ggplot(bincum, ggplot2::aes(x = success, y = cumulative)) +
    ggplot2::geom_line(color = 'blue') +
    ggplot2::geom_point(color = 'blue', size = 1.5)
}
#'@title Binomial Variable
#'@description function to generate a binomial variable with class binvar
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return a binomial random variable object
#'@export
#'@examples
#'bin_variable(trials=5,prob=0.5)
bin_variable<-function(trials,prob){
check_prob(prob)
check_trials(trials)
bv<-list(trials=trials,prob=prob)
structure(bv,class='binvar')
}
#To print the basic information of the binomial variable
#'@export
print.binvar<-function(binvar){
cat("\"Binomial Variable\"\n\n")
cat('Parameter\n')
cat(paste('- number of trials:',binvar[[1]],'\n'))
cat(paste('- prob of success :',binvar[[2]],'\n'))
invisible(binvar)
}
#To summary the certain binomial distribution
#'@export
summary.binvar<-function(binvar){
ls<-list(
trials=binvar[[1]],
prob=binvar[[2]],
mean=aux_mean(binvar[[1]],binvar[[2]]),
variance=aux_variance(binvar[[1]],binvar[[2]]),
mode=aux_mode(binvar[[1]],binvar[[2]]),
skewness=aux_skewness(binvar[[1]],binvar[[2]]),
kurtosis=aux_kurtosis(binvar[[1]],binvar[[2]])
)
class(ls)<-'summary.binvar'
return(ls)
}
#To print some statisics of the certain binomial distribution
#'@export
print.summary.binvar<-function(binvar){
cat("\"Summary Binomial\"\n\n")
cat('Parameter\n')
cat(paste('- number of trials:',binvar$trials),'\n')
cat(paste('- prob of success :',binvar$prob),'\n\n')
cat('Measure\n')
cat(paste('- mean :',binvar$mean,'\n'))
cat(paste('- variance:',binvar$variance,'\n'))
cat(paste('- mode :',binvar$mode,'\n'))
cat(paste('- skewness:',binvar$skewness,'\n'))
cat(paste('- kurtosis:',binvar$kurtosis,'\n'))
invisible(binvar)
}
#'@title Binomial Mean
#'@description function to calculate the mean of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the mean of the binomial distribution
#'@export
#'@examples
#'bin_mean(10, 0.3)
bin_mean<-function(trials,prob){
check_trials(trials)
check_prob(prob)
return(aux_mean(trials,prob))
}
#'@title Binomial Variance
#'@description function to calculate the variance of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the variance of the binomial distribution
#'@export
#'@examples
#'bin_variance(10, 0.3)
bin_variance<-function(trials,prob){
check_trials(trials)
check_prob(prob)
return(aux_variance(trials,prob))
}
#'@title Binomial Mode
#'@description function to calculate the mode of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the mode of the binomial distribution
#'@export
#'@examples
#'bin_mode(10, 0.3)
bin_mode<-function(trials,prob){
check_trials(trials)
check_prob(prob)
return(aux_mode(trials,prob))
}
#'@title Binomial Skewness
#'@description function to calculate the skewness of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the skewness of the binomial distribution
#'@export
#'@examples
#'bin_skewness(10, 0.3)
bin_skewness<-function(trials,prob){
check_trials(trials)
check_prob(prob)
return(aux_skewness(trials,prob))
}
#'@title Binomial Kurtosis
#'@description function to calculate the kurtosis of the binomial distribution
#'@param trials the number of trials
#'@param prob the probability that success occurs
#'@return the kurtosis of the binomial distribution
#'@export
#'@examples
#'bin_kurtosis(10, 0.3)
bin_kurtosis<-function(trials,prob){
check_trials(trials)
check_prob(prob)
return(aux_kurtosis(trials,prob))
}
|
#(без проверки!)
#ЗАДАЧА 1
#Сравните между собой непарными тестами Вилкоксона и Стьюдента выборки из файла pair_1.csv. Постройте графики. Что происходит?
A=read.csv(file="~/Documents/RStudio(домахи)/pair_1.csv")
AA=A$A
AB=A$B
par(mfrow=c(1,2))
plot(density(AA,from=-10,to=20),col="red")
lines(density(AB,from=-10,to=20),col="green")
plot(density(AA-AB),col="green")
curve(dnorm(x,mean=0,sd=sd(AA-AB)),add = T,col="red")
t.test(AA-AB,alternative="two.sided") #p-value < 2.2e-16 => true location is not equal to 0
wilcox.test(AA-AB,alternative = "two.sided") #p-value < 2.2e-16 => true location is not equal to 0
#у AA и AB есть выбросы, именно поэтому не подтверждается гипотеза H0 о нормальности со средним 0
#даже несмотря на то, что распределение АА-АВ на графике похоже на нормальное (но это не точно, я пока не придумала лучшего объяснения)
#ЗАДАЧА 2
#Сравните между собой непарными тестами Вилкоксона и Стьюдента выборки из файла pair_2.csv. Постройте графики. Да господи, что происходит?
B=read.csv(file="~/Documents/RStudio(домахи)/pair_2.csv")
BA=B$A
BB=B$B
par(mfrow=c(1,2))
plot(density(BA,from=-20,to=20),col="red")
lines(density(BB,from=-20,to=20),col="green")
plot(density(BA-BB),col="green")
curve(dnorm(x,mean=0,sd=sd(BA-BB)),add = T,col="red")
t.test(BA-BB,alternative="two.sided") #p-value = 6.569e-14 => true location is not equal to 0
wilcox.test(BA-BB,alternative = "two.sided") #p-value = 1.999e-05 => true location is not equal to 0
#здесь та же история, что и в 1 задаче
#ЗАДАЧА 3
#Пощупайте понятия корреляции.
W=read.csv(file="~/Documents/RStudio(домахи)/std_correlations.csv")
a=W$A
b1=W$B1
b2=W$B2
b3=W$B3
b4=W$B4
#a) Для этого исследуйте корреляцию Пиросна случайной величины А с величинами В1, В2, В3 и В4 из файла std_correlations.csv . Сделайте это на четырёх графиках, построенных одновременно. Каждый график должен выглядеть как-то так:
r1=cor(x=a,y=b1,method = "pearson") #1
r2=cor(x=a,y=b2,method = "pearson") #0.7088821 #С помощью коэффициента корреляции можно определить силу линейной взаимосвязи между переменными
r3=cor(x=a,y=b3,method = "pearson") #0.4456699
r4=cor(x=a,y=b4,method = "pearson") #0.04133461
par(mfrow=c(2,2))
plot(x=a,y=b1,type = "p",col="blue")
location = "bottomright"
legend(location, legend=c("r = 1"),cex = 0.7)
plot(x=a,y=b2,type = "p",col="blue")
legend(location, legend=c("r = 0.7088821"),cex = 0.7)
plot(x=a,y=b3,type = "p",col="blue")
legend(location, legend=c("r = 0.4456699"),cex = 0.7)
plot(x=a,y=b4,type = "p",col="blue")
legend(location, legend=c("r = 0.04133461"),cex = 0.7)
#b) На этих же данных исследуйте корреляцию Спирмена и Кендалла. График:
#не знаю пока, как в легенду графика вставлять имя переменной=значение переменной, поэтому пока сделала это вручную
tho1=cor(x=a,y=b1,method = "spearman") # 1
tho2=cor(x=a,y=b2,method = "spearman") # 0.6905024
tho3=cor(x=a,y=b3,method = "spearman") # 0.4271926
tho4=cor(x=a,y=b4,method = "spearman") # 0.03951613
tau1=cor(x=a,y=b1,method = "kendall") # 0.9999999
tau2=cor(x=a,y=b2,method = "kendall") # 0.5001273
tau3=cor(x=a,y=b3,method = "kendall") # 0.2925176
tau4=cor(x=a,y=b4,method = "kendall") # 0.02632247
par(mfrow=c(2,2))
plot(x=a,y=b1,type = "p",col="blue")
location = "bottomright"
legend(location, legend=c("tho = 1","tau = 0.9999999"),cex = 0.7)
plot(x=a,y=b2,type = "p",col="blue")
legend(location, legend=c("tho = 0.6905024","tau = 0.5001273"),cex = 0.7)
plot(x=a,y=b3,type = "p",col="blue")
legend(location, legend=c("tho = 0.4271926","tau = 0.2925176"),cex = 0.7)
plot(x=a,y=b4,type = "p",col="blue")
legend(location, legend=c("tho = 0.03951613","tau = 0.02632247"),cex = 0.7)
#тут спирмен везде больше кендалла
#c) Сравните в общих словах Пирсона, Спирмена и Кендалла.
#Пирсон будет неустойчив к выбросам
#Спирмен сильнее реагирует на несогласие ранжировок(конкордантные/дисконкондартные пары),
#чем Кендалл (это следует из формул для коэффциентов корреляции), поэтому в b) получили Спирмена больше Кендалла
#ЗАДАЧА 4
#Пощупайте понятия корреляции поплотнее. Для этого исследуйте корреляции Пирсона, Спирмена и Кендалла случайной величины А с величинами С1, С2, С3 и С4 из файла notstd_correlations.csv по схеме из предыдущего задания
#Предполагается, что вы возьмёте старый код и немного его адаптируете.
Q=read.csv(file="~/Documents/RStudio(домахи)/notstd_correlations.csv")
aq=Q$A
c1=Q$C1
c2=Q$C2
c3=Q$C3
c4=Q$C4
par(mfrow=c(2,2))
plot(x=aq,y=c1,type = "p",col="blue")
plot(x=aq,y=c2,type = "p",col="blue")
plot(x=aq,y=c3,type = "p",col="blue")
plot(x=aq,y=c4,type = "p",col="blue")
#на 2, 3 и 4 графиках видны выбросы, уберём их, чтобы корреляция была точнее
#с выбросами
rq1=cor(x=aq,y=c1,method = "pearson") # -0.01642752
rq2=cor(x=aq,y=c2,method = "pearson") # -0.009572435
rq3=cor(x=aq,y=c3,method = "pearson") # 0.4440186
rq4=cor(x=aq,y=c4,method = "pearson") # 0.00381705
#в случае 3 по значению корреляции Пирсона можно предположить, что линейная взаимосвязь есть,
#но даже по графику видно, что это не так (ну вроде бы, хотя я могу и ошибаться) и поэтому я тут дальше убираю выбросы
#без выбросов
ind_aq=which(aq %in% boxplot.stats(aq)$out)
aqq=aq[-ind_aq]
ind_c1=which(c1 %in% boxplot.stats(c1)$out)
c1q=c1[-ind_c1]
ind_c2=which(c2 %in% boxplot.stats(c2)$out)
c2q=c2[-ind_c2]
ind_c3=which(c3 %in% boxplot.stats(c3)$out)
c3q=c3[-ind_c3]
ind_c4=which(c4 %in% boxplot.stats(c4)$out)
c4q=c4[-ind_c4]
cor(x=aqq,y=c1q[1:length(aqq)],method = "pearson") # было: -0.01642752 стало: -0.01144581
cor(x=aqq,y=c2q[1:length(aqq)],method = "pearson") # было: -0.009572435 стало: -0.008564575
cor(x=c3q,y=aqq[1:length(c3q)],method = "pearson") # было: 0.4440186 стало: -0.01155698
cor(x=c4q,y=aqq[1:length(c4q)],method = "pearson") # было: 0.00381705 стало: -0.01028641
#с выбросами
cor(x=aq,y=c1,method = "spearman") # -0.009133402
cor(x=aq,y=c2,method = "spearman") # 0.6897809
cor(x=aq,y=c3,method = "spearman") # 0.6905024
cor(x=aq,y=c4,method = "spearman") # 0.4999971
#без
cor(x=aqq,y=c1q[1:length(aqq)],method = "spearman") # -0.01483261
cor(x=aqq,y=c2q[1:length(aqq)],method = "spearman") # -0.01116652
cor(x=aqq[1:length(c3q)],y=c3q,method = "spearman") # -0.006466429
cor(x=aqq[1:length(c4q)],y=c4q,method = "spearman") # -0.006750441
#с выбросами
cor(x=aq,y=c1,method = "kendall") # -0.006527953
cor(x=aq,y=c2,method = "kendall") # 0.4996757
cor(x=aq,y=c3,method = "kendall") # 0.5001273
cor(x=aq,y=c4,method = "kendall") # 9.814982e-05
#без
cor(x=aqq,y=c1q[1:length(aqq)],method = "kendall") # -0.009832267
cor(x=aqq,y=c2q[1:length(aqq)],method = "kendall") # -0.007462295
cor(x=aqq[1:length(c3q)],y=c3q,method = "kendall") # -0.004391201
cor(x=aqq[1:length(c4q)],y=c4q,method = "kendall") # -0.004472036
| /task5/task5 (robustness and correlation).r | no_license | ktrndy/home_task_applied_statistics | R | false | false | 8,426 | r | #(без проверки!)
#ЗАДАЧА 1
#Сравните между собой непарными тестами Вилкоксона и Стьюдента выборки из файла pair_1.csv. Постройте графики. Что происходит?
A=read.csv(file="~/Documents/RStudio(домахи)/pair_1.csv")
AA=A$A
AB=A$B
par(mfrow=c(1,2))
plot(density(AA,from=-10,to=20),col="red")
lines(density(AB,from=-10,to=20),col="green")
plot(density(AA-AB),col="green")
curve(dnorm(x,mean=0,sd=sd(AA-AB)),add = T,col="red")
t.test(AA-AB,alternative="two.sided") #p-value < 2.2e-16 => true location is not equal to 0
wilcox.test(AA-AB,alternative = "two.sided") #p-value < 2.2e-16 => true location is not equal to 0
#у AA и AB есть выбросы, именно поэтому не подтверждается гипотеза H0 о нормальности со средним 0
#даже несмотря на то, что распределение АА-АВ на графике похоже на нормальное (но это не точно, я пока не придумала лучшего объяснения)
#ЗАДАЧА 2
#Сравните между собой непарными тестами Вилкоксона и Стьюдента выборки из файла pair_2.csv. Постройте графики. Да господи, что происходит?
B=read.csv(file="~/Documents/RStudio(домахи)/pair_2.csv")
BA=B$A
BB=B$B
par(mfrow=c(1,2))
plot(density(BA,from=-20,to=20),col="red")
lines(density(BB,from=-20,to=20),col="green")
plot(density(BA-BB),col="green")
curve(dnorm(x,mean=0,sd=sd(BA-BB)),add = T,col="red")
t.test(BA-BB,alternative="two.sided") #p-value = 6.569e-14 => true location is not equal to 0
wilcox.test(BA-BB,alternative = "two.sided") #p-value = 1.999e-05 => true location is not equal to 0
#здесь та же история, что и в 1 задаче
#ЗАДАЧА 3
#Пощупайте понятия корреляции.
W=read.csv(file="~/Documents/RStudio(домахи)/std_correlations.csv")
a=W$A
b1=W$B1
b2=W$B2
b3=W$B3
b4=W$B4
#a) Для этого исследуйте корреляцию Пиросна случайной величины А с величинами В1, В2, В3 и В4 из файла std_correlations.csv . Сделайте это на четырёх графиках, построенных одновременно. Каждый график должен выглядеть как-то так:
r1=cor(x=a,y=b1,method = "pearson") #1
r2=cor(x=a,y=b2,method = "pearson") #0.7088821 #С помощью коэффициента корреляции можно определить силу линейной взаимосвязи между переменными
r3=cor(x=a,y=b3,method = "pearson") #0.4456699
r4=cor(x=a,y=b4,method = "pearson") #0.04133461
par(mfrow=c(2,2))
plot(x=a,y=b1,type = "p",col="blue")
location = "bottomright"
legend(location, legend=c("r = 1"),cex = 0.7)
plot(x=a,y=b2,type = "p",col="blue")
legend(location, legend=c("r = 0.7088821"),cex = 0.7)
plot(x=a,y=b3,type = "p",col="blue")
legend(location, legend=c("r = 0.4456699"),cex = 0.7)
plot(x=a,y=b4,type = "p",col="blue")
legend(location, legend=c("r = 0.04133461"),cex = 0.7)
#b) На этих же данных исследуйте корреляцию Спирмена и Кендалла. График:
#не знаю пока, как в легенду графика вставлять имя переменной=значение переменной, поэтому пока сделала это вручную
tho1=cor(x=a,y=b1,method = "spearman") # 1
tho2=cor(x=a,y=b2,method = "spearman") # 0.6905024
tho3=cor(x=a,y=b3,method = "spearman") # 0.4271926
tho4=cor(x=a,y=b4,method = "spearman") # 0.03951613
tau1=cor(x=a,y=b1,method = "kendall") # 0.9999999
tau2=cor(x=a,y=b2,method = "kendall") # 0.5001273
tau3=cor(x=a,y=b3,method = "kendall") # 0.2925176
tau4=cor(x=a,y=b4,method = "kendall") # 0.02632247
par(mfrow=c(2,2))
plot(x=a,y=b1,type = "p",col="blue")
location = "bottomright"
legend(location, legend=c("tho = 1","tau = 0.9999999"),cex = 0.7)
plot(x=a,y=b2,type = "p",col="blue")
legend(location, legend=c("tho = 0.6905024","tau = 0.5001273"),cex = 0.7)
plot(x=a,y=b3,type = "p",col="blue")
legend(location, legend=c("tho = 0.4271926","tau = 0.2925176"),cex = 0.7)
plot(x=a,y=b4,type = "p",col="blue")
legend(location, legend=c("tho = 0.03951613","tau = 0.02632247"),cex = 0.7)
#тут спирмен везде больше кендалла
#c) Сравните в общих словах Пирсона, Спирмена и Кендалла.
#Пирсон будет неустойчив к выбросам
#Спирмен сильнее реагирует на несогласие ранжировок(конкордантные/дисконкондартные пары),
#чем Кендалл (это следует из формул для коэффциентов корреляции), поэтому в b) получили Спирмена больше Кендалла
#ЗАДАЧА 4
#Пощупайте понятия корреляции поплотнее. Для этого исследуйте корреляции Пирсона, Спирмена и Кендалла случайной величины А с величинами С1, С2, С3 и С4 из файла notstd_correlations.csv по схеме из предыдущего задания
#Предполагается, что вы возьмёте старый код и немного его адаптируете.
Q=read.csv(file="~/Documents/RStudio(домахи)/notstd_correlations.csv")
aq=Q$A
c1=Q$C1
c2=Q$C2
c3=Q$C3
c4=Q$C4
par(mfrow=c(2,2))
plot(x=aq,y=c1,type = "p",col="blue")
plot(x=aq,y=c2,type = "p",col="blue")
plot(x=aq,y=c3,type = "p",col="blue")
plot(x=aq,y=c4,type = "p",col="blue")
#на 2, 3 и 4 графиках видны выбросы, уберём их, чтобы корреляция была точнее
#с выбросами
rq1=cor(x=aq,y=c1,method = "pearson") # -0.01642752
rq2=cor(x=aq,y=c2,method = "pearson") # -0.009572435
rq3=cor(x=aq,y=c3,method = "pearson") # 0.4440186
rq4=cor(x=aq,y=c4,method = "pearson") # 0.00381705
#в случае 3 по значению корреляции Пирсона можно предположить, что линейная взаимосвязь есть,
#но даже по графику видно, что это не так (ну вроде бы, хотя я могу и ошибаться) и поэтому я тут дальше убираю выбросы
#без выбросов
ind_aq=which(aq %in% boxplot.stats(aq)$out)
aqq=aq[-ind_aq]
ind_c1=which(c1 %in% boxplot.stats(c1)$out)
c1q=c1[-ind_c1]
ind_c2=which(c2 %in% boxplot.stats(c2)$out)
c2q=c2[-ind_c2]
ind_c3=which(c3 %in% boxplot.stats(c3)$out)
c3q=c3[-ind_c3]
ind_c4=which(c4 %in% boxplot.stats(c4)$out)
c4q=c4[-ind_c4]
cor(x=aqq,y=c1q[1:length(aqq)],method = "pearson") # было: -0.01642752 стало: -0.01144581
cor(x=aqq,y=c2q[1:length(aqq)],method = "pearson") # было: -0.009572435 стало: -0.008564575
cor(x=c3q,y=aqq[1:length(c3q)],method = "pearson") # было: 0.4440186 стало: -0.01155698
cor(x=c4q,y=aqq[1:length(c4q)],method = "pearson") # было: 0.00381705 стало: -0.01028641
#с выбросами
cor(x=aq,y=c1,method = "spearman") # -0.009133402
cor(x=aq,y=c2,method = "spearman") # 0.6897809
cor(x=aq,y=c3,method = "spearman") # 0.6905024
cor(x=aq,y=c4,method = "spearman") # 0.4999971
#без
cor(x=aqq,y=c1q[1:length(aqq)],method = "spearman") # -0.01483261
cor(x=aqq,y=c2q[1:length(aqq)],method = "spearman") # -0.01116652
cor(x=aqq[1:length(c3q)],y=c3q,method = "spearman") # -0.006466429
cor(x=aqq[1:length(c4q)],y=c4q,method = "spearman") # -0.006750441
#с выбросами
cor(x=aq,y=c1,method = "kendall") # -0.006527953
cor(x=aq,y=c2,method = "kendall") # 0.4996757
cor(x=aq,y=c3,method = "kendall") # 0.5001273
cor(x=aq,y=c4,method = "kendall") # 9.814982e-05
#без
cor(x=aqq,y=c1q[1:length(aqq)],method = "kendall") # -0.009832267
cor(x=aqq,y=c2q[1:length(aqq)],method = "kendall") # -0.007462295
cor(x=aqq[1:length(c3q)],y=c3q,method = "kendall") # -0.004391201
cor(x=aqq[1:length(c4q)],y=c4q,method = "kendall") # -0.004472036
|
#' Parse Pos Variant By TriMutContext With Annotation
#'
#' @param geneDFunique geneDFunique data frame
#' @param mutationDistMatrix mutationDistMatrix data frame
#' @param useCore default is one
#'
#' @return variantTriMutCategoryParsed data frame
#'
#' @examples
#' #date<-getRunDates(latest=TRUE)
#' cancerType<-"KIRC"
#' selectedSampleId<-NA
#' #worDir<-getwd()
#' mutSig2CVthreshold<-0.1
#' rareMutationUpperLimit<-0.3
#' rareMutationLowerLimit<-0.1
#' rareMutationFreq<-0.02
#'
#' #runNetBox2(dataDir,cancerType,
#' # mutationList,ampGeneList,delGeneList,epiSilencedList,
#' # mutationFreq,ampGeneFreq,delGeneFreq,epiSilencedFreq,
#' # pathwayCommonsDb,directed,
#' # linkerPValThreshold,communityDetectionMethod,
#' # keepIsolatedNodes,verbose=TRUE)
#'
#' @concept CNCDriver
#' @export
#' @importFrom stringr str_extract_all
#' @importFrom parallel mclapply
#' @importFrom plyr rbind.fill
parsePosVariantByTriMutContextWithAnnotation5<-function(geneDFunique,mutationDistMatrix,useCores=1){
#stringVector<-a1$categoryCounts
stringVector<-geneDFunique$categoryCounts
categoryMatch<-gsub("[[0-9]+]","",stringVector)
counts<-str_extract_all(stringVector,"([0-9]+)")
countsRatio<-sapply(1:length(counts), function(x){paste(counts[[x]],collapse=":")})
counts<-sapply(1:length(counts), function(x){sum(as.numeric(counts[[x]]))})
tmpStr<-strsplit(stringVector,"\\,")
tmpStr<-mclapply(1:length(tmpStr), function(x){
tmp<-strsplit(tmpStr[[x]],":")
tmp2<-sapply(1:length(tmp),function(y){tmp[[y]][1]})
tumorName<-paste(unique(tmp2),collapse=",")
#tumorName<-paste(tmp2,collapse=",")
numOfTumorType<-length(unique(tmp2))
tmp3<-str_extract_all(tmp,"[ACGT][ACGT]@[ACGT]+.[ACGT]+")
categoryName<-paste(unique(tmp3),collapse=",")
numOfCategory<-length(unique(tmp3))
data.frame(tumorName,numOfTumorType,categoryName,numOfCategory,stringsAsFactors = FALSE)
},mc.cores=useCores)
tmpStr<-rbind.fill(tmpStr)
triMutContextAnnotation<-data.frame(tmpStr,countsRatio,counts,categoryMatch,stringsAsFactors = FALSE)
geneDFuniqueSimple<-data.frame(geneDFunique$compositeScore,geneDFunique$compositeScoreScaled,geneDFunique$posIndex,geneDFunique$signalValue,geneDFunique$geneSymbol,stringsAsFactors = FALSE)
colnames(geneDFuniqueSimple)<-c("compositeScore","compositeScoreScaled","posIndex","signalValue","geneSymbol")
str<-categoryMatch
splitedDat<-mclapply(1:length(str), function(x){
#splitedDat<-lapply(1:length(str), function(x){
#cat(sprintf("iter %s\n",x))
tmpStr<-unlist(strsplit(str[x],","))
tmpStr2<-strsplit(tmpStr,":")
tmpCounts<-strsplit(countsRatio[x],":")
tmpDF3<-data.frame(do.call(rbind,tmpStr2),tmpCounts,stringsAsFactors = FALSE)
colnames(tmpDF3)<-c("tumorType","categoryName","counts")
return(tmpDF3)
#})
},mc.cores=useCores)
posCategoryFreq<-mclapply(1:length(splitedDat), function(y){
#posCategoryFreq<-lapply(1:length(splitedDat), function(y){
# cat(sprintf("iter %s\n",y))
categoryFreq<-sapply(1:nrow(splitedDat[[y]]), function(z){
selectedCol<-which(colnames(mutationDistMatrix) %in% splitedDat[[y]][z,1])
freq<-mutationDistMatrix[splitedDat[[y]][z,2],selectedCol]
})
splitedDat[[y]]$prob<-categoryFreq
dat1<-splitedDat[[y]]
weightedFreq<-sum(as.numeric(dat1$counts)*dat1$prob)/sum(as.numeric(dat1$counts))
return(weightedFreq)
#})
},mc.cores=useCores)
posCategoryFreq<-unlist(posCategoryFreq)
#result<-data.frame(triMutContextAnnotation,geneDFuniqueSimple,posCategoryFreq,stringsAsFactors = FALSE)
result<-data.frame(triMutContextAnnotation,geneDFunique,posCategoryFreq,stringsAsFactors = FALSE)
return(result)
} | /R/utils-parsePosVariantByTriMutcontextWithAnnotation5.R | permissive | evanbiederstedt/CNCDriver | R | false | false | 3,792 | r | #' Parse Pos Variant By TriMutContext With Annotation
#'
#' @param geneDFunique geneDFunique data frame
#' @param mutationDistMatrix mutationDistMatrix data frame
#' @param useCore default is one
#'
#' @return variantTriMutCategoryParsed data frame
#'
#' @examples
#' #date<-getRunDates(latest=TRUE)
#' cancerType<-"KIRC"
#' selectedSampleId<-NA
#' #worDir<-getwd()
#' mutSig2CVthreshold<-0.1
#' rareMutationUpperLimit<-0.3
#' rareMutationLowerLimit<-0.1
#' rareMutationFreq<-0.02
#'
#' #runNetBox2(dataDir,cancerType,
#' # mutationList,ampGeneList,delGeneList,epiSilencedList,
#' # mutationFreq,ampGeneFreq,delGeneFreq,epiSilencedFreq,
#' # pathwayCommonsDb,directed,
#' # linkerPValThreshold,communityDetectionMethod,
#' # keepIsolatedNodes,verbose=TRUE)
#'
#' @concept CNCDriver
#' @export
#' @importFrom stringr str_extract_all
#' @importFrom parallel mclapply
#' @importFrom plyr rbind.fill
parsePosVariantByTriMutContextWithAnnotation5<-function(geneDFunique,mutationDistMatrix,useCores=1){
#stringVector<-a1$categoryCounts
stringVector<-geneDFunique$categoryCounts
categoryMatch<-gsub("[[0-9]+]","",stringVector)
counts<-str_extract_all(stringVector,"([0-9]+)")
countsRatio<-sapply(1:length(counts), function(x){paste(counts[[x]],collapse=":")})
counts<-sapply(1:length(counts), function(x){sum(as.numeric(counts[[x]]))})
tmpStr<-strsplit(stringVector,"\\,")
tmpStr<-mclapply(1:length(tmpStr), function(x){
tmp<-strsplit(tmpStr[[x]],":")
tmp2<-sapply(1:length(tmp),function(y){tmp[[y]][1]})
tumorName<-paste(unique(tmp2),collapse=",")
#tumorName<-paste(tmp2,collapse=",")
numOfTumorType<-length(unique(tmp2))
tmp3<-str_extract_all(tmp,"[ACGT][ACGT]@[ACGT]+.[ACGT]+")
categoryName<-paste(unique(tmp3),collapse=",")
numOfCategory<-length(unique(tmp3))
data.frame(tumorName,numOfTumorType,categoryName,numOfCategory,stringsAsFactors = FALSE)
},mc.cores=useCores)
tmpStr<-rbind.fill(tmpStr)
triMutContextAnnotation<-data.frame(tmpStr,countsRatio,counts,categoryMatch,stringsAsFactors = FALSE)
geneDFuniqueSimple<-data.frame(geneDFunique$compositeScore,geneDFunique$compositeScoreScaled,geneDFunique$posIndex,geneDFunique$signalValue,geneDFunique$geneSymbol,stringsAsFactors = FALSE)
colnames(geneDFuniqueSimple)<-c("compositeScore","compositeScoreScaled","posIndex","signalValue","geneSymbol")
str<-categoryMatch
splitedDat<-mclapply(1:length(str), function(x){
#splitedDat<-lapply(1:length(str), function(x){
#cat(sprintf("iter %s\n",x))
tmpStr<-unlist(strsplit(str[x],","))
tmpStr2<-strsplit(tmpStr,":")
tmpCounts<-strsplit(countsRatio[x],":")
tmpDF3<-data.frame(do.call(rbind,tmpStr2),tmpCounts,stringsAsFactors = FALSE)
colnames(tmpDF3)<-c("tumorType","categoryName","counts")
return(tmpDF3)
#})
},mc.cores=useCores)
posCategoryFreq<-mclapply(1:length(splitedDat), function(y){
#posCategoryFreq<-lapply(1:length(splitedDat), function(y){
# cat(sprintf("iter %s\n",y))
categoryFreq<-sapply(1:nrow(splitedDat[[y]]), function(z){
selectedCol<-which(colnames(mutationDistMatrix) %in% splitedDat[[y]][z,1])
freq<-mutationDistMatrix[splitedDat[[y]][z,2],selectedCol]
})
splitedDat[[y]]$prob<-categoryFreq
dat1<-splitedDat[[y]]
weightedFreq<-sum(as.numeric(dat1$counts)*dat1$prob)/sum(as.numeric(dat1$counts))
return(weightedFreq)
#})
},mc.cores=useCores)
posCategoryFreq<-unlist(posCategoryFreq)
#result<-data.frame(triMutContextAnnotation,geneDFuniqueSimple,posCategoryFreq,stringsAsFactors = FALSE)
result<-data.frame(triMutContextAnnotation,geneDFunique,posCategoryFreq,stringsAsFactors = FALSE)
return(result)
} |
#-------------------------------------------------------------------------------
# Revision history:
# 2009-09-28 by J. Fox (renamed)
# 2010-04-14 by J. Fox fixed error in reporting largest abs rstudent
# 2012-12-12 by J. Fox fixed handling of labels argument
# 2019-01-02 by J. Fox added lmerMod method
# 2019-05-12 by J. Fox fixed spelling of "Bonferroni"
#-------------------------------------------------------------------------------
# Bonferroni test for an outlier (J. Fox)
outlierTest <- function(model, ...){
UseMethod("outlierTest")
}
outlierTest.lm <- function(model, cutoff=0.05, n.max=10, order=TRUE, labels=names(rstudent), ...){
rstudent <- rstudent(model)
if (length(rstudent) != length(labels))
stop("Number of labels does not correspond to number of residuals.")
else names(rstudent) <- labels
df <- df.residual(model) - 1
rstudent <- rstudent[!is.na(rstudent)]
n <- length(rstudent)
p <- if (class(model)[1] == "glm")
2*(pnorm(abs(rstudent), lower.tail=FALSE))
else 2*(pt(abs(rstudent), df, lower.tail=FALSE))
bp <- n*p
ord <- if (order) order(bp) else 1:n
ord <- ord[bp[ord] <= cutoff]
result <- if (length(ord) == 0){
which <- which.max(abs(rstudent))
list(rstudent=rstudent[which], p=p[which], bonf.p=bp[which], signif=FALSE, cutoff=cutoff)
}
else {
if (length(ord) > n.max) ord <- ord[1:n.max]
result <- list(rstudent=rstudent[ord], p=p[ord], bonf.p=bp[ord], signif=TRUE, cutoff=cutoff)
}
class(result)<-"outlierTest"
result
}
outlierTest.lmerMod <- function(model, ...){
outlierTest.lm(model, ...)
}
print.outlierTest<-function(x, digits=5, ...){
if (!x$signif){
cat("No Studentized residuals with Bonferroni p <", x$cutoff)
cat("\nLargest |rstudent|:\n")
}
bp <- x$bonf
bp[bp > 1] <- NA
table <- data.frame(rstudent=x$rstudent,
"unadjusted p-value"=signif(x$p, digits), "Bonferroni p"=signif(bp, digits),
check.names=FALSE)
rownames(table) <- names(x$rstudent)
print(table)
invisible(x)
}
| /R/outlierTest.R | no_license | cran/car | R | false | false | 2,038 | r | #-------------------------------------------------------------------------------
# Revision history:
# 2009-09-28 by J. Fox (renamed)
# 2010-04-14 by J. Fox fixed error in reporting largest abs rstudent
# 2012-12-12 by J. Fox fixed handling of labels argument
# 2019-01-02 by J. Fox added lmerMod method
# 2019-05-12 by J. Fox fixed spelling of "Bonferroni"
#-------------------------------------------------------------------------------
# Bonferroni test for an outlier (J. Fox)
outlierTest <- function(model, ...){
UseMethod("outlierTest")
}
outlierTest.lm <- function(model, cutoff=0.05, n.max=10, order=TRUE, labels=names(rstudent), ...){
rstudent <- rstudent(model)
if (length(rstudent) != length(labels))
stop("Number of labels does not correspond to number of residuals.")
else names(rstudent) <- labels
df <- df.residual(model) - 1
rstudent <- rstudent[!is.na(rstudent)]
n <- length(rstudent)
p <- if (class(model)[1] == "glm")
2*(pnorm(abs(rstudent), lower.tail=FALSE))
else 2*(pt(abs(rstudent), df, lower.tail=FALSE))
bp <- n*p
ord <- if (order) order(bp) else 1:n
ord <- ord[bp[ord] <= cutoff]
result <- if (length(ord) == 0){
which <- which.max(abs(rstudent))
list(rstudent=rstudent[which], p=p[which], bonf.p=bp[which], signif=FALSE, cutoff=cutoff)
}
else {
if (length(ord) > n.max) ord <- ord[1:n.max]
result <- list(rstudent=rstudent[ord], p=p[ord], bonf.p=bp[ord], signif=TRUE, cutoff=cutoff)
}
class(result)<-"outlierTest"
result
}
outlierTest.lmerMod <- function(model, ...){
outlierTest.lm(model, ...)
}
print.outlierTest<-function(x, digits=5, ...){
if (!x$signif){
cat("No Studentized residuals with Bonferroni p <", x$cutoff)
cat("\nLargest |rstudent|:\n")
}
bp <- x$bonf
bp[bp > 1] <- NA
table <- data.frame(rstudent=x$rstudent,
"unadjusted p-value"=signif(x$p, digits), "Bonferroni p"=signif(bp, digits),
check.names=FALSE)
rownames(table) <- names(x$rstudent)
print(table)
invisible(x)
}
|
### R code from vignette source 'SiMRiv.Rnw'
###################################################
### code chunk number 1: version
###################################################
#options(width = 60)
version <- packageDescription("SiMRiv")
#colorramp <- rgb(c(seq(4, 9, len = 5), rep(9, 5)), c(rep(9, 5), seq(9, 4, len = 5)), 0, max = 9)
#colorramp <- rgb(9, 9:0, 9:0, max = 9)
###################################################
### code chunk number 2: SiMRiv.Rnw:21-38
###################################################
my.Swd <- function(name, width, height, ...) {
grDevices::png(filename = paste(name, "png", sep = "."),
width = 8, height = 8, res = 100, units = "in")
}
my.Swd.off <- function() {
grDevices::dev.off()
}
my.Swd2 <- function(name, width, height, ...) {
grDevices::png(filename = paste(name, "png", sep = "."),
width = 8, height = 8 * 2, res = 100, units = "in")
}
my.Swd2.off <- function() {
grDevices::dev.off()
}
library(SiMRiv)
###################################################
### code chunk number 3: simriv-1
###################################################
# define a species with a single-state movement type
# characterized by a random walk
rand.walker <- species(state.RW())
# simulate one individual of this species, 10000 simulation steps
sim.rw <- simulate(rand.walker, 10000)
# plot trajectory
plot(sim.rw, type = "l", asp = 1, main = "Random walk")
###################################################
### code chunk number 4: simriv-2
###################################################
# define a species with a single-state movement type characterized
# by a correlated random walk with concentration=0.98
c.rand.walker <- species(state.CRW(0.98))
# simulate one individual of this species
# 10000 simulation steps
sim.crw <- simulate(c.rand.walker, 10000)
plot(sim.crw, type = "l", asp = 1, main = "Correlated Random walk")
###################################################
### code chunk number 5: simriv-3
###################################################
# define a species with a correlated random walk
# and step length = 15
c.rand.walker.15 <- species(state.CRW(0.98) + 15)
# which, in single-state species, is the same as:
c.rand.walker.15 <- species(state.CRW(0.98)) + 15
###################################################
### code chunk number 6: simriv-4
###################################################
# a Lévy walker can be approximated by a two-state walker
# composed of a random walk state and a correlated
# random walk state.
levy.walker <- species(state.RW() + state.CRW(0.98)
, trans = transitionMatrix(0.005, 0.01)) + 25
sim.lw <- simulate(levy.walker, 10000)
plot(sim.lw, type = "l", asp = 1, main = "Lévy-like walker")
###################################################
### code chunk number 7: simriv-5
###################################################
resistance <- resistanceFromShape(
system.file("doc/landcover.shp", package="SiMRiv")
, res = 100)
plot(resistance, axes = F)
###################################################
### code chunk number 8: simriv-6
###################################################
resistance <- resistanceFromShape(
system.file("doc/landcover.shp", package="SiMRiv")
, res = 100, field = "coverclass", mapvalues = c(
"forest" = 0.5, "urban" = 1, "dam" = 0
, "shrubland" = 0.75)
, background = 0.9, margin = 3000)
plot(resistance, axes = F)
###################################################
### code chunk number 9: simriv-7
###################################################
resistance <- resistanceFromShape(
system.file("doc/river-sample.shp", package="SiMRiv")
, res = 100, field = "Order", mapvalues = c("2" = 0
, "3" = 0.2, "4" = 0.4, "5" = 0.6, "6" = 0.8)
, buffer = 150, background = 0.95, margin = 3000)
plot(resistance, axes = F)
###################################################
### code chunk number 10: simriv-8
###################################################
# load shapefile
river.shape <- shapefile(system.file("doc/river-sample.shp", package="SiMRiv"))
# below you can provide the shapefile filename, or the
# R shapefile object itself
resistance <- resistanceFromShape(river.shape, res = 100
, buffer = (9 - river.shape@data$Order) ^ 3
, background = 0.95, margin = 3000)
# buffer here is just some magical function to convert river
# order into a meaningful value in the [0, 1] range!
plot(resistance, axes = F)
###################################################
### code chunk number 11: simriv-9
###################################################
landcover <- resistanceFromShape(
system.file("doc/landcover.shp", package="SiMRiv")
, res = 50, field = "coverclass", mapvalues = c(
"forest" = 0.5, "urban" = 1, "dam" = 0
, "shrubland" = 0.75), background = 0.95)
river.landcover <- resistanceFromShape(
system.file("doc/river-sample.shp", package="SiMRiv")
, baseRaster = landcover, buffer = 100, field = 0
, background = 0.95, margin = 3000)
plot(river.landcover, axes = F)
###################################################
### code chunk number 12: simriv-10
###################################################
# set starting coordinates anywhere within the river
init = xyFromCell(river.landcover, sample(which(values(river.landcover) == 0), 1))
# adding a number to a species is a shortcut for setting
# the step lengths of all states
# multiplying is a shortcut for setting the perceptual range radius
levy.walker <- (levy.walker + 15) * 1000
sim.lw.river <- simulate(levy.walker, 40000
, resist = river.landcover, coords = init)
# plot resistance
plot(river.landcover, axes = F
, ylim = range(sim.lw.river[, 2]), xlim = range(sim.lw.river[, 1]))
# plot trajectory on top of resistance
lines(sim.lw.river)
| /inst/doc/SiMRiv.R | no_license | cran/SiMRiv | R | false | false | 5,814 | r | ### R code from vignette source 'SiMRiv.Rnw'
###################################################
### code chunk number 1: version
###################################################
#options(width = 60)
version <- packageDescription("SiMRiv")
#colorramp <- rgb(c(seq(4, 9, len = 5), rep(9, 5)), c(rep(9, 5), seq(9, 4, len = 5)), 0, max = 9)
#colorramp <- rgb(9, 9:0, 9:0, max = 9)
###################################################
### code chunk number 2: SiMRiv.Rnw:21-38
###################################################
my.Swd <- function(name, width, height, ...) {
grDevices::png(filename = paste(name, "png", sep = "."),
width = 8, height = 8, res = 100, units = "in")
}
my.Swd.off <- function() {
grDevices::dev.off()
}
my.Swd2 <- function(name, width, height, ...) {
grDevices::png(filename = paste(name, "png", sep = "."),
width = 8, height = 8 * 2, res = 100, units = "in")
}
my.Swd2.off <- function() {
grDevices::dev.off()
}
library(SiMRiv)
###################################################
### code chunk number 3: simriv-1
###################################################
# define a species with a single-state movement type
# characterized by a random walk
rand.walker <- species(state.RW())
# simulate one individual of this species, 10000 simulation steps
sim.rw <- simulate(rand.walker, 10000)
# plot trajectory
plot(sim.rw, type = "l", asp = 1, main = "Random walk")
###################################################
### code chunk number 4: simriv-2
###################################################
# define a species with a single-state movement type characterized
# by a correlated random walk with concentration=0.98
c.rand.walker <- species(state.CRW(0.98))
# simulate one individual of this species
# 10000 simulation steps
sim.crw <- simulate(c.rand.walker, 10000)
plot(sim.crw, type = "l", asp = 1, main = "Correlated Random walk")
###################################################
### code chunk number 5: simriv-3
###################################################
# define a species with a correlated random walk
# and step length = 15
c.rand.walker.15 <- species(state.CRW(0.98) + 15)
# which, in single-state species, is the same as:
c.rand.walker.15 <- species(state.CRW(0.98)) + 15
###################################################
### code chunk number 6: simriv-4
###################################################
# a Lévy walker can be approximated by a two-state walker
# composed of a random walk state and a correlated
# random walk state.
levy.walker <- species(state.RW() + state.CRW(0.98)
, trans = transitionMatrix(0.005, 0.01)) + 25
sim.lw <- simulate(levy.walker, 10000)
plot(sim.lw, type = "l", asp = 1, main = "Lévy-like walker")
###################################################
### code chunk number 7: simriv-5
###################################################
resistance <- resistanceFromShape(
system.file("doc/landcover.shp", package="SiMRiv")
, res = 100)
plot(resistance, axes = F)
###################################################
### code chunk number 8: simriv-6
###################################################
resistance <- resistanceFromShape(
system.file("doc/landcover.shp", package="SiMRiv")
, res = 100, field = "coverclass", mapvalues = c(
"forest" = 0.5, "urban" = 1, "dam" = 0
, "shrubland" = 0.75)
, background = 0.9, margin = 3000)
plot(resistance, axes = F)
###################################################
### code chunk number 9: simriv-7
###################################################
resistance <- resistanceFromShape(
system.file("doc/river-sample.shp", package="SiMRiv")
, res = 100, field = "Order", mapvalues = c("2" = 0
, "3" = 0.2, "4" = 0.4, "5" = 0.6, "6" = 0.8)
, buffer = 150, background = 0.95, margin = 3000)
plot(resistance, axes = F)
###################################################
### code chunk number 10: simriv-8
###################################################
# load shapefile
river.shape <- shapefile(system.file("doc/river-sample.shp", package="SiMRiv"))
# below you can provide the shapefile filename, or the
# R shapefile object itself
resistance <- resistanceFromShape(river.shape, res = 100
, buffer = (9 - river.shape@data$Order) ^ 3
, background = 0.95, margin = 3000)
# buffer here is just some magical function to convert river
# order into a meaningful value in the [0, 1] range!
plot(resistance, axes = F)
###################################################
### code chunk number 11: simriv-9
###################################################
landcover <- resistanceFromShape(
system.file("doc/landcover.shp", package="SiMRiv")
, res = 50, field = "coverclass", mapvalues = c(
"forest" = 0.5, "urban" = 1, "dam" = 0
, "shrubland" = 0.75), background = 0.95)
river.landcover <- resistanceFromShape(
system.file("doc/river-sample.shp", package="SiMRiv")
, baseRaster = landcover, buffer = 100, field = 0
, background = 0.95, margin = 3000)
plot(river.landcover, axes = F)
###################################################
### code chunk number 12: simriv-10
###################################################
# set starting coordinates anywhere within the river
init = xyFromCell(river.landcover, sample(which(values(river.landcover) == 0), 1))
# adding a number to a species is a shortcut for setting
# the step lengths of all states
# multiplying is a shortcut for setting the perceptual range radius
levy.walker <- (levy.walker + 15) * 1000
sim.lw.river <- simulate(levy.walker, 40000
, resist = river.landcover, coords = init)
# plot resistance
plot(river.landcover, axes = F
, ylim = range(sim.lw.river[, 2]), xlim = range(sim.lw.river[, 1]))
# plot trajectory on top of resistance
lines(sim.lw.river)
|
library(ggplot2)
library(gridExtra)
library(RCurl)
library(data.table)
source('~/Desktop/aneuploidy_analysis-master/aneuploidy_functions.R', chdir = TRUE)
URL <- "https://raw.githubusercontent.com/rmccoy7541/aneuploidy-analysis/master/data/aaa3337-McCoy-SM.table_S2.csv" # import the data
url <- getURL(URL)
data <- fread(url, sep=",", header=T)
data_filtered <- filterDataTable(data)
data_filtered <- callPloidyTable(data_filtered)
data_blastomere <- selectSampleType(data_filtered, blastomere)
data_te <- selectSampleType(data_filtered, TE)
####################################################
se <- function(p, n) {
sqrt((p * (1 - p)) / n)
}
####################################################
aneuploidChroms <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] != "H110") & (data[, i, with = F] != "H101") & (data[, i + 69, with = F] != 1) & !is.na(data[, i, with = F])
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
maternalErrs <- function(data) {
maternal_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H200" | data[, i, with = F] == "H020" | data[, i, with = F] == "H010" | data[, i, with = F] == "H001" | data[, i, with = F] == "H000" | data[, i, with = F] == "H210" | data[, i, with = F] == "H201" | data[, i, with = F] == "H021") & (data[, i + 69, with = F] != 1)
maternal_frame[, i - 6] <- new
}
return(rowSums(maternal_frame, na.rm = T))
}
paternalErrs <- function(data) {
paternal_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H200" | data[, i, with = F] == "H020" | data[, i, with = F] == "H100" | data[, i, with = F] == "H000" | data[, i, with = F] == "H102" | data[, i, with = F] == "H120" | data[, i, with = F] == "H201" | data[, i, with = F] == "H021" | data[, i, with = F] == "H111") & (data[, i + 69, with = F] != 1)
paternal_frame[,i - 6] <- new
}
return(rowSums(paternal_frame, na.rm = T))
}
totalChroms <- function(data) {
chroms_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
aneuploid_frame <- data[, 7:29, with = F]
chroms_frame[aneuploid_frame == "H110"] <- 2
chroms_frame[aneuploid_frame == "H101"] <- 2
chroms_frame[aneuploid_frame == "H011"] <- 2
chroms_frame[aneuploid_frame == "H210"] <- 3
chroms_frame[aneuploid_frame == "H120"] <- 3
chroms_frame[aneuploid_frame == "H111"] <- 3
chroms_frame[aneuploid_frame == "H201"] <- 3
chroms_frame[aneuploid_frame == "H102"] <- 3
chroms_frame[aneuploid_frame == "H100"] <- 1
chroms_frame[aneuploid_frame == "H010"] <- 1
chroms_frame[aneuploid_frame == "H001"] <- 1
chroms_frame[aneuploid_frame == "H000"] <- 0
return(rowSums(chroms_frame, na.rm = T))
}
totalMatChroms <- function(data) {
chroms_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
aneuploid_frame <- data[, 7:29, with = F]
chroms_frame[aneuploid_frame == "H110"] <- 1
chroms_frame[aneuploid_frame == "H101"] <- 1
chroms_frame[aneuploid_frame == "H011"] <- 0
chroms_frame[aneuploid_frame == "H210"] <- 2
chroms_frame[aneuploid_frame == "H120"] <- 1
chroms_frame[aneuploid_frame == "H111"] <- 1
chroms_frame[aneuploid_frame == "H201"] <- 2
chroms_frame[aneuploid_frame == "H102"] <- 1
chroms_frame[aneuploid_frame == "H100"] <- 1
chroms_frame[aneuploid_frame == "H010"] <- 0
chroms_frame[aneuploid_frame == "H001"] <- 0
chroms_frame[aneuploid_frame == "H000"] <- 0
return(rowSums(chroms_frame, na.rm = T))
}
totalPatChroms <- function(data) {
chroms_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
aneuploid_frame <- data[, 7:29, with = F]
chroms_frame[aneuploid_frame == "H110"] <- 1
chroms_frame[aneuploid_frame == "H101"] <- 1
chroms_frame[aneuploid_frame == "H011"] <- 2
chroms_frame[aneuploid_frame == "H210"] <- 1
chroms_frame[aneuploid_frame == "H120"] <- 2
chroms_frame[aneuploid_frame == "H111"] <- 2
chroms_frame[aneuploid_frame == "H201"] <- 1
chroms_frame[aneuploid_frame == "H102"] <- 2
chroms_frame[aneuploid_frame == "H100"] <- 0
chroms_frame[aneuploid_frame == "H010"] <- 1
chroms_frame[aneuploid_frame == "H001"] <- 1
chroms_frame[aneuploid_frame == "H000"] <- 0
return(rowSums(chroms_frame, na.rm = T))
}
data_blastomere$maternalChroms <- totalMatChroms(data_blastomere)
data_blastomere$paternalChroms <- totalPatChroms(data_blastomere)
data_blastomere$totalChroms <- totalChroms(data_blastomere)
data_te$maternalChroms <- totalMatChroms(data_te)
data_te$paternalChroms <- totalPatChroms(data_te)
data_te$totalChroms <- totalChroms(data_te)
set.seed(42)
data_sampled <- rbind(data_blastomere[sample(nrow(data_te)),], data_te)
data_unsampled <- rbind(data_blastomere[complete.cases(data_blastomere[, 7:29, with = F]),], data_te[complete.cases(data_te[, 7:29, with = F]),])
df <- data.frame(mat = data_sampled$maternalChroms, pat = data_sampled$paternalChroms, sample_type = data_sampled$sample_type)
levels(df$sample_type) <- c("Day-3 Blastomere", "Day-5 TE Biopsy")
df2 <- data.frame(table(df))
df2$mat <- as.numeric(df2$mat)
df2$pat <- as.numeric(df2$pat)
df3 <- data.frame(table(data.frame(mat = data_unsampled$maternalChroms, pat = data_unsampled$paternalChroms, sample_type = data_unsampled$sample_type)))
df3$prop <- NA
df3[df3$sample_type == "blastomere",]$prop <- df3[df3$sample_type == "blastomere",]$Freq / nrow(data_blastomere)
df3[df3$sample_type == "TE",]$prop <- df3[df3$sample_type == "TE",]$Freq / nrow(data_te)
df3$mat <- as.numeric(as.character(df3$mat))
df3$pat <- as.numeric(as.character(df3$pat))
levels(df3$sample_type) <- c("Day-3 Blastomere", "Day-5 TE Biopsy")
p <- ggplot(df, aes(x = mat, y = pat)) + stat_binhex() + scale_fill_gradientn(colours = rev(rainbow(3)), name = "Samples", trans = "log", breaks = 10^(0:6))
p + facet_grid(. ~ sample_type) + theme_bw() + ylab('Number of Paternal Chromosomes') + xlab('Number of Maternal Chromosomes')
q <- ggplot(df2[df2$Freq != 0,], aes(x = mat, y = pat, fill = Freq)) + geom_tile() + scale_fill_gradientn(colours = rev(rainbow(3)), name = "Samples", trans = "log", breaks = 10^(0:6))
q + facet_grid(. ~ sample_type) + theme_bw() + ylab('Number of Paternal Chromosomes') + xlab('Number of Maternal Chromosomes')
r <- ggplot(df3[df3$prop != 0,], aes(x = as.numeric(mat), y = as.numeric(pat), fill = prop)) + geom_tile() + scale_fill_gradientn(colours = rev(rainbow(3)), name = "Proportion", trans = "log", breaks = 10^(-6:-0))
r + facet_grid(. ~ sample_type) + theme_bw() + ylab('Number of Paternal Chromosomes') + xlab('Number of Maternal Chromosomes')
####################################################
trisomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- ((data[, i, with = F] == "H120") | (data[, i, with = F] == "H102") | (data[, i, with = F] == "H210")) & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(trisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singleTrisomy <- sum(trisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singleTrisomy, nrow(data_blastomere))
sum(trisomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singleTrisomy <- sum(trisomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singleTrisomy, nrow(data_te))
####################################################
maternalTrisomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H210") & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(maternalTrisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singleMatTrisomy <- sum(maternalTrisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singleMatTrisomy, nrow(data_blastomere))
sum(maternalTrisomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singleMatTrisomy <- sum(maternalTrisomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singleMatTrisomy, nrow(data_te))
####################################################
paternalTrisomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- ((data[, i, with = F] == "H120") | (data[, i, with = F] == "H102")) & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(paternalTrisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singlePatTrisomy <- sum(paternalTrisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singlePatTrisomy, nrow(data_blastomere))
sum(paternalTrisomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singlePatTrisomy <- sum(paternalTrisomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singlePatTrisomy, nrow(data_te))
####################################################
# Count per-sample chromosomes called as a maternal monosomy ("H010"),
# skipping chromosomes whose companion flag column (i + 69) equals 1.
#
# @param data A data.table with per-chromosome ploidy calls in columns 7:29.
# @return A numeric vector of per-row counts; NA calls contribute zero.
maternalMonosomy <- function(data) {
  aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
  for (i in 7:29) {
    new <- (data[, i, with = FALSE] == "H010") & (data[, i + 69, with = FALSE] != 1)
    # i - 6 maps data column 7:29 onto frame column 1:23 (spacing
    # normalized to match the sibling counter functions).
    aneuploid_frame[, i - 6] <- new
  }
  return(rowSums(aneuploid_frame, na.rm = TRUE))
}
sum(maternalMonosomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singleMatMonosomy <- sum(maternalMonosomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singleMatMonosomy, nrow(data_blastomere))
sum(maternalMonosomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singleMatMonosomy <- sum(maternalMonosomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singleMatMonosomy, nrow(data_te))
####################################################
# Count per-sample chromosomes called as a paternal monosomy ("H100"),
# skipping chromosomes whose companion flag column (i + 69) equals 1.
# NA calls contribute zero via rowSums(na.rm = T).
paternalMonosomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H100") & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(paternalMonosomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singlePatMonosomy <- sum(paternalMonosomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singlePatMonosomy, nrow(data_blastomere))
sum(paternalMonosomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singlePatMonosomy <- sum(paternalMonosomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singlePatMonosomy, nrow(data_te))
####################################################
# Count per-sample chromosomes called as nullisomic ("H000" = no copies),
# skipping chromosomes whose companion flag column (i + 69) equals 1.
# NA calls contribute zero via rowSums(na.rm = T).
nullisomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H000") & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(nullisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singleNullisomy <- sum(nullisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singleNullisomy, nrow(data_blastomere))
sum(nullisomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singleNullisomy <- sum(nullisomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singleNullisomy, nrow(data_te))
####################################################
sum(trisomy(data_blastomere) > 19)
triploidy <- sum(trisomy(data_blastomere) > 19) / nrow(data_blastomere)
se(triploidy, nrow(data_blastomere))
sum(trisomy(data_te) > 19)
triploidy <- sum(trisomy(data_te) > 19) / nrow(data_te)
se(triploidy, nrow(data_te))
####################################################
sum(maternalTrisomy(data_blastomere) > 19)
matTriploidy <- sum(maternalTrisomy(data_blastomere) > 19) / nrow(data_blastomere)
se(matTriploidy, nrow(data_blastomere))
sum(maternalTrisomy(data_te) > 19)
matTriploidy <- sum(maternalTrisomy(data_te) > 19) / nrow(data_te)
se(matTriploidy, nrow(data_te))
####################################################
sum(paternalTrisomy(data_blastomere) > 19)
patTriploidy <- sum(paternalTrisomy(data_blastomere) > 19) / nrow(data_blastomere)
se(patTriploidy, nrow(data_blastomere))
sum(paternalTrisomy(data_te) > 19)
patTriploidy <- sum(paternalTrisomy(data_te) > 19) / nrow(data_te)
se(patTriploidy, nrow(data_te))
####################################################
# Count per-sample chromosomes called as any monosomy ("H100", "H010" or
# "H001"), skipping chromosomes whose companion flag column (i + 69)
# equals 1.  Used below to estimate haploidy (> 19 monosomic chromosomes).
#
# @param data A data.table with per-chromosome ploidy calls in columns 7:29.
# @return A numeric vector of per-row counts; NA calls contribute zero.
monosomy <- function(data) {
  aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
  for (i in 7:29) {
    new <- ((data[, i, with = FALSE] == "H100") | (data[, i, with = FALSE] == "H010") | (data[, i, with = FALSE] == "H001")) & (data[, i + 69, with = FALSE] != 1)
    aneuploid_frame[, i - 6] <- new
  }
  return(rowSums(aneuploid_frame, na.rm = TRUE))
}
sum(monosomy(data_blastomere) > 19)
haploidy <- sum(monosomy(data_blastomere) > 19) / nrow(data_blastomere)
se(haploidy, nrow(data_blastomere))
sum(monosomy(data_te) > 19)
haploidy <- sum(monosomy(data_te) > 19) / nrow(data_te)
se(haploidy, nrow(data_te))
####################################################
sum(maternalMonosomy(data_blastomere) > 19)
matHaploidy <- sum(maternalMonosomy(data_blastomere) > 19) / nrow(data_blastomere)
se(matHaploidy, nrow(data_blastomere))
sum(maternalMonosomy(data_te) > 19)
matHaploidy <- sum(maternalMonosomy(data_te) > 19) / nrow(data_te)
se(matHaploidy, nrow(data_te))
####################################################
sum(paternalMonosomy(data_blastomere) > 19)
patHaploidy <- sum(paternalMonosomy(data_blastomere) > 19) / nrow(data_blastomere)
se(patHaploidy, nrow(data_blastomere))
sum(paternalMonosomy(data_te) > 19)
patHaploidy <- sum(paternalMonosomy(data_te) > 19) / nrow(data_te)
se(patHaploidy, nrow(data_te))
####################################################
sum(aneuploidChroms(data_blastomere) > 2 & aneuploidChroms(data_blastomere) < 20 )
complex <- sum(aneuploidChroms(data_blastomere) > 2 & aneuploidChroms(data_blastomere) < 20 ) / nrow(data_blastomere)
se(complex, nrow(data_blastomere))
sum(aneuploidChroms(data_te) > 2 & aneuploidChroms(data_te) < 20 )
complex <- sum(aneuploidChroms(data_te) > 2 & aneuploidChroms(data_te) < 20 ) / nrow(data_te)
se(complex, nrow(data_te))
| /figures/f05_and_table_2.R | permissive | rmccoy7541/aneuploidy_analysis | R | false | false | 14,362 | r | library(ggplot2)
library(gridExtra)
library(RCurl)
library(data.table)
source('~/Desktop/aneuploidy_analysis-master/aneuploidy_functions.R', chdir = TRUE)
URL <- "https://raw.githubusercontent.com/rmccoy7541/aneuploidy-analysis/master/data/aaa3337-McCoy-SM.table_S2.csv" # import the data
url <- getURL(URL)
data <- fread(url, sep=",", header=T)
data_filtered <- filterDataTable(data)
data_filtered <- callPloidyTable(data_filtered)
data_blastomere <- selectSampleType(data_filtered, blastomere)
data_te <- selectSampleType(data_filtered, TE)
####################################################
# Standard error of a sample proportion.
#
# @param p Observed proportion (expected in [0, 1]); may be vectorized.
# @param n Number of observations the proportion was computed from.
# @return sqrt(p * (1 - p) / n), the binomial standard error.
se <- function(p, n) {
  proportion_variance <- p * (1 - p) / n
  sqrt(proportion_variance)
}
####################################################
# Count per-sample aneuploid chromosomes: any non-missing call that is
# neither of the two euploid codes ("H110", "H101" -- both map to 2 copies
# in totalChroms() below), skipping chromosomes whose companion flag
# column (i + 69) equals 1.  Unlike the more specific counters, this one
# also excludes NA calls explicitly via !is.na().
#
# @param data A data.table with per-chromosome ploidy calls in columns 7:29.
# @return A numeric vector, one aneuploid-chromosome count per row.
aneuploidChroms <- function(data) {
  aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
  for (i in 7:29) {
    new <- (data[, i, with = FALSE] != "H110") & (data[, i, with = FALSE] != "H101") & (data[, i + 69, with = FALSE] != 1) & !is.na(data[, i, with = FALSE])
    aneuploid_frame[, i - 6] <- new
  }
  return(rowSums(aneuploid_frame, na.rm = TRUE))
}
# Count per-sample chromosomes whose call code is one of the eight codes
# attributed here to a maternal error (H200, H020, H010, H001, H000, H210,
# H201, H021), skipping chromosomes whose companion flag column (i + 69)
# equals 1.  NA calls contribute zero via rowSums(na.rm = T).
maternalErrs <- function(data) {
maternal_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H200" | data[, i, with = F] == "H020" | data[, i, with = F] == "H010" | data[, i, with = F] == "H001" | data[, i, with = F] == "H000" | data[, i, with = F] == "H210" | data[, i, with = F] == "H201" | data[, i, with = F] == "H021") & (data[, i + 69, with = F] != 1)
maternal_frame[, i - 6] <- new
}
return(rowSums(maternal_frame, na.rm = T))
}
# Count per-sample chromosomes whose call code is one of the nine codes
# attributed here to a paternal error (H200, H020, H100, H000, H102, H120,
# H201, H021, H111), skipping chromosomes whose companion flag column
# (i + 69) equals 1.  Note H111 appears only in this paternal list; per
# the totalPatChroms() mapping below H111 carries 2 paternal copies.
# NA calls contribute zero via rowSums(na.rm = T).
paternalErrs <- function(data) {
paternal_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H200" | data[, i, with = F] == "H020" | data[, i, with = F] == "H100" | data[, i, with = F] == "H000" | data[, i, with = F] == "H102" | data[, i, with = F] == "H120" | data[, i, with = F] == "H201" | data[, i, with = F] == "H021" | data[, i, with = F] == "H111") & (data[, i + 69, with = F] != 1)
paternal_frame[,i - 6] <- new
}
return(rowSums(paternal_frame, na.rm = T))
}
# Total chromosome copies per sample, summed over columns 7:29: each call
# code is mapped to a copy count (disomic codes -> 2, trisomic -> 3,
# monosomic -> 1, nullisomic -> 0).  Codes not listed here (and NA calls)
# leave NA in chroms_frame and therefore contribute zero to the sum
# (rowSums with na.rm = T).  Unlike the counters above, no flag column is
# consulted here -- NOTE(review): confirm that is intentional.
totalChroms <- function(data) {
chroms_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
aneuploid_frame <- data[, 7:29, with = F]
chroms_frame[aneuploid_frame == "H110"] <- 2
chroms_frame[aneuploid_frame == "H101"] <- 2
chroms_frame[aneuploid_frame == "H011"] <- 2
chroms_frame[aneuploid_frame == "H210"] <- 3
chroms_frame[aneuploid_frame == "H120"] <- 3
chroms_frame[aneuploid_frame == "H111"] <- 3
chroms_frame[aneuploid_frame == "H201"] <- 3
chroms_frame[aneuploid_frame == "H102"] <- 3
chroms_frame[aneuploid_frame == "H100"] <- 1
chroms_frame[aneuploid_frame == "H010"] <- 1
chroms_frame[aneuploid_frame == "H001"] <- 1
chroms_frame[aneuploid_frame == "H000"] <- 0
return(rowSums(chroms_frame, na.rm = T))
}
# Total MATERNAL chromosome copies per sample: each call code in columns
# 7:29 is mapped to its maternal copy count (e.g. H210 -> 2, H011 -> 0).
# Unlisted codes and NA calls contribute zero via rowSums(na.rm = T).
# Companion to totalPatChroms(); together they drive the mat-vs-pat plots.
totalMatChroms <- function(data) {
chroms_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
aneuploid_frame <- data[, 7:29, with = F]
chroms_frame[aneuploid_frame == "H110"] <- 1
chroms_frame[aneuploid_frame == "H101"] <- 1
chroms_frame[aneuploid_frame == "H011"] <- 0
chroms_frame[aneuploid_frame == "H210"] <- 2
chroms_frame[aneuploid_frame == "H120"] <- 1
chroms_frame[aneuploid_frame == "H111"] <- 1
chroms_frame[aneuploid_frame == "H201"] <- 2
chroms_frame[aneuploid_frame == "H102"] <- 1
chroms_frame[aneuploid_frame == "H100"] <- 1
chroms_frame[aneuploid_frame == "H010"] <- 0
chroms_frame[aneuploid_frame == "H001"] <- 0
chroms_frame[aneuploid_frame == "H000"] <- 0
return(rowSums(chroms_frame, na.rm = T))
}
# Total PATERNAL chromosome copies per sample: each call code in columns
# 7:29 is mapped to its paternal copy count (e.g. H011 -> 2, H100 -> 0).
# Unlisted codes and NA calls contribute zero via rowSums(na.rm = T).
# Companion to totalMatChroms() above.
totalPatChroms <- function(data) {
chroms_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
aneuploid_frame <- data[, 7:29, with = F]
chroms_frame[aneuploid_frame == "H110"] <- 1
chroms_frame[aneuploid_frame == "H101"] <- 1
chroms_frame[aneuploid_frame == "H011"] <- 2
chroms_frame[aneuploid_frame == "H210"] <- 1
chroms_frame[aneuploid_frame == "H120"] <- 2
chroms_frame[aneuploid_frame == "H111"] <- 2
chroms_frame[aneuploid_frame == "H201"] <- 1
chroms_frame[aneuploid_frame == "H102"] <- 2
chroms_frame[aneuploid_frame == "H100"] <- 0
chroms_frame[aneuploid_frame == "H010"] <- 1
chroms_frame[aneuploid_frame == "H001"] <- 1
chroms_frame[aneuploid_frame == "H000"] <- 0
return(rowSums(chroms_frame, na.rm = T))
}
# Annotate both sample sets with maternal/paternal/total chromosome counts.
data_blastomere$maternalChroms <- totalMatChroms(data_blastomere)
data_blastomere$paternalChroms <- totalPatChroms(data_blastomere)
data_blastomere$totalChroms <- totalChroms(data_blastomere)
data_te$maternalChroms <- totalMatChroms(data_te)
data_te$paternalChroms <- totalPatChroms(data_te)
data_te$totalChroms <- totalChroms(data_te)
# Downsample blastomeres to the TE sample size so the two panels are
# comparable; seed fixed for reproducibility.
set.seed(42)
data_sampled <- rbind(data_blastomere[sample(nrow(data_te)),], data_te)
# Unsampled set keeps only rows with complete calls across columns 7:29.
data_unsampled <- rbind(data_blastomere[complete.cases(data_blastomere[, 7:29, with = F]),], data_te[complete.cases(data_te[, 7:29, with = F]),])
df <- data.frame(mat = data_sampled$maternalChroms, pat = data_sampled$paternalChroms, sample_type = data_sampled$sample_type)
levels(df$sample_type) <- c("Day-3 Blastomere", "Day-5 TE Biopsy")
# df2: tabulated counts of (mat, pat, sample_type) combinations.
df2 <- data.frame(table(df))
# NOTE(review): table() makes mat/pat factors, so as.numeric() here yields
# 1-based level indices rather than the original chromosome counts (df3
# below uses as.numeric(as.character(...)) instead) -- verify the plot
# axes built from df2 are intended to be on the index scale.
df2$mat <- as.numeric(df2$mat)
df2$pat <- as.numeric(df2$pat)
# df3: same tabulation on the complete-case data, normalized to proportions
# within each sample type.
df3 <- data.frame(table(data.frame(mat = data_unsampled$maternalChroms, pat = data_unsampled$paternalChroms, sample_type = data_unsampled$sample_type)))
df3$prop <- NA
df3[df3$sample_type == "blastomere",]$prop <- df3[df3$sample_type == "blastomere",]$Freq / nrow(data_blastomere)
df3[df3$sample_type == "TE",]$prop <- df3[df3$sample_type == "TE",]$Freq / nrow(data_te)
# as.character() first recovers the true numeric values from the factors.
df3$mat <- as.numeric(as.character(df3$mat))
df3$pat <- as.numeric(as.character(df3$pat))
levels(df3$sample_type) <- c("Day-3 Blastomere", "Day-5 TE Biopsy")
# p: hexbin of maternal vs paternal counts, log color scale.
p <- ggplot(df, aes(x = mat, y = pat)) + stat_binhex() + scale_fill_gradientn(colours = rev(rainbow(3)), name = "Samples", trans = "log", breaks = 10^(0:6))
p + facet_grid(. ~ sample_type) + theme_bw() + ylab('Number of Paternal Chromosomes') + xlab('Number of Maternal Chromosomes')
# q: tile plot of the tabulated counts (non-zero cells only).
q <- ggplot(df2[df2$Freq != 0,], aes(x = mat, y = pat, fill = Freq)) + geom_tile() + scale_fill_gradientn(colours = rev(rainbow(3)), name = "Samples", trans = "log", breaks = 10^(0:6))
q + facet_grid(. ~ sample_type) + theme_bw() + ylab('Number of Paternal Chromosomes') + xlab('Number of Maternal Chromosomes')
# r: tile plot of per-sample-type proportions on a log color scale.
r <- ggplot(df3[df3$prop != 0,], aes(x = as.numeric(mat), y = as.numeric(pat), fill = prop)) + geom_tile() + scale_fill_gradientn(colours = rev(rainbow(3)), name = "Proportion", trans = "log", breaks = 10^(-6:-0))
r + facet_grid(. ~ sample_type) + theme_bw() + ylab('Number of Paternal Chromosomes') + xlab('Number of Maternal Chromosomes')
####################################################
trisomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- ((data[, i, with = F] == "H120") | (data[, i, with = F] == "H102") | (data[, i, with = F] == "H210")) & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(trisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singleTrisomy <- sum(trisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singleTrisomy, nrow(data_blastomere))
sum(trisomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singleTrisomy <- sum(trisomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singleTrisomy, nrow(data_te))
####################################################
maternalTrisomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H210") & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(maternalTrisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singleMatTrisomy <- sum(maternalTrisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singleMatTrisomy, nrow(data_blastomere))
sum(maternalTrisomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singleMatTrisomy <- sum(maternalTrisomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singleMatTrisomy, nrow(data_te))
####################################################
paternalTrisomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- ((data[, i, with = F] == "H120") | (data[, i, with = F] == "H102")) & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(paternalTrisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singlePatTrisomy <- sum(paternalTrisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singlePatTrisomy, nrow(data_blastomere))
sum(paternalTrisomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singlePatTrisomy <- sum(paternalTrisomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singlePatTrisomy, nrow(data_te))
####################################################
maternalMonosomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H010") & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i-6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(maternalMonosomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singleMatMonosomy <- sum(maternalMonosomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singleMatMonosomy, nrow(data_blastomere))
sum(maternalMonosomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singleMatMonosomy <- sum(maternalMonosomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singleMatMonosomy, nrow(data_te))
####################################################
paternalMonosomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H100") & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(paternalMonosomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singlePatMonosomy <- sum(paternalMonosomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singlePatMonosomy, nrow(data_blastomere))
sum(paternalMonosomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singlePatMonosomy <- sum(paternalMonosomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singlePatMonosomy, nrow(data_te))
####################################################
nullisomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- (data[, i, with = F] == "H000") & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(nullisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1)
singleNullisomy <- sum(nullisomy(data_blastomere) == 1 & aneuploidChroms(data_blastomere) == 1) / nrow(data_blastomere)
se(singleNullisomy, nrow(data_blastomere))
sum(nullisomy(data_te) == 1 & aneuploidChroms(data_te) == 1)
singleNullisomy <- sum(nullisomy(data_te) == 1 & aneuploidChroms(data_te) == 1) / nrow(data_te)
se(singleNullisomy, nrow(data_te))
####################################################
sum(trisomy(data_blastomere) > 19)
triploidy <- sum(trisomy(data_blastomere) > 19) / nrow(data_blastomere)
se(triploidy, nrow(data_blastomere))
sum(trisomy(data_te) > 19)
triploidy <- sum(trisomy(data_te) > 19) / nrow(data_te)
se(triploidy, nrow(data_te))
####################################################
sum(maternalTrisomy(data_blastomere) > 19)
matTriploidy <- sum(maternalTrisomy(data_blastomere) > 19) / nrow(data_blastomere)
se(matTriploidy, nrow(data_blastomere))
sum(maternalTrisomy(data_te) > 19)
matTriploidy <- sum(maternalTrisomy(data_te) > 19) / nrow(data_te)
se(matTriploidy, nrow(data_te))
####################################################
sum(paternalTrisomy(data_blastomere) > 19)
patTriploidy <- sum(paternalTrisomy(data_blastomere) > 19) / nrow(data_blastomere)
se(patTriploidy, nrow(data_blastomere))
sum(paternalTrisomy(data_te) > 19)
patTriploidy <- sum(paternalTrisomy(data_te) > 19) / nrow(data_te)
se(patTriploidy, nrow(data_te))
####################################################
monosomy <- function(data) {
aneuploid_frame <- data.frame(matrix(ncol = 23, nrow = nrow(data)))
for (i in 7:29) {
new <- ((data[, i, with = F] == "H100") | (data[, i, with = F] == "H010") | (data[, i, with = F] == "H001")) & (data[, i + 69, with = F] != 1)
aneuploid_frame[, i - 6] <- new
}
return(rowSums(aneuploid_frame, na.rm = T))
}
sum(monosomy(data_blastomere) > 19)
haploidy <- sum(monosomy(data_blastomere) > 19) / nrow(data_blastomere)
se(haploidy, nrow(data_blastomere))
sum(monosomy(data_te) > 19)
haploidy <- sum(monosomy(data_te) > 19) / nrow(data_te)
se(haploidy, nrow(data_te))
####################################################
sum(maternalMonosomy(data_blastomere) > 19)
matHaploidy <- sum(maternalMonosomy(data_blastomere) > 19) / nrow(data_blastomere)
se(matHaploidy, nrow(data_blastomere))
sum(maternalMonosomy(data_te) > 19)
matHaploidy <- sum(maternalMonosomy(data_te) > 19) / nrow(data_te)
se(matHaploidy, nrow(data_te))
####################################################
sum(paternalMonosomy(data_blastomere) > 19)
patHaploidy <- sum(paternalMonosomy(data_blastomere) > 19) / nrow(data_blastomere)
se(patHaploidy, nrow(data_blastomere))
sum(paternalMonosomy(data_te) > 19)
patHaploidy <- sum(paternalMonosomy(data_te) > 19) / nrow(data_te)
se(patHaploidy, nrow(data_te))
####################################################
sum(aneuploidChroms(data_blastomere) > 2 & aneuploidChroms(data_blastomere) < 20 )
complex <- sum(aneuploidChroms(data_blastomere) > 2 & aneuploidChroms(data_blastomere) < 20 ) / nrow(data_blastomere)
se(complex, nrow(data_blastomere))
sum(aneuploidChroms(data_te) > 2 & aneuploidChroms(data_te) < 20 )
complex <- sum(aneuploidChroms(data_te) > 2 & aneuploidChroms(data_te) < 20 ) / nrow(data_te)
se(complex, nrow(data_te))
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 5131
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5123
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5123
c
c Input Parameter (command line, file):
c input filename QBFLIB/Wintersteiger/RankingFunctions/rankfunc36_signed_64.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1869
c no.of clauses 5131
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 5123
c
c QBFLIB/Wintersteiger/RankingFunctions/rankfunc36_signed_64.qdimacs 1869 5131 E1 [450 451 770 771 1219 1220 1608 1609] 0 128 1731 5123 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Wintersteiger/RankingFunctions/rankfunc36_signed_64/rankfunc36_signed_64.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 785 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 5131
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5123
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5123
c
c Input Parameter (command line, file):
c input filename QBFLIB/Wintersteiger/RankingFunctions/rankfunc36_signed_64.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1869
c no.of clauses 5131
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 5123
c
c QBFLIB/Wintersteiger/RankingFunctions/rankfunc36_signed_64.qdimacs 1869 5131 E1 [450 451 770 771 1219 1220 1608 1609] 0 128 1731 5123 RED
|
#'
#' HCA on PCA/MIA/PARAFAC scores from a Spectra or Spectra2D Object
#'
#' A wrapper which performs HCA on the scores from a PCA of a
#' \code{\link[ChemoSpec]{Spectra}} object or POP/MIA/PARAFAC of a \code{\link[ChemoSpec2D]{Spectra2D}} object.
#' Many methods for computing the clusters and distances are
#' available.
#'
#' @param spectra `r .writeDoc_Spectra3()`
#'
#' @param so "Score Object" One of the following:
#' \itemize{
#' \item An object of class \code{\link{prcomp}}, created by \code{ChemoSpec} functions
#' \code{\link[ChemoSpec]{c_pcaSpectra}}, \code{\link[ChemoSpec]{r_pcaSpectra}},
#' \code{\link[ChemoSpec]{irlba_pcaSpectra}} or \code{\link[ChemoSpec]{s_pcaSpectra}}.
#' \item An object of class \code{mia} produced by
#' function \code{\link[ChemoSpec2D]{miaSpectra2D}}.
#' \item An object of class \code{parafac} produced by
#' function \code{\link[ChemoSpec2D]{pfacSpectra2D}}.
#' \item An object of class \code{pop} produced by
#' function \code{\link[ChemoSpec2D]{popSpectra2D}}.
#' }
#' Any of the above score objects will have been modified to include a
#' list element called \code{$method}, a character string describing the
#' pre-processing carried out and the type of PCA performed (used to annotate the
#' plot).
#'
#' @param scores A vector of integers specifying the components (scores) to plot.
#'
#' @param c.method A character string describing the clustering method; must be
#' acceptable to \code{\link{hclust}}.
#'
#' @param d.method A character string describing the distance calculation
#' method; must be acceptable as a method in \code{\link{rowDist}}.
#'
#' @param use.sym A logical; if true, use no color and use lower-case letters
#' to indicate group membership. Applies only to \code{Spectra} objects.
#'
#' @param leg.loc Character; if \code{"none"} no legend will be drawn.
#' Otherwise, any string acceptable to \code{\link{legend}}.
#'
#' @param \dots `r .writeDoc_GraphicsDots()`
#'
#' @return A list, containing an object of class \code{\link{hclust}} and an
#' object of class \code{\link{dendrogram}}. The side effect is a plot.
#'
#' @author `r .writeDoc_Authors("BH")`
#'
#' @seealso \code{\link{hclust}} for the underlying function. See
#' \code{\link[ChemoSpec]{hcaSpectra}} for HCA of the entire data set stored in the
#' \code{\link[ChemoSpec]{Spectra}} object.
#'
#' @keywords multivariate cluster
#' @export
#'
#' @examples
#' if (checkForPackageWithVersion("ChemoSpec", 6.0)) {
#' library("ChemoSpec")
#' data(metMUD1)
#'
#' pca <- c_pcaSpectra(metMUD1)
#' hca <- hcaScores(metMUD1, pca, main = "metMUD1 NMR Data PCA Scores")
#' }
#'
#' if (checkForPackageWithVersion("ChemoSpec2D", 0.5)) {
#' library("ChemoSpec2D")
#' data(MUD1)
#'
#' mia <- miaSpectra2D(MUD1)
#' hca <- hcaScores(MUD1, mia, scores = 1:2, main = "MUD1 MIA Scores")
#'
#' set.seed(123)
#' pfac <- pfacSpectra2D(MUD1, parallel = FALSE, nfac = 2)
#' hca <- hcaScores(MUD1, pfac, scores = 1:2, main = "MUD1 PARAFAC Scores")
#' }
hcaScores <- function(spectra, so, scores = c(1:5),
c.method = "complete", d.method = "euclidean",
use.sym = FALSE, leg.loc = "topright", ...) {
# S3 generic: UseMethod() dispatches on the class of the first argument
# (spectra, a Spectra or Spectra2D object).  The class-specific methods
# are presumably defined elsewhere in the package; all arguments above
# are forwarded to them unchanged.
UseMethod("hcaScores")
}
| /R/hcaScores.R | no_license | cran/ChemoSpecUtils | R | false | false | 3,213 | r | #'
#' HCA on PCA/MIA/PARAFAC scores from a Spectra or Spectra2D Object
#'
#' A wrapper which performs HCA on the scores from a PCA of a
#' \code{\link[ChemoSpec]{Spectra}} object or POP/MIA/PARAFAC of a \code{\link[ChemoSpec2D]{Spectra2D}} object.
#' Many methods for computing the clusters and distances are
#' available.
#'
#' @param spectra `r .writeDoc_Spectra3()`
#'
#' @param so "Score Object" One of the following:
#' \itemize{
#' \item An object of class \code{\link{prcomp}}, created by \code{ChemoSpec} functions
#' \code{\link[ChemoSpec]{c_pcaSpectra}}, \code{\link[ChemoSpec]{r_pcaSpectra}},
#' \code{\link[ChemoSpec]{irlba_pcaSpectra}} or \code{\link[ChemoSpec]{s_pcaSpectra}}.
#' \item An object of class \code{mia} produced by
#' function \code{\link[ChemoSpec2D]{miaSpectra2D}}.
#' \item An object of class \code{parafac} produced by
#' function \code{\link[ChemoSpec2D]{pfacSpectra2D}}.
#' \item An object of class \code{pop} produced by
#' function \code{\link[ChemoSpec2D]{popSpectra2D}}.
#' }
#' Any of the above score objects will have been modified to include a
#' list element called \code{$method}, a character string describing the
#' pre-processing carried out and the type of PCA performed (used to annotate the
#' plot).
#'
#' @param scores A vector of integers specifying the components (scores) to plot.
#'
#' @param c.method A character string describing the clustering method; must be
#' acceptable to \code{\link{hclust}}.
#'
#' @param d.method A character string describing the distance calculation
#' method; must be acceptable as a method in \code{\link{rowDist}}.
#'
#' @param use.sym A logical; if true, use no color and use lower-case letters
#' to indicate group membership. Applies only to \code{Spectra} objects.
#'
#' @param leg.loc Character; if \code{"none"} no legend will be drawn.
#' Otherwise, any string acceptable to \code{\link{legend}}.
#'
#' @param \dots `r .writeDoc_GraphicsDots()`
#'
#' @return A list, containing an object of class \code{\link{hclust}} and an
#' object of class \code{\link{dendrogram}}. The side effect is a plot.
#'
#' @author `r .writeDoc_Authors("BH")`
#'
#' @seealso \code{\link{hclust}} for the underlying function. See
#' \code{\link[ChemoSpec]{hcaSpectra}} for HCA of the entire data set stored in the
#' \code{\link[ChemoSpec]{Spectra}} object.
#'
#' @keywords multivariate cluster
#' @export
#'
#' @examples
#' if (checkForPackageWithVersion("ChemoSpec", 6.0)) {
#' library("ChemoSpec")
#' data(metMUD1)
#'
#' pca <- c_pcaSpectra(metMUD1)
#' hca <- hcaScores(metMUD1, pca, main = "metMUD1 NMR Data PCA Scores")
#' }
#'
#' if (checkForPackageWithVersion("ChemoSpec2D", 0.5)) {
#' library("ChemoSpec2D")
#' data(MUD1)
#'
#' mia <- miaSpectra2D(MUD1)
#' hca <- hcaScores(MUD1, mia, scores = 1:2, main = "MUD1 MIA Scores")
#'
#' set.seed(123)
#' pfac <- pfacSpectra2D(MUD1, parallel = FALSE, nfac = 2)
#' hca <- hcaScores(MUD1, pfac, scores = 1:2, main = "MUD1 PARAFAC Scores")
#' }
hcaScores <- function(spectra, so, scores = c(1:5),
c.method = "complete", d.method = "euclidean",
use.sym = FALSE, leg.loc = "topright", ...) {
UseMethod("hcaScores")
}
|
context("Test dependency related code")
# Copied from \package{pkgload} in order to avoid dependency
test_that("Parse dependencies", {
# A mixed Depends/Imports-style field: versioned, unversioned, and the
# special "R" entry, which parse_deps() is expected to drop.
deps <- parse_deps("\nhttr (< 2.1),\nRCurl (>= 3),\nutils (== 2.12.1),\ntools,\nR (>= 2.10),\nmemoise")
expect_equal(nrow(deps), 5)
expect_false("R" %in% deps$name)
# Comparator/version are NA for the unversioned entries (tools, memoise).
expect_equal(deps$compare, c("<", ">=", "==", NA, NA))
expect_equal(deps$version, c("2.1", "3", "2.12.1", NA, NA))
# Degenerate inputs yield NULL rather than an empty data frame.
expect_null(parse_deps(NULL))
expect_null(parse_deps(" "))
# Invalid version specifications
expect_error(parse_deps("\nhttr (< 2.1),\nRCurl (3.0)"))
expect_error(parse_deps("\nhttr (< 2.1),\nRCurl ( 3.0)"))
expect_error(parse_deps("\nhttr (< 2.1),\nRCurl (==3.0)"))
expect_error(parse_deps("\nhttr (< 2.1),\nRCurl (==3.0 )"))
expect_error(parse_deps("\nhttr (< 2.1),\nRCurl ( ==3.0)"))
# This should be OK (no error)
deps <- parse_deps("\nhttr (< 2.1),\nRCurl (== 3.0.1)")
expect_equal(deps$compare, c("<", "=="))
expect_equal(deps$version, c("2.1", "3.0.1"))
})
test_that("Base dependencies are filtered", {
# Base/recommended packages are removed entirely; third-party names pass
# through unchanged.
expect_equal(filter_base_dependencies(c("tools", "stats")), character(0))
deps <- c("ggplot2", "dplyr")
expect_equal(filter_base_dependencies(c("tools", "stats", deps)), deps)
})
test_that("Get package dependencies", {
example_deps <- c("dplyr", "ggplot2", "sf", "rgdal")
example_deps_versioned <- c("dplyr (< 1.0.0)", "ggplot2 (== 3.3.2)", "sf", "rgdal")
# Build a throwaway package skeleton whose DESCRIPTION Imports the
# versioned deps above.
example_package_dir <- get_tempdir("test-package-dependencies")
write.dcf(
list("Imports" = paste0("\n ", example_deps_versioned, collapse = ",\n ")),
file.path(example_package_dir, "DESCRIPTION"),
keep.white = "Imports"
)
# NOTE(review): hits the network (CRAN mirror); consider a skip guard or
# a cached fixture for offline runs.
avail_pkgs <- available.packages(repos = "cloud.r-project.org")
# Creates a subset of available packages for test case, removes Imports so
# that the changes in dependencies going forward doesn't break this test
specific_avail_pkgs <- avail_pkgs[c("dplyr", "ggplot2", "sf", "rgdal"), ]
specific_avail_pkgs[, "Imports"] <- NA
specific_avail_pkgs[, "Depends"] <- NA
package_deps <- get_package_deps(example_package_dir, specific_avail_pkgs)
# Version qualifiers are stripped; only the package names come back.
expect_setequal(package_deps, example_deps)
# An empty Depends field yields no dependencies.
write.dcf(
list("Depends" = ""),
file.path(example_package_dir, "DESCRIPTION"),
keep.white = "Depends"
)
expect_equal(
get_package_deps(example_package_dir, specific_avail_pkgs),
c()
)
# quietR is expected to have no (non-base) dependencies; an unknown
# package name is an error.
expect_true(length(get_package_deps("quietR", avail_pkgs)) == 0)
expect_error(get_package_deps("thispackagedoesn'texist", avail_pkgs))
unlink(example_package_dir, TRUE)
})
test_that("clean_available_packages", {
# NOTE(review): hits the network (CRAN mirror); consider a skip guard or
# a cached fixture for offline runs.
avail_packages <- clean_available_packages(
available.packages(repos = "cloud.r-project.org")
)
expect_true(is.data.frame(avail_packages))
# Round-trip a single package record through a local PACKAGES file to
# check the single-row case also yields a data frame.
example_PACKAGES <- file.path(get_tempdir("test-clean_available_packages"),
"PACKAGES")
write.dcf(avail_packages[1, ], example_PACKAGES,
keep.white = names(avail_packages))
single_avail_pkgs <- clean_available_packages(
available.packages(paste0("file://", dirname(example_PACKAGES)))
)
expect_true(is.data.frame(single_avail_pkgs))
unlink(get_tempdir("test-clean_available_packages"), TRUE)
})
| /CRANpiled/tests/testthat/test-dependencies.R | permissive | USEPA/cflinuxfs3-CRAN | R | false | false | 3,224 | r | context("Test dependency related code")
# Copied from \package{pkgload} in order to avoid dependency
# parse_deps() should split a Depends/Imports field into package names,
# comparison operators and versions, drop the R version entry, and reject
# malformed version specifications.
test_that("Parse dependencies", {
  deps <- parse_deps("\nhttr (< 2.1),\nRCurl (>= 3),\nutils (== 2.12.1),\ntools,\nR (>= 2.10),\nmemoise")
  expect_equal(nrow(deps), 5)
  # The R version requirement itself is not a package dependency.
  expect_false("R" %in% deps$name)
  expect_equal(deps$compare, c("<", ">=", "==", NA, NA))
  expect_equal(deps$version, c("2.1", "3", "2.12.1", NA, NA))
  # Empty or NULL input yields NULL rather than an empty data frame.
  expect_null(parse_deps(NULL))
  expect_null(parse_deps(" "))
  # Invalid version specifications
  expect_error(parse_deps("\nhttr (< 2.1),\nRCurl (3.0)"))
  expect_error(parse_deps("\nhttr (< 2.1),\nRCurl ( 3.0)"))
  expect_error(parse_deps("\nhttr (< 2.1),\nRCurl (==3.0)"))
  expect_error(parse_deps("\nhttr (< 2.1),\nRCurl (==3.0 )"))
  expect_error(parse_deps("\nhttr (< 2.1),\nRCurl ( ==3.0)"))
  # This should be OK (no error)
  deps <- parse_deps("\nhttr (< 2.1),\nRCurl (== 3.0.1)")
  expect_equal(deps$compare, c("<", "=="))
  expect_equal(deps$version, c("2.1", "3.0.1"))
})
# filter_base_dependencies() should strip base/recommended packages while
# preserving genuine third-party dependencies.
test_that("Base dependencies are filtered", {
  expect_equal(filter_base_dependencies(c("tools", "stats")), character(0))
  deps <- c("ggplot2", "dplyr")
  expect_equal(filter_base_dependencies(c("tools", "stats", deps)), deps)
})
# get_package_deps() should read a package DESCRIPTION, strip version
# requirements, and return only dependencies present in the supplied
# available-packages table.  NOTE(review): requires network access.
test_that("Get package dependencies", {
  example_deps <- c("dplyr", "ggplot2", "sf", "rgdal")
  example_deps_versioned <- c("dplyr (< 1.0.0)", "ggplot2 (== 3.3.2)", "sf", "rgdal")
  example_package_dir <- get_tempdir("test-package-dependencies")
  # Fake a DESCRIPTION with versioned Imports entries.
  write.dcf(
    list("Imports" = paste0("\n ", example_deps_versioned, collapse = ",\n ")),
    file.path(example_package_dir, "DESCRIPTION"),
    keep.white = "Imports"
  )
  avail_pkgs <- available.packages(repos = "cloud.r-project.org")
  # Creates a subset of available packages for test case, removes Imports so
  # that the changes in dependencies going forward doesn't break this test
  specific_avail_pkgs <- avail_pkgs[c("dplyr", "ggplot2", "sf", "rgdal"), ]
  specific_avail_pkgs[, "Imports"] <- NA
  specific_avail_pkgs[, "Depends"] <- NA
  package_deps <- get_package_deps(example_package_dir, specific_avail_pkgs)
  expect_setequal(package_deps, example_deps)
  # An empty Depends field should yield no dependencies at all.
  write.dcf(
    list("Depends" = ""),
    file.path(example_package_dir, "DESCRIPTION"),
    keep.white = "Depends"
  )
  expect_equal(
    get_package_deps(example_package_dir, specific_avail_pkgs),
    c()
  )
  # A real CRAN package with no non-base deps, and a non-existent package.
  expect_true(length(get_package_deps("quietR", avail_pkgs)) == 0)
  expect_error(get_package_deps("thispackagedoesn'texist", avail_pkgs))
  unlink(example_package_dir, TRUE)
})
# clean_available_packages() should return a usable data.frame both for the
# full CRAN index and after a round-trip through a single-row PACKAGES file.
# NOTE(review): requires network access to cloud.r-project.org.
test_that("clean_available_packages", {
  avail_packages <- clean_available_packages(
    available.packages(repos = "cloud.r-project.org")
  )
  expect_true(is.data.frame(avail_packages))
  # Write one package record out as a local PACKAGES file ...
  example_PACKAGES <- file.path(get_tempdir("test-clean_available_packages"),
                                "PACKAGES")
  write.dcf(avail_packages[1, ], example_PACKAGES,
            keep.white = names(avail_packages))
  # ... and read it back through a file:// repository.
  single_avail_pkgs <- clean_available_packages(
    available.packages(paste0("file://", dirname(example_PACKAGES)))
  )
  expect_true(is.data.frame(single_avail_pkgs))
  # Remove the temporary directory created by get_tempdir().
  unlink(get_tempdir("test-clean_available_packages"), TRUE)
})
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 9214
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 9214
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#28.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3247
c no.of clauses 9214
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 9214
c
c QBFLIB/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#28.asp.qdimacs 3247 9214 E1 [] 0 128 3119 9214 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#28.asp/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#28.asp.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 714 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 9214
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 9214
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#28.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3247
c no.of clauses 9214
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 9214
c
c QBFLIB/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#28.asp.qdimacs 3247 9214 E1 [] 0 128 3119 9214 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oneway.R
\name{oneway}
\alias{oneway}
\title{One Way Analysis of Variance}
\usage{
oneway(formula, data)
}
\arguments{
\item{formula}{an object of class formula,
relating the dependent variable to the
grouping variable}
\item{data}{a data frame containing the variables
in the model.}
}
\value{
a list with 2 elements:
\item{oneway}{a list with the lm results}
\item{summarystats}{a data frame with the summary statistics}
}
\description{
\code{oneway} computes a one-way analysis of variance
and includes group-level summary statistics.
}
\details{
This function computes a standard one-way ANOVA,
group means, and standard deviations. Missing values are handled
via list-wise deletion.
}
\examples{
mileage <- oneway(hwy ~ class, cars)
summary(mileage)
print(mileage)
plot(mileage)
}
\author{
Shane Ross <saross@wesleyan.edu>
}
| /man/oneway.Rd | no_license | sross15/oneway | R | false | true | 904 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oneway.R
\name{oneway}
\alias{oneway}
\title{One Way Analysis of Variance}
\usage{
oneway(formula, data)
}
\arguments{
\item{formula}{an object of class formula,
relating the dependent variable to the
grouping variable}
\item{data}{a data frame containing the variables
in the model.}
}
\value{
a list with 2 elements:
\item{oneway}{a list with the lm results}
\item{summarystats}{a data frame with the summary statistics}
}
\description{
\code{oneway} computes a one-way analysis of variance
and includes group-level summary statistics.
}
\details{
This function computes a standard one-way ANOVA,
group means, and standard deviations. Missing values are handled
via list-wise deletion.
}
\examples{
mileage <- oneway(hwy ~ class, cars)
summary(mileage)
print(mileage)
plot(mileage)
}
\author{
Shane Ross <saross@wesleyan.edu>
}
|
##########################################
#### GAM MODELS FOR PREMATURITY STUDY ####
##########################################
# Fits one GAM per NMF structural component relating the component to
# gestational age (ga), adjusting for a smooth age term, sex, race and
# maternal education, then FDR-corrects the ga p-values across components.
#Load data
data.NMF <- read.csv("/data/joy/BBL/projects/pncPreterm/subjectData/n278_Prematurity_allData.csv", header=TRUE, na.strings = "NA")
#Make race2 a factor with three levels (White, African American, and Other)
data.NMF$race2 <- as.factor(data.NMF$race2)
#Load library
library(mgcv)
#Get NMF variable names (all columns whose name contains "Nmf26")
nmfComponents <- names(data.NMF)[grep("Nmf26",names(data.NMF))]
#Run gam models with race 2 (white, african american, other)
#NmfModels <- lapply(nmfComponents, function(x) {
#  gam(substitute(i ~ s(age) + sex + race2 + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
#})
#OR Run gam models with white (white vs nonwhite)
NmfModels <- lapply(nmfComponents, function(x) {
  gam(substitute(i ~ s(age) + sex + white + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
})
#Look at model summaries
models <- lapply(NmfModels, summary)
#Pull p-values for the ga term (row 5 of the parametric coefficient table:
#intercept, sex, white, medu1, ga)
p <- sapply(NmfModels, function(v) summary(v)$p.table[5,4])
#Convert to data frame
p <- as.data.frame(p)
#Print original p-values to three decimal places
p_round <- round(p,3)
#FDR correct p-values
pfdr <- p.adjust(p[,1],method="fdr")
#Convert to data frame
pfdr <- as.data.frame(pfdr)
#To print fdr-corrected p-values to three decimal places
pfdr_round <- round(pfdr,3)
#List the NMF components that survive FDR correction
Nmf_fdr <- row.names(pfdr)[pfdr<0.05]
##Only look at the 11 significant components
nmfComponents11 <- c("Nmf26C1","Nmf26C2","Nmf26C4","Nmf26C7","Nmf26C8","Nmf26C10","Nmf26C18","Nmf26C19","Nmf26C22","Nmf26C23","Nmf26C26")
#Run gam models with white (white vs nonwhite)
NmfModels11 <- lapply(nmfComponents11, function(x) {
  gam(substitute(i ~ s(age) + sex + white + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
})
#Look at model summaries
models11 <- lapply(NmfModels11, summary)
#Pull p-values for the ga term (see note above on row 5)
p11 <- sapply(NmfModels11, function(v) summary(v)$p.table[5,4])
#Convert to data frame
p11 <- as.data.frame(p11)
#Print original p-values to three decimal places
p11_round <- round(p11,3)
#FDR correct p-values
pfdr11 <- p.adjust(p11[,1],method="fdr")
#Convert to data frame
pfdr11 <- as.data.frame(pfdr11)
#To print fdr-corrected p-values to three decimal places
pfdr11_round <- round(pfdr11,3)
#Add row names (component numbers of the 11 retained components)
rownames(pfdr11_round) <- c(1, 2, 4, 7, 8, 10, 18, 19, 22, 23, 26)
#List the NMF components that survive FDR correction
Nmf_fdr11 <- row.names(pfdr11_round)[pfdr11_round<0.05]
| /GamAnalyses_withRace.R | no_license | PennBBL/pncPreterm | R | false | false | 2,576 | r | ##########################################
#### GAM MODELS FOR PREMATURITY STUDY ####
##########################################
# Fits one GAM per NMF structural component relating the component to
# gestational age (ga), adjusting for a smooth age term, sex, race and
# maternal education, then FDR-corrects the ga p-values across components.
#Load data
data.NMF <- read.csv("/data/joy/BBL/projects/pncPreterm/subjectData/n278_Prematurity_allData.csv", header=TRUE, na.strings = "NA")
#Make race2 a factor with three levels (White, African American, and Other)
data.NMF$race2 <- as.factor(data.NMF$race2)
#Load library
library(mgcv)
#Get NMF variable names (all columns whose name contains "Nmf26")
nmfComponents <- names(data.NMF)[grep("Nmf26",names(data.NMF))]
#Run gam models with race 2 (white, african american, other)
#NmfModels <- lapply(nmfComponents, function(x) {
#  gam(substitute(i ~ s(age) + sex + race2 + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
#})
#OR Run gam models with white (white vs nonwhite)
NmfModels <- lapply(nmfComponents, function(x) {
  gam(substitute(i ~ s(age) + sex + white + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
})
#Look at model summaries
models <- lapply(NmfModels, summary)
#Pull p-values for the ga term (row 5 of the parametric coefficient table:
#intercept, sex, white, medu1, ga)
p <- sapply(NmfModels, function(v) summary(v)$p.table[5,4])
#Convert to data frame
p <- as.data.frame(p)
#Print original p-values to three decimal places
p_round <- round(p,3)
#FDR correct p-values
pfdr <- p.adjust(p[,1],method="fdr")
#Convert to data frame
pfdr <- as.data.frame(pfdr)
#To print fdr-corrected p-values to three decimal places
pfdr_round <- round(pfdr,3)
#List the NMF components that survive FDR correction
Nmf_fdr <- row.names(pfdr)[pfdr<0.05]
##Only look at the 11 significant components
nmfComponents11 <- c("Nmf26C1","Nmf26C2","Nmf26C4","Nmf26C7","Nmf26C8","Nmf26C10","Nmf26C18","Nmf26C19","Nmf26C22","Nmf26C23","Nmf26C26")
#Run gam models with white (white vs nonwhite)
NmfModels11 <- lapply(nmfComponents11, function(x) {
  gam(substitute(i ~ s(age) + sex + white + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
})
#Look at model summaries
models11 <- lapply(NmfModels11, summary)
#Pull p-values for the ga term (see note above on row 5)
p11 <- sapply(NmfModels11, function(v) summary(v)$p.table[5,4])
#Convert to data frame
p11 <- as.data.frame(p11)
#Print original p-values to three decimal places
p11_round <- round(p11,3)
#FDR correct p-values
pfdr11 <- p.adjust(p11[,1],method="fdr")
#Convert to data frame
pfdr11 <- as.data.frame(pfdr11)
#To print fdr-corrected p-values to three decimal places
pfdr11_round <- round(pfdr11,3)
#Add row names (component numbers of the 11 retained components)
rownames(pfdr11_round) <- c(1, 2, 4, 7, 8, 10, 18, 19, 22, 23, 26)
#List the NMF components that survive FDR correction
Nmf_fdr11 <- row.names(pfdr11_round)[pfdr11_round<0.05]
|
#' COM Poisson Binomial Distribution
#'
#' These functions provide the ability for generating probability function values and
#' cumulative probability function values for the COM Poisson Binomial Distribution.
#'
#' @usage
#' dCOMPBin(x,n,p,v)
#'
#' @param x vector of binomial random variables.
#' @param n single value for no of binomial trials.
#' @param p single value for probability of success.
#' @param v single value for v.
#'
#' @details
#' The probability function and cumulative function can be constructed and are denoted below
#'
#' The cumulative probability function is the summation of probability function values.
#'
#' \deqn{P_{COMPBin}(x) = \frac{{n \choose x}^v p^x (1-p)^{n-x}}{\sum_{j=0}^{n} {n \choose j}^v p^j (1-p)^{(n-j)}}}
#' \deqn{x = 0,1,2,3,...n}
#' \deqn{n = 1,2,3,...}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty }
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' The output of \code{dCOMPBin} gives a list format consisting
#'
#' \code{pdf} probability function values in vector form.
#'
#' \code{mean} mean of COM Poisson Binomial Distribution.
#'
#' \code{var} variance of COM Poisson Binomial Distribution.
#'
#' @references
#' Extracted from
#'
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' #plotting the random variables and probability values
#' col <- rainbow(5)
#' a <- c(0.58,0.59,0.6,0.61,0.62)
#' b <- c(0.022,0.023,0.024,0.025,0.026)
#' plot(0,0,main="COM Poisson Binomial probability function graph",xlab="Binomial random variable",
#' ylab="Probability function values",xlim = c(0,10),ylim = c(0,0.5))
#' for (i in 1:5)
#' {
#' lines(0:10,dCOMPBin(0:10,10,a[i],b[i])$pdf,col = col[i],lwd=2.85)
#' points(0:10,dCOMPBin(0:10,10,a[i],b[i])$pdf,col = col[i],pch=16)
#' }
#'
#' dCOMPBin(0:10,10,0.58,0.022)$pdf #extracting the pdf values
#' dCOMPBin(0:10,10,0.58,0.022)$mean #extracting the mean
#' dCOMPBin(0:10,10,0.58,0.022)$var #extracting the variance
#'
#' #plotting the random variables and cumulative probability values
#' col <- rainbow(5)
#' a <- c(0.58,0.59,0.6,0.61,0.62)
#' b <- c(0.022,0.023,0.024,0.025,0.026)
#' plot(0,0,main="COM Poisson Binomial probability function graph",xlab="Binomial random variable",
#' ylab="Probability function values",xlim = c(0,10),ylim = c(0,1))
#' for (i in 1:5)
#' {
#' lines(0:10,pCOMPBin(0:10,10,a[i],b[i]),col = col[i],lwd=2.85)
#' points(0:10,pCOMPBin(0:10,10,a[i],b[i]),col = col[i],pch=16)
#' }
#'
#' pCOMPBin(0:10,10,0.58,0.022) #acquiring the cumulative probability values
#'
#' @export
dCOMPBin<-function(x,n,p,v)
{
  # Probability function, mean and variance of the COM Poisson Binomial
  # distribution (Borges et al., 2014).  Validation order and error messages
  # are unchanged; the pmf is now computed vectorized, and the normalizing
  # constant is evaluated once instead of once per element of x / 0:n.
  #
  # Validate inputs: no NA, NaN or infinite values anywhere.
  if(any(is.na(c(x,n,p,v))) | any(is.infinite(c(x,n,p,v))) | any(is.nan(c(x,n,p,v))) )
  {
    stop("NA or Infinite or NAN values in the Input")
  }
  # A binomial random variable can never exceed the number of trials.
  if(max(x) > n )
  {
    stop("Binomial random variable cannot be greater than binomial trial value")
  }
  # Random variables and the trial count must be non-negative.
  if(any(x<0) | n<0)
  {
    stop("Binomial random variable or binomial trial value cannot be negative")
  }
  # The success probability must lie strictly between zero and one.
  if( p <= 0 | p >= 1 )
  {
    stop("Probability value doesnot satisfy conditions")
  }
  y<-0:n
  # Unnormalized COM-Poisson-binomial weights over the full support 0:n and
  # their normalizing constant (hoisted: computed exactly once).
  weights<-((choose(n,y))^v)*(p^y)*((1-p)^(n-y))
  norm.const<-sum(weights)
  # Normalized pmf over the full support, used for the propriety check and
  # for the mean/variance below.
  value1<-weights/norm.const
  check1<-sum(value1)
  # Guard against parameter combinations that do not yield a proper pmf.
  if(check1 < 0.9999 | check1 >1.0001 | any(value1 < 0) | any(value1 >1))
  {
    stop("Input parameter combinations of probability of success and covariance does
         not create proper probability function")
  }
  # Vectorized pmf evaluation at the requested random variables.
  value<-((choose(n,x))^v)*(p^x)*((1-p)^(n-x))/norm.const
  # Output: pdf values plus the distribution's mean and variance.
  return(list("pdf"=value,
              "mean"=sum(value1*y),
              "var"=sum((y^2)*value1)-(sum(value1*y))^2))
}
#' COM Poisson Binomial Distribution
#'
#' These functions provide the ability for generating probability function values and
#' cumulative probability function values for the COM Poisson Binomial Distribution.
#'
#' @usage
#' pCOMPBin(x,n,p,v)
#'
#' @param x vector of binomial random variables.
#' @param n single value for no of binomial trials.
#' @param p single value for probability of success.
#' @param v single value for v.
#'
#' @details
#' The probability function and cumulative function can be constructed and are denoted below
#'
#' The cumulative probability function is the summation of probability function values.
#'
#' \deqn{P_{COMPBin}(x) = \frac{{n \choose x}^v p^x (1-p)^{n-x}}{\sum_{j=0}^{n} {n \choose j}^v p^j (1-p)^{(n-j)}}}
#' \deqn{x = 0,1,2,3,...n}
#' \deqn{n = 1,2,3,...}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty }
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' The output of \code{pCOMPBin} gives cumulative probability values in vector form.
#'
#' @references
#' Extracted from
#'
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' #plotting the random variables and probability values
#' col <- rainbow(5)
#' a <- c(0.58,0.59,0.6,0.61,0.62)
#' b <- c(0.022,0.023,0.024,0.025,0.026)
#' plot(0,0,main="COM Poisson Binomial probability function graph",xlab="Binomial random variable",
#' ylab="Probability function values",xlim = c(0,10),ylim = c(0,0.5))
#' for (i in 1:5)
#' {
#' lines(0:10,dCOMPBin(0:10,10,a[i],b[i])$pdf,col = col[i],lwd=2.85)
#' points(0:10,dCOMPBin(0:10,10,a[i],b[i])$pdf,col = col[i],pch=16)
#' }
#'
#' dCOMPBin(0:10,10,0.58,0.022)$pdf #extracting the pdf values
#' dCOMPBin(0:10,10,0.58,0.022)$mean #extracting the mean
#' dCOMPBin(0:10,10,0.58,0.022)$var #extracting the variance
#'
#' #plotting the random variables and cumulative probability values
#' col <- rainbow(5)
#' a <- c(0.58,0.59,0.6,0.61,0.62)
#' b <- c(0.022,0.023,0.024,0.025,0.026)
#' plot(0,0,main="COM Poisson Binomial probability function graph",xlab="Binomial random variable",
#' ylab="Probability function values",xlim = c(0,10),ylim = c(0,1))
#' for (i in 1:5)
#' {
#' lines(0:10,pCOMPBin(0:10,10,a[i],b[i]),col = col[i],lwd=2.85)
#' points(0:10,pCOMPBin(0:10,10,a[i],b[i]),col = col[i],pch=16)
#' }
#'
#' pCOMPBin(0:10,10,0.58,0.022) #acquiring the cumulative probability values
#'
#' @export
pCOMPBin<-function(x,n,p,v)
{
  # Cumulative distribution function of the COM Poisson Binomial
  # distribution.  The previous implementation called dCOMPBin once per
  # element of x, recomputing the whole pmf each time (O(length(x)*n));
  # here the pmf over 0:n is evaluated once and prefix-summed.
  #
  # Reject NA/NaN/Inf in x up front so the error message matches the one
  # dCOMPBin would have produced.
  if(any(is.na(x)) | any(is.infinite(x)) | any(is.nan(x)))
  {
    stop("NA or Infinite or NAN values in the Input")
  }
  # dCOMPBin validates n, p and v and returns the pmf over the full support.
  pdf.all<-dCOMPBin(0:n,n,p,v)$pdf
  # Validate the requested random variables against the support, with the
  # same messages as dCOMPBin.
  if(max(x) > n)
  {
    stop("Binomial random variable cannot be greater than binomial trial value")
  }
  if(any(x < 0))
  {
    stop("Binomial random variable or binomial trial value cannot be negative")
  }
  # P(X <= x) is a prefix sum of the pmf; index shift because support starts at 0.
  ans<-cumsum(pdf.all)[x+1]
  return(ans)
}
#' Negative Log Likelihood value of COM Poisson Binomial distribution
#'
#' This function will calculate the negative log likelihood value when the vector of binomial random
#' variables and vector of corresponding frequencies are given with the input parameters.
#'
#' @usage
#' NegLLCOMPBin(x,freq,p,v)
#'
#' @param x vector of binomial random variables.
#' @param freq vector of frequencies.
#' @param p single value for probability of success.
#' @param v single value for v.
#'
#' @details
#' \deqn{freq \ge 0}
#' \deqn{x = 0,1,2,..}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty}
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' The output of \code{NegLLCOMPBin} will produce a single numeric value.
#'
#' @references
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' No.D.D <- 0:7 #assigning the random variables
#' Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
#'
#' NegLLCOMPBin(No.D.D,Obs.fre.1,.5,.03) #acquiring the negative log likelihood value
#'
#' @export
NegLLCOMPBin<-function(x,freq,p,v)
{
  # Negative log likelihood of the COM Poisson Binomial distribution for
  # grouped data (random variables x with frequencies freq).
  #
  # Validate inputs BEFORE any derived quantities are built: the original
  # code called rep(x, freq) first, so an NA in freq failed inside rep()
  # instead of producing the intended error message.
  if(any(is.na(c(x,freq,p,v))) | any(is.infinite(c(x,freq,p,v))) |
     any(is.nan(c(x,freq,p,v))) )
  {
    stop("NA or Infinite or NAN values in the Input")
  }
  # Random variables and frequencies must be non-negative.
  if(any(c(x,freq) < 0) )
  {
    stop("Binomial random variable or frequency values cannot be negative")
  }
  # The success probability must lie strictly between zero and one.
  if( p <= 0 | p >= 1)
  {
    stop("Probability value doesnot satisfy conditions")
  }
  n<-max(x)
  # Expand the grouped data into one observation per unit; its length is
  # sum(freq), so no "[1:sum(freq)]" subsetting is needed.
  data<-rep(x,freq)
  y<-0:n
  # Normalized pmf over the full support (vectorized), used only to verify
  # the parameter combination yields a proper probability function.
  weights<-((choose(n,y))^v)*(p^y)*((1-p)^(n-y))
  value1<-weights/sum(weights)
  check1<-sum(value1)
  if(check1 < 0.9999 | check1 >1.0001 | any(value1 < 0) | any(value1 >1))
  {
    stop("Input parameter combinations of probability of success and covariance does
         not create proper probability function")
  }
  # Negative log likelihood: minus the sum of log pmf over all observations.
  return(-(v*sum(log(choose(n,data))) +
           log(p)*sum(data) + log(1-p)*sum(n-data) -
           sum(freq)*log(sum(weights))))
}
#' Estimating the probability of success and v parameter for COM Poisson Binomial
#' Distribution
#'
#' The function will estimate the probability of success and v parameter using the maximum log
#' likelihood method for the COM Poisson Binomial distribution when the binomial random
#' variables and corresponding frequencies are given.
#'
#' @usage
#' EstMLECOMPBin(x,freq,p,v,...)
#'
#' @param x vector of binomial random variables.
#' @param freq vector of frequencies.
#' @param p single value for probability of success.
#' @param v single value for v.
#' @param ... mle2 function inputs except data and estimating parameter.
#'
#' @details
#' \deqn{x = 0,1,2,...}
#' \deqn{freq \ge 0}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty}
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' \code{EstMLECOMPBin} here is used as a wrapper for the \code{mle2} function of \pkg{bbmle} package
#' therefore output is of class of mle2.
#'
#' @references
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' No.D.D <- 0:7 #assigning the random variables
#' Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
#'
#' #estimating the parameters using maximum log likelihood value and assigning it
#' parameters <- EstMLECOMPBin(x=No.D.D,freq=Obs.fre.1,p=0.5,v=0.1)
#'
#' bbmle::coef(parameters) #extracting the parameters
#'
#'@export
# Wrapper around bbmle::mle2 that estimates p and v by maximum likelihood.
# Warnings whose message matches "NaN" are muffled, because the optimizer
# routinely probes parameter values outside the valid domain while searching.
EstMLECOMPBin<-function(x,freq,p,v,...)
{
  # Selectively suppress warnings whose message matches the given regex,
  # letting all other warnings propagate to the caller.
  suppressWarnings2 <-function(expr, regex=character())
  {
    withCallingHandlers(expr, warning=function(w)
    {
      if (length(regex) == 1 && length(grep(regex, conditionMessage(w))))
      {
        invokeRestart("muffleWarning")
      }
    } )
  }
  # Delegate the optimization to bbmle::mle2 with the internal objective
  # .EstMLECOMPBin; extra arguments in ... are forwarded to mle2.
  output<-suppressWarnings2(bbmle::mle2(.EstMLECOMPBin,data=list(x=x,freq=freq),
                            start = list(p=p,v=v),...),"NaN")
  return(output)
}
.EstMLECOMPBin<-function(x,freq,p,v)
{
  # Internal objective for bbmle::mle2: negative log likelihood of the COM
  # Poisson Binomial distribution, deliberately WITHOUT input validation
  # (mle2 probes the parameter space freely, so restrictions would break the
  # optimizer).  Removed the unused local `value` and the redundant
  # data[1:sum(freq)] subsetting (rep(x, freq) already has length sum(freq)).
  n<-max(x)
  y<-0:n
  data<-rep(x,freq)
  return(-(v*sum(log(choose(n,data))) + log(p)*sum(data) +
           log(1-p)*sum(n-data) -
           sum(freq)*log(sum(((choose(n,y))^v)*(p^y)*((1-p)^(n-y))))))
}
#' Fitting the COM Poisson Binomial Distribution when binomial
#' random variable, frequency, probability of success and v parameter are given
#'
#' The function will fit the COM Poisson Binomial Distribution
#' when random variables, corresponding frequencies, probability of success and v parameter are given.
#' It will provide the expected frequencies, chi-squared test statistics value, p value,
#' and degree of freedom so that it can be seen if this distribution fits the data.
#'
#' @usage
#' fitCOMPBin(x,obs.freq,p,v)
#'
#' @param x vector of binomial random variables.
#' @param obs.freq vector of frequencies.
#' @param p single value for probability of success.
#' @param v single value for v.
#'
#' @details
#' \deqn{obs.freq \ge 0}
#' \deqn{x = 0,1,2,..}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty}
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' The output of \code{fitCOMPBin} gives the class format \code{fitCPB} and \code{fit} consisting a list
#'
#' \code{bin.ran.var} binomial random variables.
#'
#' \code{obs.freq} corresponding observed frequencies.
#'
#' \code{exp.freq} corresponding expected frequencies.
#'
#' \code{statistic} chi-squared test statistics.
#'
#' \code{df} degree of freedom.
#'
#' \code{p.value} probability value by chi-squared test statistic.
#'
#' \code{fitCPB} fitted probability values of \code{dCOMPBin}.
#'
#' \code{NegLL} Negative Log Likelihood value.
#'
#' \code{p} estimated probability value.
#'
#' \code{v} estimated v parameter value.
#'
#' \code{AIC} AIC value.
#'
#' \code{call} the inputs of the function.
#'
#' Methods \code{summary}, \code{print}, \code{AIC}, \code{residuals} and \code{fitted}
#' can be used to extract specific outputs.
#'
#' @references
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' No.D.D <- 0:7 #assigning the random variables
#' Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
#'
#' #estimating the parameters using maximum log likelihood value and assigning it
#' parameters <- EstMLECOMPBin(x=No.D.D,freq=Obs.fre.1,p=0.5,v=0.050)
#'
#' pCOMPBin <- bbmle::coef(parameters)[1]
#' vCOMPBin <- bbmle::coef(parameters)[2]
#'
#' #fitting when the random variable,frequencies,probability and v parameter are given
#' results <- fitCOMPBin(No.D.D,Obs.fre.1,pCOMPBin,vCOMPBin)
#' results
#'
#' #extracting the AIC value
#' AIC(results)
#'
#' #extract fitted values
#' fitted(results)
#'
#' @export
fitCOMPBin<-function(x,obs.freq,p,v)
{
  # Fit the COM Poisson Binomial distribution to observed frequencies and
  # run a chi-squared goodness-of-fit test.
  #
  # Validate inputs: no NA, NaN or infinite values anywhere.
  if(any(is.na(c(x,obs.freq,p,v))) | any(is.infinite(c(x,obs.freq,p,v))) |
     any(is.nan(c(x,obs.freq,p,v))) )
  {
    stop("NA or Infinite or NAN values in the Input")
  }
  # Estimated pmf under the fitted parameters; dCOMPBin validates x, p and v.
  est<-dCOMPBin(x,max(x),p,v)
  est.prob<-est$pdf
  # Expected frequencies under the fitted model.
  exp.freq<-round((sum(obs.freq)*est.prob),2)
  # Pearson chi-squared goodness-of-fit statistic.
  statistic<-sum(((obs.freq-exp.freq)^2)/exp.freq)
  # Degrees of freedom: number of cells minus the two estimated parameters
  # minus one.
  df<-length(x)-3
  # Validate df BEFORE it reaches pchisq: the original code computed the
  # p-value first, so a negative df produced a NaN warning before the stop.
  if(df<=0)
  {
    stop("Degrees of freedom cannot be less than or equal to zero")
  }
  p.value<-1-stats::pchisq(statistic,df)
  # Warn when the chi-squared approximation is doubtful (small expected counts).
  if(min(exp.freq)<5 && min(exp.freq) > 0)
  {
    message("Chi-squared approximation may be doubtful because expected frequency is less than 5")
  }
  # Warn when the approximation breaks down entirely (zero expected counts).
  if(min(exp.freq)==0)
  {
    message("Chi-squared approximation is not suitable because expected frequency approximates to zero")
  }
  NegLL<-NegLLCOMPBin(x,obs.freq,p,v)
  names(NegLL)<-NULL
  # AIC = 2k + 2*NegLL with k = 2 estimated parameters (p and v).
  final<-list("bin.ran.var"=x,"obs.freq"=obs.freq,"exp.freq"=exp.freq,"statistic"=round(statistic,4),
              "df"=df,"p.value"=round(p.value,4),"fitCPB"=est,
              "NegLL"=NegLL,"p"=p,"v"=v,"AIC"=2*2+2*NegLL,"call"=match.call())
  class(final)<-c("fitCPB","fit")
  return(final)
}
#' @method fitCOMPBin default
#' @export
# Default S3 method: simply forwards to the main fitCOMPBin implementation.
fitCOMPBin.default<-function(x,obs.freq,p,v)
{
  return(fitCOMPBin(x,obs.freq,p,v))
}
#' @method print fitCPB
#' @export
# S3 print method for objects of class "fitCPB": shows the original call,
# the observed/expected frequencies and the chi-squared test summary.
print.fitCPB<-function(x,...)
{
  cat("Call: \n")
  print(x$call)
  cat("\nChi-squared test for COM Poisson Binomial Distribution \n\t
      Observed Frequency : ",x$obs.freq,"\n\t
      expected Frequency : ",x$exp.freq,"\n\t
      estimated p value :",x$p," ,estimated v parameter :",x$v,"\n\t
      X-squared :",x$statistic," ,df :",x$df," ,p-value :",x$p.value,"\n")
}
#' @method summary fitCPB
#' @export
# S3 summary method for objects of class "fitCPB": like print.fitCPB but
# additionally reports the negative log likelihood and the AIC value.
summary.fitCPB<-function(object,...)
{
  cat("Call: \n")
  print(object$call)
  cat("\nChi-squared test for COM Poisson Binomial Distribution \n\t
      Observed Frequency : ",object$obs.freq,"\n\t
      expected Frequency : ",object$exp.freq,"\n\t
      estimated p value :",object$p," ,estimated v parameter :",object$v,"\n\t
      X-squared :",object$statistic," ,df :",object$df," ,p-value :",object$p.value,"\n\t
      Negative Loglikehood value :",object$NegLL,"\n\t
      AIC value :",object$AIC,"\n")
}
| /R/COMPBin.R | no_license | cran/fitODBOD | R | false | false | 21,324 | r | #' COM Poisson Binomial Distribution
#'
#' These functions provide the ability for generating probability function values and
#' cumulative probability function values for the COM Poisson Binomial Distribution.
#'
#' @usage
#' dCOMPBin(x,n,p,v)
#'
#' @param x vector of binomial random variables.
#' @param n single value for no of binomial trials.
#' @param p single value for probability of success.
#' @param v single value for v.
#'
#' @details
#' The probability function and cumulative function can be constructed and are denoted below
#'
#' The cumulative probability function is the summation of probability function values.
#'
#' \deqn{P_{COMPBin}(x) = \frac{{n \choose x}^v p^x (1-p)^{n-x}}{\sum_{j=0}^{n} {n \choose j}^v p^j (1-p)^{(n-j)}}}
#' \deqn{x = 0,1,2,3,...n}
#' \deqn{n = 1,2,3,...}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty }
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' The output of \code{dCOMPBin} gives a list format consisting
#'
#' \code{pdf} probability function values in vector form.
#'
#' \code{mean} mean of COM Poisson Binomial Distribution.
#'
#' \code{var} variance of COM Poisson Binomial Distribution.
#'
#' @references
#' Extracted from
#'
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' #plotting the random variables and probability values
#' col <- rainbow(5)
#' a <- c(0.58,0.59,0.6,0.61,0.62)
#' b <- c(0.022,0.023,0.024,0.025,0.026)
#' plot(0,0,main="COM Poisson Binomial probability function graph",xlab="Binomial random variable",
#' ylab="Probability function values",xlim = c(0,10),ylim = c(0,0.5))
#' for (i in 1:5)
#' {
#' lines(0:10,dCOMPBin(0:10,10,a[i],b[i])$pdf,col = col[i],lwd=2.85)
#' points(0:10,dCOMPBin(0:10,10,a[i],b[i])$pdf,col = col[i],pch=16)
#' }
#'
#' dCOMPBin(0:10,10,0.58,0.022)$pdf #extracting the pdf values
#' dCOMPBin(0:10,10,0.58,0.022)$mean #extracting the mean
#' dCOMPBin(0:10,10,0.58,0.022)$var #extracting the variance
#'
#' #plotting the random variables and cumulative probability values
#' col <- rainbow(5)
#' a <- c(0.58,0.59,0.6,0.61,0.62)
#' b <- c(0.022,0.023,0.024,0.025,0.026)
#' plot(0,0,main="COM Poisson Binomial probability function graph",xlab="Binomial random variable",
#' ylab="Probability function values",xlim = c(0,10),ylim = c(0,1))
#' for (i in 1:5)
#' {
#' lines(0:10,pCOMPBin(0:10,10,a[i],b[i]),col = col[i],lwd=2.85)
#' points(0:10,pCOMPBin(0:10,10,a[i],b[i]),col = col[i],pch=16)
#' }
#'
#' pCOMPBin(0:10,10,0.58,0.022) #acquiring the cumulative probability values
#'
#' @export
dCOMPBin<-function(x,n,p,v)
{
  # Probability function, mean and variance of the COM Poisson Binomial
  # distribution (Borges et al., 2014).  Validation order and error messages
  # are unchanged; the pmf is now computed vectorized, and the normalizing
  # constant is evaluated once instead of once per element of x / 0:n.
  #
  # Validate inputs: no NA, NaN or infinite values anywhere.
  if(any(is.na(c(x,n,p,v))) | any(is.infinite(c(x,n,p,v))) | any(is.nan(c(x,n,p,v))) )
  {
    stop("NA or Infinite or NAN values in the Input")
  }
  # A binomial random variable can never exceed the number of trials.
  if(max(x) > n )
  {
    stop("Binomial random variable cannot be greater than binomial trial value")
  }
  # Random variables and the trial count must be non-negative.
  if(any(x<0) | n<0)
  {
    stop("Binomial random variable or binomial trial value cannot be negative")
  }
  # The success probability must lie strictly between zero and one.
  if( p <= 0 | p >= 1 )
  {
    stop("Probability value doesnot satisfy conditions")
  }
  y<-0:n
  # Unnormalized COM-Poisson-binomial weights over the full support 0:n and
  # their normalizing constant (hoisted: computed exactly once).
  weights<-((choose(n,y))^v)*(p^y)*((1-p)^(n-y))
  norm.const<-sum(weights)
  # Normalized pmf over the full support, used for the propriety check and
  # for the mean/variance below.
  value1<-weights/norm.const
  check1<-sum(value1)
  # Guard against parameter combinations that do not yield a proper pmf.
  if(check1 < 0.9999 | check1 >1.0001 | any(value1 < 0) | any(value1 >1))
  {
    stop("Input parameter combinations of probability of success and covariance does
         not create proper probability function")
  }
  # Vectorized pmf evaluation at the requested random variables.
  value<-((choose(n,x))^v)*(p^x)*((1-p)^(n-x))/norm.const
  # Output: pdf values plus the distribution's mean and variance.
  return(list("pdf"=value,
              "mean"=sum(value1*y),
              "var"=sum((y^2)*value1)-(sum(value1*y))^2))
}
#' COM Poisson Binomial Distribution
#'
#' These functions provide the ability for generating probability function values and
#' cumulative probability function values for the COM Poisson Binomial Distribution.
#'
#' @usage
#' pCOMPBin(x,n,p,v)
#'
#' @param x vector of binomial random variables.
#' @param n single value for no of binomial trials.
#' @param p single value for probability of success.
#' @param v single value for v.
#'
#' @details
#' The probability function and cumulative function can be constructed and are denoted below
#'
#' The cumulative probability function is the summation of probability function values.
#'
#' \deqn{P_{COMPBin}(x) = \frac{{n \choose x}^v p^x (1-p)^{n-x}}{\sum_{j=0}^{n} {n \choose j}^v p^j (1-p)^{(n-j)}}}
#' \deqn{x = 0,1,2,3,...n}
#' \deqn{n = 1,2,3,...}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty }
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' The output of \code{pCOMPBin} gives cumulative probability values in vector form.
#'
#' @references
#' Extracted from
#'
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' #plotting the random variables and probability values
#' col <- rainbow(5)
#' a <- c(0.58,0.59,0.6,0.61,0.62)
#' b <- c(0.022,0.023,0.024,0.025,0.026)
#' plot(0,0,main="COM Poisson Binomial probability function graph",xlab="Binomial random variable",
#' ylab="Probability function values",xlim = c(0,10),ylim = c(0,0.5))
#' for (i in 1:5)
#' {
#' lines(0:10,dCOMPBin(0:10,10,a[i],b[i])$pdf,col = col[i],lwd=2.85)
#' points(0:10,dCOMPBin(0:10,10,a[i],b[i])$pdf,col = col[i],pch=16)
#' }
#'
#' dCOMPBin(0:10,10,0.58,0.022)$pdf #extracting the pdf values
#' dCOMPBin(0:10,10,0.58,0.022)$mean #extracting the mean
#' dCOMPBin(0:10,10,0.58,0.022)$var #extracting the variance
#'
#' #plotting the random variables and cumulative probability values
#' col <- rainbow(5)
#' a <- c(0.58,0.59,0.6,0.61,0.62)
#' b <- c(0.022,0.023,0.024,0.025,0.026)
#' plot(0,0,main="COM Poisson Binomial probability function graph",xlab="Binomial random variable",
#' ylab="Probability function values",xlim = c(0,10),ylim = c(0,1))
#' for (i in 1:5)
#' {
#' lines(0:10,pCOMPBin(0:10,10,a[i],b[i]),col = col[i],lwd=2.85)
#' points(0:10,pCOMPBin(0:10,10,a[i],b[i]),col = col[i],pch=16)
#' }
#'
#' pCOMPBin(0:10,10,0.58,0.022) #acquiring the cumulative probability values
#'
#' @export
pCOMPBin<-function(x,n,p,v)
{
  # Cumulative distribution function of the COM Poisson Binomial distribution.
  #
  # x : vector of binomial random variables (integers in 0..n).
  # n : single value for number of binomial trials.
  # p : single probability of success, 0 < p < 1.
  # v : single dispersion parameter.
  #
  # Returns a vector of cumulative probabilities, one per element of x.
  #input validation mirroring dCOMPBin so invalid x still errors here
  if(any(is.na(c(x,n,p,v))) | any(is.infinite(c(x,n,p,v))) | any(is.nan(c(x,n,p,v))) )
  {
    stop("NA or Infinite or NAN values in the Input")
  }
  if(max(x) > n )
  {
    stop("Binomial random variable cannot be greater than binomial trial value")
  }
  if(any(x<0) | n<0)
  {
    stop("Binomial random variable or binomial trial value cannot be negative")
  }
  #compute the full pmf once and take cumulative sums; the previous version
  #re-evaluated dCOMPBin (including all of its validation and normalization)
  #for every single element of x, which was quadratic in n
  cdf<-cumsum(dCOMPBin(0:n,n,p,v)$pdf)
  #generating an output vector of cumulative probability function values
  return(cdf[x+1])
}
#' Negative Log Likelihood value of COM Poisson Binomial distribution
#'
#' This function will calculate the negative log likelihood value when the vector of binomial random
#' variables and vector of corresponding frequencies are given with the input parameters.
#'
#' @usage
#' NegLLCOMPBin(x,freq,p,v)
#'
#' @param x vector of binomial random variables.
#' @param freq vector of frequencies.
#' @param p single value for probability of success.
#' @param v single value for v.
#'
#' @details
#' \deqn{freq \ge 0}
#' \deqn{x = 0,1,2,..}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty}
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' The output of \code{NegLLCOMPBin} will produce a single numeric value.
#'
#' @references
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' No.D.D <- 0:7 #assigning the random variables
#' Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
#'
#' NegLLCOMPBin(No.D.D,Obs.fre.1,.5,.03) #acquiring the negative log likelihood value
#'
#' @export
NegLLCOMPBin<-function(x,freq,p,v)
{
  # Negative log-likelihood of the COM Poisson Binomial distribution for
  # observed (value, frequency) data.
  #
  # x    : vector of binomial random variables.
  # freq : vector of observed frequencies (same length as x).
  # p    : single probability of success, 0 < p < 1.
  # v    : single dispersion parameter.
  #
  # Returns a single numeric value (the negative log-likelihood).
  #checking if inputs consist NA(not assigned)values, infinite values or NAN(not a number)values;
  #validation now runs BEFORE the inputs are used (the previous version called
  #rep(x,freq) first, so a bad `freq` failed with an unrelated low-level error)
  if(any(is.na(c(x,freq,p,v))) | any(is.infinite(c(x,freq,p,v))) |
     any(is.nan(c(x,freq,p,v))) )
  {
    stop("NA or Infinite or NAN values in the Input")
  }
  #checking if any of the random variables or frequencies are less than zero
  if(any(c(x,freq) < 0) )
  {
    stop("Binomial random variable or frequency values cannot be negative")
  }
  #checking the probability value is inbetween zero and one
  if( p <= 0 | p >= 1)
  {
    stop("Probability value doesnot satisfy conditions")
  }
  #constructing the data set using the random variables vector and frequency vector
  n<-max(x)
  data<-rep(x,freq)
  #pmf over the full support in one vectorized pass with a single evaluation
  #of the normalizing constant (previously an O(n^2) loop)
  y<-0:n
  num<-((choose(n,y))^v)*(p^y)*((1-p)^(n-y))
  value1<-num/sum(num)
  check1<-sum(value1)
  #checking if the sum of all probability values leads upto one
  #if not providing an error message and stopping the function progress
  if(check1 < 0.9999 | check1 >1.0001 | any(value1 < 0) | any(value1 >1))
  {
    stop("Input parameter combinations of probability of success and covariance does
             not create proper probability function")
  }
  #negative log likelihood as a single value; `data` already has length
  #sum(freq), so the original data[1:sum(freq)] subsetting was a no-op
  return(-(v*sum(log(choose(n,data))) +
           log(p)*sum(data) + log(1-p)*sum(n-data) -
           sum(freq)*log(sum(num))))
}
#' Estimating the probability of success and v parameter for COM Poisson Binomial
#' Distribution
#'
#' The function will estimate the probability of success and v parameter using the maximum log
#' likelihood method for the COM Poisson Binomial distribution when the binomial random
#' variables and corresponding frequencies are given.
#'
#' @usage
#' EstMLECOMPBin(x,freq,p,v,...)
#'
#' @param x vector of binomial random variables.
#' @param freq vector of frequencies.
#' @param p single value for probability of success.
#' @param v single value for v.
#' @param ... mle2 function inputs except data and estimating parameter.
#'
#' @details
#' \deqn{x = 0,1,2,...}
#' \deqn{freq \ge 0}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty}
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' \code{EstMLECOMPBin} here is used as a wrapper for the \code{mle2} function of \pkg{bbmle} package
#' therefore output is of class of mle2.
#'
#' @references
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' No.D.D <- 0:7 #assigning the random variables
#' Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
#'
#' #estimating the parameters using maximum log likelihood value and assigning it
#' parameters <- EstMLECOMPBin(x=No.D.D,freq=Obs.fre.1,p=0.5,v=0.1)
#'
#' bbmle::coef(parameters) #extracting the parameters
#'
#'@export
EstMLECOMPBin<-function(x,freq,p,v,...)
{
  # Maximum-likelihood estimation of (p, v) for the COM Poisson Binomial
  # distribution: a thin wrapper around bbmle::mle2 using the internal
  # objective .EstMLECOMPBin. Returns an object of class "mle2".
  #
  # x, freq : observed values and their frequencies.
  # p, v    : starting values handed to the optimizer.
  # ...     : forwarded to bbmle::mle2 (everything except data and start).
  # Evaluate `expr` while muffling warnings whose message matches `regex`;
  # mle2 routinely emits "NaN"-related warnings while probing bad parameter
  # values, and those are the only warnings suppressed here.
  run_muffling <- function(expr, regex = character())
  {
    withCallingHandlers(expr, warning = function(w)
    {
      if (length(regex) == 1 && grepl(regex, conditionMessage(w)))
      {
        invokeRestart("muffleWarning")
      }
    } )
  }
  fit <- run_muffling(bbmle::mle2(.EstMLECOMPBin, data = list(x = x, freq = freq),
                                  start = list(p = p, v = v), ...), "NaN")
  return(fit)
}
.EstMLECOMPBin<-function(x,freq,p,v)
{
  # Internal objective function for bbmle::mle2: returns the negative
  # log-likelihood of the COM Poisson Binomial distribution without any input
  # validation (mle2 requires a plain numeric objective).
  #
  # x, freq : observed values and their frequencies.
  # p, v    : parameter values proposed by the optimizer.
  n<-max(x)
  y<-0:n
  #expand (value, frequency) pairs into the raw observation vector
  data<-rep(x,freq)
  #normalizing constant of the COM Poisson Binomial pmf, computed once
  denom<-sum(((choose(n,y))^v)*(p^y)*((1-p)^(n-y)))
  #negative log likelihood; the original data[1:sum(freq)] subsetting was a
  #no-op (rep(x,freq) already has length sum(freq)) and an unused
  #`value<-NULL` has been removed
  return(-(v*sum(log(choose(n,data))) + log(p)*sum(data) +
           log(1-p)*sum(n-data) - sum(freq)*log(denom)))
}
#' Fitting the COM Poisson Binomial Distribution when binomial
#' random variable, frequency, probability of success and v parameter are given
#'
#' The function will fit the COM Poisson Binomial Distribution
#' when random variables, corresponding frequencies, probability of success and v parameter are given.
#' It will provide the expected frequencies, chi-squared test statistics value, p value,
#' and degree of freedom so that it can be seen if this distribution fits the data.
#'
#' @usage
#' fitCOMPBin(x,obs.freq,p,v)
#'
#' @param x vector of binomial random variables.
#' @param obs.freq vector of frequencies.
#' @param p single value for probability of success.
#' @param v single value for v.
#'
#' @details
#' \deqn{obs.freq \ge 0}
#' \deqn{x = 0,1,2,..}
#' \deqn{0 < p < 1}
#' \deqn{-\infty < v < +\infty}
#'
#' \strong{NOTE} : If input parameters are not in given domain conditions
#' necessary error messages will be provided to go further.
#'
#' @return
#' The output of \code{fitCOMPBin} gives the class format \code{fitCPB} and \code{fit} consisting a list
#'
#' \code{bin.ran.var} binomial random variables.
#'
#' \code{obs.freq} corresponding observed frequencies.
#'
#' \code{exp.freq} corresponding expected frequencies.
#'
#' \code{statistic} chi-squared test statistics.
#'
#' \code{df} degree of freedom.
#'
#' \code{p.value} probability value by chi-squared test statistic.
#'
#' \code{fitCPB} fitted probability values of \code{dCOMPBin}.
#'
#' \code{NegLL} Negative Log Likelihood value.
#'
#' \code{p} estimated probability value.
#'
#' \code{v} estimated v parameter value.
#'
#' \code{AIC} AIC value.
#'
#' \code{call} the inputs of the function.
#'
#' Methods \code{summary}, \code{print}, \code{AIC}, \code{residuals} and \code{fitted}
#' can be used to extract specific outputs.
#'
#' @references
#' Borges, P., Rodrigues, J., Balakrishnan, N. and Bazan, J., 2014. A COM-Poisson type
#' generalization of the binomial distribution and its properties and applications.
#' Statistics & Probability Letters, 87, pp.158-166.
#'
#' Available at: \doi{10.1016/j.spl.2014.01.019}
#'
#' @examples
#' No.D.D <- 0:7 #assigning the random variables
#' Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
#'
#' #estimating the parameters using maximum log likelihood value and assigning it
#' parameters <- EstMLECOMPBin(x=No.D.D,freq=Obs.fre.1,p=0.5,v=0.050)
#'
#' pCOMPBin <- bbmle::coef(parameters)[1]
#' vCOMPBin <- bbmle::coef(parameters)[2]
#'
#' #fitting when the random variable,frequencies,probability and v parameter are given
#' results <- fitCOMPBin(No.D.D,Obs.fre.1,pCOMPBin,vCOMPBin)
#' results
#'
#' #extracting the AIC value
#' AIC(results)
#'
#' #extract fitted values
#' fitted(results)
#'
#' @export
fitCOMPBin<-function(x,obs.freq,p,v)
{
  # Fit the COM Poisson Binomial distribution to observed (value, frequency)
  # data and run a chi-squared goodness-of-fit test.
  #
  # x        : vector of binomial random variables.
  # obs.freq : vector of observed frequencies (same length as x).
  # p        : single probability of success, 0 < p < 1.
  # v        : single dispersion parameter.
  #
  # Returns an object of class c("fitCPB","fit"): a list holding observed and
  # expected frequencies, the chi-squared statistic/df/p-value, the fitted
  # dCOMPBin output, the negative log-likelihood, the parameters, the AIC and
  # the matched call.
  #checking if inputs consist NA(not assigned)values, infinite values or NAN(not a number)values
  #if so creating an error message as well as stopping the function progress.
  if(any(is.na(c(x,obs.freq,p,v))) | any(is.infinite(c(x,obs.freq,p,v))) |
     any(is.nan(c(x,obs.freq,p,v))) )
  {
    stop("NA or Infinite or NAN values in the Input")
  }
  #for given random variables and parameters calculating the estimated probability values
  est<-dCOMPBin(x,max(x),p,v)
  est.prob<-est$pdf
  #using the estimated probability values the expected frequencies are calculated
  exp.freq<-round((sum(obs.freq)*est.prob),2)
  #chi-squared test statistic from observed vs expected frequencies
  statistic<-sum(((obs.freq-exp.freq)^2)/exp.freq)
  #degrees of freedom: number of categories minus two estimated parameters minus one
  df<-length(x)-3
  #validate df BEFORE evaluating the tail probability; the previous version
  #called pchisq() with a non-positive df first, producing NaN and a warning
  #before the intended error was raised
  if(df<=0)
  {
    stop("Degrees of freedom cannot be less than or equal to zero")
  }
  #p value of chi-squared test statistic is calculated
  p.value<-1-stats::pchisq(statistic,df)
  #checking if any of the expected frequencies are less than five and greater than zero, if so
  #a warning message is provided in interpreting the results
  if(min(exp.freq)<5 && min(exp.freq) > 0)
  {
    message("Chi-squared approximation may be doubtful because expected frequency is less than 5")
  }
  #checking if expected frequency is zero, if so providing a warning message in interpreting
  #the results
  if(min(exp.freq)==0)
  {
    message("Chi-squared approximation is not suitable because expected frequency approximates to zero")
  }
  NegLL<-NegLLCOMPBin(x,obs.freq,p,v)
  names(NegLL)<-NULL
  #the final output is in a list format containing the calculated values;
  #AIC = 2k + 2*NegLL with k = 2 estimated parameters (p and v)
  final<-list("bin.ran.var"=x,"obs.freq"=obs.freq,"exp.freq"=exp.freq,"statistic"=round(statistic,4),
              "df"=df,"p.value"=round(p.value,4),"fitCPB"=est,
              "NegLL"=NegLL,"p"=p,"v"=v,"AIC"=2*2+2*NegLL,"call"=match.call())
  class(final)<-c("fitCPB","fit")
  return(final)
}
#' @method fitCOMPBin default
#' @export
# Default S3 method: all of the work is done by fitCOMPBin itself, so this
# simply delegates, passing every argument through unchanged.
fitCOMPBin.default<-function(x,obs.freq,p,v) fitCOMPBin(x,obs.freq,p,v)
#' @method print fitCPB
#' @export
# S3 print method for "fitCPB" objects returned by fitCOMPBin().
# Prints the matched call followed by a chi-squared goodness-of-fit summary.
#
# x   : an object of class "fitCPB".
# ... : ignored; present to match the print() generic signature.
print.fitCPB<-function(x,...)
{
# echo the call that created the fit
cat("Call: \n")
print(x$call)
# NOTE: the cat() string literal below spans several source lines; the
# embedded line breaks and "\n\t" sequences are intentional parts of the
# printed output, so no comments may be inserted inside it.
cat("\nChi-squared test for COM Poisson Binomial Distribution \n\t
Observed Frequency : ",x$obs.freq,"\n\t
expected Frequency : ",x$exp.freq,"\n\t
estimated p value :",x$p," ,estimated v parameter :",x$v,"\n\t
X-squared :",x$statistic," ,df :",x$df," ,p-value :",x$p.value,"\n")
}
#' @method summary fitCPB
#' @export
# S3 summary method for "fitCPB" objects: same report as print.fitCPB plus
# the negative log-likelihood and AIC of the fit.
#
# object : an object of class "fitCPB".
# ...    : ignored; present to match the summary() generic signature.
summary.fitCPB<-function(object,...)
{
# echo the call that created the fit
cat("Call: \n")
print(object$call)
# NOTE: the cat() string literal below spans several source lines; the
# embedded line breaks and "\n\t" sequences are intentional output.
cat("\nChi-squared test for COM Poisson Binomial Distribution \n\t
Observed Frequency : ",object$obs.freq,"\n\t
expected Frequency : ",object$exp.freq,"\n\t
estimated p value :",object$p," ,estimated v parameter :",object$v,"\n\t
X-squared :",object$statistic," ,df :",object$df," ,p-value :",object$p.value,"\n\t
Negative Loglikehood value :",object$NegLL,"\n\t
AIC value :",object$AIC,"\n")
}
|
kochPattern <- function() {
    # Generate the base pattern and hand it straight to the Koch modifier;
    # the value of modKochPattern() is returned unchanged.
    modKochPattern(getPattern())
}
| /Fractal/R/kochPattern.R | no_license | ddizhang/fractal | R | false | false | 73 | r | kochPattern <-
function()
{
pts = getPattern()
modKochPattern(pts)
}
|
## This starts from my initial example code with some changes from Karthik
## 9/12/12
##library(varDev)
## Try tradeoff between maturation fecundity
# Batch simulation driver: sweeps over fecundity-slope values (b),
# correlations (rho) and juvenile-shape values, solving the maturation vs
# fecundity tradeoff for each combination and collecting the optimal
# maturation rate (mstar) and growth rate (r). Results are saved to
# growthFecundityResults.RData and the session quits at the end.
# All solver functions (solve.tradeoff.cor, solve.matrix.tradeoff.curve,
# VD.tradeoff.curve.juvgamma.cor) are supplied by the sourced file below.
source('tradeoff3Code.R')
# NOTE(review): m, sA and sJ are not referenced again in this script;
# presumably they are consumed as globals by code in tradeoff3Code.R -- confirm.
m <- 0.3
sA <- 0.7
sJ <- 0.7
a <- 6.0 # intercept
b <- -2.0 # slope
# gamma shape parameter implied by a coefficient of variation
jps <- function(x) return((1/x)*(1/x)) ## shape = 1/CV^2. Set CV = .1:.1:1
CV <- seq(0.1, 1, by = 0.1)
juvshape <- jps(CV)
## F.from.m <- function(m, a, b) a + b*m ## b should be negative
## debug(solve.tradeoff.cor)
## debug(VD.tradeoff.curve.cor)
## solve.tradeoff.cor(a, b, seq(0.01, 0.99, length = 20), corr = 0.7, second.m.length = 50, VDtradeoffFunction = VD.tradeoff.curve.cor)
## ## When it gets stuck, there are essentially ZERO survivors happening, and the max iteration threshold is not being triggered.
## ## Temporary solution is to use a nearly 1 so that at max there are survivors
## js4 <- solve.tradeoff.cor(a, b, seq(0.01, 0.99, length = 20), corr = 0, second.m.length = 50, VDtradeoffFunction = VD.tradeoff.curve.juvgamma.cor, juvshape = 4)
## debug(VD.tradeoff.curve.juvgamma.cor)
## onerun <- VD.tradeoff.curve.juvgamma.cor(a, b, m.grid = seq(0.01, 0.99, length = 20), corr = 0, juvshape = 8)
## appears to work ok
# result accumulators, keyed by "b=<value>" (and nested "rho="/"juvshape=")
growthFecundityTradeoff <- list()
growthFecundityDetails <- list()
growthFecundityMatrixCase <- list()
bvalues <- c(-1, -2, -3)
for(ib in seq_along(bvalues)) {
ans <- list()
detailsrho <- list()
# NOTE: this rebinds the top-level `b` for each sweep iteration
b <- bvalues[ib]
# matrix-model benchmark case for this slope
growthFecundityMatrixCase[[paste('b=',b,sep='')]] <- solve.matrix.tradeoff.curve(a,b)
for(rho in c(-.5, 0, .5)) {
# preallocated per-juvshape result vectors
mstar <- numeric(length(juvshape))
r <- numeric(length(juvshape))
detailsjuvshape <- list()
for(i in seq_along(juvshape)) {
{
# cap each solver run at 600 CPU seconds; try() converts both timeouts
# and solver failures into a 'try-error' that is handled below
setTimeLimit(cpu = 600, transient = TRUE)
oneAns <- try(solve.tradeoff.cor(a, b, seq(0.01, 0.99, length = 20), corr = rho, second.m.length = 50, VDtradeoffFunction = VD.tradeoff.curve.juvgamma.cor, juvshape = juvshape[i]))
}
# keep the full solver object (or the error) for later inspection
detailsjuvshape[[paste('juvshape=',juvshape[i],sep='')]] <- oneAns
if(!inherits(oneAns, 'try-error')) {
# optimum growth rate and the maturation rate that achieves it
r[i] <- oneAns$second.opt$objective
mstar[i] <- oneAns$second.opt$maximum
} else {
# failed/timed-out run: record missing values
r[i] <- mstar[i] <- NA
}
# progress report for long batch runs
writeLines('\n')
writeLines(paste('b=',b,' rho=',rho,' juvshape[',i,']=',juvshape[i],' mstar=',mstar[i],' r=',r[i],sep=''))
writeLines('\n')
}
# tabulate this rho's sweep over juvenile-shape values
ans[[paste("rho=",rho,sep='')]] <- data.frame(CV = CV, juvshape = juvshape, r = r, mstar = mstar)
detailsrho[[paste('rho=',rho,sep='')]] <- detailsjuvshape
writeLines(paste('Finished for rho=',rho,sep=''))
print(ans[[paste("rho=",rho,sep='')]])
}
growthFecundityTradeoff[[paste('b=',b,sep='')]] <- ans
growthFecundityDetails[[paste('b=',b,sep='')]] <- detailsrho
}
# persist all results and quit without saving the workspace
# (the script is intended for non-interactive batch execution)
save(growthFecundityTradeoff, growthFecundityDetails, growthFecundityMatrixCase, file = 'growthFecundityResults.RData')
q('no')
| /B_analysts_sources_github/karthik/tradeoff/tradeoff3.R | no_license | Irbis3/crantasticScrapper | R | false | false | 2,858 | r | ## This starts from my initial example code with some changes from Karthik
## 9/12/12
##library(varDev)
## Try tradeoff between maturation fecundity
source('tradeoff3Code.R')
m <- 0.3
sA <- 0.7
sJ <- 0.7
a <- 6.0 # intercept
b <- -2.0 # slope
jps <- function(x) return((1/x)*(1/x)) ## shape = 1/CV^2. Set CV = .1:.1:1
CV <- seq(0.1, 1, by = 0.1)
juvshape <- jps(CV)
## F.from.m <- function(m, a, b) a + b*m ## b should be negative
## debug(solve.tradeoff.cor)
## debug(VD.tradeoff.curve.cor)
## solve.tradeoff.cor(a, b, seq(0.01, 0.99, length = 20), corr = 0.7, second.m.length = 50, VDtradeoffFunction = VD.tradeoff.curve.cor)
## ## When it gets stuck, there are essentially ZERO survivors happening, and the max iteration threshold is not being triggered.
## ## Temporary solution is to use a nearly 1 so that at max there are survivors
## js4 <- solve.tradeoff.cor(a, b, seq(0.01, 0.99, length = 20), corr = 0, second.m.length = 50, VDtradeoffFunction = VD.tradeoff.curve.juvgamma.cor, juvshape = 4)
## debug(VD.tradeoff.curve.juvgamma.cor)
## onerun <- VD.tradeoff.curve.juvgamma.cor(a, b, m.grid = seq(0.01, 0.99, length = 20), corr = 0, juvshape = 8)
## appears to work ok
growthFecundityTradeoff <- list()
growthFecundityDetails <- list()
growthFecundityMatrixCase <- list()
bvalues <- c(-1, -2, -3)
for(ib in seq_along(bvalues)) {
ans <- list()
detailsrho <- list()
b <- bvalues[ib]
growthFecundityMatrixCase[[paste('b=',b,sep='')]] <- solve.matrix.tradeoff.curve(a,b)
for(rho in c(-.5, 0, .5)) {
mstar <- numeric(length(juvshape))
r <- numeric(length(juvshape))
detailsjuvshape <- list()
for(i in seq_along(juvshape)) {
{
setTimeLimit(cpu = 600, transient = TRUE)
oneAns <- try(solve.tradeoff.cor(a, b, seq(0.01, 0.99, length = 20), corr = rho, second.m.length = 50, VDtradeoffFunction = VD.tradeoff.curve.juvgamma.cor, juvshape = juvshape[i]))
}
detailsjuvshape[[paste('juvshape=',juvshape[i],sep='')]] <- oneAns
if(!inherits(oneAns, 'try-error')) {
r[i] <- oneAns$second.opt$objective
mstar[i] <- oneAns$second.opt$maximum
} else {
r[i] <- mstar[i] <- NA
}
writeLines('\n')
writeLines(paste('b=',b,' rho=',rho,' juvshape[',i,']=',juvshape[i],' mstar=',mstar[i],' r=',r[i],sep=''))
writeLines('\n')
}
ans[[paste("rho=",rho,sep='')]] <- data.frame(CV = CV, juvshape = juvshape, r = r, mstar = mstar)
detailsrho[[paste('rho=',rho,sep='')]] <- detailsjuvshape
writeLines(paste('Finished for rho=',rho,sep=''))
print(ans[[paste("rho=",rho,sep='')]])
}
growthFecundityTradeoff[[paste('b=',b,sep='')]] <- ans
growthFecundityDetails[[paste('b=',b,sep='')]] <- detailsrho
}
save(growthFecundityTradeoff, growthFecundityDetails, growthFecundityMatrixCase, file = 'growthFecundityResults.RData')
q('no')
|
#' @inheritParams ggplot2::stat_identity
#'
#' @param step the number of quantiles to use to compute bins
#'
#' @section Computed variables:
#' \describe{
#' \item{ymin}{the lower limit of the quantile}
#' \item{ymax}{the upper limit of the quantile}
#' \item{id}{an identifier for the quantile}
#'   \item{percent}{the fill color to use in \code{geom_fan}}
#' }
#'
#' @rdname geom_fan
#' @export
stat_fan <- function(mapping = NULL, data = NULL, geom = NULL,
                     position = "identity", na.rm = FALSE, show.legend = NA,
                     inherit.aes = TRUE, step=0.01, ...) {
  # Build the fan layer and return it wrapped in a list, which ggplot2
  # accepts when a component is added with `+`.
  # NOTE(review): `step` is accepted here but not forwarded in `params`; the
  # quantile resolution therefore comes from StatFan's default unless a
  # caller passes step through `...` -- confirm this is intended.
  fan_layer <- layer(
    stat = StatFan,
    data = data,
    mapping = mapping,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
  list(fan_layer)
}
#' StatFan
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
# ggproto Stat powering stat_fan()/geom_fan(): for each group it computes
# fan quantile bins from the y values via do_fan() (defined elsewhere in
# this package).
StatFan <- ggproto("StatFan", Stat,
                   # y is the only aesthetic the stat requires from the user
                   required_aes = "y",
                   # each quantile band forms its own group, filled by its percent level
                   default_aes = aes(fill=stat(percent),group=stat(id)),
                   # step sets the quantile resolution; do_fan() is expected to
                   # return the computed ymin/ymax/id/percent columns
                   compute_group = function(data,scales,step=0.01) do_fan(data$y,step)
)
| /R/stat_fan.R | no_license | cran/cytofan | R | false | false | 1,209 | r | #' @inheritParams ggplot2::stat_identity
#'
#' @param step the number of quantiles to use to compute bins
#'
#' @section Computed variables:
#' \describe{
#' \item{ymin}{the lower limit of the quantile}
#' \item{ymax}{the upper limit of the quantile}
#' \item{id}{an identifier for the quantile}
#' \item{percent}{the fill colorto use in \code{geom_fan}}
#' }
#'
#' @rdname geom_fan
#' @export
stat_fan <- function(mapping = NULL, data = NULL, geom = NULL,
position = "identity", na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE, step=0.01, ...) {
list(
layer(
stat = StatFan, data = data, mapping = mapping, geom = geom,
position = position, show.legend = show.legend, inherit.aes = inherit.aes,
params = list(na.rm = na.rm, ...)
)
)
}
#' StatFan
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatFan <- ggproto("StatFan", Stat,
required_aes = "y",
default_aes = aes(fill=stat(percent),group=stat(id)),
compute_group = function(data,scales,step=0.01) do_fan(data$y,step)
)
|
# This program illustrates how to calculate the CEF
# and use it for two different purposes
# Load the wage data; the path is relative, so the working directory must be
# the project root for this to succeed.
wagedata <- read.csv("data/wage2.csv")
# NOTE(review): attach() copies the data columns onto the search path; later
# changes to wagedata (e.g. the `education` column added below) are NOT
# reflected in the attached copy, hence the second attach() further down.
attach(wagedata)
# Example 1: Binary Variable
# Use of CEF (I): Prediction
# Approach 1: base R -- conditional means of wage by marital status
aggregate(wage,by=list(married),FUN=mean)
# keep only the column of group means (drops the group-label column)
results <-aggregate(wage,by=list(married),FUN=mean)[2]
results
# Approach 2: tidyverse equivalent of the same conditional means
library(dplyr)
wagedata %>%
group_by(married) %>%
summarise(mean = mean(wage,na.rm = TRUE))
# Use of CEF (II): Partial Effects of Marriage (or Marriage Premium)
results
# difference in conditional means = estimated marriage premium
results[2,1]-results[1,1]
# Example 2: Multivalued Discrete Variable
# recode years of education into three ordered categories
wagedata$education <- NA
wagedata$education[educ<12] <- 1
wagedata$education[educ==12] <- 2
wagedata$education[educ>12] <- 3
# re-attach so the new `education` column is visible on the search path
# (this masks the earlier attach and triggers a masking warning)
attach(wagedata)
# Approach 1:
# NOTE(review): aggregate() takes no `data` argument in this call form; the
# extra argument is silently forwarded to mean() and ignored -- could be dropped.
aggregate(wage,by=list(education),FUN=mean,data=wagedata)
# Approach 2:
wagedata %>%
group_by(education) %>%
summarise(mean = mean(wage,na.rm = TRUE))
| /lecture/example/mv06_cond_expectation01.R | no_license | anhnguyendepocen/man2 | R | false | false | 980 | r | # This program illustrates how to calculate the CEF
# and use it for two different purposes
wagedata <- read.csv("data/wage2.csv")
attach(wagedata)
# Example 1: Binary Variable
# Use of CEF (I): Prediction
# Approach 1:
aggregate(wage,by=list(married),FUN=mean)
results <-aggregate(wage,by=list(married),FUN=mean)[2]
results
# Approach 2:
library(dplyr)
wagedata %>%
group_by(married) %>%
summarise(mean = mean(wage,na.rm = TRUE))
# Use of CEF (II): Partial Effects of Marriage (or Marriage Premium)
results
results[2,1]-results[1,1]
# Example 2: Multivalued Discrete Variable
wagedata$education <- NA
wagedata$education[educ<12] <- 1
wagedata$education[educ==12] <- 2
wagedata$education[educ>12] <- 3
attach(wagedata)
# Approach 1:
aggregate(wage,by=list(education),FUN=mean,data=wagedata)
# Approach 2:
wagedata %>%
group_by(education) %>%
summarise(mean = mean(wage,na.rm = TRUE))
|
#'@export
get_kp_data <- function(dat, yearExclude = NULL, yearFilter = NULL, resultKnown = T, useAdj = F) {
  # Join a game table (`dat`) with season-level KenPom team statistics and
  # build a modelling table with one row per team per game (home perspective
  # followed by the mirrored away perspective).
  #
  # dat         : data.frame of games; expects GID, Year, Round, HomeTeam,
  #               AwayTeam, HomeSeed, AwaySeed and, when resultKnown = TRUE,
  #               HomeScore/AwayScore; Region column is optional.
  # yearExclude : drop this season from the KenPom table (NULL = keep all).
  # yearFilter  : keep only this season (NULL = keep all).
  # resultKnown : include Result/Score columns derived from the game table.
  # useAdj      : overwrite raw tempo/efficiency columns with the adjusted
  #               (Adj*) KenPom versions before joining.
  #
  # Returns list(preddata, problems): `preddata` holds the perspective rows,
  # `problems` holds game rows whose teams did not match exactly one KenPom row.
  # Read every yearly KenPom csv and stack them, tagging each row with its
  # season (taken from the first 4 characters of the file name).
  kp <- plyr::ldply(list.files("data/kenpom_data/", pattern = ".csv"), function(fname) {
    season <- read.csv(file = paste0("data/kenpom_data/", fname), header = T, stringsAsFactors = F)
    season$year <- as.integer(substr(fname, 1, 4))
    return(season)
  })
  kp <- as.data.table(kp)
  # Backfill missing Season, efficiency-margin and rank values.
  kp[is.na(Season), Season:=year]
  kp[is.na(AdjEM), AdjEM:=(AdjOE-AdjDE)]
  kp <- kp[order(kp$year, -kp$AdjEM),]
  kp$RankAdjEM <- as.numeric(kp$RankAdjEM)
  kp[is.na(RankAdjEM), RankAdjEM:=rank(-AdjEM), by = list(year)]
  kp <- as.data.frame(kp)
  if(!is.null(yearExclude)) {
    kp <- kp %>% filter(year != yearExclude)
  }
  if(!is.null(yearFilter)) {
    kp <- kp %>% filter(year == yearFilter)
  }
  # cleanKP() is defined elsewhere in this package (team-name normalisation,
  # presumably) -- TODO confirm.
  kp <- cleanKP(kp)
  if(useAdj) {
    kp$Tempo <- kp$AdjTempo
    kp$RankTempo <- kp$RankAdjTempo
    kp$OE <- kp$AdjOE
    kp$RankOE <- kp$RankAdjOE
    kp$DE <- kp$AdjDE
    kp$RankDE <- kp$RankAdjDE
    kp$EM <- kp$AdjEM
    kp$RankEM <- kp$RankAdjTempo
  }
  n_games <- nrow(dat)
  # Preallocated row containers: the previous version grew data.frames with
  # rbind() inside the loop (quadratic) and iterated 1:nrow(dat), which runs
  # twice (i = 1, 0) when `dat` is empty.
  pred_rows <- vector("list", 2 * n_games)
  prob_rows <- vector("list", n_games)
  # Build one perspective row for game i; `me`/`opp` are single KenPom rows.
  # Score arguments are evaluated lazily, so they may reference columns that
  # are absent from `dat` when resultKnown is FALSE. Column order matches the
  # original implementation exactly so rbind() stays consistent.
  make_side <- function(i, me, opp, team, seed, score, oteam, oseed, oscore) {
    row <- as.data.frame(matrix(ncol = 0, nrow = 1))
    row$GID <- dat$GID[i]
    row$Year <- dat$Year[i]
    row$Round <- dat$Round[i]
    if("Region" %in% colnames(dat)) {
      row$Region <- dat$Region[i]
    }
    if(resultKnown) {
      row$Result <- ifelse(score > oscore, 1, 0)
    }
    row$Team <- team
    row$Seed <- seed
    if(resultKnown) {
      row$Score <- score
    }
    row$Tempo <- me$Tempo
    row$OffEff <- me$OE
    row$RankOE <- me$RankOE
    row$DefEff <- me$DE
    row$RankDE <- me$RankDE
    row$oTeam <- oteam
    row$oSeed <- oseed
    if(resultKnown) {
      row$oScore <- oscore
    }
    row$oTempo <- opp$Tempo
    row$oOffEff <- opp$OE
    row$oRankOE <- opp$RankOE
    row$oDefEff <- opp$DE
    row$oRankDE <- opp$RankDE
    row
  }
  for(i in seq_len(n_games)) {
    print(i/n_games)   # progress indicator (fraction complete), as before
    home <- kp %>% filter(year == dat$Year[i], TeamName == dat$HomeTeam[i])
    away <- kp %>% filter(year == dat$Year[i], TeamName == dat$AwayTeam[i])
    if(nrow(home) == 1 && nrow(away) == 1) {
      # home perspective row followed by the mirrored away perspective row
      pred_rows[[2 * i - 1]] <- make_side(i, home, away,
                                          dat$HomeTeam[i], dat$HomeSeed[i], dat$HomeScore[i],
                                          dat$AwayTeam[i], dat$AwaySeed[i], dat$AwayScore[i])
      pred_rows[[2 * i]] <- make_side(i, away, home,
                                      dat$AwayTeam[i], dat$AwaySeed[i], dat$AwayScore[i],
                                      dat$HomeTeam[i], dat$HomeSeed[i], dat$HomeScore[i])
    } else {
      # ambiguous or missing KenPom match: keep the raw game row for inspection
      prob_rows[[i]] <- dat[i,]
    }
  }
  # Bind once at the end; fall back to the empty data.frame() that the
  # original implementation returned when nothing matched.
  pred_rows <- Filter(Negate(is.null), pred_rows)
  prob_rows <- Filter(Negate(is.null), prob_rows)
  preddata <- if(length(pred_rows) > 0) do.call(rbind, pred_rows) else data.frame()
  problems <- if(length(prob_rows) > 0) do.call(rbind, prob_rows) else data.frame()
  return(list(preddata, problems))
}
| /R/get_kp_data.R | no_license | ctloftin/NCAATournament | R | false | false | 3,413 | r | #'@export
get_kp_data <- function(dat, yearExclude = NULL, yearFilter = NULL, resultKnown = T, useAdj = F) {
kp <- plyr::ldply(list.files("data/kenpom_data/", pattern = ".csv"), function(fname) {
t <- read.csv(file = paste0("data/kenpom_data/", fname), header = T, stringsAsFactors = F)
t$year <- as.integer(substr(fname, 1, 4))
return(t)
})
kp <- as.data.table(kp)
kp[is.na(Season), Season:=year]
kp[is.na(AdjEM), AdjEM:=(AdjOE-AdjDE)]
kp <- kp[order(kp$year, -kp$AdjEM),]
kp$RankAdjEM <- as.numeric(kp$RankAdjEM)
kp[is.na(RankAdjEM), RankAdjEM:=rank(-AdjEM), by = list(year)]
kp <- as.data.frame(kp)
if(!is.null(yearExclude)) {
kp <- kp %>% filter(year != yearExclude)
}
if(!is.null(yearFilter)) {
kp <- kp %>% filter(year == yearFilter)
}
kp <- cleanKP(kp)
if(useAdj) {
kp$Tempo <- kp$AdjTempo
kp$RankTempo <- kp$RankAdjTempo
kp$OE <- kp$AdjOE
kp$RankOE <- kp$RankAdjOE
kp$DE <- kp$AdjDE
kp$RankDE <- kp$RankAdjDE
kp$EM <- kp$AdjEM
kp$RankEM <- kp$RankAdjTempo
}
# kp %>% filter(year == results$Year[1], TeamName %in% c(results$HomeTeam[1], results$AwayTeam[1]))
preddata <- data.frame()
problems <- data.frame()
for(i in 1:nrow(dat)) {
print(i/nrow(dat))
home <- kp %>% filter(year == dat$Year[i], TeamName == dat$HomeTeam[i])
away <- kp %>% filter(year == dat$Year[i], TeamName == dat$AwayTeam[i])
if(nrow(home) == 1 && nrow(away) == 1) {
##Home Team
t <- as.data.frame(matrix(ncol = 0, nrow = 1))
t$GID <- dat$GID[i]
t$Year <- dat$Year[i]
t$Round <- dat$Round[i]
if("Region" %in% colnames(dat)) {
t$Region <- dat$Region[i]
}
if(resultKnown) {
t$Result <- ifelse(dat$HomeScore[i] > dat$AwayScore[i], 1, 0)
}
t$Team <- dat$HomeTeam[i]
t$Seed <- dat$HomeSeed[i]
if(resultKnown) {
t$Score <- dat$HomeScore[i]
}
t$Tempo <- home$Tempo
t$OffEff <- home$OE
t$RankOE <- home$RankOE
t$DefEff <- home$DE
t$RankDE <- home$RankDE
t$oTeam <- dat$AwayTeam[i]
t$oSeed <- dat$AwaySeed[i]
if(resultKnown) {
t$oScore <- dat$AwayScore[i]
}
t$oTempo <- away$Tempo
t$oOffEff <- away$OE
t$oRankOE <- away$RankOE
t$oDefEff <- away$DE
t$oRankDE <- away$RankDE
preddata <- rbind(preddata, t)
t <- as.data.frame(matrix(ncol = 0, nrow = 1))
t$GID <- dat$GID[i]
t$Year <- dat$Year[i]
t$Round <- dat$Round[i]
if("Region" %in% colnames(dat)) {
t$Region <- dat$Region[i]
}
if(resultKnown) {
t$Result <- ifelse(dat$AwayScore[i] > dat$HomeScore[i], 1, 0)
}
t$Team <- dat$AwayTeam[i]
t$Seed <- dat$AwaySeed[i]
if(resultKnown) {
t$Score <- dat$AwayScore[i]
}
t$Tempo <- away$Tempo
t$OffEff <- away$OE
t$RankOE <- away$RankOE
t$DefEff <- away$DE
t$RankDE <- away$RankDE
t$oTeam <- dat$HomeTeam[i]
t$oSeed <- dat$HomeSeed[i]
if(resultKnown) {
t$oScore <- dat$HomeScore[i]
}
t$oTempo <- home$Tempo
t$oOffEff <- home$OE
t$oRankOE <- home$RankOE
t$oDefEff <- home$DE
t$oRankDE <- home$RankDE
preddata <- rbind(preddata, t)
} else {
t <- dat[i,]
problems <- rbind(problems, t)
}
}
return(list(preddata, problems))
}
|
#############################################################
# Count sequence number and read usage for a Change-O table #
#############################################################
# Driver script (run by Snakemake): tallies sequence and read counts for a
# Change-O table and writes a one-row TSV summary.
# Globals such as inpath_tab, stage, sample, size, iter, outpath, write_log(),
# log_done() and readChangeoDb() are supplied by aux.R / parse_snakemake() below.
aux_dir <- snakemake@params[["aux"]]
source(file.path(aux_dir, "aux.R"))
parse_snakemake(snakemake)  # promotes snakemake params/inputs/outputs to globals
write_log("Parsed global Snakemake properties.")
write_log("Loaded packages and auxiliary functions.")
# `collapsed` flags whether reads were collapsed into consensus sequences
# (i.e. a CONSCOUNT column is present); default TRUE when not supplied.
if (!exists("collapsed")) collapsed <- TRUE
write_log("Counting reads from",
ifelse(collapsed, "provided CONSCOUNT values.",
"number of input rows."))
# Fall back to counting input rows when no raw-read count table was given.
if (!exists("count_raw")) count_raw <- !exists("inpath_raw")
write_log("Computing raw read count from",
ifelse(count_raw, "number of input rows.",
"provided raw-read count table."))
# Read Change-O table
write_log("\nImporting Change-O table...", newline = FALSE)
tab <- suppressMessages(readChangeoDb(inpath_tab))
log_done()
write_log(nrow(tab), "sequence entries imported.")
# Read raw read count from count table, if necessary
write_log("\nObtaining raw read count...", newline = FALSE)
if (count_raw){
raw <- nrow(tab)
} else {
# NOTE(review): `[1,2]` keeps a 1x1 tibble and relies on list coercion in
# as.numeric(); `[[2]][1]` would be more explicit. Confirm the raw-read
# table really stores the total in row 1, column 2.
raw <- read_tsv(inpath_raw, col_names = FALSE)[1,2] %>%
as.numeric()
}
log_done()
write_log("Raw read count:", raw)
# Assemble the one-row summary table (ifelse() is safe here because
# `collapsed` is a single scalar flag).
write_log("\nGenerating count table...", newline = FALSE)
count_tab <- tibble(
STAGE = stage,
SEQCOUNT = nrow(tab),
CONSCOUNT = ifelse(collapsed, sum(tab$CONSCOUNT), nrow(tab)),
SAMPLE = sample, # From params
SIZE = size, # From params
ITERATION = iter, # From params
CLUSTER_BARCODES = cluster_barcodes, # From params
CLUSTER_SETS = cluster_sets, # From params
CONSCOUNT_RAW = raw,
) %>% mutate(CONSCOUNT_PC = CONSCOUNT/CONSCOUNT_RAW)  # fraction of raw reads retained
log_done()
# Write output
write_log("Writing count table to file...")
write_tsv(count_tab, outpath)
log_done()
| /preprocessing/source/scripts/count_presto_fastq.R | permissive | willbradshaw/killifish-igseq | R | false | false | 2,030 | r | #############################################################
# Count sequence number and read usage for a Change-O table #
#############################################################
# Driver script (run by Snakemake): tallies sequence and read counts for a
# Change-O table and writes a one-row TSV summary.
# Globals such as inpath_tab, stage, sample, size, iter, outpath, write_log(),
# log_done() and readChangeoDb() are supplied by aux.R / parse_snakemake() below.
aux_dir <- snakemake@params[["aux"]]
source(file.path(aux_dir, "aux.R"))
parse_snakemake(snakemake)  # promotes snakemake params/inputs/outputs to globals
write_log("Parsed global Snakemake properties.")
write_log("Loaded packages and auxiliary functions.")
# `collapsed` flags whether reads were collapsed into consensus sequences
# (i.e. a CONSCOUNT column is present); default TRUE when not supplied.
if (!exists("collapsed")) collapsed <- TRUE
write_log("Counting reads from",
ifelse(collapsed, "provided CONSCOUNT values.",
"number of input rows."))
# Fall back to counting input rows when no raw-read count table was given.
if (!exists("count_raw")) count_raw <- !exists("inpath_raw")
write_log("Computing raw read count from",
ifelse(count_raw, "number of input rows.",
"provided raw-read count table."))
# Read Change-O table
write_log("\nImporting Change-O table...", newline = FALSE)
tab <- suppressMessages(readChangeoDb(inpath_tab))
log_done()
write_log(nrow(tab), "sequence entries imported.")
# Read raw read count from count table, if necessary
write_log("\nObtaining raw read count...", newline = FALSE)
if (count_raw){
raw <- nrow(tab)
} else {
# NOTE(review): `[1,2]` keeps a 1x1 tibble and relies on list coercion in
# as.numeric(); `[[2]][1]` would be more explicit. Confirm the raw-read
# table really stores the total in row 1, column 2.
raw <- read_tsv(inpath_raw, col_names = FALSE)[1,2] %>%
as.numeric()
}
log_done()
write_log("Raw read count:", raw)
# Assemble the one-row summary table (ifelse() is safe here because
# `collapsed` is a single scalar flag).
write_log("\nGenerating count table...", newline = FALSE)
count_tab <- tibble(
STAGE = stage,
SEQCOUNT = nrow(tab),
CONSCOUNT = ifelse(collapsed, sum(tab$CONSCOUNT), nrow(tab)),
SAMPLE = sample, # From params
SIZE = size, # From params
ITERATION = iter, # From params
CLUSTER_BARCODES = cluster_barcodes, # From params
CLUSTER_SETS = cluster_sets, # From params
CONSCOUNT_RAW = raw,
) %>% mutate(CONSCOUNT_PC = CONSCOUNT/CONSCOUNT_RAW)  # fraction of raw reads retained
log_done()
# Write output
write_log("Writing count table to file...")
write_tsv(count_tab, outpath)
log_done()
|
#' Find the Nearest Prime Number
#'
#' Finds the prime number(s) nearest to a given integer `n` (with `n >= 2`).
#' If `n` itself is prime it is its own nearest prime. If the nearest primes
#' below and above `n` are equally distant, both are printed.
#'
#' @param n an integer greater than or equal to 2 (values below 2 produce a
#'   prompt message instead of a result).
#' @return The nearest prime(s), invisibly (the value returned by `print()`).
#' @examples
#'
#' near_prime(10)
near_prime <- function(n) {
  # Trial-division primality test; only odd divisors up to sqrt(m) are checked.
  is_prime <- function(m) {
    if (m < 2) return(FALSE)
    if (m < 4) return(TRUE)         # 2 and 3
    if (m %% 2 == 0) return(FALSE)  # even numbers > 2
    if (m < 9) return(TRUE)         # 5 and 7 (sqrt < 3, nothing to check)
    divisors <- seq(3, floor(sqrt(m)), by = 2)
    all(m %% divisors != 0)
  }
  if (n > 2) {
    # Search outward from n: the first distance d with a prime at n - d
    # and/or n + d is the answer; ties keep both (lower prime first).
    d <- 0
    repeat {
      candidates <- unique(c(n - d, n + d))
      hits <- candidates[vapply(candidates, is_prime, logical(1))]
      if (length(hits) > 0) break
      d <- d + 1
    }
    print(hits)
  } else if (n == 2) {
    print(n)
  } else {
    print("Please enter a number greater than 2")
  }
}
| /R/near_prime.R | no_license | summer-1123/gaeulpakage | R | false | false | 871 | r | #' 'Find the Nearest Pirme Number'
#' Find the Nearest Prime Number
#'
#' Finds the prime number(s) nearest to a given integer `n` (with `n >= 2`).
#' If `n` itself is prime it is its own nearest prime. If the nearest primes
#' below and above `n` are equally distant, both are printed.
#'
#' @param n an integer greater than or equal to 2 (values below 2 produce a
#'   prompt message instead of a result).
#' @return The nearest prime(s), invisibly (the value returned by `print()`).
#' @examples
#'
#' near_prime(10)
near_prime <- function(n) {
  # Trial-division primality test; only odd divisors up to sqrt(m) are checked.
  is_prime <- function(m) {
    if (m < 2) return(FALSE)
    if (m < 4) return(TRUE)         # 2 and 3
    if (m %% 2 == 0) return(FALSE)  # even numbers > 2
    if (m < 9) return(TRUE)         # 5 and 7 (sqrt < 3, nothing to check)
    divisors <- seq(3, floor(sqrt(m)), by = 2)
    all(m %% divisors != 0)
  }
  if (n > 2) {
    # Search outward from n: the first distance d with a prime at n - d
    # and/or n + d is the answer; ties keep both (lower prime first).
    d <- 0
    repeat {
      candidates <- unique(c(n - d, n + d))
      hits <- candidates[vapply(candidates, is_prime, logical(1))]
      if (length(hits) > 0) break
      d <- d + 1
    }
    print(hits)
  } else if (n == 2) {
    print(n)
  } else {
    print("Please enter a number greater than 2")
  }
}
|
# Exploratory plots of NBA player statistics. Salary, Games, FieldGoals, etc.
# are presumably player-by-season matrices created earlier in the course
# material, and myplot() a plotting helper defined elsewhere — TODO confirm.
#Salary
myplot(Salary)
myplot(Salary / Games)       # salary per game played
myplot(Salary / FieldGoals)  # salary per field goal made
#In-Game Metrics
myplot(MinutesPlayed)
myplot(Points)
#In-Game Metrics Normalized
myplot(FieldGoals/Games)              # field goals per game
myplot(FieldGoals/FieldGoalAttempts)  # shooting accuracy
myplot(FieldGoalAttempts/Games)       # attempts per game
#Interesting Observations
myplot(MinutesPlayed/Games)  # minutes per game
myplot(Games)
#Time is valuable
myplot(FieldGoals/MinutesPlayed)  # scoring efficiency per minute on court
#Player Style
myplot(Points/FieldGoals)  # points per made field goal (free throws / threes)
| /03 - Matrices/24 - Basketball Insights.R | no_license | panchalashish4/R-Programming-A-Z | R | false | false | 416 | r | #Salary
# Exploratory plots of NBA player statistics (Salary section). Salary, Games,
# FieldGoals, etc. are presumably player-by-season matrices created earlier in
# the course material, and myplot() a plotting helper defined elsewhere —
# TODO confirm.
myplot(Salary)
myplot(Salary / Games)       # salary per game played
myplot(Salary / FieldGoals)  # salary per field goal made
#In-Game Metrics
myplot(MinutesPlayed)
myplot(Points)
#In-Game Metrics Normalized
myplot(FieldGoals/Games)              # field goals per game
myplot(FieldGoals/FieldGoalAttempts)  # shooting accuracy
myplot(FieldGoalAttempts/Games)       # attempts per game
#Interesting Observations
myplot(MinutesPlayed/Games)  # minutes per game
myplot(Games)
#Time is valuable
myplot(FieldGoals/MinutesPlayed)  # scoring efficiency per minute on court
#Player Style
myplot(Points/FieldGoals)  # points per made field goal (free throws / threes)
|
## long-term monitoring data received probably from Environment Agency but cannot remember anymore
## (received as part of PhD work); corresponds to site names used by EA-online so assuming EA
##load necessary packages
library('reshape2')
library('ggplot2')
library('extrafont')
loadfonts()
## nice ggplot theme
papertheme <- theme_bw(base_size=12, base_family = 'Arial') +
theme(legend.position='top')
## create function that converts mg L PO4 to ug L P.
# Convert a phosphate concentration (mg/L as PO4) to phosphorus (ug/L as P):
# scale by the mass fraction of P in the PO4 ion, then mg -> ug (x 1000).
# FIX: the original ended with `x <- ...`, an assignment that needlessly
# shadows the argument and returns its value invisibly.
po4top <- function(x) {
  Pmass <- 30.974                       # atomic mass of P
  Omass <- 15.999                       # atomic mass of O
  pfrac <- Pmass / (Pmass + Omass * 4)  # mass fraction of P in PO4
  pfrac * x * 1000                      # explicit, visible return value
}
## Read the raw EA export and build a proper timestamp column.
mal <- read.csv("../dat-orig/EA/Malham_data150909.csv")
# Combine the separate DATE ("dd-Mon-yy") and TIME ("HHMM") text fields.
mal$datetime <- paste(mal$DATE, mal$TIME)
# Parse as POSIXct in GMT; rows that fail to parse become NA (filtered later).
mal$datetime <- as.POSIXct(mal$datetime, format="%d-%b-%y %H%M", tz="GMT")
## change column names to sensible options;
## FIXME: what is BOD ATU? what is PV.N.80.4Hrs..mg.l, how is oxidised N produced and how is orgC produced
## SiO2.Rv.Filt..mg.l, DtrgtAncSyn..mg.l, Dtrgt.NncSyn..mg.l,
## WethPresTemp..UNITLESS" "X1183..WethPresPrec..UNITLESS, Liqcolour.st..UNITLESS" "X6517..
## Liqcolour.mn..UNITLESS" "X6518..Liqcolour.se..UNITLESS" etc etc
oldnames <- names(mal)
names(mal) <- c('URN','Site', 'DATE','TIME','MAT','LAB.NO','PURP','pH','Cond20uS','Colour','TempWater',
'Cond25uS','Orel','Oabs', 'BODmgL','BOD5daymgL','CODO2mgL','PVN','OrgCmgL','AmmNmgL','KjelNmgL',
'OxNmgL','NitrateNmgL','NitriteNmgL','UnionNH3mgL','SuspSol105mgL','HardnessmgL','Alk4.5',
'ChloridemgL','OrthoPmgL', 'SiO2mgL','SO4mgL','FiltOrthoPmgL','NamgL','KmgL',
'MgmL','CamgL','OrgCFiltmgL','PmgL','AncSyn','NncSyn','Oil','ChlAmgL','GlycolsmgL',
'OscillatoriaNOml','WPTemp','WPPresc','AphanizomenonNOml','GomphosphaerNOml','LyngbyaNOml',
'PhormidiumNOml','ColeosphaerNOml','TurbidityNTU','AnabaenaBNOml','MicrocystisNOml',
'CuFiltugL','ZnugL','LiqColSt','LiqColMn','LiqColse','SolCol','SolQuan','SolText','OdourNat',
'OdourSt','WethVisib','Weth7Prec','Weth7Temp','Weth7Vsy','LASmgL','NDigestedugL','PhenolYesNo',
'PdigestedugL','ChlorophyllugL','ChlorAugL','Flow','NonylPhenolugL','FoamYesNo','NitrogenNmgL',
'FiltOrthoPmgL2','Orel2','Oabs2','FiltOxNmgL','FiltNH3NmgL','datetime')
malsub <- mal[,c('Site', 'DATE','TIME','pH','Cond20uS','Cond25uS','Orel','Oabs', 'BODmgL','TempWater','Colour',
'OrgCmgL','AmmNmgL','KjelNmgL',
'OxNmgL','NitrateNmgL','NitriteNmgL','UnionNH3mgL','SuspSol105mgL','HardnessmgL','Alk4.5',
'OrthoPmgL', 'SiO2mgL','SO4mgL','FiltOrthoPmgL','TurbidityNTU','NDigestedugL','ChlAmgL',
'ChlorophyllugL','PdigestedugL','NitrogenNmgL','PmgL',
'FiltOrthoPmgL2','Orel2','Oabs2','FiltOxNmgL','FiltNH3NmgL','datetime')]
malsub$Alk4.5 <- as.character(malsub$Alk4.5)
malsub$Alk4.5 <- gsub(x=malsub$Alk4.5, pattern= "<",replacement= "")
malsub$Alk4.5 <- as.numeric(malsub$Alk4.5)
## capture those cases where detection limit is likely to be active
mallessthan <- malsub
mallessthan <- as.data.frame(apply(mallessthan, 2, gsub, pattern = "<", replacement=""))
## =============================================================================================
## Nitrogen
## ====================================================================================
## what are the duplicated columns all about?
## FIXME: are the NH3s reported as mg/L N or not?
malN <- melt(malsub, id.vars = c('Site','datetime'),
measure.vars =c('AmmNmgL','KjelNmgL','OxNmgL','NitrateNmgL','UnionNH3mgL',
'NDigestedugL','NitrogenNmgL','FiltNH3NmgL','FiltOxNmgL'))
malN$lessthan <- FALSE
malN$lessthan[grep('<', malN$value)] <- TRUE
malN$value <- gsub(x=malN$value, pattern= "<",replacement= "")
malN$value <- as.numeric(malN$value)
malN$value[grep('ugL', malN$variable)] <- malN$value[grep('ugL', malN$variable)]/1000
realnames <- data.frame(variable=unique(malN$variable))
realnames$NType <- c('Ammonia/um','TN','TN','Nitrate','Ammonia','TN','TN','Nitrate','TN-filtered')
malN <- merge(malN, realnames)
# Combined nitrogen time series, faceted by site.
# FIX: the `allNplot <-` assignment had been commented out while the ggsave()
# call below still referenced `allNplot`, which would error at run time. The
# assignment is restored and the plot printed explicitly so it still displays.
allNplot <- ggplot(malN, aes(y=value, x=datetime, col=NType)) +
  papertheme +
  geom_point(aes(shape=lessthan), alpha=0.6) +
  facet_wrap(~Site, scales='free_y', ncol=1)+
  theme(legend.direction = 'vertical') + labs(shape="Below detection limit") +
  scale_color_manual(values=c('#e66101','#fdb863','black','#b2abd2','#5e3c99')) +
  scale_x_datetime(date_labels = "%b %y", date_breaks = '1 year') +
  guides(color=guide_legend(ncol=3)) +
  ylab('Nitrogen, mg/L') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
allNplot
ggsave(plot=allNplot, file="../figs/allNdata.pdf", width=13, height=8)
## ============================================================================================
## Phosphorus
## ===========================================================================================
malP <- melt(malsub, id.vars = c('Site','datetime'),
measure.vars =c('OrthoPmgL','FiltOrthoPmgL','PdigestedugL','FiltOrthoPmgL2','PmgL'))
malP$lessthan <- FALSE
malP$lessthan[grep('<', malP$value)] <- TRUE
malP$value <- gsub(x=malP$value, pattern= "<",replacement= "")
malP$value <- as.numeric(malP$value)
malP$value[grep("Ortho", malP$variable)] <- po4top(malP$value[grep("Ortho", malP$variable)])
malP$value[malP$variable=="PmgL"] <- malP$value[malP$variable=="PmgL"]*1000
realnamesP <- data.frame(variable = unique(malP$variable))
realnamesP$PType <- c('OrthoP','OrthoP','TP','OrthoP','TP')
malP <- merge(malP, realnamesP)
# Phosphorus time series by site, with the 12 ug/L CSM TP limit marked.
# FIX: the `allPplot <-` assignment had been commented out while the ggsave()
# call below still referenced `allPplot`, which would error at run time. The
# assignment is restored and the plot printed explicitly so it still displays.
allPplot <- ggplot(malP, aes(y=value, x=datetime, col=PType)) +
  papertheme +
  geom_point(aes(shape=lessthan), alpha=0.6) +
  facet_wrap(~Site, scales='free_y', ncol=1) +
  ylab("P (ug/L) (converted from all cases)") +
  geom_hline(yintercept = 12) + # CSM limit for Malham Tarn-depth lakes in TP
  theme(legend.direction = 'vertical') + labs(shape="Below detection limit") +
  scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
  scale_x_datetime(date_labels = "%b %y", date_breaks = '1 year') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
allPplot
ggsave(plot=allPplot, file="../figs/allPdata.pdf",height=8, width=13)
## ====================================================================================================
## oxygen etc.
## =================================================================================================
malOx <- melt(malsub, id.vars = c("Site","datetime"),
measure.vars = c("Orel","Oabs","BODmgL","Colour","OrgCmgL","Orel2" ,"Oabs2"))
malOx$lessthan <- FALSE
malOx$lessthan[grep('<', malOx$value)] <- TRUE
malOx$value <- gsub(x=malOx$value, pattern= "<",replacement= "")
malOx$value <- as.numeric(malOx$value)
malOx$variable[grep("Orel2", malOx$variable)] <- "Orel" # seem to be a continuation saved as diff name
malOx$variable[grep("Oabs2", malOx$variable)] <- "Oabs"
ggplot(malOx[malOx$variable %in% c('Orel','BODmgL'),], aes(y=value, x=datetime, col=Site)) +
papertheme +
geom_path() +
geom_point(aes(shape=lessthan), alpha=0.6) +
ylab("Oxygen (% or mg/L)") +
theme(legend.direction = 'vertical', axis.text.x = element_text(angle=40, hjust=1)) + labs(shape="Below detection limit") +
scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
scale_x_datetime(date_labels = "%b %y", date_breaks = "1 year") +
facet_wrap(~variable, ncol=1, scales="free_y")
## other remaining vars
malchl <- melt(malsub, id.vars = c("Site",'datetime'), measure.vars = c("ChlAmgL" ,"ChlorophyllugL"))
ggplot(na.omit(malchl), aes(x=datetime, y=value)) +
papertheme +
geom_point(aes(col=Site), alpha=0.6)+
scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17"))
# pH by month of year, across the full record.
# FIXES: (1) the `pHplot <-` assignment had been commented out while ggsave()
# below still used `pHplot`; (2) `malsub[-which(is.na(...)), ]` silently drops
# ALL rows when there are no NAs (negative indexing with integer(0)), so a
# logical filter is used instead.
pHplot <- ggplot(malsub[!is.na(malsub$datetime), ], aes(y=pH, x=datetime)) +
  papertheme +
  geom_point(aes(col=Site),alpha=0.6) +
  scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
  facet_wrap(~format(datetime, "%m")) +
  theme(legend.direction = 'vertical', axis.text.x = element_text(angle=90)) +
  scale_x_datetime(date_labels = "%y", date_breaks = "1 years") +
  xlab("")
pHplot
ggsave(plot=pHplot, file="../figs/pHdata.pdf", width=8, height=8)
inflowdat <- subset(malsub, Site=="TARN BECK AT ENTRANCE TO MALHAM TARN")
ggplot(inflowdat[-which(is.na(inflowdat$datetime)),], aes(y=pH, x=datetime)) +
papertheme +
geom_point() +
#scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
#facet_wrap(~format(datetime, "%m")) +
theme(legend.direction = 'vertical', axis.text.x = element_text(angle=90)) +
scale_x_datetime(date_labels = "%y", date_breaks = "1 years") +
xlab("Year")
ggplot(malsub[-which(is.na(malsub$datetime)),], aes(y=Alk4.5, x=datetime)) +
papertheme +
geom_point(aes(col=Site),alpha=0.6) +
scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
facet_wrap(~format(datetime, "%m"))
ggplot(malsub[-which(is.na(malsub$datetime)),], aes(y=pH, x=datetime)) +
papertheme +
geom_point(aes(size=Alk4.5),alpha=0.6) +
facet_wrap(~Site, ncol=1)
## save some processed data
saveRDS(malsub, "../dat-mod/malham-EA-decadal.rds")
saveRDS(malP, "../dat-mod/malham-EA-P-decadal.rds")
saveRDS(malN, "../dat-mod/malham-EA-N-decadal.rds")
| /scripts/get-EA-longtermdata.R | no_license | ewiik/malham | R | false | false | 9,303 | r | ## long-term monitoring data received probably from Environment Agency but cannot remember anymore
## (received as part of PhD work); corresponds to site names used by EA-online so assuming EA
##load necessary packages
library('reshape2')
library('ggplot2')
library('extrafont')
loadfonts()
## nice ggplot theme
papertheme <- theme_bw(base_size=12, base_family = 'Arial') +
theme(legend.position='top')
## create function that converts mg L PO4 to ug L P.
# Convert a phosphate concentration (mg/L as PO4) to phosphorus (ug/L as P):
# scale by the mass fraction of P in the PO4 ion, then mg -> ug (x 1000).
# FIX: the original ended with `x <- ...`, an assignment that needlessly
# shadows the argument and returns its value invisibly.
po4top <- function(x) {
  Pmass <- 30.974                       # atomic mass of P
  Omass <- 15.999                       # atomic mass of O
  pfrac <- Pmass / (Pmass + Omass * 4)  # mass fraction of P in PO4
  pfrac * x * 1000                      # explicit, visible return value
}
## read in files and make date interpretable
mal <- read.csv("../dat-orig/EA/Malham_data150909.csv")
mal$datetime <- paste(mal$DATE, mal$TIME)
mal$datetime <- as.POSIXct(mal$datetime, format="%d-%b-%y %H%M", tz="GMT")
## change column names to sensible options;
## FIXME: what is BOD ATU? what is PV.N.80.4Hrs..mg.l, how is oxidised N produced and how is orgC produced
## SiO2.Rv.Filt..mg.l, DtrgtAncSyn..mg.l, Dtrgt.NncSyn..mg.l,
## WethPresTemp..UNITLESS" "X1183..WethPresPrec..UNITLESS, Liqcolour.st..UNITLESS" "X6517..
## Liqcolour.mn..UNITLESS" "X6518..Liqcolour.se..UNITLESS" etc etc
oldnames <- names(mal)
names(mal) <- c('URN','Site', 'DATE','TIME','MAT','LAB.NO','PURP','pH','Cond20uS','Colour','TempWater',
'Cond25uS','Orel','Oabs', 'BODmgL','BOD5daymgL','CODO2mgL','PVN','OrgCmgL','AmmNmgL','KjelNmgL',
'OxNmgL','NitrateNmgL','NitriteNmgL','UnionNH3mgL','SuspSol105mgL','HardnessmgL','Alk4.5',
'ChloridemgL','OrthoPmgL', 'SiO2mgL','SO4mgL','FiltOrthoPmgL','NamgL','KmgL',
'MgmL','CamgL','OrgCFiltmgL','PmgL','AncSyn','NncSyn','Oil','ChlAmgL','GlycolsmgL',
'OscillatoriaNOml','WPTemp','WPPresc','AphanizomenonNOml','GomphosphaerNOml','LyngbyaNOml',
'PhormidiumNOml','ColeosphaerNOml','TurbidityNTU','AnabaenaBNOml','MicrocystisNOml',
'CuFiltugL','ZnugL','LiqColSt','LiqColMn','LiqColse','SolCol','SolQuan','SolText','OdourNat',
'OdourSt','WethVisib','Weth7Prec','Weth7Temp','Weth7Vsy','LASmgL','NDigestedugL','PhenolYesNo',
'PdigestedugL','ChlorophyllugL','ChlorAugL','Flow','NonylPhenolugL','FoamYesNo','NitrogenNmgL',
'FiltOrthoPmgL2','Orel2','Oabs2','FiltOxNmgL','FiltNH3NmgL','datetime')
malsub <- mal[,c('Site', 'DATE','TIME','pH','Cond20uS','Cond25uS','Orel','Oabs', 'BODmgL','TempWater','Colour',
'OrgCmgL','AmmNmgL','KjelNmgL',
'OxNmgL','NitrateNmgL','NitriteNmgL','UnionNH3mgL','SuspSol105mgL','HardnessmgL','Alk4.5',
'OrthoPmgL', 'SiO2mgL','SO4mgL','FiltOrthoPmgL','TurbidityNTU','NDigestedugL','ChlAmgL',
'ChlorophyllugL','PdigestedugL','NitrogenNmgL','PmgL',
'FiltOrthoPmgL2','Orel2','Oabs2','FiltOxNmgL','FiltNH3NmgL','datetime')]
malsub$Alk4.5 <- as.character(malsub$Alk4.5)
malsub$Alk4.5 <- gsub(x=malsub$Alk4.5, pattern= "<",replacement= "")
malsub$Alk4.5 <- as.numeric(malsub$Alk4.5)
## capture those cases where detection limit is likely to be active
mallessthan <- malsub
mallessthan <- as.data.frame(apply(mallessthan, 2, gsub, pattern = "<", replacement=""))
## =============================================================================================
## Nitrogen
## ====================================================================================
## what are the duplicated columns all about?
## FIXME: are the NH3s reported as mg/L N or not?
malN <- melt(malsub, id.vars = c('Site','datetime'),
measure.vars =c('AmmNmgL','KjelNmgL','OxNmgL','NitrateNmgL','UnionNH3mgL',
'NDigestedugL','NitrogenNmgL','FiltNH3NmgL','FiltOxNmgL'))
malN$lessthan <- FALSE
malN$lessthan[grep('<', malN$value)] <- TRUE
malN$value <- gsub(x=malN$value, pattern= "<",replacement= "")
malN$value <- as.numeric(malN$value)
malN$value[grep('ugL', malN$variable)] <- malN$value[grep('ugL', malN$variable)]/1000
realnames <- data.frame(variable=unique(malN$variable))
realnames$NType <- c('Ammonia/um','TN','TN','Nitrate','Ammonia','TN','TN','Nitrate','TN-filtered')
malN <- merge(malN, realnames)
# Combined nitrogen time series, faceted by site.
# FIX: the `allNplot <-` assignment had been commented out while the ggsave()
# call below still referenced `allNplot`, which would error at run time. The
# assignment is restored and the plot printed explicitly so it still displays.
allNplot <- ggplot(malN, aes(y=value, x=datetime, col=NType)) +
  papertheme +
  geom_point(aes(shape=lessthan), alpha=0.6) +
  facet_wrap(~Site, scales='free_y', ncol=1)+
  theme(legend.direction = 'vertical') + labs(shape="Below detection limit") +
  scale_color_manual(values=c('#e66101','#fdb863','black','#b2abd2','#5e3c99')) +
  scale_x_datetime(date_labels = "%b %y", date_breaks = '1 year') +
  guides(color=guide_legend(ncol=3)) +
  ylab('Nitrogen, mg/L') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
allNplot
ggsave(plot=allNplot, file="../figs/allNdata.pdf", width=13, height=8)
## ============================================================================================
## Phosphorus
## ===========================================================================================
malP <- melt(malsub, id.vars = c('Site','datetime'),
measure.vars =c('OrthoPmgL','FiltOrthoPmgL','PdigestedugL','FiltOrthoPmgL2','PmgL'))
malP$lessthan <- FALSE
malP$lessthan[grep('<', malP$value)] <- TRUE
malP$value <- gsub(x=malP$value, pattern= "<",replacement= "")
malP$value <- as.numeric(malP$value)
malP$value[grep("Ortho", malP$variable)] <- po4top(malP$value[grep("Ortho", malP$variable)])
malP$value[malP$variable=="PmgL"] <- malP$value[malP$variable=="PmgL"]*1000
realnamesP <- data.frame(variable = unique(malP$variable))
realnamesP$PType <- c('OrthoP','OrthoP','TP','OrthoP','TP')
malP <- merge(malP, realnamesP)
# Phosphorus time series by site, with the 12 ug/L CSM TP limit marked.
# FIX: the `allPplot <-` assignment had been commented out while the ggsave()
# call below still referenced `allPplot`, which would error at run time. The
# assignment is restored and the plot printed explicitly so it still displays.
allPplot <- ggplot(malP, aes(y=value, x=datetime, col=PType)) +
  papertheme +
  geom_point(aes(shape=lessthan), alpha=0.6) +
  facet_wrap(~Site, scales='free_y', ncol=1) +
  ylab("P (ug/L) (converted from all cases)") +
  geom_hline(yintercept = 12) + # CSM limit for Malham Tarn-depth lakes in TP
  theme(legend.direction = 'vertical') + labs(shape="Below detection limit") +
  scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
  scale_x_datetime(date_labels = "%b %y", date_breaks = '1 year') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
allPplot
ggsave(plot=allPplot, file="../figs/allPdata.pdf",height=8, width=13)
## ====================================================================================================
## oxygen etc.
## =================================================================================================
malOx <- melt(malsub, id.vars = c("Site","datetime"),
measure.vars = c("Orel","Oabs","BODmgL","Colour","OrgCmgL","Orel2" ,"Oabs2"))
malOx$lessthan <- FALSE
malOx$lessthan[grep('<', malOx$value)] <- TRUE
malOx$value <- gsub(x=malOx$value, pattern= "<",replacement= "")
malOx$value <- as.numeric(malOx$value)
malOx$variable[grep("Orel2", malOx$variable)] <- "Orel" # seem to be a continuation saved as diff name
malOx$variable[grep("Oabs2", malOx$variable)] <- "Oabs"
ggplot(malOx[malOx$variable %in% c('Orel','BODmgL'),], aes(y=value, x=datetime, col=Site)) +
papertheme +
geom_path() +
geom_point(aes(shape=lessthan), alpha=0.6) +
ylab("Oxygen (% or mg/L)") +
theme(legend.direction = 'vertical', axis.text.x = element_text(angle=40, hjust=1)) + labs(shape="Below detection limit") +
scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
scale_x_datetime(date_labels = "%b %y", date_breaks = "1 year") +
facet_wrap(~variable, ncol=1, scales="free_y")
## other remaining vars
malchl <- melt(malsub, id.vars = c("Site",'datetime'), measure.vars = c("ChlAmgL" ,"ChlorophyllugL"))
ggplot(na.omit(malchl), aes(x=datetime, y=value)) +
papertheme +
geom_point(aes(col=Site), alpha=0.6)+
scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17"))
# pH by month of year, across the full record.
# FIXES: (1) the `pHplot <-` assignment had been commented out while ggsave()
# below still used `pHplot`; (2) `malsub[-which(is.na(...)), ]` silently drops
# ALL rows when there are no NAs (negative indexing with integer(0)), so a
# logical filter is used instead.
pHplot <- ggplot(malsub[!is.na(malsub$datetime), ], aes(y=pH, x=datetime)) +
  papertheme +
  geom_point(aes(col=Site),alpha=0.6) +
  scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
  facet_wrap(~format(datetime, "%m")) +
  theme(legend.direction = 'vertical', axis.text.x = element_text(angle=90)) +
  scale_x_datetime(date_labels = "%y", date_breaks = "1 years") +
  xlab("")
pHplot
ggsave(plot=pHplot, file="../figs/pHdata.pdf", width=8, height=8)
inflowdat <- subset(malsub, Site=="TARN BECK AT ENTRANCE TO MALHAM TARN")
ggplot(inflowdat[-which(is.na(inflowdat$datetime)),], aes(y=pH, x=datetime)) +
papertheme +
geom_point() +
#scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
#facet_wrap(~format(datetime, "%m")) +
theme(legend.direction = 'vertical', axis.text.x = element_text(angle=90)) +
scale_x_datetime(date_labels = "%y", date_breaks = "1 years") +
xlab("Year")
ggplot(malsub[-which(is.na(malsub$datetime)),], aes(y=Alk4.5, x=datetime)) +
papertheme +
geom_point(aes(col=Site),alpha=0.6) +
scale_color_manual(values=c("#386cb0", "#f0027f", "#bf5b17")) +
facet_wrap(~format(datetime, "%m"))
ggplot(malsub[-which(is.na(malsub$datetime)),], aes(y=pH, x=datetime)) +
papertheme +
geom_point(aes(size=Alk4.5),alpha=0.6) +
facet_wrap(~Site, ncol=1)
## save some processed data
saveRDS(malsub, "../dat-mod/malham-EA-decadal.rds")
saveRDS(malP, "../dat-mod/malham-EA-P-decadal.rds")
saveRDS(malN, "../dat-mod/malham-EA-N-decadal.rds")
|
library(tidyverse)
library(modelr)
library(gapminder)
gapminder %>%
ggplot(aes(year, lifeExp, group = country)) +
geom_line(alpha = 1/3)
nz <- filter(gapminder, country == "New Zealand")
nz %>%
ggplot(aes(year, lifeExp)) +
geom_line() +
ggtitle("Full data = ")
nz_mod <- lm(lifeExp ~ year, data = nz)
nz %>%
add_predictions(nz_mod) %>%
ggplot(aes(year, pred)) +
geom_line() +
ggtitle("Linear trend + ")
nz %>%
add_residuals(nz_mod) %>%
ggplot(aes(year, resid)) +
geom_ref_line(h = 0) +
geom_line() +
ggtitle("Remaining pattern")
by_country <- gapminder %>%
group_by(country, continent) %>%
nest()
by_country
by_country$data[[1]]
## List-Columns
# Fit a simple linear trend of life expectancy against year for one
# country's data; returns the fitted "lm" object.
country_model <- function(df) lm(lifeExp ~ year, data = df)
models <- map(by_country$data, country_model)
by_country <- by_country %>%
mutate(model = map(data, country_model))
models
by_country
by_country %>%
filter(continent == "Europe")
by_country %>%
arrange(continent, country)
by_country <- by_country %>%
mutate(
resids = map2(data, model, add_residuals)
)
by_country
resids <- unnest(by_country, resids)
resids
# Residuals over time for every country, with an overall smoother.
# FIX: use TRUE/FALSE, never T/F — T and F are ordinary variables that can be
# reassigned, silently changing behavior.
resids %>%
  ggplot(aes(year, resid)) +
  geom_line(aes(group = country), alpha = 1/3) +
  geom_smooth(se = FALSE)

# Same view, faceted by continent.
resids %>%
  ggplot(aes(year, resid)) +
  geom_line(aes(group = country), alpha = 1/3) +
  geom_smooth(se = FALSE) +
  facet_wrap(~continent)
## Model Quality
library(broom)
broom::glance(nz_mod)
by_country %>%
mutate(glance = map(model, broom::glance)) %>%
unnest(glance)
glance <- by_country %>%
mutate(glance = map(model, broom::glance)) %>%
unnest(glance, .drop = T)
glance
glance %>%
arrange(r.squared)
glance %>%
ggplot(aes(continent, r.squared)) +
geom_jitter(width = 0.5)
bad_fit <- filter(glance, r.squared < 0.25)
gapminder %>%
semi_join(bad_fit, by = "country") %>%
ggplot(aes(year, lifeExp, color = country)) +
geom_line()
?unnest
## Exercises
# List-Columns
data.frame(x = list(1:3, 3:5))
data.frame(x = I(list(1:3, 3:5)),
y = c("1, 2", "3, 4, 5"))
tibble(
x = list(1:3, 3:5),
y = c("1, 2", "3, 4, 5")
)
tribble(
~x, ~y,
1:3, "1, 2",
3:5, "3, 4, 5"
)
# Creating List-Columns
## With Nesting
gapminder %>%
group_by(country, continent) %>%
nest()
gapminder %>%
nest(year:gdpPercap)
## From Vectorized Functions
df <- tribble(
~x1,
"a,b,c",
"d,e,f,g"
)
df %>%
mutate(x2 = stringr::str_split(x1, ","))
df %>%
mutate(x2 = stringr::str_split(x1, ",")) %>%
unnest()
sim <- tribble(
~f, ~params,
"runif", list(min = -1, max = -1),
"rnorm", list(sd = 5),
"rpois", list(lambda = 10)
)
sim
sim %>%
mutate(sims = invoke_map(f, params, n = 10))
## From Multivalued Summaries
mtcars %>%
group_by(cyl) %>%
summarise(q = quantile(mpg))
mtcars %>%
group_by(cyl) %>%
summarise(q = list(quantile(mpg)))
probs <- c(0.01, 0.25, 0.5, 0.75, 0.99)
mtcars %>%
group_by(cyl) %>%
summarise(p = list(probs), q = list(quantile(mpg, probs))) %>%
unnest()
## From a Named List
x <- list(
a = 1:5,
b = 3:4,
c = 5:6
)
x
df <- enframe(x)
df
df %>%
mutate(
smry = map2_chr(
name,
value,
~ stringr::str_c(.x, ": ", .y[1])
)
)
## Exercises
#3
mtcars %>%
group_by(cyl) %>%
summarise(q = list(quantile(mpg))) %>%
unnest()
mtcars %>%
group_by(cyl) %>%
summarise(p = list(probs), q = list(quantile(mpg, probs))) %>%
unnest()
#4
# Collapse every column into a list-column per cylinder group.
# FIX: summarise_each()/funs() are deprecated; across() is the modern
# equivalent (dplyr >= 1.0).
mtcars %>%
  group_by(cyl) %>%
  summarise(across(everything(), ~ list(.x)))
# Simplifying List-Columns
## List to Vector
df <- tribble(
~x,
letters[1:5],
1:3,
runif(5)
)
df
df %>%
mutate(
type = map_chr(x, typeof),
length = map_int(x, length)
)
df <- tribble(
~x,
list(a = 1, b = 2),
list(a = 2, c = 4)
)
df %>%
mutate(
a = map_dbl(x, "a"),
b = map_dbl(x, "b", .null = NA_real_)
)
## Unnesting
tibble(x = 1:2, y = list(1:4, 1)) %>% unnest(y)
# Ok, because x and z have the same number of elements in every row
df1 <- tribble(
~x, ~y, ~z,
1, c("a", "b"), 1:2,
2, "c", 3
)
df1
df1 %>% unnest(y, z)
# Doesn't work because y and z have different number of elements
df2 <- tribble(
~x, ~y, ~z,
1, "a", 1:2,
2, c("b", "c"), 3
)
df2
df2 %>% unnest(y, z)
| /codes/chapter20.R | no_license | harryyang1982/R4DS | R | false | false | 4,297 | r | library(tidyverse)
library(modelr)
library(gapminder)
gapminder %>%
ggplot(aes(year, lifeExp, group = country)) +
geom_line(alpha = 1/3)
nz <- filter(gapminder, country == "New Zealand")
nz %>%
ggplot(aes(year, lifeExp)) +
geom_line() +
ggtitle("Full data = ")
nz_mod <- lm(lifeExp ~ year, data = nz)
nz %>%
add_predictions(nz_mod) %>%
ggplot(aes(year, pred)) +
geom_line() +
ggtitle("Linear trend + ")
nz %>%
add_residuals(nz_mod) %>%
ggplot(aes(year, resid)) +
geom_ref_line(h = 0) +
geom_line() +
ggtitle("Remaining pattern")
by_country <- gapminder %>%
group_by(country, continent) %>%
nest()
by_country
by_country$data[[1]]
## List-Columns
# Fit a simple linear trend of life expectancy against year for one
# country's data; returns the fitted "lm" object.
country_model <- function(df) lm(lifeExp ~ year, data = df)
models <- map(by_country$data, country_model)
by_country <- by_country %>%
mutate(model = map(data, country_model))
models
by_country
by_country %>%
filter(continent == "Europe")
by_country %>%
arrange(continent, country)
by_country <- by_country %>%
mutate(
resids = map2(data, model, add_residuals)
)
by_country
resids <- unnest(by_country, resids)
resids
# Residuals over time for every country, with an overall smoother.
# FIX: use TRUE/FALSE, never T/F — T and F are ordinary variables that can be
# reassigned, silently changing behavior.
resids %>%
  ggplot(aes(year, resid)) +
  geom_line(aes(group = country), alpha = 1/3) +
  geom_smooth(se = FALSE)

# Same view, faceted by continent.
resids %>%
  ggplot(aes(year, resid)) +
  geom_line(aes(group = country), alpha = 1/3) +
  geom_smooth(se = FALSE) +
  facet_wrap(~continent)
## Model Quality
library(broom)
broom::glance(nz_mod)
by_country %>%
mutate(glance = map(model, broom::glance)) %>%
unnest(glance)
glance <- by_country %>%
mutate(glance = map(model, broom::glance)) %>%
unnest(glance, .drop = T)
glance
glance %>%
arrange(r.squared)
glance %>%
ggplot(aes(continent, r.squared)) +
geom_jitter(width = 0.5)
bad_fit <- filter(glance, r.squared < 0.25)
gapminder %>%
semi_join(bad_fit, by = "country") %>%
ggplot(aes(year, lifeExp, color = country)) +
geom_line()
?unnest
## Exercises
# List-Columns
data.frame(x = list(1:3, 3:5))
data.frame(x = I(list(1:3, 3:5)),
y = c("1, 2", "3, 4, 5"))
tibble(
x = list(1:3, 3:5),
y = c("1, 2", "3, 4, 5")
)
tribble(
~x, ~y,
1:3, "1, 2",
3:5, "3, 4, 5"
)
# Creating List-Columns
## With Nesting
gapminder %>%
group_by(country, continent) %>%
nest()
gapminder %>%
nest(year:gdpPercap)
## From Vectorized Functions
df <- tribble(
~x1,
"a,b,c",
"d,e,f,g"
)
df %>%
mutate(x2 = stringr::str_split(x1, ","))
df %>%
mutate(x2 = stringr::str_split(x1, ",")) %>%
unnest()
sim <- tribble(
~f, ~params,
"runif", list(min = -1, max = -1),
"rnorm", list(sd = 5),
"rpois", list(lambda = 10)
)
sim
sim %>%
mutate(sims = invoke_map(f, params, n = 10))
## From Multivalued Summaries
mtcars %>%
group_by(cyl) %>%
summarise(q = quantile(mpg))
mtcars %>%
group_by(cyl) %>%
summarise(q = list(quantile(mpg)))
probs <- c(0.01, 0.25, 0.5, 0.75, 0.99)
mtcars %>%
group_by(cyl) %>%
summarise(p = list(probs), q = list(quantile(mpg, probs))) %>%
unnest()
## From a Named List
x <- list(
a = 1:5,
b = 3:4,
c = 5:6
)
x
df <- enframe(x)
df
df %>%
mutate(
smry = map2_chr(
name,
value,
~ stringr::str_c(.x, ": ", .y[1])
)
)
## Exercises
#3
mtcars %>%
group_by(cyl) %>%
summarise(q = list(quantile(mpg))) %>%
unnest()
mtcars %>%
group_by(cyl) %>%
summarise(p = list(probs), q = list(quantile(mpg, probs))) %>%
unnest()
#4
# Collapse every column into a list-column per cylinder group.
# FIX: summarise_each()/funs() are deprecated; across() is the modern
# equivalent (dplyr >= 1.0).
mtcars %>%
  group_by(cyl) %>%
  summarise(across(everything(), ~ list(.x)))
# Simplifying List-Columns
## List to Vector
df <- tribble(
~x,
letters[1:5],
1:3,
runif(5)
)
df
df %>%
mutate(
type = map_chr(x, typeof),
length = map_int(x, length)
)
df <- tribble(
~x,
list(a = 1, b = 2),
list(a = 2, c = 4)
)
df %>%
mutate(
a = map_dbl(x, "a"),
b = map_dbl(x, "b", .null = NA_real_)
)
## Unnesting
tibble(x = 1:2, y = list(1:4, 1)) %>% unnest(y)
# Ok, because x and z have the same number of elements in every row
df1 <- tribble(
~x, ~y, ~z,
1, c("a", "b"), 1:2,
2, "c", 3
)
df1
df1 %>% unnest(y, z)
# Doesn't work because y and z have different number of elements
df2 <- tribble(
~x, ~y, ~z,
1, "a", 1:2,
2, c("b", "c"), 3
)
df2
df2 %>% unnest(y, z)
|
# Plot 2: Global Active Power vs. time for 2007-02-01..2007-02-02, shown on
# screen and saved as a 480x480 PNG (closed by the dev.off() below).
# NOTE(review): the absolute setwd()/png() paths are machine-specific; kept
# for compatibility, but consider relative paths.
setwd('C:/Coursera/4 Graphs/exdata_2Fdata%2Fhousehold_power_consumption')
rawData <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";")
head(rawData)
dim(rawData)

# Subset the two dates used in this exercise
library(dplyr)
rawData$Date <- as.Date(rawData$Date, "%d/%m/%Y")
filterData <- subset(rawData, Date <= '2007-02-02' & Date >= '2007-02-01')

# Coerce Global Active Power to numeric; "?" placeholders become NA.
filterData$Global_active_power <- as.numeric(filterData$Global_active_power)

# Drop missing readings. BUG FIX: the original passed `Global_active_power
# != "?"` as subset()'s third argument (`select`), not as part of the filter
# condition; after as.numeric() the "?" values are NA anyway, so !is.na()
# covers both cases.
filterData2 <- subset(filterData, !is.na(Global_active_power))

# Build a combined date-time column (POSIXct is safer than POSIXlt inside a
# data frame).
my_data <- filterData2
my_data$DateTime <- as.POSIXct(paste(my_data$Date, my_data$Time),
format = "%Y-%m-%d %H:%M:%S")

# First view on the default device
with(my_data, plot(DateTime, Global_active_power, type = "n", ylab = "Global Active Power (kilowatts)"))
with(my_data, lines(DateTime, Global_active_power))

# Re-draw to the PNG device (dev.off() below closes it)
png(filename = "C:/Coursera/4 Graphs/Week 1/ExData_Plotting1/plot2.png", width = 480, height = 480, units = "px")
par(mar = c(4, 4, 2, 2))
with(my_data, plot(DateTime, Global_active_power, type = "n", ylab = "Global Active Power (kilowatts)"))
with(my_data, lines(DateTime, Global_active_power))
dev.off() | /plot2.R | no_license | gpmerwe/ExData_Plotting1 | R | false | false | 1,267 | r |
setwd('C:/Coursera/4 Graphs/exdata_2Fdata%2Fhousehold_power_consumption')
rawData <- read.table("./household_power_consumption.txt", header=TRUE,sep=";")
head(rawData)
dim(rawData)
# Subset the date for this exercise
# Format
library(dplyr)
rawData$Date <- as.Date(rawData$Date,"%d/%m/%Y")
filterData <- subset(rawData, Date <= '2007-02-02' & Date >= '2007-02-01')
# Format Global Active Power variable
filterData$Global_active_power <- as.numeric(filterData$Global_active_power)
# Filter to exclude NAs and ?'s
filterData2 <- subset(filterData, !is.na(Global_active_power), Global_active_power != "?")
# Create datetime variable
my_data <- filterData2
my_data$DateTime <- paste(my_data$Date, my_data$Time)
my_data$DateTime <- strptime(my_data$DateTime, "%Y-%m-%d %H:%M:%S")
# First view
with(my_data, plot(DateTime, Global_active_power, type="n", ylab="Global Active Power (kilowatts)"))
with(my_data, lines(DateTime, Global_active_power))
# Create png for result
png(filename="C:/Coursera/4 Graphs/Week 1/ExData_Plotting1/plot2.png", width = 480, height = 480, units = "px")
par(mar=c(4,4,2,2))
with(my_data, plot(DateTime, Global_active_power, type="n", ylab="Global Active Power (kilowatts)"))
with(my_data, lines(DateTime, Global_active_power))
dev.off() |
#preguntas
#pregunta numero 1
#1 x=10 var=4 n=20 al 95% de confianza
>valores <-rnorm(20,10,2)
>t.test(valores)$conf
[1] 9.052416 10.825978
attr(, "conf.level")
[1] 0.95
>IC.varianza <-c(9*var(valores)/qchisq(0.975,9)*var(valores)/qchisq(0.025,9))
> IC.varianza
[1] 1.698568 11.965486
# CON 99% DE CONFIANZA
#1 x=10 var=4
valores <-rnorm(20,10,2)
t.test(valores)$conf
9.052416 10.825978
#CON ESTE CODIGO SE GENERA INTERVALO DE COBFIANZA
attr(, "conf.level")
0.99
IC.varianza <-c(9*var(valores)/qchisq(0.995,9)*var(valores)/qchisq(0.005,9))
IC.varianza
#CON ESTE CODIGO EL INTERVALO DE CONFIANZA
#pregunta numero 2
n1<-33;xraya1<-18;S1<-3.8
alfa<-0.05
region.critica<-c(qnorm(1-alfa))
pvalor<-1-pnorm(6,18,3.8)
| /2019_2/examen 1/examen 1 beltran/TORRES_CAMILA_INFERENCIA_BELTRAN.R | no_license | ricardomayerb/ico8306 | R | false | false | 775 | r | #preguntas
#pregunta numero 1
#1 x=10 var=4 n=20 al 95% de confianza
>valores <-rnorm(20,10,2)
>t.test(valores)$conf
[1] 9.052416 10.825978
attr(, "conf.level")
[1] 0.95
>IC.varianza <-c(9*var(valores)/qchisq(0.975,9)*var(valores)/qchisq(0.025,9))
> IC.varianza
[1] 1.698568 11.965486
# CON 99% DE CONFIANZA
#1 x=10 var=4
valores <-rnorm(20,10,2)
t.test(valores)$conf
9.052416 10.825978
#CON ESTE CODIGO SE GENERA INTERVALO DE COBFIANZA
attr(, "conf.level")
0.99
IC.varianza <-c(9*var(valores)/qchisq(0.995,9)*var(valores)/qchisq(0.005,9))
IC.varianza
#CON ESTE CODIGO EL INTERVALO DE CONFIANZA
#pregunta numero 2
n1<-33;xraya1<-18;S1<-3.8
alfa<-0.05
region.critica<-c(qnorm(1-alfa))
pvalor<-1-pnorm(6,18,3.8)
|
#' Rd2HTML with knitr
#'
#' Translating Rd files to html with knitr
#'
#' @param Rd name of Rd files;
#' @param extra options of knitr, eg \code{extra="fig.align='center'"}
#' @param package name of package
#' @export
knit_Rd2HTML <- function(Rd, extra = "", package = NULL) {
Rd2html <- function(Rd, extra, package) {
base <- tools::file_path_sans_ext(Rd)
out <- paste(base, "Rhtml", sep = ".")
file.ex.R <- paste(base, "-examples.R", sep = ".")
tools::Rd2HTML(Rd, out = out, package = package, stylesheet = "stylesheet.css")
tools::Rd2ex(Rd, file.ex.R)
ex.R <- readLines(file.ex.R)
ex.R <- gsub("##D", "", ex.R)
ex.R <- ex.R[(which(ex.R=="### ** Examples") + 1):length(ex.R)]
ex.R <- c(paste("<!--begin.rcode", extra), ex.R, "end.rcode-->", sep = "\n")
Rhtml <- readLines(out)
Rhtml <- c(Rhtml[seq_len(grep("<h3>Examples</h3>", Rhtml, fixed = TRUE))], ex.R,
Rhtml[(max(grep("</pre>", Rhtml, fixed = TRUE)) + 1):length(Rhtml)])
Rhtml <- gsub("## End(Not run)", paste("## End(Not run)\nend.rcode-->\n<!--begin.rcode", extra), Rhtml, fixed=TRUE)
Rhtml <- gsub("## Not run:", "end.rcode-->\n<!--begin.rcode eval=FALSE\n## Not run:", Rhtml, fixed=TRUE)
writeLines(Rhtml, out)
file.html <- knit(out)
## Pull contents of first matched tag from parsed Rd file
get.tag <- function(tag, parseRd){
for (x in parseRd) if(attr(x, "Rd_tag") == tag) return(x)
stop("didn't find tag")
}
tmp <- tools::parse_Rd(Rd)
list(name = unlist(get.tag("\\name", tmp)), title = unlist(get.tag("\\title", tmp)), file = file.html)
}
info <- lapply(Rd, function(x) Rd2html(x, extra = extra, package = package))
if (length(Rd) > 1) {
contents <- sapply(info, function(x) sprintf("* [%s](%s) %s",
x$name, x$file, paste(x$title, collapse = "")))
contents <- gsub("\n", " ", contents)
contents <- c(paste("# Help Pages", ifelse(is.null(package), "", paste("of", package))), contents)
writeLines(paste(contents, collapse = "\n\n"), "index.md")
markdown::markdownToHTML("index.md", output="index.html", stylesheet = "stylesheet.css")
file.remove("index.md")
}
## Default stylesheet, from pandoc's tango theme, plus very minimal
## page css styling. Will be saved as stylesheet.css if it does not
## exist.
default.stylesheet <- "/* Highlighting from pandoc / tango */
table.sourceCode, tr.sourceCode, td.lineNumbers, td.sourceCode {
margin: 0; padding: 0; vertical-align: baseline; border: none; }
table.sourceCode { width: 100%; background-color: #f8f8f8; }
td.lineNumbers { text-align: right; padding-right: 4px; padding-left: 4px; color: #aaaaaa; border-right: 1px solid #aaaaaa; }
td.sourceCode { padding-left: 5px; }
pre, code { background-color: #f8f8f8; }
code > span.kw { color: #204a87; font-weight: bold; }
code > span.dt { color: #204a87; }
code > span.dv { color: #0000cf; }
code > span.bn { color: #0000cf; }
code > span.fl { color: #0000cf; }
code > span.ch { color: #4e9a06; }
code > span.st { color: #4e9a06; }
code > span.co { color: #8f5902; font-style: italic; }
code > span.ot { color: #8f5902; }
code > span.al { color: #ef2929; }
code > span.fu { color: #000000; }
code > span.er { font-weight: bold; }
body { font-family: Helvetica, sans-serif;
color: #333;
padding: 0 5px;
margin: 0 auto;
font-size: 14px;
width: 80%;
max-width: 60em; /* 960px */
position: relative;
line-height: 1.5;
}
/* Hide caption */
p.caption { display:none }
"
if(!file.exists(default.stylesheet))
writeLines(default.stylesheet, "stylesheet.css")
} | /R/knit_Rd2HTML.R | no_license | taiyun/knitr | R | false | false | 3,959 | r | #' Rd2HTML with knitr
#'
#' Translating Rd files to html with knitr
#'
#' @param Rd name of Rd files;
#' @param extra options of knitr, eg \code{extra="fig.align='center'"}
#' @param package name of package
#' @export
knit_Rd2HTML <- function(Rd, extra = "", package = NULL) {
Rd2html <- function(Rd, extra, package) {
base <- tools::file_path_sans_ext(Rd)
out <- paste(base, "Rhtml", sep = ".")
file.ex.R <- paste(base, "-examples.R", sep = ".")
tools::Rd2HTML(Rd, out = out, package = package, stylesheet = "stylesheet.css")
tools::Rd2ex(Rd, file.ex.R)
ex.R <- readLines(file.ex.R)
ex.R <- gsub("##D", "", ex.R)
ex.R <- ex.R[(which(ex.R=="### ** Examples") + 1):length(ex.R)]
ex.R <- c(paste("<!--begin.rcode", extra), ex.R, "end.rcode-->", sep = "\n")
Rhtml <- readLines(out)
Rhtml <- c(Rhtml[seq_len(grep("<h3>Examples</h3>", Rhtml, fixed = TRUE))], ex.R,
Rhtml[(max(grep("</pre>", Rhtml, fixed = TRUE)) + 1):length(Rhtml)])
Rhtml <- gsub("## End(Not run)", paste("## End(Not run)\nend.rcode-->\n<!--begin.rcode", extra), Rhtml, fixed=TRUE)
Rhtml <- gsub("## Not run:", "end.rcode-->\n<!--begin.rcode eval=FALSE\n## Not run:", Rhtml, fixed=TRUE)
writeLines(Rhtml, out)
file.html <- knit(out)
## Pull contents of first matched tag from parsed Rd file
get.tag <- function(tag, parseRd){
for (x in parseRd) if(attr(x, "Rd_tag") == tag) return(x)
stop("didn't find tag")
}
tmp <- tools::parse_Rd(Rd)
list(name = unlist(get.tag("\\name", tmp)), title = unlist(get.tag("\\title", tmp)), file = file.html)
}
info <- lapply(Rd, function(x) Rd2html(x, extra = extra, package = package))
if (length(Rd) > 1) {
contents <- sapply(info, function(x) sprintf("* [%s](%s) %s",
x$name, x$file, paste(x$title, collapse = "")))
contents <- gsub("\n", " ", contents)
contents <- c(paste("# Help Pages", ifelse(is.null(package), "", paste("of", package))), contents)
writeLines(paste(contents, collapse = "\n\n"), "index.md")
markdown::markdownToHTML("index.md", output="index.html", stylesheet = "stylesheet.css")
file.remove("index.md")
}
## Default stylesheet, from pandoc's tango theme, plus very minimal
## page css styling. Will be saved as stylesheet.css if it does not
## exist.
default.stylesheet <- "/* Highlighting from pandoc / tango */
table.sourceCode, tr.sourceCode, td.lineNumbers, td.sourceCode {
margin: 0; padding: 0; vertical-align: baseline; border: none; }
table.sourceCode { width: 100%; background-color: #f8f8f8; }
td.lineNumbers { text-align: right; padding-right: 4px; padding-left: 4px; color: #aaaaaa; border-right: 1px solid #aaaaaa; }
td.sourceCode { padding-left: 5px; }
pre, code { background-color: #f8f8f8; }
code > span.kw { color: #204a87; font-weight: bold; }
code > span.dt { color: #204a87; }
code > span.dv { color: #0000cf; }
code > span.bn { color: #0000cf; }
code > span.fl { color: #0000cf; }
code > span.ch { color: #4e9a06; }
code > span.st { color: #4e9a06; }
code > span.co { color: #8f5902; font-style: italic; }
code > span.ot { color: #8f5902; }
code > span.al { color: #ef2929; }
code > span.fu { color: #000000; }
code > span.er { font-weight: bold; }
body { font-family: Helvetica, sans-serif;
color: #333;
padding: 0 5px;
margin: 0 auto;
font-size: 14px;
width: 80%;
max-width: 60em; /* 960px */
position: relative;
line-height: 1.5;
}
/* Hide caption */
p.caption { display:none }
"
if(!file.exists(default.stylesheet))
writeLines(default.stylesheet, "stylesheet.css")
} |
## Stat 133 Midterm 2
## Thursday April 2nd
## General R commands
present="yes"
# [1 pt]
# Create [x], a numeric vector of length 1000 with
# entries: 6, 12, 18, etc.
#x <- <your code here>
x=c()
for (i in 1:2000)
x[i]=i*6
#x <- 6 * seq(1, 2000)
# [1 pt]
# Create [y], a logical vector of length 2000
# with y[i]=T if x[i] is divisible by 10, otherwise F
# y <- <your code here>
y=x%%10==0
# [1 pt]
# Create [w], a random permutation of the numeric values of a deck of cards
# (i.e. just the numbers 1 through 13 each repeated 4 times)
set.seed(2718)
#w <- <your code here>
w=sample(rep(1:13,4),42) #pay attention, repeat each number 4 times, using rep(1:13,each=4)
#w <- sample(rep(seq(1, 13), each = 4), 52, replace = F)
# [1 pt]
# Create [m], a matrix of size 10x10 with entries that are
# Exponential random variables (hint: rexp) with rate 3
# (arrange the values by column, as per default)
set.seed(344)
#m <- <your code here>
m=matrix(rexp(100,rate=3),ncol=10,nrow=10)
# [1 pt]
# Create [l], a list with 12 elements, each a vector of length 100.
# Each vector of length 100 of Poisson (hint:rpois) random variables with mean 5
set.seed(71)
#l <- <your code here>
l=list(rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2))
#l <- lapply(1:12, function(x) rpois(100, 5))
# for the next two tasks you will use the data frame infants (size 1236x15)
# LEAVE AS IS:
load("KaiserBabies.rda")
# [2 pt]
# Create a table [t] of the education level ($ed) of all married ($marital) first time ($parity=1) mothers:
#t <- <your code here>
marriedfirst=infants[infants$marital=="Married"&infants$parity==1,]
t=table(marriedfirst$ed)
# [2 pt]
# Calculate [mw], the average birthweight ($bwt) of all babies whose were full term, i.e. gestation equal or more than 259 days.
#mw <- <your code here>
mw=mean(infants[infants$gestation>=259,]$bwt,na.rm=T)
# For the next few tasks you will use the data frame family (size 14x5)
# LEAVE AS IS:
load("family.rda")
# [1 pt]
# Create [f1] a subset of family with only women over age 50
#f <- <your code here>
f1=family[family$gender=="f"&family$age>50,]
# [1 pt]
# Create [f2] a subset of family with only men 6 foot tall or more
#fm <- <your code here>
f2=family[family$gender=="m"&family$height>=72,]
# [1 pt]
# Create [f3] a subset of family of people whose name starts with T
#f3 <- <your code here>
f3=family[family$name=="Tom"|family$name=="Tim",]
# [1 pt]
# Create [f4] a subset of family with just the youngest individual (so just one row)
#f4 <- <your code here>
f4=family[family$age==min(family$age),]
## Plotting
# We will now use the dataset "iris" which is icluded in the R package.
# To look at the dataframe you can just type "iris" at the prompt
# It is a data frame of size 150x5 with measurements of 4 attributes
# for 150 flowers, 50 each of 3 different species of irises.
# [2 pts]
# Make a box plot of Sepal Length by Species (so 3 boxplots in one plot)
boxplot(iris$Sepal.Length[1:50],iris$Sepal.Length[51:100],iris$Sepal.Length[101:150])
# [3 pts]
# Make a scatterplot of petal width (y-axis) versus petal length (x-axis)
# The axes labels should be "Petal Length" and "Petal Width",
# Color the plotting symbol by Species (any 3 colors)
plot(iris$Petal.Length[1:50],iris$Petal.Width[1:50],"p",xlim=c(0,7),ylim=c(0,2.5),col="red",xlab="Petal Length",ylab="Petal Width")
points(iris$Petal.Length[51:100],iris$Petal.Width[51:100],col="blue")
points(iris$Petal.Length[101:150],iris$Petal.Width[101:150],col="yellow")
# [3 pt]
# Make a scatterplot of ( sepal length / petal length) as a function of index (order)
# Color the plotting symbol by Species (any 3 colors)
plot(1:150,iris$Sepal.Length[1:150]/iris$Petal.Length[1:150],xlim=c(1,150),col="red")
points(51:100,iris$Sepal.Length[51:100]/iris$Petal.Length[51:100],col="blue")
points(101:150,iris$Sepal.Length[101:150]/iris$Petal.Length[101:150],col="yellow")
## apply statements
# For the next few tasks you will use the list Cache500
# (list of length 500, each element is a numeric vector of various lengths)
# LEAVE AS IS:
load("Cache500.rda")
# [3 pts]
# Create [first.cache], a vector where each entry is the _first_ element of the
# corresponding vector in the list Cache500
#first.cache <- <your code here>
first.cache=as.vector(sapply(Cache500,head,n=1))
# [3 pts]
# Create [mean.cache], a vector of length 500 where each entry is the mean
# of the corresponding element of the list Cache500
#mean.cache <- <your code here>
mean.cache=sapply(Cache500,mean)
# [2 pts]
# Create [sd.cache], a vector of length 500 where each entry is the sd
# of the corresponding element of the list Cache500
#sd.cache <- <your code here>
sd.cache=sapply(Cache500,sd)
# [4 pts]
# Create [mean.long.cache], a vector where
# mean.long.cache[i] is:
# the mean of Cache500[[i]] IF it has 50 or more entries.
# NA IF Cache500[[i]] has less than 50 entries.
#mean.long.cache <- <your code here>
mean.long.cache=c()
for (i in 1:500)
{
if (length(Cache500[[i]]>=50))
mean.long.cache[i]=mean(Cache500[[i]])
else
mean.long.cache[i]=NA
}
| /midterm2/midterm2.r | no_license | adrianzhong/stat133 | R | false | false | 5,189 | r | ## Stat 133 Midterm 2
## Thursday April 2nd
## General R commands
present="yes"
# [1 pt]
# Create [x], a numeric vector of length 1000 with
# entries: 6, 12, 18, etc.
#x <- <your code here>
x=c()
for (i in 1:2000)
x[i]=i*6
#x <- 6 * seq(1, 2000)
# [1 pt]
# Create [y], a logical vector of length 2000
# with y[i]=T if x[i] is divisible by 10, otherwise F
# y <- <your code here>
y=x%%10==0
# [1 pt]
# Create [w], a random permutation of the numeric values of a deck of cards
# (i.e. just the numbers 1 through 13 each repeated 4 times)
set.seed(2718)
#w <- <your code here>
w=sample(rep(1:13,4),42) #pay attention, repeat each number 4 times, using rep(1:13,each=4)
#w <- sample(rep(seq(1, 13), each = 4), 52, replace = F)
# [1 pt]
# Create [m], a matrix of size 10x10 with entries that are
# Exponential random variables (hint: rexp) with rate 3
# (arrange the values by column, as per default)
set.seed(344)
#m <- <your code here>
m=matrix(rexp(100,rate=3),ncol=10,nrow=10)
# [1 pt]
# Create [l], a list with 12 elements, each a vector of length 100.
# Each vector of length 100 of Poisson (hint:rpois) random variables with mean 5
set.seed(71)
#l <- <your code here>
l=list(rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2),rpois(100,0.2))
#l <- lapply(1:12, function(x) rpois(100, 5))
# for the next two tasks you will use the data frame infants (size 1236x15)
# LEAVE AS IS:
load("KaiserBabies.rda")
# [2 pt]
# Create a table [t] of the education level ($ed) of all married ($marital) first time ($parity=1) mothers:
#t <- <your code here>
marriedfirst=infants[infants$marital=="Married"&infants$parity==1,]
t=table(marriedfirst$ed)
# [2 pt]
# Calculate [mw], the average birthweight ($bwt) of all babies whose were full term, i.e. gestation equal or more than 259 days.
#mw <- <your code here>
mw=mean(infants[infants$gestation>=259,]$bwt,na.rm=T)
# For the next few tasks you will use the data frame family (size 14x5)
# LEAVE AS IS:
load("family.rda")
# [1 pt]
# Create [f1] a subset of family with only women over age 50
#f <- <your code here>
f1=family[family$gender=="f"&family$age>50,]
# [1 pt]
# Create [f2] a subset of family with only men 6 foot tall or more
#fm <- <your code here>
f2=family[family$gender=="m"&family$height>=72,]
# [1 pt]
# Create [f3] a subset of family of people whose name starts with T
#f3 <- <your code here>
f3=family[family$name=="Tom"|family$name=="Tim",]
# [1 pt]
# Create [f4] a subset of family with just the youngest individual (so just one row)
#f4 <- <your code here>
f4=family[family$age==min(family$age),]
## Plotting
# We will now use the dataset "iris" which is icluded in the R package.
# To look at the dataframe you can just type "iris" at the prompt
# It is a data frame of size 150x5 with measurements of 4 attributes
# for 150 flowers, 50 each of 3 different species of irises.
# [2 pts]
# Make a box plot of Sepal Length by Species (so 3 boxplots in one plot)
boxplot(iris$Sepal.Length[1:50],iris$Sepal.Length[51:100],iris$Sepal.Length[101:150])
# [3 pts]
# Make a scatterplot of petal width (y-axis) versus petal length (x-axis)
# The axes labels should be "Petal Length" and "Petal Width",
# Color the plotting symbol by Species (any 3 colors)
plot(iris$Petal.Length[1:50],iris$Petal.Width[1:50],"p",xlim=c(0,7),ylim=c(0,2.5),col="red",xlab="Petal Length",ylab="Petal Width")
points(iris$Petal.Length[51:100],iris$Petal.Width[51:100],col="blue")
points(iris$Petal.Length[101:150],iris$Petal.Width[101:150],col="yellow")
# [3 pt]
# Make a scatterplot of ( sepal length / petal length) as a function of index (order)
# Color the plotting symbol by Species (any 3 colors)
plot(1:150,iris$Sepal.Length[1:150]/iris$Petal.Length[1:150],xlim=c(1,150),col="red")
points(51:100,iris$Sepal.Length[51:100]/iris$Petal.Length[51:100],col="blue")
points(101:150,iris$Sepal.Length[101:150]/iris$Petal.Length[101:150],col="yellow")
## apply statements
# For the next few tasks you will use the list Cache500
# (list of length 500, each element is a numeric vector of various lengths)
# LEAVE AS IS:
load("Cache500.rda")
# [3 pts]
# Create [first.cache], a vector where each entry is the _first_ element of the
# corresponding vector in the list Cache500
#first.cache <- <your code here>
first.cache=as.vector(sapply(Cache500,head,n=1))
# [3 pts]
# Create [mean.cache], a vector of length 500 where each entry is the mean
# of the corresponding element of the list Cache500
#mean.cache <- <your code here>
mean.cache=sapply(Cache500,mean)
# [2 pts]
# Create [sd.cache], a vector of length 500 where each entry is the sd
# of the corresponding element of the list Cache500
#sd.cache <- <your code here>
sd.cache=sapply(Cache500,sd)
# [4 pts]
# Create [mean.long.cache], a vector where
# mean.long.cache[i] is:
# the mean of Cache500[[i]] IF it has 50 or more entries.
# NA IF Cache500[[i]] has less than 50 entries.
#mean.long.cache <- <your code here>
mean.long.cache=c()
for (i in 1:500)
{
if (length(Cache500[[i]]>=50))
mean.long.cache[i]=mean(Cache500[[i]])
else
mean.long.cache[i]=NA
}
|
\name{mohrleg}
\alias{mohrleg}
\title{Legend for Mohr
}
\description{Legend for Mohr
}
\usage{
mohrleg(ES)
}
\arguments{
\item{ES}{Eigen Value Decomposition, output of function eigen
}
}
\details{Add notes to plots of Mohr's circles. Uses the eigenvalues of the decomposition.
}
\value{Graphical Side Effects
}
\author{
Jonathan M. Lees<jonathan.lees@unc.edu>
}
\seealso{DoMohr
}
\examples{
Stensor = matrix(c(50, 40, 40, 10), ncol=2)
DoMohr(Stensor)
}
\keyword{misc}
| /man/mohrleg.Rd | no_license | cran/geophys | R | false | false | 477 | rd | \name{mohrleg}
\alias{mohrleg}
\title{Legend for Mohr
}
\description{Legend for Mohr
}
\usage{
mohrleg(ES)
}
\arguments{
\item{ES}{Eigen Value Decomposition, output of function eigen
}
}
\details{Add notes to plots of Mohr's circles. Uses the eigenvalues of the decomposition.
}
\value{Graphical Side Effects
}
\author{
Jonathan M. Lees<jonathan.lees@unc.edu>
}
\seealso{DoMohr
}
\examples{
Stensor = matrix(c(50, 40, 40, 10), ncol=2)
DoMohr(Stensor)
}
\keyword{misc}
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SQLcontext.R: SQLContext-driven functions
# Map top level R type to SQL type
getInternalType <- function(x) {
# class of POSIXlt is c("POSIXlt" "POSIXt")
switch(class(x)[[1]],
integer = "integer",
character = "string",
logical = "boolean",
double = "double",
numeric = "double",
raw = "binary",
list = "array",
struct = "struct",
environment = "map",
Date = "date",
POSIXlt = "timestamp",
POSIXct = "timestamp",
stop(paste("Unsupported type for DataFrame:", class(x))))
}
#' infer the SQL type
infer_type <- function(x) {
if (is.null(x)) {
stop("can not infer type from NULL")
}
type <- getInternalType(x)
if (type == "map") {
stopifnot(length(x) > 0)
key <- ls(x)[[1]]
paste0("map<string,", infer_type(get(key, x)), ">")
} else if (type == "array") {
stopifnot(length(x) > 0)
paste0("array<", infer_type(x[[1]]), ">")
} else if (type == "struct") {
stopifnot(length(x) > 0)
names <- names(x)
stopifnot(!is.null(names))
type <- lapply(seq_along(x), function(i) {
paste0(names[[i]], ":", infer_type(x[[i]]), ",")
})
type <- Reduce(paste0, type)
type <- paste0("struct<", substr(type, 1, nchar(type) - 1), ">")
} else if (length(x) > 1) {
paste0("array<", infer_type(x[[1]]), ">")
} else {
type
}
}
#' Create a DataFrame
#'
#' Converts R data.frame or list into DataFrame.
#'
#' @param sqlContext A SQLContext
#' @param data An RDD or list or data.frame
#' @param schema a list of column names or named list (StructType), optional
#' @return an DataFrame
#' @rdname createDataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' df1 <- as.DataFrame(sqlContext, iris)
#' df2 <- as.DataFrame(sqlContext, list(3,4,5,6))
#' df3 <- createDataFrame(sqlContext, iris)
#' }
# TODO(davies): support sampling and infer type from NA
createDataFrame <- function(sqlContext, data, schema = NULL, samplingRatio = 1.0) {
if (is.data.frame(data)) {
# get the names of columns, they will be put into RDD
if (is.null(schema)) {
schema <- names(data)
}
# get rid of factor type
cleanCols <- function(x) {
if (is.factor(x)) {
as.character(x)
} else {
x
}
}
# drop factors and wrap lists
data <- setNames(lapply(data, cleanCols), NULL)
# check if all columns have supported type
lapply(data, getInternalType)
# convert to rows
args <- list(FUN = list, SIMPLIFY = FALSE, USE.NAMES = FALSE)
data <- do.call(mapply, append(args, data))
}
if (is.list(data)) {
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sqlContext)
rdd <- parallelize(sc, data)
} else if (inherits(data, "RDD")) {
rdd <- data
} else {
stop(paste("unexpected type:", class(data)))
}
if (is.null(schema) || (!inherits(schema, "structType") && is.null(names(schema)))) {
row <- first(rdd)
names <- if (is.null(schema)) {
names(row)
} else {
as.list(schema)
}
if (is.null(names)) {
names <- lapply(1:length(row), function(x) {
paste("_", as.character(x), sep = "")
})
}
# SPAKR-SQL does not support '.' in column name, so replace it with '_'
# TODO(davies): remove this once SPARK-2775 is fixed
names <- lapply(names, function(n) {
nn <- gsub("[.]", "_", n)
if (nn != n) {
warning(paste("Use", nn, "instead of", n, " as column name"))
}
nn
})
types <- lapply(row, infer_type)
fields <- lapply(1:length(row), function(i) {
structField(names[[i]], types[[i]], TRUE)
})
schema <- do.call(structType, fields)
}
stopifnot(class(schema) == "structType")
jrdd <- getJRDD(lapply(rdd, function(x) x), "row")
srdd <- callJMethod(jrdd, "rdd")
sdf <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "createDF",
srdd, schema$jobj, sqlContext)
dataFrame(sdf)
}
#' @rdname createDataFrame
#' @aliases createDataFrame
#' @export
as.DataFrame <- function(sqlContext, data, schema = NULL, samplingRatio = 1.0) {
createDataFrame(sqlContext, data, schema, samplingRatio)
}
#' toDF
#'
#' Converts an RDD to a DataFrame by infer the types.
#'
#' @param x An RDD
#'
#' @rdname DataFrame
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' rdd <- lapply(parallelize(sc, 1:10), function(x) list(a=x, b=as.character(x)))
#' df <- toDF(rdd)
#'}
setGeneric("toDF", function(x, ...) { standardGeneric("toDF") })
setMethod("toDF", signature(x = "RDD"),
function(x, ...) {
sqlContext <- if (exists(".sparkRHivesc", envir = .sparkREnv)) {
get(".sparkRHivesc", envir = .sparkREnv)
} else if (exists(".sparkRSQLsc", envir = .sparkREnv)) {
get(".sparkRSQLsc", envir = .sparkREnv)
} else {
stop("no SQL context available")
}
createDataFrame(sqlContext, x, ...)
})
#' Create a DataFrame from a JSON file.
#'
#' Loads a JSON file (one object per line), returning the result as a DataFrame
#' It goes through the entire dataset once to determine the schema.
#'
#' @param sqlContext SQLContext to use
#' @param path Path of file to read. A vector of multiple paths is allowed.
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' }
jsonFile <- function(sqlContext, path) {
# Allow the user to have a more flexible definiton of the text file path
path <- suppressWarnings(normalizePath(path))
# Convert a string vector of paths to a string containing comma separated paths
path <- paste(path, collapse = ",")
sdf <- callJMethod(sqlContext, "jsonFile", path)
dataFrame(sdf)
}
#' JSON RDD
#'
#' Loads an RDD storing one JSON object per string as a DataFrame.
#'
#' @param sqlContext SQLContext to use
#' @param rdd An RDD of JSON string
#' @param schema A StructType object to use as schema
#' @param samplingRatio The ratio of simpling used to infer the schema
#' @return A DataFrame
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' rdd <- texFile(sc, "path/to/json")
#' df <- jsonRDD(sqlContext, rdd)
#'}
# TODO: support schema
jsonRDD <- function(sqlContext, rdd, schema = NULL, samplingRatio = 1.0) {
rdd <- serializeToString(rdd)
if (is.null(schema)) {
sdf <- callJMethod(sqlContext, "jsonRDD", callJMethod(getJRDD(rdd), "rdd"), samplingRatio)
dataFrame(sdf)
} else {
stop("not implemented")
}
}
#' Create a DataFrame from a Parquet file.
#'
#' Loads a Parquet file, returning the result as a DataFrame.
#'
#' @param sqlContext SQLContext to use
#' @param ... Path(s) of parquet file(s) to read.
#' @return DataFrame
#' @export
# TODO: Implement saveasParquetFile and write examples for both
parquetFile <- function(sqlContext, ...) {
# Allow the user to have a more flexible definiton of the text file path
paths <- lapply(list(...), function(x) suppressWarnings(normalizePath(x)))
sdf <- callJMethod(sqlContext, "parquetFile", paths)
dataFrame(sdf)
}
#' SQL Query
#'
#' Executes a SQL query using Spark, returning the result as a DataFrame.
#'
#' @param sqlContext SQLContext to use
#' @param sqlQuery A character vector containing the SQL query
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' registerTempTable(df, "table")
#' new_df <- sql(sqlContext, "SELECT * FROM table")
#' }
sql <- function(sqlContext, sqlQuery) {
sdf <- callJMethod(sqlContext, "sql", sqlQuery)
dataFrame(sdf)
}
#' Create a DataFrame from a SparkSQL Table
#'
#' Returns the specified Table as a DataFrame. The Table must have already been registered
#' in the SQLContext.
#'
#' @param sqlContext SQLContext to use
#' @param tableName The SparkSQL Table to convert to a DataFrame.
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' registerTempTable(df, "table")
#' new_df <- table(sqlContext, "table")
#' }
table <- function(sqlContext, tableName) {
sdf <- callJMethod(sqlContext, "table", tableName)
dataFrame(sdf)
}
#' Tables
#'
#' Returns a DataFrame containing names of tables in the given database.
#'
#' @param sqlContext SQLContext to use
#' @param databaseName name of the database
#' @return a DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' tables(sqlContext, "hive")
#' }
tables <- function(sqlContext, databaseName = NULL) {
jdf <- if (is.null(databaseName)) {
callJMethod(sqlContext, "tables")
} else {
callJMethod(sqlContext, "tables", databaseName)
}
dataFrame(jdf)
}
#' Table Names
#'
#' Returns the names of tables in the given database as an array.
#'
#' @param sqlContext SQLContext to use
#' @param databaseName name of the database
#' @return a list of table names
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' tableNames(sqlContext, "hive")
#' }
tableNames <- function(sqlContext, databaseName = NULL) {
if (is.null(databaseName)) {
callJMethod(sqlContext, "tableNames")
} else {
callJMethod(sqlContext, "tableNames", databaseName)
}
}
#' Cache Table
#'
#' Caches the specified table in-memory.
#'
#' @param sqlContext SQLContext to use
#' @param tableName The name of the table being cached
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' registerTempTable(df, "table")
#' cacheTable(sqlContext, "table")
#' }
cacheTable <- function(sqlContext, tableName) {
callJMethod(sqlContext, "cacheTable", tableName)
}
#' Uncache Table
#'
#' Removes the specified table from the in-memory cache.
#'
#' @param sqlContext SQLContext to use
#' @param tableName The name of the table being uncached
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' registerTempTable(df, "table")
#' uncacheTable(sqlContext, "table")
#' }
uncacheTable <- function(sqlContext, tableName) {
callJMethod(sqlContext, "uncacheTable", tableName)
}
#' Clear Cache
#'
#' Removes all cached tables from the in-memory cache.
#'
#' @param sqlContext SQLContext to use
#' @examples
#' \dontrun{
#' clearCache(sqlContext)
#' }
clearCache <- function(sqlContext) {
callJMethod(sqlContext, "clearCache")
}
#' Drop Temporary Table
#'
#' Drops the temporary table with the given table name in the catalog.
#' If the table has been cached/persisted before, it's also unpersisted.
#'
#' @param sqlContext SQLContext to use
#' @param tableName The name of the SparkSQL table to be dropped.
#' @examples
#' \dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' df <- read.df(sqlContext, path, "parquet")
#' registerTempTable(df, "table")
#' dropTempTable(sqlContext, "table")
#' }
dropTempTable <- function(sqlContext, tableName) {
if (class(tableName) != "character") {
stop("tableName must be a string.")
}
callJMethod(sqlContext, "dropTempTable", tableName)
}
#' Load an DataFrame
#'
#' Returns the dataset in a data source as a DataFrame
#'
#' The data source is specified by the `source` and a set of options(...).
#' If `source` is not specified, the default data source configured by
#' "spark.sql.sources.default" will be used.
#'
#' @param sqlContext SQLContext to use
#' @param path The path of files to load
#' @param source The name of external data source
#' @param schema The data schema defined in structType
#' @return DataFrame
#' @rdname read.df
#' @name read.df
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' df1 <- read.df(sqlContext, "path/to/file.json", source = "json")
#' schema <- structType(structField("name", "string"),
#' structField("info", "map<string,double>"))
#' df2 <- read.df(sqlContext, mapTypeJsonPath, "json", schema)
#' df3 <- loadDF(sqlContext, "data/test_table", "parquet", mergeSchema = "true")
#' }
read.df <- function(sqlContext, path = NULL, source = NULL, schema = NULL, ...) {
options <- varargsToEnv(...)
if (!is.null(path)) {
options[["path"]] <- path
}
if (is.null(source)) {
sqlContext <- get(".sparkRSQLsc", envir = .sparkREnv)
source <- callJMethod(sqlContext, "getConf", "spark.sql.sources.default",
"org.apache.spark.sql.parquet")
}
if (!is.null(schema)) {
stopifnot(class(schema) == "structType")
sdf <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "loadDF", sqlContext, source,
schema$jobj, options)
} else {
sdf <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "loadDF", sqlContext, source, options)
}
dataFrame(sdf)
}
#' @rdname read.df
#' @name loadDF
loadDF <- function(sqlContext, path = NULL, source = NULL, schema = NULL, ...) {
read.df(sqlContext, path, source, schema, ...)
}
#' Create an external table
#'
#' Creates an external table based on the dataset in a data source,
#' Returns the DataFrame associated with the external table.
#'
#' The data source is specified by the `source` and a set of options(...).
#' If `source` is not specified, the default data source configured by
#' "spark.sql.sources.default" will be used.
#'
#' @param sqlContext SQLContext to use
#' @param tableName A name of the table
#' @param path The path of files to load
#' @param source the name of external data source
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' df <- sparkRSQL.createExternalTable(sqlContext, "myjson", path="path/to/json", source="json")
#' }
createExternalTable <- function(sqlContext, tableName, path = NULL, source = NULL, ...) {
options <- varargsToEnv(...)
if (!is.null(path)) {
options[["path"]] <- path
}
sdf <- callJMethod(sqlContext, "createExternalTable", tableName, source, options)
dataFrame(sdf)
}
| /R/pkg/R/SQLContext.R | permissive | cl9200/spark | R | false | false | 15,364 | r | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SQLContext.R: SQLContext-driven functions
# Map a top level R type to its SparkSQL type string
getInternalType <- function(x) {
  # Dispatch on the first class element only, since e.g. POSIXlt has
  # class c("POSIXlt", "POSIXt").
  cls <- class(x)[[1]]
  typeMap <- c(integer = "integer",
               character = "string",
               logical = "boolean",
               double = "double",
               numeric = "double",
               raw = "binary",
               list = "array",
               struct = "struct",
               environment = "map",
               Date = "date",
               POSIXlt = "timestamp",
               POSIXct = "timestamp")
  if (!(cls %in% names(typeMap))) {
    stop(paste("Unsupported type for DataFrame:", class(x)))
  }
  typeMap[[cls]]
}
#' infer the SQL type
#'
#' Recursively maps an R object to its Spark SQL type string,
#' e.g. "array<string>" or "map<string,double>".
infer_type <- function(x) {
  if (is.null(x)) {
    stop("can not infer type from NULL")
  }
  type <- getInternalType(x)
  if (type == "map") {
    # environments become maps; keys are assumed to be strings and the value
    # type is inferred from the first entry (all values assumed homogeneous)
    stopifnot(length(x) > 0)
    key <- ls(x)[[1]]
    paste0("map<string,", infer_type(get(key, x)), ">")
  } else if (type == "array") {
    # lists become arrays; element type inferred from the first element
    stopifnot(length(x) > 0)
    paste0("array<", infer_type(x[[1]]), ">")
  } else if (type == "struct") {
    # build "struct<name:type,...>" from the named fields
    stopifnot(length(x) > 0)
    names <- names(x)
    stopifnot(!is.null(names))
    type <- lapply(seq_along(x), function(i) {
      paste0(names[[i]], ":", infer_type(x[[i]]), ",")
    })
    type <- Reduce(paste0, type)
    # trim the trailing comma before closing the struct
    type <- paste0("struct<", substr(type, 1, nchar(type) - 1), ">")
  } else if (length(x) > 1) {
    # an atomic vector of length > 1 is treated as an array of its element type
    paste0("array<", infer_type(x[[1]]), ">")
  } else {
    type
  }
}
#' Create a DataFrame
#'
#' Converts R data.frame or list into DataFrame.
#'
#' @param sqlContext A SQLContext
#' @param data An RDD or list or data.frame
#' @param schema a list of column names or named list (StructType), optional
#' @return an DataFrame
#' @rdname createDataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' df1 <- as.DataFrame(sqlContext, iris)
#' df2 <- as.DataFrame(sqlContext, list(3,4,5,6))
#' df3 <- createDataFrame(sqlContext, iris)
#' }
# TODO(davies): support sampling and infer type from NA
createDataFrame <- function(sqlContext, data, schema = NULL, samplingRatio = 1.0) {
  if (is.data.frame(data)) {
    # get the names of columns, they will be put into RDD
    if (is.null(schema)) {
      schema <- names(data)
    }
    # get rid of factor type
    cleanCols <- function(x) {
      if (is.factor(x)) {
        as.character(x)
      } else {
        x
      }
    }
    # drop factors and wrap lists
    data <- setNames(lapply(data, cleanCols), NULL)
    # check if all columns have supported type (getInternalType stops otherwise)
    lapply(data, getInternalType)
    # convert to rows: mapply zips element i of every column into one list,
    # producing a list of row-lists
    args <- list(FUN = list, SIMPLIFY = FALSE, USE.NAMES = FALSE)
    data <- do.call(mapply, append(args, data))
  }
  if (is.list(data)) {
    sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sqlContext)
    rdd <- parallelize(sc, data)
  } else if (inherits(data, "RDD")) {
    rdd <- data
  } else {
    stop(paste("unexpected type:", class(data)))
  }
  # build a structType schema when the caller supplied none, or only a
  # vector of column names rather than a full structType
  if (is.null(schema) || (!inherits(schema, "structType") && is.null(names(schema)))) {
    # sample the first row to infer field names and types
    row <- first(rdd)
    names <- if (is.null(schema)) {
      names(row)
    } else {
      as.list(schema)
    }
    if (is.null(names)) {
      # fall back to positional column names "_1", "_2", ...
      names <- lapply(1:length(row), function(x) {
        paste("_", as.character(x), sep = "")
      })
    }
    # SPARK-SQL does not support '.' in column name, so replace it with '_'
    # TODO(davies): remove this once SPARK-2775 is fixed
    names <- lapply(names, function(n) {
      nn <- gsub("[.]", "_", n)
      if (nn != n) {
        warning(paste("Use", nn, "instead of", n, " as column name"))
      }
      nn
    })
    types <- lapply(row, infer_type)
    fields <- lapply(1:length(row), function(i) {
      structField(names[[i]], types[[i]], TRUE)
    })
    schema <- do.call(structType, fields)
  }
  stopifnot(class(schema) == "structType")
  # serialize rows and hand the RDD plus schema to the JVM to build the DataFrame
  jrdd <- getJRDD(lapply(rdd, function(x) x), "row")
  srdd <- callJMethod(jrdd, "rdd")
  sdf <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "createDF",
                     srdd, schema$jobj, sqlContext)
  dataFrame(sdf)
}
#' @rdname createDataFrame
#' @aliases createDataFrame
#' @export
as.DataFrame <- function(sqlContext, data, schema = NULL, samplingRatio = 1.0) {
  # Alias of createDataFrame; forwards all arguments unchanged
  createDataFrame(sqlContext, data, schema, samplingRatio)
}
#' toDF
#'
#' Converts an RDD to a DataFrame by infer the types.
#'
#' @param x An RDD
#'
#' @rdname DataFrame
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' rdd <- lapply(parallelize(sc, 1:10), function(x) list(a=x, b=as.character(x)))
#' df <- toDF(rdd)
#'}
setGeneric("toDF", function(x, ...) { standardGeneric("toDF") })
setMethod("toDF", signature(x = "RDD"),
          function(x, ...) {
            # Prefer the Hive context when one was initialized in this SparkR
            # session, otherwise fall back to the plain SQL context; fail if
            # neither has been created yet.
            sqlContext <- if (exists(".sparkRHivesc", envir = .sparkREnv)) {
              get(".sparkRHivesc", envir = .sparkREnv)
            } else if (exists(".sparkRSQLsc", envir = .sparkREnv)) {
              get(".sparkRSQLsc", envir = .sparkREnv)
            } else {
              stop("no SQL context available")
            }
            createDataFrame(sqlContext, x, ...)
          })
#' Create a DataFrame from a JSON file.
#'
#' Loads a JSON file (one object per line), returning the result as a DataFrame
#' It goes through the entire dataset once to determine the schema.
#'
#' @param sqlContext SQLContext to use
#' @param path Path of file to read. A vector of multiple paths is allowed.
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' }
jsonFile <- function(sqlContext, path) {
  # Normalize every supplied path (warnings about non-existent files are
  # suppressed), then join them into the single comma-separated string that
  # the JVM-side jsonFile method expects.
  normalized <- suppressWarnings(normalizePath(path))
  joined <- paste(normalized, collapse = ",")
  dataFrame(callJMethod(sqlContext, "jsonFile", joined))
}
#' JSON RDD
#'
#' Loads an RDD storing one JSON object per string as a DataFrame.
#'
#' @param sqlContext SQLContext to use
#' @param rdd An RDD of JSON string
#' @param schema A StructType object to use as schema
#' @param samplingRatio The ratio of sampling used to infer the schema
#' @return A DataFrame
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' rdd <- texFile(sc, "path/to/json")
#' df <- jsonRDD(sqlContext, rdd)
#'}
# TODO: support schema
jsonRDD <- function(sqlContext, rdd, schema = NULL, samplingRatio = 1.0) {
  # Re-serialize the RDD so each element is a plain JSON string.
  strRDD <- serializeToString(rdd)
  # Passing an explicit schema is not implemented yet (see TODO above).
  if (!is.null(schema)) {
    stop("not implemented")
  }
  jrdd <- callJMethod(getJRDD(strRDD), "rdd")
  dataFrame(callJMethod(sqlContext, "jsonRDD", jrdd, samplingRatio))
}
#' Create a DataFrame from a Parquet file.
#'
#' Loads a Parquet file, returning the result as a DataFrame.
#'
#' @param sqlContext SQLContext to use
#' @param ... Path(s) of parquet file(s) to read.
#' @return DataFrame
#' @export
# TODO: Implement saveasParquetFile and write examples for both
parquetFile <- function(sqlContext, ...) {
  # Normalize each supplied path, suppressing warnings for paths that do not
  # (yet) exist locally, and hand the list of paths to the JVM.
  paths <- lapply(list(...), function(p) suppressWarnings(normalizePath(p)))
  dataFrame(callJMethod(sqlContext, "parquetFile", paths))
}
#' SQL Query
#'
#' Executes a SQL query using Spark, returning the result as a DataFrame.
#'
#' @param sqlContext SQLContext to use
#' @param sqlQuery A character vector containing the SQL query
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' registerTempTable(df, "table")
#' new_df <- sql(sqlContext, "SELECT * FROM table")
#' }
sql <- function(sqlContext, sqlQuery) {
  # Run the query on the JVM SQLContext and wrap the returned Java DataFrame
  sdf <- callJMethod(sqlContext, "sql", sqlQuery)
  dataFrame(sdf)
}
#' Create a DataFrame from a SparkSQL Table
#'
#' Returns the specified Table as a DataFrame. The Table must have already been registered
#' in the SQLContext.
#'
#' @param sqlContext SQLContext to use
#' @param tableName The SparkSQL Table to convert to a DataFrame.
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' registerTempTable(df, "table")
#' new_df <- table(sqlContext, "table")
#' }
table <- function(sqlContext, tableName) {
  # Look up an already-registered table on the JVM side and wrap it
  sdf <- callJMethod(sqlContext, "table", tableName)
  dataFrame(sdf)
}
#' Tables
#'
#' Returns a DataFrame containing names of tables in the given database.
#'
#' @param sqlContext SQLContext to use
#' @param databaseName name of the database
#' @return a DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' tables(sqlContext, "hive")
#' }
tables <- function(sqlContext, databaseName = NULL) {
  # Assemble the JVM call arguments, then invoke whichever overload of
  # SQLContext.tables matches (zero-arg vs. one database-name argument).
  callArgs <- list(sqlContext, "tables")
  if (!is.null(databaseName)) {
    callArgs <- c(callArgs, databaseName)
  }
  dataFrame(do.call(callJMethod, callArgs))
}
#' Table Names
#'
#' Returns the names of tables in the given database as an array.
#'
#' @param sqlContext SQLContext to use
#' @param databaseName name of the database
#' @return a list of table names
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' tableNames(sqlContext, "hive")
#' }
tableNames <- function(sqlContext, databaseName = NULL) {
  # The JVM method returns the vector of table names directly, so no
  # dataFrame() wrapping is needed here.
  if (is.null(databaseName)) {
    return(callJMethod(sqlContext, "tableNames"))
  }
  callJMethod(sqlContext, "tableNames", databaseName)
}
#' Cache Table
#'
#' Caches the specified table in-memory.
#'
#' @param sqlContext SQLContext to use
#' @param tableName The name of the table being cached
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' registerTempTable(df, "table")
#' cacheTable(sqlContext, "table")
#' }
cacheTable <- function(sqlContext, tableName) {
  # Delegates to SQLContext.cacheTable on the JVM to pin the table in memory
  callJMethod(sqlContext, "cacheTable", tableName)
}
#' Uncache Table
#'
#' Removes the specified table from the in-memory cache.
#'
#' @param sqlContext SQLContext to use
#' @param tableName The name of the table being uncached
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlContext, path)
#' registerTempTable(df, "table")
#' uncacheTable(sqlContext, "table")
#' }
uncacheTable <- function(sqlContext, tableName) {
  # Delegates to SQLContext.uncacheTable on the JVM to evict the cached table
  callJMethod(sqlContext, "uncacheTable", tableName)
}
#' Clear Cache
#'
#' Removes all cached tables from the in-memory cache.
#'
#' @param sqlContext SQLContext to use
#' @examples
#' \dontrun{
#' clearCache(sqlContext)
#' }
clearCache <- function(sqlContext) {
  # Delegates to SQLContext.clearCache on the JVM, evicting all cached tables
  callJMethod(sqlContext, "clearCache")
}
#' Drop Temporary Table
#'
#' Drops the temporary table with the given table name in the catalog.
#' If the table has been cached/persisted before, it's also unpersisted.
#'
#' @param sqlContext SQLContext to use
#' @param tableName The name of the SparkSQL table to be dropped.
#' @examples
#' \dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' df <- read.df(sqlContext, path, "parquet")
#' registerTempTable(df, "table")
#' dropTempTable(sqlContext, "table")
#' }
dropTempTable <- function(sqlContext, tableName) {
  # Validate before calling into the JVM: tableName must be a character string.
  # is.character() is used instead of comparing class(tableName) against
  # "character": the class comparison yields a length > 1 condition for
  # multi-class objects, which is an error under R >= 4.2.
  if (!is.character(tableName)) {
    stop("tableName must be a string.")
  }
  callJMethod(sqlContext, "dropTempTable", tableName)
}
#' Load an DataFrame
#'
#' Returns the dataset in a data source as a DataFrame
#'
#' The data source is specified by the `source` and a set of options(...).
#' If `source` is not specified, the default data source configured by
#' "spark.sql.sources.default" will be used.
#'
#' @param sqlContext SQLContext to use
#' @param path The path of files to load
#' @param source The name of external data source
#' @param schema The data schema defined in structType
#' @return DataFrame
#' @rdname read.df
#' @name read.df
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' df1 <- read.df(sqlContext, "path/to/file.json", source = "json")
#' schema <- structType(structField("name", "string"),
#' structField("info", "map<string,double>"))
#' df2 <- read.df(sqlContext, mapTypeJsonPath, "json", schema)
#' df3 <- loadDF(sqlContext, "data/test_table", "parquet", mergeSchema = "true")
#' }
read.df <- function(sqlContext, path = NULL, source = NULL, schema = NULL, ...) {
  # Extra arguments in ... become data source options; the path (when given)
  # is passed as the "path" option
  options <- varargsToEnv(...)
  if (!is.null(path)) {
    options[["path"]] <- path
  }
  if (is.null(source)) {
    # fall back to the default data source configured on the SQL context
    sqlContext <- get(".sparkRSQLsc", envir = .sparkREnv)
    source <- callJMethod(sqlContext, "getConf", "spark.sql.sources.default",
                          "org.apache.spark.sql.parquet")
  }
  if (!is.null(schema)) {
    stopifnot(class(schema) == "structType")
    sdf <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "loadDF", sqlContext, source,
                       schema$jobj, options)
  } else {
    sdf <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "loadDF", sqlContext, source, options)
  }
  dataFrame(sdf)
}
#' @rdname read.df
#' @name loadDF
loadDF <- function(sqlContext, path = NULL, source = NULL, schema = NULL, ...) {
  # Alias of read.df; forwards all arguments (including data source options)
  read.df(sqlContext, path, source, schema, ...)
}
#' Create an external table
#'
#' Creates an external table based on the dataset in a data source,
#' Returns the DataFrame associated with the external table.
#'
#' The data source is specified by the `source` and a set of options(...).
#' If `source` is not specified, the default data source configured by
#' "spark.sql.sources.default" will be used.
#'
#' @param sqlContext SQLContext to use
#' @param tableName A name of the table
#' @param path The path of files to load
#' @param source the name of external data source
#' @return DataFrame
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#' df <- createExternalTable(sqlContext, "myjson", path="path/to/json", source="json")
#' }
createExternalTable <- function(sqlContext, tableName, path = NULL, source = NULL, ...) {
  # Build the option set for the external data source from ..., storing the
  # load path (if any) under the "path" key, then create the table on the JVM.
  options <- varargsToEnv(...)
  if (!is.null(path)) {
    options[["path"]] <- path
  }
  dataFrame(callJMethod(sqlContext, "createExternalTable", tableName, source, options))
}
|
# checks other input objects
stratEst.check.other <- function( response , sample.specific , r.probs , r.trembles , select , min.strategies , crit , se , outer.runs , outer.tol , outer.max , inner.runs , inner.tol , inner.max , lcr.runs , lcr.tol , lcr.max , bs.samples , step.size , penalty , verbose , quantiles ){
  # Validates the tuning arguments of stratEst and translates the character
  # options 'sample.specific' and 'select' into logical flags.
  #
  # Returns a named list with the selection flags, the sample-specific flags,
  # the quantile vector and the verbosity settings. Stops with an informative
  # error message on any invalid input.
  #
  # Note: the original quantiles class-check raised an error message that
  # wrongly referred to 'print.summary'; this is fixed below.

  # helper: stop unless value is a non-negative whole number
  check_count <- function(value, what, default) {
    if (value < 0 || value %% 1 != 0) {
      stop(paste0("stratEst error: The ", what, " must be a positive integer. Default is ", default, "."))
    }
  }
  # helper: stop unless value is a tolerance in [0, 1)
  check_tol <- function(value, what) {
    if (value < 0 || value >= 1) {
      stop(paste0("stratEst error: The tolerance of the ", what, " runs must be a small numeric value. Default is 0."))
    }
  }

  # check response
  if (!(response %in% c("mixed", "pure"))) {
    stop("stratEst error: The input object 'response' has to be one of the following: \"mixed\" or \"pure\". Default is \"mixed\".")
  }

  # check sample.specific and derive the sample-specific flags
  specific_shares <- FALSE
  specific_probs <- FALSE
  specific_trembles <- FALSE
  specific_coefficients <- FALSE
  if (!is.null(sample.specific)) {
    if (!("character" %in% class(sample.specific))) {
      stop("stratEst error: The input object 'sample.specific' has to be a character vector.")
    }
    if (any(!(sample.specific %in% c("shares", "probs", "trembles", "coefficients")))) {
      stop("stratEst error: The input object 'sample.specific' should only contain the following characters: \"shares\", \"probs\", \"trembles\" or \"coefficients\".")
    }
    specific_shares <- "shares" %in% sample.specific
    specific_probs <- "probs" %in% sample.specific
    specific_trembles <- "trembles" %in% sample.specific
    specific_coefficients <- "coefficients" %in% sample.specific
  }

  # check r.probs
  if (!(r.probs %in% c("no", "strategies", "states", "global"))) {
    stop("stratEst error: The input object 'r.probs' has to be one of the following: \"no\", \"strategies\", \"states\" or \"global\". Default is \"no\".")
  }
  # check r.trembles
  if (!(r.trembles %in% c("no", "strategies", "states", "global"))) {
    stop("stratEst error: The input object 'r.trembles' has to be one of the following: \"no\", \"strategies\", \"states\" or \"global\". Default is \"no\".")
  }

  # check select and derive the selection flags
  select_strategies <- FALSE
  select_probs <- FALSE
  select_trembles <- FALSE
  if (!is.null(select)) {
    if (!("character" %in% class(select))) {
      stop("stratEst error: The input object 'select' has to be a character vector.")
    }
    if (any(!(select %in% c("probs", "trembles", "strategies")))) {
      stop("stratEst error: The input object 'select' should only contain the following characters: \"strategies\", \"probs\" or \"trembles\".")
    }
    select_strategies <- "strategies" %in% select
    select_probs <- "probs" %in% select
    select_trembles <- "trembles" %in% select
  }

  # check min.strategies
  if (min.strategies < 1 || min.strategies %% 1 != 0) {
    stop("stratEst error: The minimum number of strategies must be a positive integer. Default is 1.")
  }
  # check crit
  if (!(crit %in% c("aic", "bic", "icl"))) {
    stop("stratEst error: The input object 'crit' has to be one of the following: \"aic\", \"bic\", or \"icl\". Default is \"bic\".")
  }
  # check se
  if (!(se %in% c("analytic", "bootstrap"))) {
    stop("stratEst error: The input object 'se' has to be one of the following: \"analytic\", or \"bootstrap\". Default is \"analytic\".")
  }

  # check the run counts and iteration limits
  check_count(outer.runs, "number of outer runs", 100)
  check_count(inner.runs, "number of inner runs", 100)
  check_count(lcr.runs, "number of lcr runs", 100)
  check_count(outer.max, "maximum of the number function evaluations of the outer runs", 1000)
  check_count(inner.max, "maximum of the number function evaluations of the inner runs", 100)
  check_count(lcr.max, "maximum of the number function evaluations of the lcr runs", 1000)

  # check the tolerances
  check_tol(outer.tol, "outer")
  check_tol(inner.tol, "inner")
  check_tol(lcr.tol, "lcr")

  # check bs.samples
  check_count(bs.samples, "number of bootstrap samples specified by the argument 'bs.samples'", 1000)
  # check step.size
  if (step.size < 0) {
    stop("stratEst error: The step size specified by the argument 'step.size' must be a positive number. Default is 1.")
  }
  # check penalty
  if (!is.logical(penalty)) {
    stop("stratEst error: The function argument 'penalty' must be boolean. Default is FALSE.")
  }

  # check verbose and derive the verbosity settings
  if (!("logical" %in% class(verbose))) {
    stop("stratEst error: The input argument 'verbose' must be a logical.")
  }
  print.messages <- verbose[1]
  print.summary <- FALSE

  # check quantiles
  if (!("numeric" %in% class(quantiles))) {
    stop("stratEst error: The input argument 'quantiles' must be a numeric vector.")
  }
  if (any(quantiles > 1) || any(quantiles < 0)) {
    stop("stratEst error: The elements of the input argument 'quantiles' must be numeric values between zero and one.")
  }

  list(
    "select.strategies" = select_strategies,
    "select.responses" = select_probs,
    "select.trembles" = select_trembles,
    "specific.shares" = specific_shares,
    "specific.responses" = specific_probs,
    "specific.trembles" = specific_trembles,
    "specific.coefficients" = specific_coefficients,
    "quantile.vec" = quantiles,
    "print.messages" = print.messages,
    "print.summary" = print.summary
  )
}
| /R/stratEst_check_other.R | no_license | cran/stratEst | R | false | false | 7,064 | r | # checks other input objects
stratEst.check.other <- function( response , sample.specific , r.probs , r.trembles , select , min.strategies , crit , se , outer.runs , outer.tol , outer.max , inner.runs , inner.tol , inner.max , lcr.runs , lcr.tol , lcr.max , bs.samples , step.size , penalty , verbose , quantiles ){
  # Validates the tuning arguments of stratEst and translates the character
  # options 'sample.specific' and 'select' into logical flags.
  #
  # Returns a named list with the selection flags, the sample-specific flags,
  # the quantile vector and the verbosity settings. Stops with an informative
  # error message on any invalid input.
  #
  # Note: the original quantiles class-check raised an error message that
  # wrongly referred to 'print.summary'; this is fixed below.

  # helper: stop unless value is a non-negative whole number
  check_count <- function(value, what, default) {
    if (value < 0 || value %% 1 != 0) {
      stop(paste0("stratEst error: The ", what, " must be a positive integer. Default is ", default, "."))
    }
  }
  # helper: stop unless value is a tolerance in [0, 1)
  check_tol <- function(value, what) {
    if (value < 0 || value >= 1) {
      stop(paste0("stratEst error: The tolerance of the ", what, " runs must be a small numeric value. Default is 0."))
    }
  }

  # check response
  if (!(response %in% c("mixed", "pure"))) {
    stop("stratEst error: The input object 'response' has to be one of the following: \"mixed\" or \"pure\". Default is \"mixed\".")
  }

  # check sample.specific and derive the sample-specific flags
  specific_shares <- FALSE
  specific_probs <- FALSE
  specific_trembles <- FALSE
  specific_coefficients <- FALSE
  if (!is.null(sample.specific)) {
    if (!("character" %in% class(sample.specific))) {
      stop("stratEst error: The input object 'sample.specific' has to be a character vector.")
    }
    if (any(!(sample.specific %in% c("shares", "probs", "trembles", "coefficients")))) {
      stop("stratEst error: The input object 'sample.specific' should only contain the following characters: \"shares\", \"probs\", \"trembles\" or \"coefficients\".")
    }
    specific_shares <- "shares" %in% sample.specific
    specific_probs <- "probs" %in% sample.specific
    specific_trembles <- "trembles" %in% sample.specific
    specific_coefficients <- "coefficients" %in% sample.specific
  }

  # check r.probs
  if (!(r.probs %in% c("no", "strategies", "states", "global"))) {
    stop("stratEst error: The input object 'r.probs' has to be one of the following: \"no\", \"strategies\", \"states\" or \"global\". Default is \"no\".")
  }
  # check r.trembles
  if (!(r.trembles %in% c("no", "strategies", "states", "global"))) {
    stop("stratEst error: The input object 'r.trembles' has to be one of the following: \"no\", \"strategies\", \"states\" or \"global\". Default is \"no\".")
  }

  # check select and derive the selection flags
  select_strategies <- FALSE
  select_probs <- FALSE
  select_trembles <- FALSE
  if (!is.null(select)) {
    if (!("character" %in% class(select))) {
      stop("stratEst error: The input object 'select' has to be a character vector.")
    }
    if (any(!(select %in% c("probs", "trembles", "strategies")))) {
      stop("stratEst error: The input object 'select' should only contain the following characters: \"strategies\", \"probs\" or \"trembles\".")
    }
    select_strategies <- "strategies" %in% select
    select_probs <- "probs" %in% select
    select_trembles <- "trembles" %in% select
  }

  # check min.strategies
  if (min.strategies < 1 || min.strategies %% 1 != 0) {
    stop("stratEst error: The minimum number of strategies must be a positive integer. Default is 1.")
  }
  # check crit
  if (!(crit %in% c("aic", "bic", "icl"))) {
    stop("stratEst error: The input object 'crit' has to be one of the following: \"aic\", \"bic\", or \"icl\". Default is \"bic\".")
  }
  # check se
  if (!(se %in% c("analytic", "bootstrap"))) {
    stop("stratEst error: The input object 'se' has to be one of the following: \"analytic\", or \"bootstrap\". Default is \"analytic\".")
  }

  # check the run counts and iteration limits
  check_count(outer.runs, "number of outer runs", 100)
  check_count(inner.runs, "number of inner runs", 100)
  check_count(lcr.runs, "number of lcr runs", 100)
  check_count(outer.max, "maximum of the number function evaluations of the outer runs", 1000)
  check_count(inner.max, "maximum of the number function evaluations of the inner runs", 100)
  check_count(lcr.max, "maximum of the number function evaluations of the lcr runs", 1000)

  # check the tolerances
  check_tol(outer.tol, "outer")
  check_tol(inner.tol, "inner")
  check_tol(lcr.tol, "lcr")

  # check bs.samples
  check_count(bs.samples, "number of bootstrap samples specified by the argument 'bs.samples'", 1000)
  # check step.size
  if (step.size < 0) {
    stop("stratEst error: The step size specified by the argument 'step.size' must be a positive number. Default is 1.")
  }
  # check penalty
  if (!is.logical(penalty)) {
    stop("stratEst error: The function argument 'penalty' must be boolean. Default is FALSE.")
  }

  # check verbose and derive the verbosity settings
  if (!("logical" %in% class(verbose))) {
    stop("stratEst error: The input argument 'verbose' must be a logical.")
  }
  print.messages <- verbose[1]
  print.summary <- FALSE

  # check quantiles
  if (!("numeric" %in% class(quantiles))) {
    stop("stratEst error: The input argument 'quantiles' must be a numeric vector.")
  }
  if (any(quantiles > 1) || any(quantiles < 0)) {
    stop("stratEst error: The elements of the input argument 'quantiles' must be numeric values between zero and one.")
  }

  list(
    "select.strategies" = select_strategies,
    "select.responses" = select_probs,
    "select.trembles" = select_trembles,
    "specific.shares" = specific_shares,
    "specific.responses" = specific_probs,
    "specific.trembles" = specific_trembles,
    "specific.coefficients" = specific_coefficients,
    "quantile.vec" = quantiles,
    "print.messages" = print.messages,
    "print.summary" = print.summary
  )
}
|
# Experimenting with SDM Bayes
library(mgcv)
library(tidyverse)
library(gridExtra)
func.path<- "./Code/"
source(paste(func.path, "sdm_bayesianupdating_func.R", sep = ""))
#############
### Andy's examples
# Grid over the predictor (link-scale) axis plus baseline and future densities
x.vec<- seq(from = -5, to = 5, length.out = 500)
base<- dnorm(x.vec, mean = 0, sd = 1)
fut<- dnorm(x.vec, mean = 1, sd = 1)
# Sweep the future mean and record the posterior mean returned by SDMbayes
fut.means<- seq(-3, 3, length.out = 20)
means<- rep(NA, length(fut.means))
sds<- rep(NA, length(fut.means))
for(i in 1:length(fut.means)){
  # NEVA votes held fixed: directional (0 neg, 3 neut, 9 pos),
  # vulnerability (0 low, 0 mod, 3 high, 9 very high)
  out<- SDMbayes(x.vec, 0, 1, fut.means[i], 1, 0, 3, 9, 0, 0, 3, 9)
  # posterior mean as the density-weighted average over the grid
  means[i]<- x.vec%*%out/sum(out)
  #sds[i]<- sd(out)
  print(fut.means[i])
}
plot(fut.means, means)
# Four voting scenarios applied to the same baseline/future pair;
# sdm.bayes4 flips the directional votes to mostly negative
sdm.bayes1<- SDMbayes(x.vec, 0, 1, 1, 1, 0, 3, 9, 0, 0, 3, 9)
sdm.bayes2<- SDMbayes(x.vec, 0, 1, 1, 1, 0, 3, 9, 0, 2, 5, 5)
sdm.bayes3<- SDMbayes(x.vec, 0, 1, 1, 1, 0, 3, 9, 0, 4, 4, 4)
sdm.bayes4<- SDMbayes(x.vec, 0, 1, 1, 1, 9, 3, 0, 4, 4, 4, 0)
# Long-format data so all six curves can be drawn in one ggplot
plot.dat<- data.frame("Sample" = c(rep("SDM.Base", length(base)), rep("SDM.Future", length(fut)), rep("Pos.Vh", length(sdm.bayes1)), rep("Pos.H", length(sdm.bayes2)), rep("Pos.M", length(sdm.bayes3)), rep("Neg.M", length(sdm.bayes4))), "X" = rep(x.vec, 6), "Value" = c(base, fut, sdm.bayes1, sdm.bayes2, sdm.bayes3, sdm.bayes4))
plot.dat$Sample<- factor(plot.dat$Sample, levels = c("SDM.Base", "SDM.Future", "Pos.Vh", "Pos.H", "Pos.M", "Neg.M"))
out.plot<- ggplot(plot.dat, aes(x = X, y = Value, group = Sample)) +
  geom_line(aes(color = Sample), alpha = 0.75) +
  scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33'), labels = c("SDM.Base", "SDM.Future", "Pos.Vh", "Pos.H", "Pos.M", "Neg.M")) +
  theme_bw()
out.plot
## This seems promising and is the same thing we are getting in Matlab with Andy's original code, so the translation seems to have worked.
###############
### Function behavior...assessing how the function behaves at limits (i.e., going from a bmn of 0 to fmn of 1, or bmn of 1 to fmn of 0) across different voting situations. Note:: This change (0-1 and 1-0) is interesting on the RESPONSE scale, but we are actually working on the link scale...to get something equivalent, we could use -5 and 3? Why?
logit_func <- function(x) {
  # Inverse-logit (expit) transform mapping the link scale to (0, 1);
  # despite the name, this is the inverse of the logit.
  # Computed as 1 / (1 + exp(-x)) instead of exp(x) / (1 + exp(x)): the two
  # are algebraically identical, but the latter overflows to Inf / Inf = NaN
  # for large positive x, while this form correctly returns 1.
  1 / (1 + exp(-x))
}
logit_func(-5)
logit_func(3)
## Starting simple, lets loop over a vector of differences between those values
# baseline mean possibilities
bmn<- seq(from = -5, to = 3, by = 0.5)
# future mean possibilities
fmn<- seq(from = 3, to = -5, by = -0.5)
base.fut.poss<- data.frame("Scenario.Base.Fut" = c(rep("Increasing P(Presence)", 8), "No Change", rep("Decreasing P(Presence)", 8)), "Base.Mean" = bmn, "Fut.Mean" = fmn, "Presence.Change" = fmn-bmn)
# What did that just do? -- creates a dataframe ranging in presence changes (base to future) from 8 to -8
base.fut.poss
# Now the loop -- this would mean keeping the directional effect voting and the vulnerability voting constant.
SDMBayes.loop<- vector("list", nrow(base.fut.poss))
# Fixed (neutral direction, low vulnerability) NEVA votes for every scenario
nevaD.neg1<- 0
nevaD.neut1<- 0.1
nevaD.pos1<- 0
nevaV.low1<- 0.1
nevaV.mod1<- 0
nevaV.high1<- 0
nevaV.vhigh1<- 0
# NOTE(review): wider grid (-10..10) than the x.vec (-5..5) defined above
x.vec1<- seq(from = -10, to = 10, length.out = 500)
for(i in 1:nrow(base.fut.poss)){
  SDMBayes.loop[[i]]<- SDMbayes(x.vec1, 0, 1, base.fut.poss$Fut.Mean[i], 1, nevaD.neg1, nevaD.neut1, nevaD.pos1, nevaV.low1, nevaV.mod1, nevaV.high1, nevaV.vhigh1)
  names(SDMBayes.loop)[i]<- paste("Change_", base.fut.poss$Presence.Change[i], sep = "")
}
# Mean from each?
test_fun <- function(x, x.vec.use = x.vec){
  # Density-weighted mean of the grid x.vec.use with weights x.
  # Kept as %*% so the result stays a 1x1 matrix, matching the original.
  total_weight <- sum(x)
  (x %*% x.vec.use) / total_weight
}
# Summarize each SDMbayes fit by its weighted mean on the link scale
SDMBayes.loop.means<- data.frame("Presence.Change" = base.fut.poss$Presence.Change, "Mean" = unlist(lapply(SDMBayes.loop, test_fun)))
SDMBayes.loop.means
# Compare the 1:1 SDM projection against the NEVA-updated means on both
# link and response scales (ilink is the model family's inverse link)
plot.dat<- data.frame("Sample" = c(rep("SDM.1v1", 17), rep("SDM.NEVA", 17)), "Xaxis.Link" = rep(base.fut.poss$Fut.Mean, 2), "Xaxis.Resp" = rep(base.fut.poss$Fut.Mean, 2), "LinkValue" = c(base.fut.poss$Fut.Mean, SDMBayes.loop.means$Mean), "RespValue" = c(ilink(base.fut.poss$Fut.Mean), ilink(SDMBayes.loop.means$Mean)))
plot.dat$Sample<- factor(plot.dat$Sample, levels = c("SDM.1v1", "SDM.NEVA"))
link.plot<- ggplot(plot.dat, aes(x = Xaxis.Link, y = LinkValue, group = Sample)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8'), labels = c("SDM.1v1", "SDM.NEVA")) +
ggtitle("LinkScale") +
theme_bw()
resp.plot<- ggplot(plot.dat, aes(x = Xaxis.Resp, y = RespValue, group = Sample)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8'), labels = c("SDM.1v1", "SDM.NEVA")) +
ggtitle("ResponseScale") +
theme_bw()
grid.arrange(link.plot, resp.plot, ncol = 2)
# Now, create some possibilities for the Directional effect votes and the Vulnerability votes
# nevaD has 12 possible votes...
nevaD.poss<- data.frame("Scenario.Dir" = c("Neg.Vh", "Neg.M", "Neg.L", "Neut.Vh", "Neut.M", "Neut.L", "Pos.Vh", "Pos.M", "Pos.L"), "Negative" = c(12, 9, 6, 0, 2, 3, 0, 0, 3), "Neutral" = c(0, 3, 3, 12, 8, 6, 0, 3, 3), "Positive" = c(0, 0, 3, 0, 2, 3, 12, 9, 6))
nevaD.poss
#nevaV (considering 24 possible votes)
nevaV.poss<- data.frame("Scenario.Vuln" = c("Low.Vh", "Low.M", "Low.L", "Mod.Vh", "Mod.M", "Mod.L", "High.Vh", "High.M", "High.L", "VHigh.Vh", "VHigh.M", "VHigh.L"), "Low" = c(24, 20, 10, 0, 3, 6, 0, 0, 2, 0, 0, 2), "Mod" = c(0, 4, 6, 24, 18, 10, 0, 3, 6, 0, 0, 6), "High" = c(0, 0, 6, 0, 3, 6, 24, 18, 10, 0, 4, 6) , "VHigh" = c(0, 0, 2, 0, 0, 2, 0, 3, 6, 24, 20, 10))
nevaV.poss
# Expand these three things: presence change, directional effect voting, vulnerability voting -- ALL combinations
scen.combo<- expand.grid("Scenario.Dir" = nevaD.poss$Scenario.Dir, "Scenario.Vuln" = nevaV.poss$Scenario.Vuln, "Presence.Change" = base.fut.poss$Presence.Change)
scen.combo<- scen.combo %>%
left_join(., nevaD.poss, by = "Scenario.Dir") %>%
left_join(., nevaV.poss, by = "Scenario.Vuln") %>%
left_join(., base.fut.poss, by = "Presence.Change")
View(scen.combo)
# Map SDMBayes function to each voting line
x.vec<- seq(from = -10, to = 10, length.out = 500)
# Each row of scen.combo carries a full set of SDMbayes arguments; pmap
# runs the function once per row, storing the result as a list-column
scen.combo<- scen.combo %>%
mutate(., "SDMBayes" = pmap(list(x = list(x.vec), bmn = Base.Mean, bsd = list(1), fmn = Fut.Mean, fsd = list(1), nevaD.neg = Negative, nevaD.neut = Neutral, nevaD.pos = Positive, nevaV.low = Low, nevaV.mod = Mod, nevaV.high = High, nevaV.vhigh = VHigh), SDMbayes))
# What the heck did that do?? - Ran our SDMbayes function on each line of scen.combo, as each line has all the necessary arguments to run the SDMbayes function (think of it as an input file).
# Just a quick check -- pull out the third row. Use those inputs and run SDMbayes outside of map. Plot results together.
check.input<- data.frame(scen.combo[3,-14])
x.vec<- seq(from = -10, to = 10, length.out = 500)
base<- dnorm(x.vec, mean = check.input$Base.Mean, sd = 1)
fut<- dnorm(x.vec, mean = check.input$Fut.Mean, sd = 1)
sdm.bayes.man<- SDMbayes(x.vec, check.input$Base.Mean, 1, check.input$Fut.Mean, 1, check.input$Negative, check.input$Neutral, check.input$Positive, check.input$Low, check.input$Mod, check.input$High, check.input$VHigh)
sdm.bayes.map<- scen.combo$SDMBayes[[3]]
plot.dat<- data.frame("Sample" = c(rep("SDM.Base", length(base)), rep("SDM.Future", length(fut)), rep("Manual", length(sdm.bayes.man)), rep("Mapped", length(sdm.bayes.map))), "X" = rep(x.vec, 4), "Value" = c(base, fut, sdm.bayes.man, sdm.bayes.map))
plot.dat$Sample<- factor(plot.dat$Sample, levels = c("SDM.Base", "SDM.Future", "Manual", "Mapped"))
out.plot<- ggplot(plot.dat, aes(x = X, y = Value, group = Sample)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8','#4daf4a','#984ea3'), labels = c("SDM.Base", "SDM.Future", "Manual", "Mapped")) +
theme_bw()
out.plot
## So, the mapping seems to be working. The plot is a bit weird --- maybe because of the link vs. response scale issue?
# Now, for the plot Andy was envisioning...I think we want to pull out the no change scenarios...and then subtract each of the SDMBayes fits from this no change scenario. This would basically give us the influence of the directional effect/vulnerability rank?
scen.combo.base<- scen.combo %>%
dplyr::filter(., Presence.Change == 0.0) %>%
mutate(., "Merge.Col" = paste(Scenario.Dir, "_", Scenario.Vuln, sep = "")) %>%
dplyr::select(., SDMBayes, Merge.Col)
names(scen.combo.base)[1]<- "SDMBayes.Base"
# Now, let's add that new "SDMBayes.Base" column back in and this will allow us to subtract the Base from each scenario?
scen.combo<- scen.combo %>%
dplyr::filter(., Presence.Change != 0.0) %>%
mutate(., "Merge.Col" = paste(Scenario.Dir, "_", Scenario.Vuln, sep = "")) %>%
left_join(., scen.combo.base, by = "Merge.Col") %>%
as_tibble()
# Write a difference function to map to each row
# Element-wise difference between a scenario curve and its baseline
# curve; returns New - Base (same length as the inputs).
diff_func<- function(New, Base) New - Base
# Apply it
# Scenario-minus-baseline curves, one list element per row
scen.combo<- scen.combo %>%
mutate(., "SDMBayes.Diff" = map2(SDMBayes, SDMBayes.Base, diff_func))
## Visualizing...
# Select a directional effect and vulnerability scenario to visualize. Note -- I actually don't think this is what we want, really we want difference in the pdfs??
temp<- scen.combo %>%
dplyr::filter(Scenario.Dir == "Pos.Vh" & Scenario.Vuln == "Low.Vh") %>%
mutate(., mean.diff = map(SDMBayes.Diff, mean),
sd = map(SDMBayes.Diff, sd)) %>%
dplyr::select(., -SDMBayes, -SDMBayes.Base, -SDMBayes.Diff) %>%
unnest() %>%
data.frame()
p<- ggplot(temp, aes(x=Presence.Change, y=mean.diff)) +
geom_point(stat="identity", color="black",
position=position_dodge()) +
geom_errorbar(aes(ymin=mean.diff-sd, ymax=mean.diff+sd), width=.2,
position=position_dodge(.9))
p
# Nope...not good...difference in pdfs? mean(a-b) = meana - meanb, var(a-b) = var(a) + var(b)
# Mean or SD of the difference of two (assumed independent) samples:
#   mean(a - b) = mean(a) - mean(b)
#   var(a - b)  = var(a) + var(b)
#
# @param New,Base Numeric vectors (values from the two distributions).
# @param Stat Either "mean" (difference of means) or "sd" (sd of the
#   difference, assuming independence).
# @return A single numeric value.
norm_diff_func<- function(New, Base, Stat){
  if(Stat == "mean"){
    return(mean(New) - mean(Base))
  }
  if(Stat == "sd"){
    # Variances add for a difference of independent random variables
    return(sqrt(var(New) + var(Base)))
  }
  # Previously an unrecognized Stat silently returned NULL; fail loudly
  stop("Stat must be either 'mean' or 'sd'", call. = FALSE)
}
# Try that
# pmap passes matching list elements of New/Base (plus the fixed Stat)
# to norm_diff_func, row by row
temp<- scen.combo %>%
dplyr::filter(Scenario.Dir == "Pos.Vh" & Scenario.Vuln == "Low.Vh") %>%
mutate(., mean.diff.pdf = pmap(list(New = SDMBayes, Base = SDMBayes.Base, Stat = "mean"), norm_diff_func),
sd.pdf = pmap(list(New = SDMBayes, Base = SDMBayes.Base, Stat = "sd"), norm_diff_func)) %>%
dplyr::select(., -SDMBayes, -SDMBayes.Base, -SDMBayes.Diff) %>%
unnest() %>%
data.frame()
p<- ggplot(temp, aes(x=Presence.Change, y=mean.diff.pdf)) +
geom_point(stat="identity", color="black",
position=position_dodge()) +
geom_errorbar(aes(ymin=mean.diff.pdf-sd.pdf, ymax=mean.diff.pdf+sd.pdf), width=.2,
position=position_dodge(.9))
p
########
## What about with real observations?
# Load in our model fits to get bmn and bsd
all.dat<- readRDS("./Data/sdm.projections.SST_01172018.rds")
# Pull out a row
temp.dat<- all.dat[1,]
# Inverse link from the fitted model's family (link -> response scale)
ilink <- family(all.dat$Model.Fitted[[1]])$linkinv
# Draw n.samps from normal distribution with mean = pred.dat mu
# and sd = pred.dat se.
# baseline
base.vec.link<- rnorm(500, mean = temp.dat$Projections[[1]]$Baseline[1], sd = temp.dat$Projections.p.se[[1]]$Baseline[1])
base.vec<- ilink(base.vec.link)
# future
fut.vec.link<- rnorm(500, mean = temp.dat$Projections[[1]]$`2055`[1], sd = temp.dat$Projections.p.se[[1]]$`2055`[1])
fut.vec<- ilink(fut.vec.link)
# Propose x.vector values (link scale, +/- 5SD from the mean)
x.vec<- seq(from = temp.dat$Projections[[1]]$Baseline[1] + 5*temp.dat$Projections.p.se[[1]]$Baseline[1], to = temp.dat$Projections[[1]]$Baseline[1] - 5*temp.dat$Projections.p.se[[1]]$Baseline[1], length.out = 500)
base.r<- ilink(dnorm(x.vec, mean = temp.dat$Projections[[1]]$Baseline[1], sd = temp.dat$Projections.p.se[[1]]$Baseline[1]))
fut.r<- ilink(dnorm(x.vec, mean = temp.dat$Projections[[1]]$`2055`[1], sd = temp.dat$Projections.p.se[[1]]$`2055`[1]))
bmn = temp.dat$Projections[[1]]$Baseline[1]
bsd = temp.dat$Projections.p.se[[1]]$Baseline[1]
fmn = temp.dat$Projections[[1]]$`2055`[1]
fsd = temp.dat$Projections.p.se[[1]]$`2055`[1]
nevaD = c(9, 3, 0)
nevaV = c(90, 10, 0, 0)
# Four voting scenarios, results mapped back to the response scale
sdm.bayes1.r<- ilink(SDMbayes(x.vec, bmn, bsd, fmn, fsd, 0, 3, 9, 0, 1, 10, 20))
sdm.bayes2.r<- ilink(SDMbayes(x.vec, bmn, bsd, fmn, fsd, 0, 3, 9, 0, 1, 20, 10))
sdm.bayes3.r<- ilink(SDMbayes(x.vec, bmn, bsd, fmn, fsd, 0, 3, 9, 0, 6, 15, 10))
sdm.bayes4.r<- ilink(SDMbayes(x.vec, bmn, bsd, fmn, fsd, 9, 3, 0, 0, 6, 15, 10))
plot.dat<- data.frame("Sample" = c(rep("SDM.Base", length(base.r)), rep("SDM.Future", length(fut.r)), rep("Pos.Vh", length(sdm.bayes1.r)), rep("Pos.H", length(sdm.bayes2.r)), rep("Pos.M", length(sdm.bayes3.r)), rep("Neg.M", length(sdm.bayes4.r))), "X" = rep(ilink(x.vec), 6), "Value" = c(base.r, fut.r, sdm.bayes1.r, sdm.bayes2.r, sdm.bayes3.r, sdm.bayes4.r))
plot.dat$Sample<- factor(plot.dat$Sample, levels = c("SDM.Base", "SDM.Future", "Pos.Vh", "Pos.H", "Pos.M", "Neg.M"))
out.plot<- ggplot(plot.dat, aes(x = X, y = Value, group = Sample)) +
ylim(c(0,1)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33'), labels = c("SDM.Base", "SDM.Future", "Pos.Vh", "Pos.H", "Pos.M", "Neg.M")) +
theme_bw()
out.plot
# NICE! Why are we at a minimum of 0.5 on the y axis??
# Now, how do things work when we have a spatial gradient in species increasing and decreasing across different votes. Imagine two pixels, one shifted up and one shifted down, and then how these respond to different voting ---
# Good example of species P(presence) increasing or decreasing? Longfin squid southern NS shelf increase
temp.dat<- all.dat[75,]
diff<- ilink(temp.dat$Projections[[1]]$`2055`) - ilink(temp.dat$Projections[[1]]$`Baseline`)
diff.max.ind<- which.max(diff) # Row 87
diff.max<- diff[diff.max.ind]
bmn.inc = temp.dat$Projections[[1]]$Baseline[diff.max.ind]
bsd.inc = temp.dat$Projections.p.se[[1]]$Baseline[diff.max.ind]
fmn.inc = temp.dat$Projections[[1]]$`2055`[diff.max.ind]
fsd.inc = temp.dat$Projections.p.se[[1]]$`2055`[diff.max.ind]
# nevaD has 12 possible votes...
nevaD.poss<- data.frame("Scenario.Dir" = c("Neg.Vh", "Neg.M", "Neg.L", "Neut.Vh", "Neut.M", "Neut.L", "Pos.Vh", "Pos.M", "Pos.L"), "Negative" = c(12, 9, 6, 0, 2, 3, 0, 0, 3), "Neutral" = c(0, 3, 3, 12, 8, 6, 0, 3, 3), "Positive" = c(0, 0, 3, 0, 2, 3, 12, 9, 6))
#nevaV has 230 possible votes
nevaV.poss<- data.frame("Scenario.Vuln" = c("Low.Vh", "Low.M", "Low.L", "Mod.Vh", "Mod.M", "Mod.L", "High.Vh", "High.M", "High.L", "VHigh.Vh", "VHigh.M", "VHigh.L"), "Low" = c(24, 20, 10, 0, 3, 6, 0, 0, 2, 0, 0, 2), "Mod" = c(0, 4, 6, 24, 18, 10, 0, 3, 6, 0, 0, 6), "High" = c(0, 0, 6, 0, 3, 6, 24, 18, 10, 0, 4, 6) , "VHigh" = c(0, 0, 2, 0, 0, 2, 0, 3, 6, 24, 20, 10))
# Table of all possible combinations...
scen.combo<- expand.grid("Scenario.Dir" = nevaD.poss$Scenario.Dir, "Scenario.Vuln" = nevaV.poss$Scenario.Vuln)
scen.combo<- scen.combo %>%
left_join(., nevaD.poss, by = "Scenario.Dir") %>%
left_join(., nevaV.poss, by = "Scenario.Vuln") %>%
as_tibble()
# Map SDMBayes function to each line...a cell going from absent to present
x.vec<- seq(from = -6, to = 4, length.out = 500)
# NOTE(review): these overwrite the data-derived bmn.inc/bsd.inc/... above
# with hard-coded values -- presumably deliberate for the toy experiment
bmn.inc<- -5
bsd.inc<- 0.2
fmn.inc<- 3
fsd.inc<- 0.2
scen.combo<- scen.combo %>%
mutate(., "SDMBayes.Inc" = pmap(list(x = list(x.vec), bmn = list(bmn.inc), bsd = list(bsd.inc), fmn = list(fmn.inc), fsd = list(fsd.inc), nevaD.neg = Negative, nevaD.neut = Neutral, nevaD.pos = Positive, nevaV.low = Low, nevaV.mod = Mod, nevaV.high = High, nevaV.vhigh = VHigh), SDMbayes))
# Findings --- MOSTLY NAs
# Map SDMBayes function to each line...a cell with no change (1:1)
x.vec<- seq(from = -6, to = 4, length.out = 500)
bmn.neut<- 3
bsd.neut<- 0.2
fmn.neut<- 3
fsd.neut<- 0.2
scen.combo<- scen.combo %>%
mutate(., "SDMBayes.Neut" = pmap(list(x = list(x.vec), bmn = list(bmn.neut), bsd = list(bsd.neut), fmn = list(fmn.neut), fsd = list(fsd.neut), nevaD.neg = Negative, nevaD.neut = Neutral, nevaD.pos = Positive, nevaV.low = Low, nevaV.mod = Mod, nevaV.high = High, nevaV.vhigh = VHigh), SDMbayes))
# All fine?
plot(ilink(x.vec), ilink(scen.combo$SDMBayes.Neut[[1]]))
plot(ilink(x.vec), ilink(scen.combo$SDMBayes.Neut[[88]]))
# Map SDMBayes function to each line...a cell going from present to absent
x.vec<- seq(from = -6, to = 4, length.out = 500)
bmn.neg<- 3
bsd.neg<- 0.2
fmn.neg<- -5
fsd.neg<- 0.2
scen.combo<- scen.combo %>%
mutate(., "SDMBayes.Neg" = pmap(list(x = list(x.vec), bmn = list(bmn.neg), bsd = list(bsd.neg), fmn = list(fmn.neg), fsd = list(fsd.neg), nevaD.neg = Negative, nevaD.neut = Neutral, nevaD.pos = Positive, nevaV.low = Low, nevaV.mod = Mod, nevaV.high = High, nevaV.vhigh = VHigh), SDMbayes))
# Findings --- cannot have a large increase with a negative species (vh certainty) and low vuln (vh certainty)
plot(ilink(x.vec), ilink(scen.combo$SDMBayes.Neg[[1]]))
plot(ilink(x.vec), ilink(scen.combo$SDMBayes.Neg[[88]]))
| /Code/Random/sdm_bayesexperimenting.R | no_license | aallyn/COCA | R | false | false | 16,999 | r | # Experimenting with SDM Bayes
library(mgcv)
library(tidyverse)
library(gridExtra)
func.path<- "./Code/"
source(paste(func.path, "sdm_bayesianupdating_func.R", sep = ""))
#############
### Andy's examples
x.vec<- seq(from = -5, to = 5, length.out = 500)
base<- dnorm(x.vec, mean = 0, sd = 1)
fut<- dnorm(x.vec, mean = 1, sd = 1)
fut.means<- seq(-3, 3, length.out = 20)
means<- rep(NA, length(fut.means))
sds<- rep(NA, length(fut.means))
for(i in 1:length(fut.means)){
out<- SDMbayes(x.vec, 0, 1, fut.means[i], 1, 0, 3, 9, 0, 0, 3, 9)
means[i]<- x.vec%*%out/sum(out)
#sds[i]<- sd(out)
print(fut.means[i])
}
plot(fut.means, means)
sdm.bayes1<- SDMbayes(x.vec, 0, 1, 1, 1, 0, 3, 9, 0, 0, 3, 9)
sdm.bayes2<- SDMbayes(x.vec, 0, 1, 1, 1, 0, 3, 9, 0, 2, 5, 5)
sdm.bayes3<- SDMbayes(x.vec, 0, 1, 1, 1, 0, 3, 9, 0, 4, 4, 4)
sdm.bayes4<- SDMbayes(x.vec, 0, 1, 1, 1, 9, 3, 0, 4, 4, 4, 0)
plot.dat<- data.frame("Sample" = c(rep("SDM.Base", length(base)), rep("SDM.Future", length(fut)), rep("Pos.Vh", length(sdm.bayes1)), rep("Pos.H", length(sdm.bayes2)), rep("Pos.M", length(sdm.bayes3)), rep("Neg.M", length(sdm.bayes4))), "X" = rep(x.vec, 6), "Value" = c(base, fut, sdm.bayes1, sdm.bayes2, sdm.bayes3, sdm.bayes4))
plot.dat$Sample<- factor(plot.dat$Sample, levels = c("SDM.Base", "SDM.Future", "Pos.Vh", "Pos.H", "Pos.M", "Neg.M"))
out.plot<- ggplot(plot.dat, aes(x = X, y = Value, group = Sample)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33'), labels = c("SDM.Base", "SDM.Future", "Pos.Vh", "Pos.H", "Pos.M", "Neg.M")) +
theme_bw()
out.plot
## This seems promising and is the same thing we are getting in Matlab with Andy's original code, so the translation seems to have worked.
###############
### Function behavior...assessing how the function behaves at limits (i.e., going from a bmn of 0 to fmn of 1, or bmn of 1 to fmn of 0) across different voting situations. Note:: This change (0-1 and 1-0) is interesting on the RESPONSE scale, but we are actually working on the link scale...to get something equivalent, we could use -5 and 3? Why?
# Inverse-logit (logistic) transform: maps link-scale values to (0, 1).
# stats::plogis() computes exp(x)/(1 + exp(x)) in a numerically stable
# way -- the naive form returns NaN for x > ~709 because exp(x) overflows.
logit_func<- function(x) {
stats::plogis(x)
}
# Sanity-check the link-scale endpoints: ilogit(-5) ~ 0.0067, ilogit(3) ~ 0.95
logit_func(-5)
logit_func(3)
## Starting simple, lets loop over a vector of differences between those values
# baseline mean possibilities
bmn<- seq(from = -5, to = 3, by = 0.5)
# future mean possibilities
fmn<- seq(from = 3, to = -5, by = -0.5)
base.fut.poss<- data.frame("Scenario.Base.Fut" = c(rep("Increasing P(Presence)", 8), "No Change", rep("Decreasing P(Presence)", 8)), "Base.Mean" = bmn, "Fut.Mean" = fmn, "Presence.Change" = fmn-bmn)
# What did that just do? -- creates a dataframe ranging in presence changes (base to future) from 8 to -8
base.fut.poss
# Now the loop -- this would mean keeping the directional effect voting and the vulnerability voting constant.
SDMBayes.loop<- vector("list", nrow(base.fut.poss))
# Fixed vote counts for this run (mostly zero; neutral/low held small)
nevaD.neg1<- 0
nevaD.neut1<- 0.1
nevaD.pos1<- 0
nevaV.low1<- 0.1
nevaV.mod1<- 0
nevaV.high1<- 0
nevaV.vhigh1<- 0
x.vec1<- seq(from = -10, to = 10, length.out = 500)
for(i in 1:nrow(base.fut.poss)){
SDMBayes.loop[[i]]<- SDMbayes(x.vec1, 0, 1, base.fut.poss$Fut.Mean[i], 1, nevaD.neg1, nevaD.neut1, nevaD.pos1, nevaV.low1, nevaV.mod1, nevaV.high1, nevaV.vhigh1)
names(SDMBayes.loop)[i]<- paste("Change_", base.fut.poss$Presence.Change[i], sep = "")
}
# Mean from each?
# Weighted mean of x.vec.use, weighting by the density values in x
# (matrix product divided by the total weight; yields a 1x1 matrix).
test_fun<- function(x, x.vec.use = x.vec){
  total.weight<- sum(x)
  (x %*% x.vec.use) / total.weight
}
# Summarize each SDMbayes fit by its weighted mean on the link scale
SDMBayes.loop.means<- data.frame("Presence.Change" = base.fut.poss$Presence.Change, "Mean" = unlist(lapply(SDMBayes.loop, test_fun)))
SDMBayes.loop.means
# Compare the 1:1 SDM projection against the NEVA-updated means on both
# link and response scales (ilink is the model family's inverse link)
plot.dat<- data.frame("Sample" = c(rep("SDM.1v1", 17), rep("SDM.NEVA", 17)), "Xaxis.Link" = rep(base.fut.poss$Fut.Mean, 2), "Xaxis.Resp" = rep(base.fut.poss$Fut.Mean, 2), "LinkValue" = c(base.fut.poss$Fut.Mean, SDMBayes.loop.means$Mean), "RespValue" = c(ilink(base.fut.poss$Fut.Mean), ilink(SDMBayes.loop.means$Mean)))
plot.dat$Sample<- factor(plot.dat$Sample, levels = c("SDM.1v1", "SDM.NEVA"))
link.plot<- ggplot(plot.dat, aes(x = Xaxis.Link, y = LinkValue, group = Sample)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8'), labels = c("SDM.1v1", "SDM.NEVA")) +
ggtitle("LinkScale") +
theme_bw()
resp.plot<- ggplot(plot.dat, aes(x = Xaxis.Resp, y = RespValue, group = Sample)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8'), labels = c("SDM.1v1", "SDM.NEVA")) +
ggtitle("ResponseScale") +
theme_bw()
grid.arrange(link.plot, resp.plot, ncol = 2)
# Now, create some possibilities for the Directional effect votes and the Vulnerability votes
# nevaD has 12 possible votes...
nevaD.poss<- data.frame("Scenario.Dir" = c("Neg.Vh", "Neg.M", "Neg.L", "Neut.Vh", "Neut.M", "Neut.L", "Pos.Vh", "Pos.M", "Pos.L"), "Negative" = c(12, 9, 6, 0, 2, 3, 0, 0, 3), "Neutral" = c(0, 3, 3, 12, 8, 6, 0, 3, 3), "Positive" = c(0, 0, 3, 0, 2, 3, 12, 9, 6))
nevaD.poss
#nevaV (considering 24 possible votes)
nevaV.poss<- data.frame("Scenario.Vuln" = c("Low.Vh", "Low.M", "Low.L", "Mod.Vh", "Mod.M", "Mod.L", "High.Vh", "High.M", "High.L", "VHigh.Vh", "VHigh.M", "VHigh.L"), "Low" = c(24, 20, 10, 0, 3, 6, 0, 0, 2, 0, 0, 2), "Mod" = c(0, 4, 6, 24, 18, 10, 0, 3, 6, 0, 0, 6), "High" = c(0, 0, 6, 0, 3, 6, 24, 18, 10, 0, 4, 6) , "VHigh" = c(0, 0, 2, 0, 0, 2, 0, 3, 6, 24, 20, 10))
nevaV.poss
# Expand these three things: presence change, directional effect voting, vulnerability voting -- ALL combinations
scen.combo<- expand.grid("Scenario.Dir" = nevaD.poss$Scenario.Dir, "Scenario.Vuln" = nevaV.poss$Scenario.Vuln, "Presence.Change" = base.fut.poss$Presence.Change)
scen.combo<- scen.combo %>%
left_join(., nevaD.poss, by = "Scenario.Dir") %>%
left_join(., nevaV.poss, by = "Scenario.Vuln") %>%
left_join(., base.fut.poss, by = "Presence.Change")
View(scen.combo)
# Map SDMBayes function to each voting line
x.vec<- seq(from = -10, to = 10, length.out = 500)
# Each row of scen.combo carries a full set of SDMbayes arguments; pmap
# runs the function once per row, storing the result as a list-column
scen.combo<- scen.combo %>%
mutate(., "SDMBayes" = pmap(list(x = list(x.vec), bmn = Base.Mean, bsd = list(1), fmn = Fut.Mean, fsd = list(1), nevaD.neg = Negative, nevaD.neut = Neutral, nevaD.pos = Positive, nevaV.low = Low, nevaV.mod = Mod, nevaV.high = High, nevaV.vhigh = VHigh), SDMbayes))
# What the heck did that do?? - Ran our SDMbayes function on each line of scen.combo, as each line has all the necessary arguments to run the SDMbayes function (think of it as an input file).
# Just a quick check -- pull out the third row. Use those inputs and run SDMbayes outside of map. Plot results together.
check.input<- data.frame(scen.combo[3,-14])
x.vec<- seq(from = -10, to = 10, length.out = 500)
base<- dnorm(x.vec, mean = check.input$Base.Mean, sd = 1)
fut<- dnorm(x.vec, mean = check.input$Fut.Mean, sd = 1)
sdm.bayes.man<- SDMbayes(x.vec, check.input$Base.Mean, 1, check.input$Fut.Mean, 1, check.input$Negative, check.input$Neutral, check.input$Positive, check.input$Low, check.input$Mod, check.input$High, check.input$VHigh)
sdm.bayes.map<- scen.combo$SDMBayes[[3]]
plot.dat<- data.frame("Sample" = c(rep("SDM.Base", length(base)), rep("SDM.Future", length(fut)), rep("Manual", length(sdm.bayes.man)), rep("Mapped", length(sdm.bayes.map))), "X" = rep(x.vec, 4), "Value" = c(base, fut, sdm.bayes.man, sdm.bayes.map))
plot.dat$Sample<- factor(plot.dat$Sample, levels = c("SDM.Base", "SDM.Future", "Manual", "Mapped"))
out.plot<- ggplot(plot.dat, aes(x = X, y = Value, group = Sample)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8','#4daf4a','#984ea3'), labels = c("SDM.Base", "SDM.Future", "Manual", "Mapped")) +
theme_bw()
out.plot
## So, the mapping seems to be working. The plot is a bit weird --- maybe because of the link vs. response scale issue?
# Now, for the plot Andy was envisioning...I think we want to pull out the no change scenarios...and then subtract each of the SDMBayes fits from this no change scenario. This would basically give us the influence of the directional effect/vulnerability rank?
scen.combo.base<- scen.combo %>%
dplyr::filter(., Presence.Change == 0.0) %>%
mutate(., "Merge.Col" = paste(Scenario.Dir, "_", Scenario.Vuln, sep = "")) %>%
dplyr::select(., SDMBayes, Merge.Col)
names(scen.combo.base)[1]<- "SDMBayes.Base"
# Now, let's add that new "SDMBayes.Base" column back in and this will allow us to subtract the Base from each scenario?
scen.combo<- scen.combo %>%
dplyr::filter(., Presence.Change != 0.0) %>%
mutate(., "Merge.Col" = paste(Scenario.Dir, "_", Scenario.Vuln, sep = "")) %>%
left_join(., scen.combo.base, by = "Merge.Col") %>%
as_tibble()
# Write a difference function to map to each row
# Subtract the baseline curve from the scenario curve, element-wise.
diff_func<- function(New, Base){
  New - Base
}
# Apply it
# Scenario-minus-baseline curves, one list element per row
scen.combo<- scen.combo %>%
mutate(., "SDMBayes.Diff" = map2(SDMBayes, SDMBayes.Base, diff_func))
## Visualizing...
# Select a directional effect and vulnerability scenario to visualize. Note -- I actually don't think this is what we want, really we want difference in the pdfs??
temp<- scen.combo %>%
dplyr::filter(Scenario.Dir == "Pos.Vh" & Scenario.Vuln == "Low.Vh") %>%
mutate(., mean.diff = map(SDMBayes.Diff, mean),
sd = map(SDMBayes.Diff, sd)) %>%
dplyr::select(., -SDMBayes, -SDMBayes.Base, -SDMBayes.Diff) %>%
unnest() %>%
data.frame()
p<- ggplot(temp, aes(x=Presence.Change, y=mean.diff)) +
geom_point(stat="identity", color="black",
position=position_dodge()) +
geom_errorbar(aes(ymin=mean.diff-sd, ymax=mean.diff+sd), width=.2,
position=position_dodge(.9))
p
# Nope...not good...difference in pdfs? mean(a-b) = meana - meanb, var(a-b) = var(a) + var(b)
# Mean or SD of the difference of two (assumed independent) samples:
#   mean(a - b) = mean(a) - mean(b)
#   var(a - b)  = var(a) + var(b)
#
# @param New,Base Numeric vectors (values from the two distributions).
# @param Stat Either "mean" (difference of means) or "sd" (sd of the
#   difference, assuming independence).
# @return A single numeric value.
norm_diff_func<- function(New, Base, Stat){
  if(Stat == "mean"){
    return(mean(New) - mean(Base))
  }
  if(Stat == "sd"){
    # Variances add for a difference of independent random variables
    return(sqrt(var(New) + var(Base)))
  }
  # Previously an unrecognized Stat silently returned NULL; fail loudly
  stop("Stat must be either 'mean' or 'sd'", call. = FALSE)
}
# Try that
# pmap passes matching list elements of New/Base (plus the fixed Stat)
# to norm_diff_func, row by row
temp<- scen.combo %>%
dplyr::filter(Scenario.Dir == "Pos.Vh" & Scenario.Vuln == "Low.Vh") %>%
mutate(., mean.diff.pdf = pmap(list(New = SDMBayes, Base = SDMBayes.Base, Stat = "mean"), norm_diff_func),
sd.pdf = pmap(list(New = SDMBayes, Base = SDMBayes.Base, Stat = "sd"), norm_diff_func)) %>%
dplyr::select(., -SDMBayes, -SDMBayes.Base, -SDMBayes.Diff) %>%
unnest() %>%
data.frame()
p<- ggplot(temp, aes(x=Presence.Change, y=mean.diff.pdf)) +
geom_point(stat="identity", color="black",
position=position_dodge()) +
geom_errorbar(aes(ymin=mean.diff.pdf-sd.pdf, ymax=mean.diff.pdf+sd.pdf), width=.2,
position=position_dodge(.9))
p
########
## What about with real observations?
# Load in our model fits to get bmn and bsd
all.dat<- readRDS("./Data/sdm.projections.SST_01172018.rds")
# Pull out a row
temp.dat<- all.dat[1,]
# Inverse link from the fitted model's family (link -> response scale)
ilink <- family(all.dat$Model.Fitted[[1]])$linkinv
# Draw n.samps from normal distribution with mean = pred.dat mu
# and sd = pred.dat se.
# baseline
base.vec.link<- rnorm(500, mean = temp.dat$Projections[[1]]$Baseline[1], sd = temp.dat$Projections.p.se[[1]]$Baseline[1])
base.vec<- ilink(base.vec.link)
# future
fut.vec.link<- rnorm(500, mean = temp.dat$Projections[[1]]$`2055`[1], sd = temp.dat$Projections.p.se[[1]]$`2055`[1])
fut.vec<- ilink(fut.vec.link)
# Propose x.vector values (link scale, +/- 5SD from the mean)
x.vec<- seq(from = temp.dat$Projections[[1]]$Baseline[1] + 5*temp.dat$Projections.p.se[[1]]$Baseline[1], to = temp.dat$Projections[[1]]$Baseline[1] - 5*temp.dat$Projections.p.se[[1]]$Baseline[1], length.out = 500)
base.r<- ilink(dnorm(x.vec, mean = temp.dat$Projections[[1]]$Baseline[1], sd = temp.dat$Projections.p.se[[1]]$Baseline[1]))
fut.r<- ilink(dnorm(x.vec, mean = temp.dat$Projections[[1]]$`2055`[1], sd = temp.dat$Projections.p.se[[1]]$`2055`[1]))
bmn = temp.dat$Projections[[1]]$Baseline[1]
bsd = temp.dat$Projections.p.se[[1]]$Baseline[1]
fmn = temp.dat$Projections[[1]]$`2055`[1]
fsd = temp.dat$Projections.p.se[[1]]$`2055`[1]
nevaD = c(9, 3, 0)
nevaV = c(90, 10, 0, 0)
# Four voting scenarios, results mapped back to the response scale
sdm.bayes1.r<- ilink(SDMbayes(x.vec, bmn, bsd, fmn, fsd, 0, 3, 9, 0, 1, 10, 20))
sdm.bayes2.r<- ilink(SDMbayes(x.vec, bmn, bsd, fmn, fsd, 0, 3, 9, 0, 1, 20, 10))
sdm.bayes3.r<- ilink(SDMbayes(x.vec, bmn, bsd, fmn, fsd, 0, 3, 9, 0, 6, 15, 10))
sdm.bayes4.r<- ilink(SDMbayes(x.vec, bmn, bsd, fmn, fsd, 9, 3, 0, 0, 6, 15, 10))
plot.dat<- data.frame("Sample" = c(rep("SDM.Base", length(base.r)), rep("SDM.Future", length(fut.r)), rep("Pos.Vh", length(sdm.bayes1.r)), rep("Pos.H", length(sdm.bayes2.r)), rep("Pos.M", length(sdm.bayes3.r)), rep("Neg.M", length(sdm.bayes4.r))), "X" = rep(ilink(x.vec), 6), "Value" = c(base.r, fut.r, sdm.bayes1.r, sdm.bayes2.r, sdm.bayes3.r, sdm.bayes4.r))
plot.dat$Sample<- factor(plot.dat$Sample, levels = c("SDM.Base", "SDM.Future", "Pos.Vh", "Pos.H", "Pos.M", "Neg.M"))
out.plot<- ggplot(plot.dat, aes(x = X, y = Value, group = Sample)) +
ylim(c(0,1)) +
geom_line(aes(color = Sample), alpha = 0.75) +
scale_fill_manual(name = "Sample", values = c('#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33'), labels = c("SDM.Base", "SDM.Future", "Pos.Vh", "Pos.H", "Pos.M", "Neg.M")) +
theme_bw()
out.plot
# NICE! Why are we at a minimum of 0.5 on the y axis??
# Now, how do things work when we have a spatial gradient in species increasing and decreasing across different votes. Imagine two pixels, one shifted up and one shifted down, and then how these respond to different voting ---
# Good example of species P(presence) increasing or decreasing? Longfin squid southern NS shelf increase
temp.dat<- all.dat[75,]
diff<- ilink(temp.dat$Projections[[1]]$`2055`) - ilink(temp.dat$Projections[[1]]$`Baseline`)
diff.max.ind<- which.max(diff) # Row 87
diff.max<- diff[diff.max.ind]
bmn.inc = temp.dat$Projections[[1]]$Baseline[diff.max.ind]
bsd.inc = temp.dat$Projections.p.se[[1]]$Baseline[diff.max.ind]
fmn.inc = temp.dat$Projections[[1]]$`2055`[diff.max.ind]
fsd.inc = temp.dat$Projections.p.se[[1]]$`2055`[diff.max.ind]
# nevaD has 12 possible votes...
nevaD.poss<- data.frame("Scenario.Dir" = c("Neg.Vh", "Neg.M", "Neg.L", "Neut.Vh", "Neut.M", "Neut.L", "Pos.Vh", "Pos.M", "Pos.L"), "Negative" = c(12, 9, 6, 0, 2, 3, 0, 0, 3), "Neutral" = c(0, 3, 3, 12, 8, 6, 0, 3, 3), "Positive" = c(0, 0, 3, 0, 2, 3, 12, 9, 6))
#nevaV has 230 possible votes
nevaV.poss<- data.frame("Scenario.Vuln" = c("Low.Vh", "Low.M", "Low.L", "Mod.Vh", "Mod.M", "Mod.L", "High.Vh", "High.M", "High.L", "VHigh.Vh", "VHigh.M", "VHigh.L"), "Low" = c(24, 20, 10, 0, 3, 6, 0, 0, 2, 0, 0, 2), "Mod" = c(0, 4, 6, 24, 18, 10, 0, 3, 6, 0, 0, 6), "High" = c(0, 0, 6, 0, 3, 6, 24, 18, 10, 0, 4, 6) , "VHigh" = c(0, 0, 2, 0, 0, 2, 0, 3, 6, 24, 20, 10))
# Table of all possible combinations...
scen.combo<- expand.grid("Scenario.Dir" = nevaD.poss$Scenario.Dir, "Scenario.Vuln" = nevaV.poss$Scenario.Vuln)
scen.combo<- scen.combo %>%
left_join(., nevaD.poss, by = "Scenario.Dir") %>%
left_join(., nevaV.poss, by = "Scenario.Vuln") %>%
as_tibble()
# Map SDMBayes function to each line...a cell going from absent to present
x.vec<- seq(from = -6, to = 4, length.out = 500)
# NOTE(review): these overwrite the data-derived bmn.inc/bsd.inc/... above
# with hard-coded values -- presumably deliberate for the toy experiment
bmn.inc<- -5
bsd.inc<- 0.2
fmn.inc<- 3
fsd.inc<- 0.2
scen.combo<- scen.combo %>%
mutate(., "SDMBayes.Inc" = pmap(list(x = list(x.vec), bmn = list(bmn.inc), bsd = list(bsd.inc), fmn = list(fmn.inc), fsd = list(fsd.inc), nevaD.neg = Negative, nevaD.neut = Neutral, nevaD.pos = Positive, nevaV.low = Low, nevaV.mod = Mod, nevaV.high = High, nevaV.vhigh = VHigh), SDMbayes))
# Findings --- MOSTLY NAs
# Map SDMBayes function to each line...a cell with no change (1:1)
x.vec<- seq(from = -6, to = 4, length.out = 500)
bmn.neut<- 3
bsd.neut<- 0.2
fmn.neut<- 3
fsd.neut<- 0.2
scen.combo<- scen.combo %>%
mutate(., "SDMBayes.Neut" = pmap(list(x = list(x.vec), bmn = list(bmn.neut), bsd = list(bsd.neut), fmn = list(fmn.neut), fsd = list(fsd.neut), nevaD.neg = Negative, nevaD.neut = Neutral, nevaD.pos = Positive, nevaV.low = Low, nevaV.mod = Mod, nevaV.high = High, nevaV.vhigh = VHigh), SDMbayes))
# All fine?
plot(ilink(x.vec), ilink(scen.combo$SDMBayes.Neut[[1]]))
plot(ilink(x.vec), ilink(scen.combo$SDMBayes.Neut[[88]]))
# Map SDMBayes function to each line...a cell going from present to absent
x.vec<- seq(from = -6, to = 4, length.out = 500)
bmn.neg<- 3
bsd.neg<- 0.2
fmn.neg<- -5
fsd.neg<- 0.2
scen.combo<- scen.combo %>%
mutate(., "SDMBayes.Neg" = pmap(list(x = list(x.vec), bmn = list(bmn.neg), bsd = list(bsd.neg), fmn = list(fmn.neg), fsd = list(fsd.neg), nevaD.neg = Negative, nevaD.neut = Neutral, nevaD.pos = Positive, nevaV.low = Low, nevaV.mod = Mod, nevaV.high = High, nevaV.vhigh = VHigh), SDMbayes))
# Findings --- cannot have a large increase with a negative species (vh certainty) and low vuln (vh certainty)
plot(ilink(x.vec), ilink(scen.combo$SDMBayes.Neg[[1]]))
plot(ilink(x.vec), ilink(scen.combo$SDMBayes.Neg[[88]]))
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{pattern}
\alias{pattern}
\alias{pattern1}
\alias{pattern2}
\alias{pattern3}
\alias{pattern4}
\title{Datasets with various missing data patterns}
\format{\describe{ \item{\code{pattern1}}{Data with a univariate missing
data pattern} \item{\code{pattern2}}{Data with a monotone missing data
pattern} \item{\code{pattern3}}{Data with a file matching missing data
pattern} \item{\code{pattern4}}{Data with a general missing data pattern} }}
\source{
van Buuren, S. (2012). \emph{Flexible Imputation of Missing Data.}
Boca Raton, FL: Chapman & Hall/CRC Press.
}
\description{
Four simple datasets with various missing data patterns
}
\details{
Van Buuren (2012) uses these four artificial datasets to illustrate various
missing data patterns.
}
\examples{
require(lattice)
require(MASS)
pattern4
data <- rbind(pattern1, pattern2, pattern3, pattern4)
mdpat <- cbind(expand.grid(rec = 8:1, pat = 1:4, var = 1:3), r=as.numeric(as.vector(is.na(data))))
types <- c("Univariate","Monotone","File matching","General")
tp41 <- levelplot(r~var+rec|as.factor(pat), data=mdpat,
as.table=TRUE, aspect="iso",
shrink=c(0.9),
col.regions = mdc(1:2),
colorkey=FALSE,
scales=list(draw=FALSE),
xlab="", ylab="",
between = list(x=1,y=0),
strip = strip.custom(bg = "grey95", style = 1,
factor.levels = types))
print(tp41)
md.pattern(pattern4)
p <- md.pairs(pattern4)
p
### proportion of usable cases
p$mr/(p$mr+p$mm)
### outbound statistics
p$rm/(p$rm+p$rr)
fluxplot(pattern2)
}
\keyword{datasets}
| /man/pattern.rd | no_license | bbolker/mice | R | false | false | 1,644 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{pattern}
\alias{pattern}
\alias{pattern1}
\alias{pattern2}
\alias{pattern3}
\alias{pattern4}
\title{Datasets with various missing data patterns}
\format{\describe{ \item{list("pattern1")}{Data with a univariate missing
data pattern} \item{list("pattern2")}{Data with a monotone missing data
pattern} \item{list("pattern3")}{Data with a file matching missing data
pattern} \item{list("pattern4")}{Data with a general missing data pattern} }}
\source{
van Buuren, S. (2012). \emph{Flexible Imputation of Missing Data.}
Boca Raton, FL: Chapman & Hall/CRC Press.
}
\description{
Four simple datasets with various missing data patterns
}
\details{
Van Buuren (2012) uses these four artificial datasets to illustrate various
missing data patterns.
}
\examples{
require(lattice)
require(MASS)
pattern4
data <- rbind(pattern1, pattern2, pattern3, pattern4)
mdpat <- cbind(expand.grid(rec = 8:1, pat = 1:4, var = 1:3), r=as.numeric(as.vector(is.na(data))))
types <- c("Univariate","Monotone","File matching","General")
tp41 <- levelplot(r~var+rec|as.factor(pat), data=mdpat,
as.table=TRUE, aspect="iso",
shrink=c(0.9),
col.regions = mdc(1:2),
colorkey=FALSE,
scales=list(draw=FALSE),
xlab="", ylab="",
between = list(x=1,y=0),
strip = strip.custom(bg = "grey95", style = 1,
factor.levels = types))
print(tp41)
md.pattern(pattern4)
p <- md.pairs(pattern4)
p
### proportion of usable cases
p$mr/(p$mr+p$mm)
### outbound statistics
p$rm/(p$rm+p$rr)
fluxplot(pattern2)
}
\keyword{datasets}
|
# first: set working directory to here
mansonDataAll = read.csv('Manson_Convo_PD_data.csv',header=T,skip=1)
colnames(mansonDataAll) = list('order','triad','code','chairs','p1.id','sex','p1.income.zip','p1.primary.psycho','p1.secondary.psycho','p2.facial','cultural.style','language.style.match','common.ground','p2.interrupts.p1','p1.pd.toward.p2','p2.pd.toward.p1','p1.rates.p2.warmth','p1.rates.p2.competence','p2.rates.p1.warmth','p2.rates.p1.competence')
mansonData = mansonDataAll
# in case we want to focus analysis on one chair ordering
# mansonData = mansonDataAll[mansonDataAll$chairs=='LC'|mansonDataAll$chairs=='CR'|mansonDataAll$chairs=='LR',]
laughs = read.table('laughter_data.txt',header=T,sep='\t') # laughter data from bryant, several available columns, pre-selected 2 obvious ones
mansonData$winmin = -1 # initialize the columns we will populate with the body DVs
mansonData$winmax = -1
mansonData$ccfmax = -1
mansonData$winsd = -1
mansonData$max.loc = -1
mansonData$chairs = as.character(mansonData$chairs) # chair ordering in manson
wccres$typ = as.character(wccres$typ) # typ = chair ordering in body data
wccresNew = wccres[wccres$cond=='obs',] # store observed data (without surrogate)
# Swap the two letters of a chair-ordering code (e.g. "LR" -> "RL").
# Subjects are specified as e.g. LR in the body data, but we also need the
# RL rows of the Manson data, since measures (warmth, PD, ...) are rated
# from each participant's side.
reverseString = function(code) {
  paste0(substr(code, 2, 2), substr(code, 1, 1))
}
mansonLaughs = c() # build this separately for inclusion later
for (i in 1:dim(wccresNew)[1]) { # integrating Manson data with body correlations (windowed correlation = wcc)
# triad id is embedded in the file name before the underscore (e.g. "T8_...")
ix=regexpr('_',wccresNew[i,]$triad)[1]-1
triad = as.numeric(substr(wccresNew[i,]$triad,2,ix)) # get triad # from body movement file, e.g., T8_...
# laugh rows are appended twice -- presumably once per chair ordering so the
# vector lines up with the two Manson rows per dyad; verify against usage below
mansonLaughs = rbind(mansonLaughs,laughs[laughs$CONV==triad,2:3],laughs[laughs$CONV==triad,2:3]) # get the laughs
# get measures from both orderings in manson data (LR / RL)
# columns 21:25 are the body-movement DVs initialised above
# (winmin, winmax, ccfmax, winsd, max.loc)
mansonData[mansonData$chairs==wccresNew[i,]$typ & mansonData$triad==triad,21:25] = wccresNew[i,4:8]
mansonData[mansonData$chairs==reverseString(wccresNew[i,]$typ) & mansonData$triad==triad,21:25] = wccresNew[i,4:8]
}
#
# let's retrieve the windowed correlation scores and treat this as a repeated
# measures setup... multiple observations in 10s segments
# increases power for the exploratory analysis as described in the main paper
#
wccMansonData = wccfull[wccfull$cond=='obs',]
desiredCols = c('p1.id','sex','p1.income.zip','p1.primary.psycho','p2.facial',
'cultural.style','language.style.match','common.ground',
'p2.interrupts.p1','p1.pd.toward.p2','p1.rates.p2.warmth',
'p1.rates.p2.competence')
lapply(desiredCols,function(x) {
thisExpr = paste('wccMansonData$',unlist(x),'<<- -99',sep='')
eval(parse(text=thisExpr))
})
l = nrow(wccMansonData)
wccMansonDataRev = wccMansonData # so we can get the reverse-chair scores
laughDat = c()
#
# a slow and lame loop... but we just do it once and we're done
# *NB: wccMansonData = windowed correlations combined with Manson et al. covariates
#
for (i in 1:l) {
if ((i-1) %% 300 == 0) {
print(paste('Integrating all windows... ',round(100*i/l),'% complete'))
}
ix=regexpr('_',wccMansonData[i,]$triad)[1]-1
triad = as.numeric(substr(wccMansonData[i,]$triad,2,ix))
# we do laughter as a separate vector since it came from another analysis of the videos
# by aligning the vector, we can just slip it into the covariate list in the model
laughDat = rbind(laughDat,laughs[laughs$CONV==triad,2:3])
typ = wccMansonData[i,]$typ
revTyp = reverseString(wccMansonData[i,]$typ)
mansonVect = mansonData[mansonData$chairs==typ&mansonData$triad==triad,]
wccMansonData[i,7:18] = subset(mansonVect,select=desiredCols)
mansonVect = mansonData[mansonData$chairs==revTyp&mansonData$triad==triad,]
wccMansonData[i,7:18] = (wccMansonData[i,7:18] + subset(mansonVect,select=desiredCols))/2
}
#
# let's check for Table 1 in paper... make sure we get the right N for SD:
#
tableData = mansonData[mansonData$chairs%in%c('LC','LR','CR'),]
summary(tableData)
sd(laughs$X..SHARED*100) # checking to confirm laughter rows in Table 1
| /combineMansonData.R | no_license | racdale/triadic-bodily-synchrony | R | false | false | 4,255 | r | # first: set working directory to here
mansonDataAll = read.csv('Manson_Convo_PD_data.csv',header=T,skip=1)
colnames(mansonDataAll) = list('order','triad','code','chairs','p1.id','sex','p1.income.zip','p1.primary.psycho','p1.secondary.psycho','p2.facial','cultural.style','language.style.match','common.ground','p2.interrupts.p1','p1.pd.toward.p2','p2.pd.toward.p1','p1.rates.p2.warmth','p1.rates.p2.competence','p2.rates.p1.warmth','p2.rates.p1.competence')
mansonData = mansonDataAll
# in case we want to focus analysis on one chair ordering
# mansonData = mansonDataAll[mansonDataAll$chairs=='LC'|mansonDataAll$chairs=='CR'|mansonDataAll$chairs=='LR',]
laughs = read.table('laughter_data.txt',header=T,sep='\t') # laughter data from bryant, several available columns, pre-selected 2 obvious ones
mansonData$winmin = -1 # initialize the columns we will populate with the body DVs
mansonData$winmax = -1
mansonData$ccfmax = -1
mansonData$winsd = -1
mansonData$max.loc = -1
mansonData$chairs = as.character(mansonData$chairs) # chair ordering in manson
wccres$typ = as.character(wccres$typ) # typ = chair ordering in body data
wccresNew = wccres[wccres$cond=='obs',] # store observed data (without surrogate)
reverseString = function(x) { # for checking measures from reverse-chair rows in manson data
return(paste(substr(x,2,2),substr(x,1,1),sep=''))
} # e.g. subjects specked as LR in all data; however we want RL in manson, since we want to know what subject R thinks about warmth, PD, etc.
mansonLaughs = c() # build this separately for inclusion later
for (i in 1:dim(wccresNew)[1]) { # integrating Manson data with body correlations (windowed correlation = wcc)
ix=regexpr('_',wccresNew[i,]$triad)[1]-1
triad = as.numeric(substr(wccresNew[i,]$triad,2,ix)) # get triad # from body movement file, e.g., T8_...
mansonLaughs = rbind(mansonLaughs,laughs[laughs$CONV==triad,2:3],laughs[laughs$CONV==triad,2:3]) # get the laughs
# get measures from both orderings in manson data (LR / RL)
mansonData[mansonData$chairs==wccresNew[i,]$typ & mansonData$triad==triad,21:25] = wccresNew[i,4:8]
mansonData[mansonData$chairs==reverseString(wccresNew[i,]$typ) & mansonData$triad==triad,21:25] = wccresNew[i,4:8]
}
#
# let's retrieve the windowed correlation scores and treat this as a repeated
# measures setup... multiple observations in 10s segments
# increases power for the exploratory analysis as described in the main paper
#
wccMansonData = wccfull[wccfull$cond=='obs',]
desiredCols = c('p1.id','sex','p1.income.zip','p1.primary.psycho','p2.facial',
'cultural.style','language.style.match','common.ground',
'p2.interrupts.p1','p1.pd.toward.p2','p1.rates.p2.warmth',
'p1.rates.p2.competence')
lapply(desiredCols,function(x) {
thisExpr = paste('wccMansonData$',unlist(x),'<<- -99',sep='')
eval(parse(text=thisExpr))
})
l = nrow(wccMansonData)
wccMansonDataRev = wccMansonData # so we can get the reverse-chair scores
laughDat = c()
#
# a slow and lame loop... but we just do it once and we're done
# *NB: wccMansonData = windowed correlations combined with Manson et al. covariates
#
for (i in 1:l) {
if ((i-1) %% 300 == 0) {
print(paste('Integrating all windows... ',round(100*i/l),'% complete'))
}
ix=regexpr('_',wccMansonData[i,]$triad)[1]-1
triad = as.numeric(substr(wccMansonData[i,]$triad,2,ix))
# we do laughter as a separate vector since it came from another analysis of the videos
# by aligning the vector, we can just slip it into the covariate list in the model
laughDat = rbind(laughDat,laughs[laughs$CONV==triad,2:3])
typ = wccMansonData[i,]$typ
revTyp = reverseString(wccMansonData[i,]$typ)
mansonVect = mansonData[mansonData$chairs==typ&mansonData$triad==triad,]
wccMansonData[i,7:18] = subset(mansonVect,select=desiredCols)
mansonVect = mansonData[mansonData$chairs==revTyp&mansonData$triad==triad,]
wccMansonData[i,7:18] = (wccMansonData[i,7:18] + subset(mansonVect,select=desiredCols))/2
}
#
# let's check for Table 1 in paper... make sure we get the right N for SD:
#
tableData = mansonData[mansonData$chairs%in%c('LC','LR','CR'),]
summary(tableData)
sd(laughs$X..SHARED*100) # checking to confirm laughter rows in Table 1
|
# Generic: plot kernel-density curves of per-read CpG density for each
# element of a container of read sets.
setGeneric("cpgDensityPlot", function(x, ...){standardGeneric("cpgDensityPlot")})
# GRangesList method: one density curve per list element, overlaid on a
# single plot with a legend; extra arguments are forwarded to
# cpgDensityCalc(). Returns the raw CpG densities invisibly.
setMethod("cpgDensityPlot", "GRangesList",
function(x, cols = rainbow(length(x)), xlim = c(0, 20), lty = 1, lwd = 1,
main = "CpG Density Plot", verbose = TRUE, ...)
{
# one colour per list element is required for the legend to make sense
if (length(cols) != length(x)) stop("x and cols must have the same number of elements.")
if (verbose) message("Calculating CpG density")
x.cpg <- cpgDensityCalc(x, verbose = verbose, ...)
# kernel density estimate per element; common y-limit from the tallest curve
x.den <- lapply(x.cpg, density)
ymax <- max(sapply(x.den, function(u) max(u$y)))
# first element establishes the plot region; the rest are added as lines
plot(x = x.den[[1]]$x, y = x.den[[1]]$y, type = 'l', col = cols[1], xlim=xlim,
ylim = c(0, ymax), main = main, ylab = "Frequency", xlab = "CpG Density of reads",
lty = lty, lwd = lwd)
if (length(x) > 1)
{
for (i in 2:length(x)) {
lines(x = x.den[[i]]$x, y = x.den[[i]]$y, col = cols[i], lty = lty, lwd = lwd)
}
}
legend("topright", col = cols, legend = names(x), lty = lty, lwd = lwd)
# return the computed densities so callers can reuse them without replotting
invisible(x.cpg)
})
| /R/cpgDensityPlot.R | no_license | clark-lab-robot/Repitools_bioc | R | false | false | 1,031 | r | setGeneric("cpgDensityPlot", function(x, ...){standardGeneric("cpgDensityPlot")})
setMethod("cpgDensityPlot", "GRangesList",
function(x, cols = rainbow(length(x)), xlim = c(0, 20), lty = 1, lwd = 1,
main = "CpG Density Plot", verbose = TRUE, ...)
{
if (length(cols) != length(x)) stop("x and cols must have the same number of elements.")
if (verbose) message("Calculating CpG density")
x.cpg <- cpgDensityCalc(x, verbose = verbose, ...)
x.den <- lapply(x.cpg, density)
ymax <- max(sapply(x.den, function(u) max(u$y)))
plot(x = x.den[[1]]$x, y = x.den[[1]]$y, type = 'l', col = cols[1], xlim=xlim,
ylim = c(0, ymax), main = main, ylab = "Frequency", xlab = "CpG Density of reads",
lty = lty, lwd = lwd)
if (length(x) > 1)
{
for (i in 2:length(x)) {
lines(x = x.den[[i]]$x, y = x.den[[i]]$y, col = cols[i], lty = lty, lwd = lwd)
}
}
legend("topright", col = cols, legend = names(x), lty = lty, lwd = lwd)
invisible(x.cpg)
})
|
i = 130
library(isoform, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
bedFile = "/nas02/home/w/e/weisun/research/data/human/Homo_sapiens.GRCh37.66.nonoverlap.exon.bed"
setwd("/lustre/scr/w/e/weisun/TCGA/bam/")
cmd = "ls *_asCounts_hetSNP_EA_hap1.bam"
ffs = system(cmd, intern=TRUE)
length(ffs)
head(ffs)
sams = gsub("_asCounts_hetSNP_EA_hap1.bam", "", ffs)
sam1 = sams[i]
cat(i, sam1, date(), "\n")
bamFile = ffs[i]
outFile = sprintf("%s_asCounts_hap1.txt", sam1)
countReads(bamFile, bedFile, outFile)
bamFile = gsub("_hap1", "_hap2", ffs[i], fixed=TRUE)
outFile = sprintf("%s_asCounts_hap2.txt", sam1)
countReads(bamFile, bedFile, outFile)
| /data_preparation/R_batch3/_step3/step3_countReads_EA.129.R | no_license | jasa-acs/Mapping-Tumor-Specific-Expression-QTLs-in-Impure-Tumor-Samples | R | false | false | 651 | r | i = 130
library(isoform, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
bedFile = "/nas02/home/w/e/weisun/research/data/human/Homo_sapiens.GRCh37.66.nonoverlap.exon.bed"
setwd("/lustre/scr/w/e/weisun/TCGA/bam/")
cmd = "ls *_asCounts_hetSNP_EA_hap1.bam"
ffs = system(cmd, intern=TRUE)
length(ffs)
head(ffs)
sams = gsub("_asCounts_hetSNP_EA_hap1.bam", "", ffs)
sam1 = sams[i]
cat(i, sam1, date(), "\n")
bamFile = ffs[i]
outFile = sprintf("%s_asCounts_hap1.txt", sam1)
countReads(bamFile, bedFile, outFile)
bamFile = gsub("_hap1", "_hap2", ffs[i], fixed=TRUE)
outFile = sprintf("%s_asCounts_hap2.txt", sam1)
countReads(bamFile, bedFile, outFile)
|
#Exploratory Analysis Course Project 1 -- plot 3: the three energy
#sub-metering series over a two-day window, overlaid with a legend.
#setwd("/Users/student/Documents/Classes/JHDataScience/ExploratoryAnalysis/Project1/Graphs/ExData_Plotting1")
#If zipped data file doesn't exist, download it and unzip it
fileUrl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists("household_power_consumption.txt")) {
  download.file(fileUrl, destfile = "./zipdata.zip", method = "curl")
  unzip("zipdata.zip")
}
#Read the header row on its own, then only the target rows
#(skip/nrows select a 2880-minute = 2-day block -- verify dates against the data)
ColNames = as.vector(as.matrix(read.table("household_power_consumption.txt",header=FALSE,sep=";",na.strings="?",nrows = 1)))
data = read.table("household_power_consumption.txt",header=FALSE,sep=";",col.names = ColNames,na.strings="?",skip=66637,nrows = 2880)
#Use explicit data$ references instead of attach(), which leaves the data
#frame on the search path as a lingering side effect
dateTime = strptime(paste(data$Date,data$Time), format= "%d/%m/%Y %H:%M:%S")
png(filename = "plot3.png",width = 480, height = 480) #open png file to save graph
plot(dateTime,data$Sub_metering_1,type = "l",xlab = "",ylab="Energy sub metering") #create graph
lines(dateTime,data$Sub_metering_2,col = "red") #xlab/ylab dropped: lines() ignores them
lines(dateTime,data$Sub_metering_3,col = "blue")
legend("topright",col = c("black","red","blue"),legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),pch="-")
dev.off() #close png file | /plot3.R | no_license | mzivot/ExData_Plotting1 | R | false | false | 1,474 | r | #Exploratory Analysis Course Project 1
#setwd("/Users/student/Documents/Classes/JHDataScience/ExploratoryAnalysis/Project1/Graphs/ExData_Plotting1")
#If zipped data file doesn't exist, download it and unzip it
fileUrl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists("household_power_consumption.txt")) {
fileUrl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile ="./zipdata.zip",method = "curl")
unzip("zipdata.zip")
}
ColNames = as.vector(as.matrix(read.table("household_power_consumption.txt",header=FALSE,sep=";",na.strings="?",nrows = 1))) #get header
data = read.table("household_power_consumption.txt",header=FALSE,sep=";",col.names = ColNames,na.strings="?",skip=66637,nrows = 2880) #read in selected data
attach(data)
dateTime = strptime(paste(data$Date,data$Time), format= "%d/%m/%Y %H:%M:%S")
png(filename = "plot3.png",width = 480, height = 480) #open png file to save graph
plot(dateTime,Sub_metering_1,type = "l",xlab = "",ylab="Energy sub metering") #create graph
lines(dateTime,Sub_metering_2,col = "red", type = "l",xlab = "",ylab="Global Active Power (kilowatts)")
lines(dateTime,Sub_metering_3,col = "blue", type = "l",xlab = "",ylab="Global Active Power (kilowatts)")
legend("topright",col = c("black","red","blue"),legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),pch="-")
dev.off() #close png file |
#' ---
#' title : "DS Capstone Quiz 1"
#' author : B.F.C
#' date : "18/2/2013"
#' ---
# get the data : data dir
datadir <- "projdata/final/en_US"
# open en_US.twitter.txt
# fname <- "en_US.twitter.txt"
# Question 2 - 3
# --------------
# summarising function
# Summarise one corpus file: the line count and the widest line in
# characters. The file is located via the script-level `datadir`.
# Returns a list with elements Filename, Lines, and Linewidth.
sumfile <- function(fname) {
  lines <- readLines(file.path(datadir, fname))
  # nchar() is vectorised, so no need for sapply() over every line
  # (the original sapply made one function call per line of a large corpus)
  list(Filename = fname,
       Lines = length(lines),
       Linewidth = max(nchar(lines)))
}
# list of files
flist <- dir(datadir)
# Get results
sapply(flist, sumfile)
# en_US.blogs.txt en_US.news.txt en_US.twitter.txt
# Filename "en_US.blogs.txt" "en_US.news.txt" "en_US.twitter.txt"
# Lines 899288 77259 2360148
# Linewidth 40835 5760 213
# Question 4
# ----------
# Count the lines mentioning "love" and "hate" in a corpus file
# (located via the script-level `datadir`) and report their ratio.
# Returns a list with elements love, hate, and ratio.
lovehateratio <- function(fname) {
  txt <- readLines(file.path(datadir, fname))
  n_love <- sum(grepl("love", txt))
  n_hate <- sum(grepl("hate", txt))
  list(love = n_love, hate = n_hate, ratio = n_love / n_hate)
}
lovehateratio("en_US.twitter.txt")
# love hate ratio
# 90956.000000 22138.000000 4.108592
# Question 5
# ----------
# Question 5: retrieve the tweet mentioning "biostat" and count lines
# containing the exact chess sentence; local() keeps the big corpus
# vector out of the global environment.
local({
fname <- "en_US.twitter.txt"
lines <- readLines(file.path(datadir, fname))
list(
Tweetbio = grep(pattern = "biostat", x = lines, value = TRUE),
Howmanychess = sum(grepl(pattern = "A computer once beat me at chess, but it was no match for me at kickboxing",
lines ))
)
})
# $Tweetbio
# [1] "i know how you feel.. i have biostats on tuesday and i have yet to study =/"
#
# $Howmanychess
# [1] 3
| /Quiz1/Quiz1.R | no_license | Brufico/DScapstone | R | false | false | 1,940 | r | #' ---
#' title : "DS Capstone Quiz 1"
#' author : B.F.C
#' date : "18/2/2013"
#' ---
# get the data : data dir
datadir <- "projdata/final/en_US"
# open en_US.twitter.txt
# fname <- "en_US.twitter.txt"
# Question 2 - 3
# --------------
# summarising function
sumfile <- function(fname) {
# read lines
lines <- readLines(file.path(datadir, fname))
# Counting lines
nlines <- length(lines)
maxwidthline <- max(sapply(lines,
function(x) nchar(x)
))
list(Filename = fname, Lines = nlines, Linewidth = maxwidthline)
}
# list of files
flist <- dir(datadir)
# Get results
sapply(flist, sumfile)
# en_US.blogs.txt en_US.news.txt en_US.twitter.txt
# Filename "en_US.blogs.txt" "en_US.news.txt" "en_US.twitter.txt"
# Lines 899288 77259 2360148
# Linewidth 40835 5760 213
# Question 4
# ----------
lovehateratio <- function(fname) {
lines <- readLines(file.path(datadir, fname))
ln_love <- sum(grepl(pattern = "love",lines ))
ln_hate <- sum(grepl(pattern = "hate",lines ))
ratio = ln_love / ln_hate
# c(love = ln_love, hate = ln_hate, ratio = ratio)
list(love = ln_love, hate = ln_hate, ratio = ratio)
}
lovehateratio("en_US.twitter.txt")
# love hate ratio
# 90956.000000 22138.000000 4.108592
# Question 5
# ----------
local({
fname <- "en_US.twitter.txt"
lines <- readLines(file.path(datadir, fname))
list(
Tweetbio = grep(pattern = "biostat", x = lines, value = TRUE),
Howmanychess = sum(grepl(pattern = "A computer once beat me at chess, but it was no match for me at kickboxing",
lines ))
)
})
# $Tweetbio
# [1] "i know how you feel.. i have biostats on tuesday and i have yet to study =/"
#
# $Howmanychess
# [1] 3
|
# Draw n variates from a gamma(shape, scale) distribution shifted to have
# lower bound `thres` (note: the second rgamma argument is the rate, hence
# 1/scale).
rgammaShifted = function(n, shape, scale, thres) {
  thres + rgamma(n, shape = shape, rate = 1 / scale)
}
# Simulate copula-REMADA meta-analysis data with beta margins.
#
# N: number of studies; p: marginal means; g: dispersion parameters;
# tau: Kendall's tau; rcop: copula sampler rcop(N, theta);
# tau2par: maps tau to the copula parameter theta.
# Returns a list of TP/TN/FN/FP counts, one entry per study.
rCopulaREMADA.beta = function(N, p, g, tau, rcop, tau2par)
{
  # study sizes: shifted-gamma totals, split binomially into the two groups
  n  <- round(rgammaShifted(N, shape = 1.2, scale = 100, thres = 30))
  n1 <- rbinom(N, size = n, prob = 0.43)
  n2 <- n - n1
  # dependent uniforms from the copula
  th <- tau2par(tau)
  uu <- rcop(N, th)
  # beta margins parameterised by mean p and dispersion g
  shape1 <- p / g - p
  shape2 <- (1 - p) * (1 - g) / g
  x1 <- qbeta(uu[, 1], shape1[1], shape2[1])
  x2 <- qbeta(uu[, 2], shape1[2], shape2[2])
  TP <- round(n1 * x1)
  TN <- round(n2 * x2)
  list("TP" = TP, "TN" = TN, "FN" = n1 - TP, "FP" = n2 - TN)
}
# Simulate copula-REMADA meta-analysis data with logit-normal margins.
# N: number of studies; p: marginal means (probability scale);
# si: standard deviations on the logit scale; tau: Kendall's tau;
# rcop: copula sampler rcop(N, theta); tau2par: maps tau to theta.
# Returns a list of TP/TN/FN/FP counts, one entry per study.
rCopulaREMADA.norm=function(N,p,si,tau,rcop,tau2par)
{ n=round(rgammaShifted(N,shape=1.2,scale=100,thres=30)) # study sizes from a shifted gamma
n1=rbinom(N,size=n,prob=0.43) # group-1 size per study
n2=n-n1 # group-2 size
th=tau2par(tau) # copula parameter from Kendall's tau
dat=rcop(N,th) # N pairs of dependent uniforms
u1=dat[,1]
u2=dat[,2]
mu=log(p/(1-p)) # logit of the marginal means
x1=qnorm(u1,mu[1],si[1]) # latent logit-scale value, margin 1
x2=qnorm(u2,mu[2],si[2]) # latent logit-scale value, margin 2
t1=exp(x1)
t2=exp(x2)
x1=t1/(1+t1) # inverse-logit back to probabilities
x2=t2/(1+t2)
TP=round(n1*x1)
TN=round(n2*x2)
FN=n1-TP
FP=n2-TN
list("TP"=TP,"TN"=TN,"FN"=FN,"FP"=FP)
} | /R/rCopulaREMADA.R | no_license | cran/CopulaREMADA | R | false | false | 916 | r | rgammaShifted=function (n,shape,scale,thres)
{ rgamma(n, shape, 1/scale) + thres }
rCopulaREMADA.beta=function(N,p,g,tau,rcop,tau2par)
{ n=round(rgammaShifted(N,shape=1.2,scale=100,thres=30))
n1=rbinom(N,size=n,prob=0.43)
n2=n-n1
th=tau2par(tau)
dat=rcop(N,th)
u1=dat[,1]
u2=dat[,2]
a=p/g-p
b=(1-p)*(1-g)/g
x1=qbeta(u1,a[1],b[1])
x2=qbeta(u2,a[2],b[2])
TP=round(n1*x1)
TN=round(n2*x2)
FN=n1-TP
FP=n2-TN
list("TP"=TP,"TN"=TN,"FN"=FN,"FP"=FP)
}
rCopulaREMADA.norm=function(N,p,si,tau,rcop,tau2par)
{ n=round(rgammaShifted(N,shape=1.2,scale=100,thres=30))
n1=rbinom(N,size=n,prob=0.43)
n2=n-n1
th=tau2par(tau)
dat=rcop(N,th)
u1=dat[,1]
u2=dat[,2]
mu=log(p/(1-p))
x1=qnorm(u1,mu[1],si[1])
x2=qnorm(u2,mu[2],si[2])
t1=exp(x1)
t2=exp(x2)
x1=t1/(1+t1)
x2=t2/(1+t2)
TP=round(n1*x1)
TN=round(n2*x2)
FN=n1-TP
FP=n2-TN
list("TP"=TP,"TN"=TN,"FN"=FN,"FP"=FP)
} |
#' @title Rounding Numbers for Data Frames
#' @description Rounds numeric columns in data.frames
#'
#' @param x a data.frame with numeric columns.
#' @param digits integer indicating the number of decimal places (\code{round})
#' or significant digits (\code{signif}) to be used. See \code{\link[base]{round}} for
#' more details.
#' @param ... arguments to be passed to methods.
#'
#' @details Takes a data.frame and returns a data.frame with the specified function
#' applied to each numeric column.
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @seealso \code{\link[base]{Round}}
#'
#' @examples
#' data(mtcars)
#'
#' round(mtcars, 0)
#'
#' signif(mtcars, 2)
#'
#' @name round
#' @aliases ceiling floor trunc round signif
#'
NULL
#' @rdname round
#' @export
#'
# Data-frame method for ceiling(): round each numeric column up to the
# nearest integer; non-numeric columns pass through unchanged.
# seq_len() avoids the 1:0 trap, so a zero-column frame no longer errors.
ceiling.data.frame <- function(x) {
  for (i in seq_len(ncol(x))) {
    if (is.numeric(x[[i]])) x[[i]] <- ceiling(x[[i]])
  }
  x
}
#' @rdname round
#' @export
#'
# Data-frame method for floor(): round each numeric column down to the
# nearest integer; non-numeric columns pass through unchanged.
# seq_len() avoids the 1:0 trap, so a zero-column frame no longer errors.
floor.data.frame <- function(x) {
  for (i in seq_len(ncol(x))) {
    if (is.numeric(x[[i]])) x[[i]] <- floor(x[[i]])
  }
  x
}
#' @rdname round
#' @export
#'
# Data-frame method for trunc(): truncate each numeric column toward zero;
# non-numeric columns pass through unchanged.
# Bug fix: this method previously called ceiling() instead of trunc(),
# so e.g. trunc(1.7) came back as 2 instead of 1.
# seq_len() also avoids the 1:0 trap on zero-column frames.
trunc.data.frame <- function(x, ...) {
  for (i in seq_len(ncol(x))) {
    if (is.numeric(x[[i]])) x[[i]] <- trunc(x[[i]], ...)
  }
  x
}
#' @rdname round
#' @export
#'
# Data-frame method for round(): round each numeric column to `digits`
# decimal places; non-numeric columns pass through unchanged.
# seq_len() avoids the 1:0 trap, so a zero-column frame no longer errors.
round.data.frame <- function(x, digits = 0) {
  for (i in seq_len(ncol(x))) {
    if (is.numeric(x[[i]])) x[[i]] <- round(x[[i]], digits = digits)
  }
  x
}
#' @rdname round
#' @export
#'
# Data-frame method for signif(): keep `digits` significant figures in
# every numeric column; non-numeric columns are returned unchanged.
signif.data.frame <- function(x, digits = 6) {
for(i in 1:ncol(x)) {
if(is.numeric(x[[i]])) x[[i]] <- signif(x[[i]], digits = digits)
}
x
} | /R/round.R | no_license | cran/swfscMisc | R | false | false | 1,596 | r | #' @title Rounding Numbers for Data Frames
#' @description Rounds numeric columns in data.frames
#'
#' @param x a data.frame with numeric columns.
#' @param digits integer indicating the number of decimal places (\code{round})
#' or significant digits (\code{signif}) to be used. See \code{\link[base]{round}} for
#' more details.
#' @param ... arguments to be passed to methods.
#'
#' @details Takes a data.frame and returns a data.frame with the specified function
#' applied to each numeric column.
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @seealso \code{\link[base]{Round}}
#'
#' @examples
#' data(mtcars)
#'
#' round(mtcars, 0)
#'
#' signif(mtcars, 2)
#'
#' @name round
#' @aliases ceiling floor trunc round signif
#'
NULL
#' @rdname round
#' @export
#'
ceiling.data.frame <- function(x) {
for(i in 1:ncol(x)) {
if(is.numeric(x[[i]])) x[[i]] <- ceiling(x[[i]])
}
x
}
#' @rdname round
#' @export
#'
floor.data.frame <- function(x) {
for(i in 1:ncol(x)) {
if(is.numeric(x[[i]])) x[[i]] <- floor(x[[i]])
}
x
}
#' @rdname round
#' @export
#'
trunc.data.frame <- function(x, ...) {
for(i in 1:ncol(x)) {
if(is.numeric(x[[i]])) x[[i]] <- ceiling(x[[i]], ...)
}
x
}
#' @rdname round
#' @export
#'
round.data.frame <- function(x, digits = 0) {
for(i in 1:ncol(x)) {
if(is.numeric(x[[i]])) x[[i]] <- round(x[[i]], digits = digits)
}
x
}
#' @rdname round
#' @export
#'
signif.data.frame <- function(x, digits = 6) {
for(i in 1:ncol(x)) {
if(is.numeric(x[[i]])) x[[i]] <- signif(x[[i]], digits = digits)
}
x
} |
##
## PURPOSE: Pseudo goodness-of-fit test for a normal mixture
## * generic function
##
## AUTHOR: Arnost Komarek
## arnost.komarek[AT]mff.cuni.cz
##
## CREATED: 20/08/2009
##
## FUNCTIONS: NMixPseudoGOF.R
##
## ==================================================================
## *************************************************************
## NMixPseudoGOF
## *************************************************************
## Generic for the pseudo goodness-of-fit test of a normal mixture;
## S3 dispatch on the class of `x`.
NMixPseudoGOF <- function(x, ...) UseMethod("NMixPseudoGOF")
| /R/NMixPseudoGOF.R | no_license | cran/mixAK | R | false | false | 532 | r | ##
## PURPOSE: Pseudo goodness-of-fit test for a normal mixture
## * generic function
##
## AUTHOR: Arnost Komarek
## arnost.komarek[AT]mff.cuni.cz
##
## CREATED: 20/08/2009
##
## FUNCTIONS: NMixPseudoGOF.R
##
## ==================================================================
## *************************************************************
## NMixPseudoGOF
## *************************************************************
NMixPseudoGOF <- function(x, ...)
{
UseMethod("NMixPseudoGOF")
}
|
library(knitr)
# Shared base-graphics defaults for every figure chunk: plain (non-bold)
# titles and tightened axis-title/label/line spacing via mgp; any extra
# par() settings pass through via `...`.
defaultpar <- function(...)
par(font.main=1, mgp=c(2.1, 0.8, 0), ...) # family="serif"
knit_hooks$set(plotsetup = function(before=TRUE, options, envir) if (before) defaultpar())
knit_hooks$set(crop = hook_pdfcrop)
# delay switch to scientific format number printing - default is 10^4, increase
options(scipen=2) # digits=
# fullpage's 453pt = 6.29in
# normal textwidth 390pt = 5.4in
fullwidth <- 5.4
smallwidth <- 3.5
opts_chunk$set(echo=FALSE, message=FALSE, results="asis", fig.align="center", fig.pos="htbp", fig.width=fullwidth, fig.height=fullwidth, cache=TRUE, crop=TRUE, plotsetup=TRUE, dev.args=list(pointsize=10, family="serif", colormodel="cmyk"))
library(lattice)
trellis.par.set(fontsize=list(text=10))
#trellis.par.get("fontsize")
opts_knit$set(eval.after=c("fig.cap", "fig.subcap", "fig.scap"))
tightmargin <- function(...)
defaultpar(mar=c(3.1, 3.3, 2, 0.8), ...) # b l t r
library(xtable)
# Wrap the concatenated pieces in $...$ so xtable row/column names are
# typeset as LaTeX math.
mathematise <- function(...)
  paste0("$", ..., "$")
options(xtable.sanitize.text.function = identity,
xtable.sanitize.rownames.function = mathematise,
xtable.sanitize.colnames.function = mathematise,
xtable.table.placement = "htbp")#, xtable.booktabs=TRUE)
# Diverging blue-white-red palette over the integer steps mn..mx:
# blue (hue 0.65) for the negative side, red (hue 0) for the positive
# side, saturation scaled by distance from zero (times `intensity`).
# Called with one argument, the range is split symmetrically around zero.
temp.colors <- function(mn, mx=NULL, intensity=1) {
  if (is.null(mx)) {
    span <- mn
    mx <- floor(span/2)
    mn <- ceiling(-span/2)
  }
  # FALSE coerces to hue 0 for the (fully desaturated) centre swatch
  hues <- c(rep(0.65, abs(mn)), FALSE, rep(0, abs(mx)))
  sats <- intensity * abs(mn:mx) / max(abs(c(mn, mx)))
  hsv(hues, sats)
}
| /knitr-setup.R | no_license | kevinstadler/thesis | R | false | false | 1,426 | r | library(knitr)
defaultpar <- function(...)
par(font.main=1, mgp=c(2.1, 0.8, 0), ...) # family="serif"
knit_hooks$set(plotsetup = function(before=TRUE, options, envir) if (before) defaultpar())
# Trim whitespace around PDF figures with pdfcrop (enabled via crop=TRUE).
knit_hooks$set(crop = hook_pdfcrop)
# delay switch to scientific format number printing - default is 10^4, increase
options(scipen=2) # digits=
# Figure widths in inches, derived from the LaTeX text widths:
# fullpage's 453pt = 6.29in
# normal textwidth 390pt = 5.4in
fullwidth <- 5.4
smallwidth <- 3.5
# Global chunk defaults: hide code/messages, emit raw LaTeX, square
# full-width figures, caching, pdfcrop and the plotsetup hook, and serif
# CMYK output for print.
opts_chunk$set(echo=FALSE, message=FALSE, results="asis", fig.align="center", fig.pos="htbp", fig.width=fullwidth, fig.height=fullwidth, cache=TRUE, crop=TRUE, plotsetup=TRUE, dev.args=list(pointsize=10, family="serif", colormodel="cmyk"))
library(lattice)
# Match lattice text size to the 10pt base-graphics pointsize above.
trellis.par.set(fontsize=list(text=10))
#trellis.par.get("fontsize")
# Evaluate caption chunk options after the chunk, so captions can reference
# values computed inside the chunk.
opts_knit$set(eval.after=c("fig.cap", "fig.subcap", "fig.scap"))
# Like defaultpar() but with reduced plot margins (bottom, left, top, right).
tightmargin <- function(...)
defaultpar(mar=c(3.1, 3.3, 2, 0.8), ...) # b l t r
library(xtable)
# Wrap each label in $...$ so LaTeX typesets it in math mode; vectorised over
# its inputs. Used below as the xtable row/column-name sanitiser.
# (paste0() is the idiomatic equivalent of paste(..., sep="").)
mathematise <- function(...)
  paste0("$", ..., "$")
# Global xtable defaults: leave cell text untouched (identity), wrap row and
# column names in math mode via mathematise(), and prefer htbp float
# placement for generated tables.
options(xtable.sanitize.text.function = identity,
xtable.sanitize.rownames.function = mathematise,
xtable.sanitize.colnames.function = mathematise,
xtable.table.placement = "htbp")#, xtable.booktabs=TRUE)
# Diverging blue-white-red palette over the integer steps mn:mx; saturation
# grows with distance from zero so the central colour is white. With a single
# argument, mn is the palette size and a symmetric range is derived from it.
temp.colors <- function(mn, mx=NULL, intensity=1) {
  if (is.null(mx)) {
    half <- mn / 2
    mx <- floor(half)
    mn <- ceiling(-half)
  }
  hues <- c(rep(0.65, abs(mn)), 0, rep(0, abs(mx)))
  saturation <- intensity * abs(mn:mx) / max(abs(c(mn, mx)))
  hsv(hues, saturation)
}
|
# Reshape the packaged bias-direction example data into the wide format that
# rob_direction() expects: long form -> absolute directions -> back to wide.
dat <- data_bias_direction %>%
triangulate::tri_to_long() %>%
triangulate::tri_absolute_direction() %>%
triangulate::tri_to_wide()
# Snapshot test: the rendered paired-direction plot must match the stored PNG.
# NOTE(review): the snapshot name "paried_basic.png" looks like a typo for
# "paired"; renaming it would require regenerating the snapshot file, so it
# is left untouched here.
test_that("Test basic bias direction plots",{
expect_snapshot_file(save_png({
rob_direction(dat, vi = dat$vi)
}), "paried_basic.png")
})
| /tests/testthat/test-rob_paired_direction.R | permissive | mcguinlu/robvis | R | false | false | 289 | r | dat <- data_bias_direction %>%
triangulate::tri_to_long() %>%
triangulate::tri_absolute_direction() %>%
triangulate::tri_to_wide()
test_that("Test basic bias direction plots",{
expect_snapshot_file(save_png({
rob_direction(dat, vi = dat$vi)
}), "paried_basic.png")
})
|
## Script to format and extract the masting layers used in analysis
library(raster)
library(data.table)
library(lubridate)
# Formatted summary of the studies: one row per study with its bounding box
# and observation date range.
study_sum = fread("../../data/formatted/study_summary.csv")
study_sum$datetime_mindate = as.POSIXct(study_sum$datetime_mindate)
study_sum$datetime_maxdate = as.POSIXct(study_sum$datetime_maxdate)
# Format the masting layer: the raw raster has no CRS recorded, so assign the
# Albers equal-area projection it was produced in, then reproject to
# longitude/latitude (WGS84) to match the study bounding boxes below.
dens = raster("../../data/covariate_data/masting/NA_density_raster.txt")
crs(dens) = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
densproj = projectRaster(dens, crs="+proj=longlat +datum=WGS84 +ellps=WGS84")
# The projections are weird for the species richness...
# spp = raster("../../data/covariate_data/masting/NA_spp_rich_raster.txt")
# spp = crop(spp, extent(dens))
# crs(spp) <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
# sppproj = projectRaster(spp, crs="+proj=longlat +datum=WGS84 +ellps=WGS84")
# Loop through studies, cropping the reprojected density raster to each
# study's (slightly buffered) bounding box and writing a per-study GeoTIFF.
# NOTE(review): the loop is currently hard-coded to la_steve0..la_steve5
# instead of iterating over study_sum$study (the commented-out alternative).
for(studynm in paste0("la_steve", 0:5)){#study_sum$study){
cat("Working on", studynm, "\n")
ind = study_sum$study == studynm
minlon = study_sum$longitude_min[ind]
maxlon = study_sum$longitude_max[ind]
minlat = study_sum$latitude_min[ind]
maxlat = study_sum$latitude_max[ind]
# Pad the bounding box by ~0.02 degrees on every side before cropping.
buffer = 0.02
extobj = extent(c(xmin=minlon - buffer, xmax=maxlon + buffer,
ymin=minlat - buffer, ymax=maxlat + buffer))
tras = crop(densproj, extobj)
# Write the cropped raster to a per-study subdirectory (created on demand).
tfp = file.path("../../data/covariate_data/masting", studynm)
dir.create(tfp, showWarnings=FALSE)
rasname = paste(studynm, "_masting.tif", sep="")
writeRaster(tras, file.path(tfp, rasname), format="GTiff", overwrite=TRUE)
}
} | /code/covariate_scripts/extract_masting.R | no_license | mqwilber/rsf_swine | R | false | false | 1,757 | r | ## Script to format and extract the masting layers used in analysis
library(raster)
library(data.table)
library(lubridate)
# Formatted summary of the studies
study_sum = fread("../../data/formatted/study_summary.csv")
study_sum$datetime_mindate = as.POSIXct(study_sum$datetime_mindate)
study_sum$datetime_maxdate = as.POSIXct(study_sum$datetime_maxdate)
# Format the masting layer
dens = raster("../../data/covariate_data/masting/NA_density_raster.txt")
crs(dens) = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
densproj = projectRaster(dens, crs="+proj=longlat +datum=WGS84 +ellps=WGS84")
# The projections are weird for the species richness...
# spp = raster("../../data/covariate_data/masting/NA_spp_rich_raster.txt")
# spp = crop(spp, extent(dens))
# crs(spp) <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
# sppproj = projectRaster(spp, crs="+proj=longlat +datum=WGS84 +ellps=WGS84")
# Loop through studies
for(studynm in paste0("la_steve", 0:5)){#study_sum$study){
cat("Working on", studynm, "\n")
ind = study_sum$study == studynm
minlon = study_sum$longitude_min[ind]
maxlon = study_sum$longitude_max[ind]
minlat = study_sum$latitude_min[ind]
maxlat = study_sum$latitude_max[ind]
buffer = 0.02
extobj = extent(c(xmin=minlon - buffer, xmax=maxlon + buffer,
ymin=minlat - buffer, ymax=maxlat + buffer))
tras = crop(densproj, extobj)
tfp = file.path("../../data/covariate_data/masting", studynm)
dir.create(tfp, showWarnings=FALSE)
rasname = paste(studynm, "_masting.tif", sep="")
writeRaster(tras, file.path(tfp, rasname), format="GTiff", overwrite=TRUE)
} |
### Download and install the edgeR package (one-time setup, kept commented out)
#source("http://bioconductor.org/biocLite.R")
#biocLite("edgeR")
library("edgeR")
library('ggplot2')
# NOTE(review): this script expects `count_table`, `coldata_file`,
# `base_group`, `exp_group` and `path2` to be defined by the calling
# script/environment before it is sourced — confirm against the caller.
########################################################
#####################1.edgeR-diff-gene#################
########################################################
### Read the input data: a count matrix (genes x samples) and a sample
### metadata table whose `condition` column assigns each sample to a group.
countData = read.table(count_table, header=TRUE, sep=",", row.names=1)
colData = read.csv(coldata_file, header=T,row.names = 1)
groups = paste(exp_group,"vs",base_group,sep="")
# Extract the sample names belonging to the baseline and experimental groups.
colData$smp = rownames(colData)
base_smp = colData[colData$condition==base_group,]$smp
exp_smp = colData[colData$condition==exp_group,]$smp
# Build the grouped count matrix, baseline samples first.
rawdata <- countData[,c(base_smp,exp_smp)] #base should be in first column
group <- factor(c(base_smp,exp_smp))
### Filtering and normalisation: wrap the counts in a DGEList.
y <- DGEList(counts=rawdata,genes=rownames(rawdata),group = group)
### TMM normalisation (library-size scaling factors).
y<-calcNormFactors(y)
y$samples
### The dispersion (BCV) is set by rule of thumb because there are no
### replicates to estimate it from: use bcv = 0.4 for human samples and
### bcv = 0.1 for model organisms (0.2 chosen here as a middle ground).
#bcv <- 0.1
bcv <- 0.2
#bcv <- 0.4
et <- exactTest(y, dispersion=bcv^2)
topTags(et)
summary(de <- decideTestsDGE(et))
### Export the results: all genes, plus the subset called significant
### (raw P < 0.05 and |logFC| > 1).
DE <- et$table
DE$significant <- as.factor(DE$PValue<0.05 & abs(DE$logFC) >1)
#write.table(DE,file="edgeR_all2",sep="\t",na="NA",quote=FALSE)
filename = paste(groups,"_all_genes_exprData.txt",sep="")
write.table(DE, file= paste(path2,filename,sep="/"), sep="\t", row.name=TRUE, col.names=TRUE,quote=FALSE)
filename = paste(groups,"_sig_genes_exprData.txt",sep="")
DE_sig <- DE[DE$significant=="TRUE",]
write.table(DE_sig, file= paste(path2,filename,sep="/"), sep="\t", row.name=TRUE, col.names=TRUE,quote=FALSE)
########################################################
#####################2.MA plot##########################
########################################################
# Smear (MA) plot with the significant genes highlighted and the |logFC|=1
# thresholds drawn as horizontal guides.
filename = paste(groups,"_MA_plot.pdf",sep="")
pdf(file = paste(path2,filename,sep="/"))
detags <- rownames(y)[as.logical(DE$significant)]
#detags <- rownames(y)[as.logical(de)];
plotSmear(et, de.tags=detags)
abline(h=c(-1, 1), col="blue");
dev.off()
########################################################
#####################3.volcano plot#####################
########################################################
df <- DE
sig_df <- df
#df = data.frame("id"=rownames(res),res) # use all genes, not only the significant subset
#sig_df <- filter(df,!is.na(padj)) # drop rows where padj is NA
# Rename edgeR's columns to the conventional volcano-plot names.
# NOTE(review): the "padj" column here actually holds edgeR's raw PValue —
# no multiple-testing adjustment is applied above.
names(sig_df) <- c("log2FoldChange","logCPM","padj","significant")
# Colour scheme: padj < 0.05 AND |log2FoldChange| >= 1 is red (up) or blue
# (down); everything else grey.
#scale_color_manual
color<- c(red = "red", gray = "gray", blue ="blue")
# Add the color column according to the significance/direction condition.
sig_df$color <- ifelse(sig_df$padj < 0.05 & abs(sig_df$log2FoldChange) >=1,ifelse(sig_df$log2FoldChange > 1 ,'red','blue'),'gray')
library(ggplot2)
p2 <- ggplot(sig_df, aes(x = log2FoldChange, y = -log10(padj),col = color)) +
geom_point() +
scale_color_manual(values = color) +
labs(x="log2 (fold change)",y="-log10 (padj)")+
geom_hline(yintercept = -log10(0.05), lty=4,col="grey",lwd=0.6) +
geom_vline(xintercept = c(-1, 1), lty=4,col="grey",lwd=0.6) +
theme(legend.position = "none",
panel.grid=element_blank())
filename = paste(groups,"_volcano_plot.pdf",sep="")
ggsave(file=paste(path2,filename,sep="/"),p2, width=6, height=6, units="in")
| /Pipeline/R_for_RNAseq/archive/2.2edgeR-volcano.R | no_license | Iceylee/NGS-Pacbio | R | false | false | 3,311 | r | ###下载安装edgeR包
#source("http://bioconductor.org/biocLite.R")
#biocLite("edgeR")
library("edgeR")
library('ggplot2')
########################################################
#####################1.edgeR-diff-gene#################
########################################################
###读取数据
countData = read.table(count_table, header=TRUE, sep=",", row.names=1)
colData = read.csv(coldata_file, header=T,row.names = 1)
groups = paste(exp_group,"vs",base_group,sep="")
#提取
colData$smp = rownames(colData)
base_smp = colData[colData$condition==base_group,]$smp
exp_smp = colData[colData$condition==exp_group,]$smp
#进行分组
rawdata <- countData[,c(base_smp,exp_smp)] #base should be in first column
group <- factor(c(base_smp,exp_smp))
###过滤与标准化
y <- DGEList(counts=rawdata,genes=rownames(rawdata),group = group)
###TMM标准化
y<-calcNormFactors(y)
y$samples
###推测离散度,根据经验设置,若样本是人,设置bcv = 0.4,模式生物设置0.1.
#bcv <- 0.1
bcv <- 0.2
#bcv <- 0.4
et <- exactTest(y, dispersion=bcv^2)
topTags(et)
summary(de <- decideTestsDGE(et))
###导出数据
DE <- et$table
DE$significant <- as.factor(DE$PValue<0.05 & abs(DE$logFC) >1)
#write.table(DE,file="edgeR_all2",sep="\t",na="NA",quote=FALSE)
filename = paste(groups,"_all_genes_exprData.txt",sep="")
write.table(DE, file= paste(path2,filename,sep="/"), sep="\t", row.name=TRUE, col.names=TRUE,quote=FALSE)
filename = paste(groups,"_sig_genes_exprData.txt",sep="")
DE_sig <- DE[DE$significant=="TRUE",]
write.table(DE_sig, file= paste(path2,filename,sep="/"), sep="\t", row.name=TRUE, col.names=TRUE,quote=FALSE)
########################################################
#####################2.MA plot##########################
########################################################
filename = paste(groups,"_MA_plot.pdf",sep="")
pdf(file = paste(path2,filename,sep="/"))
detags <- rownames(y)[as.logical(DE$significant)]
#detags <- rownames(y)[as.logical(de)];
plotSmear(et, de.tags=detags)
abline(h=c(-1, 1), col="blue");
dev.off()
########################################################
#####################3.volcano plot#####################
########################################################
df <- DE
sig_df <- df
#df = data.frame("id"=rownames(res),res) #使用所有的基因,而不是筛选过的显著差异基因
#sig_df <- filter(df,!is.na(padj)) #去除qadj为NA的数据
names(sig_df) <- c("log2FoldChange","logCPM","padj","significant")
#adj<0.05 AND log2foldchange>1
#scale_color_manual
color<- c(red = "red", gray = "gray", blue ="blue")
#add color column ,and condition
sig_df$color <- ifelse(sig_df$padj < 0.05 & abs(sig_df$log2FoldChange) >=1,ifelse(sig_df$log2FoldChange > 1 ,'red','blue'),'gray')
library(ggplot2)
p2 <- ggplot(sig_df, aes(x = log2FoldChange, y = -log10(padj),col = color)) +
geom_point() +
scale_color_manual(values = color) +
labs(x="log2 (fold change)",y="-log10 (padj)")+
geom_hline(yintercept = -log10(0.05), lty=4,col="grey",lwd=0.6) +
geom_vline(xintercept = c(-1, 1), lty=4,col="grey",lwd=0.6) +
theme(legend.position = "none",
panel.grid=element_blank())
filename = paste(groups,"_volcano_plot.pdf",sep="")
ggsave(file=paste(path2,filename,sep="/"),p2, width=6, height=6, units="in")
|
# Download the NOAA Storm Events "details" CSV archives from the NCDC FTP
# server into <project>/raw_data.
# NOTE(review): the hard-coded setwd() makes this script machine-specific;
# kept as-is for compatibility with the original workflow.
setwd("C:/Users/i23764/OneDrive - Verisk Analytics/Documents/R/MyProject")
mainDir <- getwd()
subDir_rawData <- "raw_data"
# Create the raw-data directory if it does not exist yet. (The original
# abused ifelse() for a scalar side effect; a plain if is the right tool.)
if (!dir.exists(file.path(mainDir, subDir_rawData))) {
  dir.create(file.path(mainDir, subDir_rawData))
}
# library() errors loudly if a package is missing, unlike require().
library(RCurl)
library(tidyverse)
setwd("./raw_data")
ftp <- "ftp://ftp.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/"
# FTP directory listing, one file name per line. Fixes the original's
# dangling pipe: str_split() returns a list, which must be flattened with
# unlist() before the assignment on the next line can work.
filenames <- getURL(ftp, dirlistonly = TRUE) %>%
  str_split("\n") %>%
  unlist()
details <- filenames[grepl("details", filenames)]
for (i in seq_along(details)) {
  # destfile must be a full file path, not a directory, for download.file().
  download.file(paste0(ftp, details[i]),
                destfile = file.path(mainDir, subDir_rawData, details[i]))
}
| /code_block.R | no_license | jschney/MyProject | R | false | false | 614 | r | setwd("C:/Users/i23764/OneDrive - Verisk Analytics/Documents/R/MyProject")
mainDir <- getwd()
subDir_rawData <- "raw_data"
ifelse(!dir.exists(file.path(mainDir,subDir_rawData)),
dir.create(file.path(mainDir,subDir_rawData)), FALSE)
require(RCurl)
require(tidyverse)
setwd("./raw_data")
ftp <- "ftp://ftp.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/"
filenames <- getURL(ftp , dirlistonly=T ) %>%
str_split("\n") %>%
details <- filenames[grepl("details", filenames)]
for (i in seq_along(details)) {
download.file(paste0(ftp,details[i]), destfile = paste0(mainDir,"/",subDir_rawData))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/determineClass.R
\name{determineClass}
\alias{determineClass}
\title{Tries to determine the class of data.frame columns}
\usage{
determineClass(data)
}
\description{
Tries to determine the class of data.frame columns
}
| /man/determineClass.Rd | no_license | mknoll/dataAnalysisMisc | R | false | true | 297 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/determineClass.R
\name{determineClass}
\alias{determineClass}
\title{Tried to determine the class of data.frame columns}
\usage{
determineClass(data)
}
\description{
Tried to determine the class of data.frame columns
}
|
#' Plots the Cases for Each City Given a Selected State
#'
#' Filters the COVID-19 data down to the latest city-level records of the
#' selected state, joins them onto that state's municipality shapes, and
#' renders a leaflet choropleth of confirmed cases with hover labels and a
#' legend.
#'
#' @param df_covid19 Data Frame Returned from getBrazilCovid19Data()
#' @param State String with selected state (input$selected_state)
#' @param state_shape_files Named list of municipality shape objects, keyed
#'   by state name, each with a `code_muni` column to join on
#' @return A leaflet map widget on success; on error, prints a diagnostic,
#'   re-emits the condition message, and returns NULL
#' @export
#'
#' @import dplyr
#' @import leaflet
#' @import sf
#'
plot_cases_state_map <- function(df_covid19,State,state_shape_files){
out <- tryCatch({
# Municipality shapes for the selected state.
muni <- state_shape_files[[State]]
# Keep only the most recent ("is_last") city-level rows for this state.
df_covid19 %>%
filter(state==State,
place_type=="city") %>%
filter(is_last=="True") %>%
select(city,confirmed,deaths,death_rate,city_ibge_code) -> estado_selecionado_plot_mapa
# Attach case counts to each municipality polygon via its IBGE code;
# municipalities without data keep NA (shown as "No Cases" in the legend).
muni %>%
left_join(
as.data.frame(estado_selecionado_plot_mapa),
by = c("code_muni"="city_ibge_code")
) -> df_plot_estado
# Continuous yellow-orange-red scale over the confirmed-case counts.
pal <- leaflet::colorNumeric(
palette = "YlOrRd",
domain = df_plot_estado$confirmed
)
# Hover labels (Portuguese): municipality name, confirmed cases, deaths.
labels <- sprintf(
"<strong>%s</strong><br/>%.d Casos Confirmados<br/>%d Mortes",
df_plot_estado$name_muni,
df_plot_estado$confirmed,
df_plot_estado$deaths
) %>% lapply(htmltools::HTML)
# Assemble the map: light base tiles, one polygon per municipality filled
# by case count, highlighted on hover, plus a legend in the top-right.
leaflet::leaflet(df_plot_estado, options = leaflet::leafletOptions(zoomControl = FALSE)) %>%
leaflet::addProviderTiles("CartoDB.Positron") %>%
leaflet::addPolygons(
fillColor = ~pal(confirmed),
weight = 2,
opacity = 1,
color = "white",
dashArray = "3",
fillOpacity = 0.7,
highlight = leaflet::highlightOptions(
weight = 5,
color = "#666",
dashArray = "",
fillOpacity = 0.7,
bringToFront = TRUE),
label = labels,
labelOptions = leaflet::labelOptions(
style = list("font-weight" = "normal", padding = "3px 8px"),
textsize = "15px",
direction = "auto")
) %>%
leaflet::addLegend(pal = pal, values = ~confirmed, opacity = 0.7, title = "Numero de Casos<br/>Confirmados",
position = "topright",na.label = "No Cases") -> plot
plot
},
# Error handler: reports the failure; message() returns NULL, so the
# function yields NULL to the caller on error.
error=function(cond){
print("Error in function plot_cases_state_map()")
message(cond)
})
return(out)
}
} | /R/plot_cases_state_map.R | no_license | carolinaholanda/Covid19-Monitor | R | false | false | 2,235 | r | #' Plots the Cases for Each City Given a Selected State
#'
#' @param df_covid19 Data Frame Returned from getBrazilCovid19Data()
#' @param State String with selected state (input$selected_state)
#' @param state_shape_files Shape files of the states
#' @return
#' @export
#'
#' @import dplyr
#' @import leaflet
#' @import sf
#'
plot_cases_state_map <- function(df_covid19,State,state_shape_files){
out <- tryCatch({
muni <- state_shape_files[[State]]
df_covid19 %>%
filter(state==State,
place_type=="city") %>%
filter(is_last=="True") %>%
select(city,confirmed,deaths,death_rate,city_ibge_code) -> estado_selecionado_plot_mapa
muni %>%
left_join(
as.data.frame(estado_selecionado_plot_mapa),
by = c("code_muni"="city_ibge_code")
) -> df_plot_estado
pal <- leaflet::colorNumeric(
palette = "YlOrRd",
domain = df_plot_estado$confirmed
)
labels <- sprintf(
"<strong>%s</strong><br/>%.d Casos Confirmados<br/>%d Mortes",
df_plot_estado$name_muni,
df_plot_estado$confirmed,
df_plot_estado$deaths
) %>% lapply(htmltools::HTML)
leaflet::leaflet(df_plot_estado, options = leaflet::leafletOptions(zoomControl = FALSE)) %>%
leaflet::addProviderTiles("CartoDB.Positron") %>%
leaflet::addPolygons(
fillColor = ~pal(confirmed),
weight = 2,
opacity = 1,
color = "white",
dashArray = "3",
fillOpacity = 0.7,
highlight = leaflet::highlightOptions(
weight = 5,
color = "#666",
dashArray = "",
fillOpacity = 0.7,
bringToFront = TRUE),
label = labels,
labelOptions = leaflet::labelOptions(
style = list("font-weight" = "normal", padding = "3px 8px"),
textsize = "15px",
direction = "auto")
) %>%
leaflet::addLegend(pal = pal, values = ~confirmed, opacity = 0.7, title = "Numero de Casos<br/>Confirmados",
position = "topright",na.label = "No Cases") -> plot
plot
},
error=function(cond){
print("Error in function plot_cases_state_map()")
message(cond)
})
return(out)
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.