content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Map GTEx-style tissue identifiers to display labels.
#'
#' Each element of `cl` is looked up in a fixed table of tissue identifiers
#' and translated to either a three-letter abbreviation (`abb = TRUE`) or a
#' human-readable tissue name (`abb = FALSE`). The result is returned as a
#' factor; identifiers not found in the table become NA.
abbreviations <- function(cl, abb = TRUE) {
  tissue_ids <- strsplit("adipose_subcutaneous,adipose_visceral_(omentum),adrenal_gland,artery_aorta,artery_coronary,artery_tibial,brain-0,brain-1,brain-2,breast_mammary_tissue,cells_ebv-transformed_lymphocytes,cells_transformed_fibroblasts,colon_sigmoid,colon_transverse,esophagus_gastroesophageal_junction,esophagus_mucosa,esophagus_muscularis,heart_atrial_appendage,heart_left_ventricle,kidney_cortex,liver,lung,minor_salivary_gland,muscle_skeletal,nerve_tibial,ovary,pancreas,pituitary,prostate,skin,small_intestine_terminal_ileum,spleen,stomach,testis,thyroid,uterus,vagina,whole_blood", ",")[[1]]
  short_codes <- strsplit("ADS,ADV,ARG,ATA,ATC,ATT,BRO,BRC,BRB,BST,LCL,FIB,CLS,CLT,GEJ,EMC,EMS,HRA,HRV,KDN,LVR,LNG,MSG,SMU,TNV,OVR,PNC,PIT,PRS,SKN,ITI,SPL,STM,TST,THY,UTR,VGN,WBL", ",")[[1]]
  full_names <- strsplit("Adipose subcutaneous,Adipose visceral,Adrenal gland,Artery aorta,Artery coronary,Artery tibial,Brain other,Brain cerebellum,Brain basal ganglia,Breast,Lymphoblastoid cell line,Fibroblast cell line,Colon sigmoid,Colon transverse,Gastroesophageal junction,Esophagus mucosa,Esophagus muscularis,Heart atrial appendage,Heart left ventricle,Kidney cortex,Liver,Lung,Minor salivary gland,Skeletal muscle,Tibial nerve,Ovary,Pancreas,Pituitary,Prostate,Skin,Intestine terminal ileum,Spleen,Stomach,Testis,Thyroid,Uterus,Vagina,Whole blood", ",")[[1]]
  # Select the output vocabulary first, then translate via a single
  # positional lookup (all three vectors are parallel, position by position).
  vocab <- if (abb) short_codes else full_names
  factor(vocab[match(cl, tissue_ids)])
}
| /generateFigures/helper.R | no_license | QuackenbushLab/normFigures | R | false | false | 1,506 | r | abbreviations<-function(cl,abb=TRUE){
tis = "adipose_subcutaneous,adipose_visceral_(omentum),adrenal_gland,artery_aorta,artery_coronary,artery_tibial,brain-0,brain-1,brain-2,breast_mammary_tissue,cells_ebv-transformed_lymphocytes,cells_transformed_fibroblasts,colon_sigmoid,colon_transverse,esophagus_gastroesophageal_junction,esophagus_mucosa,esophagus_muscularis,heart_atrial_appendage,heart_left_ventricle,kidney_cortex,liver,lung,minor_salivary_gland,muscle_skeletal,nerve_tibial,ovary,pancreas,pituitary,prostate,skin,small_intestine_terminal_ileum,spleen,stomach,testis,thyroid,uterus,vagina,whole_blood"
abr = "ADS,ADV,ARG,ATA,ATC,ATT,BRO,BRC,BRB,BST,LCL,FIB,CLS,CLT,GEJ,EMC,EMS,HRA,HRV,KDN,LVR,LNG,MSG,SMU,TNV,OVR,PNC,PIT,PRS,SKN,ITI,SPL,STM,TST,THY,UTR,VGN,WBL"
tisname = "Adipose subcutaneous,Adipose visceral,Adrenal gland,Artery aorta,Artery coronary,Artery tibial,Brain other,Brain cerebellum,Brain basal ganglia,Breast,Lymphoblastoid cell line,Fibroblast cell line,Colon sigmoid,Colon transverse,Gastroesophageal junction,Esophagus mucosa,Esophagus muscularis,Heart atrial appendage,Heart left ventricle,Kidney cortex,Liver,Lung,Minor salivary gland,Skeletal muscle,Tibial nerve,Ovary,Pancreas,Pituitary,Prostate,Skin,Intestine terminal ileum,Spleen,Stomach,Testis,Thyroid,Uterus,Vagina,Whole blood"
abr = strsplit(abr,",")[[1]]
tis = strsplit(tis,",")[[1]]
tisname=strsplit(tisname,",")[[1]]
if(abb){
cl = abr[match(cl,tis)]
} else {
cl = tisname[match(cl,tis)]
}
factor(cl)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/halfmatrix.R
\name{half}
\alias{half}
\title{Find Half of Something}
\usage{
half(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{passed arguments}
}
\description{
Finds half of something. Generic, with method for matrix.
}
\seealso{
Other halfmatrix:
\code{\link{as.data.frame.halfmatrix}()},
\code{\link{as.halfmatrix.default}()},
\code{\link{as.halfmatrix.halfmatrix}()},
\code{\link{as.halfmatrix}()},
\code{\link{as.matrix.halfmatrix}()},
\code{\link{half.matrix}()},
\code{\link{is.square.matrix}()},
\code{\link{is.square}()},
\code{\link{offdiag.halfmatrix}()},
\code{\link{offdiag}()},
\code{\link{ord.halfmatrix}()},
\code{\link{ord.matrix}()},
\code{\link{ord}()},
\code{\link{print.halfmatrix}()}
}
\concept{halfmatrix}
\keyword{internal}
| /man/half.Rd | no_license | cran/nonmemica | R | false | true | 869 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/halfmatrix.R
\name{half}
\alias{half}
\title{Find Half of Something}
\usage{
half(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{passed arguments}
}
\description{
Finds half of something. Generic, with method for matrix.
}
\seealso{
Other halfmatrix:
\code{\link{as.data.frame.halfmatrix}()},
\code{\link{as.halfmatrix.default}()},
\code{\link{as.halfmatrix.halfmatrix}()},
\code{\link{as.halfmatrix}()},
\code{\link{as.matrix.halfmatrix}()},
\code{\link{half.matrix}()},
\code{\link{is.square.matrix}()},
\code{\link{is.square}()},
\code{\link{offdiag.halfmatrix}()},
\code{\link{offdiag}()},
\code{\link{ord.halfmatrix}()},
\code{\link{ord.matrix}()},
\code{\link{ord}()},
\code{\link{print.halfmatrix}()}
}
\concept{halfmatrix}
\keyword{internal}
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Server logic: for each search term entered in the UI, fetch tweets, score
# word-level sentiment against the NRC lexicon, and render the sentiment
# distribution as a pie chart.
# NOTE(review): only shiny is attached in this file, yet the handler also
# uses twitteR (searchTwitter, twListToDF), tidytext (unnest_tokens,
# get_sentiments) and dplyr (%>%, group_by, summarise, mutate). These must
# be loaded elsewhere (e.g. global.R / ui.R) -- confirm.
shinyServer(function(input, output) {
output$piechart <- renderPlot({
# Extracting tweets
tweets<-searchTwitter(input$search,1000,lang='en') #fetching 1000 tweets containing the search string
label<-input$search
df<-twListToDF(tweets) #converting tweets into dataframe
num<-nrow(df)
# df[1] keeps the first column as a one-column data frame -- presumably the
# "text" column of twListToDF's output; verify against its column order.
word_df<-unnest_tokens(df[1],words,text) #breaking down every tweet into individual words
nrc<-data.frame(get_sentiments("nrc")) #selecting "nrc" dataset from sentiment package in tidytext
sentiment_df<-nrc[!is.na(match(nrc[,1],word_df[,1])),] #retrieving tweet words that match with nrc sentiment words
sentiment_table<-sentiment_df %>% group_by(sentiment) %>% summarise(count=n()) #data formatting
sentiment_table<-sentiment_table %>% mutate(Perc=round(count*100/sum(count),0)) #data formatting
#plotting percentage sentiment in agreement with fetched tweet words
# NOTE(review): rep("%",10) hard-codes the 10 NRC sentiment categories;
# paste() recycles it if fewer categories appear, but confirm intended.
pie(sentiment_table$count,labels = paste(sentiment_table$sentiment,sentiment_table$Perc,rep("%",10),sep="_"), col=rainbow(length(sentiment_table$sentiment)), main=paste0("Sentiment Analysis of ",num," tweets on: ",label))
})
})
| /.gitignore/server.R | no_license | bpali26/Shinyapp-on-Sentiment-Analysis-of-tweets- | R | false | false | 1,408 | r |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyServer(function(input, output) {
output$piechart <- renderPlot({
# Extracting tweets
tweets<-searchTwitter(input$search,1000,lang='en') #fetching 1000 tweets containing the search string
label<-input$search
df<-twListToDF(tweets) #converting tweets into dataframe
num<-nrow(df)
word_df<-unnest_tokens(df[1],words,text) #breaking down every tweet into individual words
nrc<-data.frame(get_sentiments("nrc")) #selecting "nrc" dataset from sentiment package in tidytext
sentiment_df<-nrc[!is.na(match(nrc[,1],word_df[,1])),] #retrieving tweet words that match with nrc sentiment words
sentiment_table<-sentiment_df %>% group_by(sentiment) %>% summarise(count=n()) #data formatting
sentiment_table<-sentiment_table %>% mutate(Perc=round(count*100/sum(count),0)) #data formatting
#plotting percentage sentiment in agreement with fetched tweet words
pie(sentiment_table$count,labels = paste(sentiment_table$sentiment,sentiment_table$Perc,rep("%",10),sep="_"), col=rainbow(length(sentiment_table$sentiment)), main=paste0("Sentiment Analysis of ",num," tweets on: ",label))
})
})
|
#' Align two binary ordered trees
#'
#' Runs the full alignment pipeline: state construction, DP initialization,
#' matrix filling, traceback, and tree reconstruction.
#' @param T1 A binary tree
#' @param T2 A binary tree
#' @param cost_matrix Cost matrix between each pair of nodes in T1 and T2
#' @return An alignment object
#' @export
align <- function(T1, T2, cost_matrix) {
  # Thread one state object through each stage in order.
  # NOTE: initialize() and traceback() resolve to the package-local
  # functions defined in this file, masking methods::initialize and
  # base::traceback.
  state <- create_align_object(T1, T2, cost_matrix)
  state <- initialize(state)
  state <- fill_matrix(state)
  state <- traceback(state)
  build_tree(state)
}
#' Create alignment object
#'
#' Builds the state shared by all alignment stages: both trees, the
#' node-vs-node cost matrix, the subtree-distance matrix T (indexed by
#' postorder node labels, with a leading "lambda" row/column for the empty
#' tree), and the forest-distance map F.
#' @param T1 tree 1
#' @param T2 tree 2
#' @param cost_matrix The cost matrix
#' @return An alignment object
#' @export
create_align_object <- function(T1, T2, cost_matrix) {
  row_labels <- c('lambda', postorder_labels(T1))
  col_labels <- c('lambda', postorder_labels(T2))
  T_matrix <- matrix(NA, nrow = length(row_labels), ncol = length(col_labels),
                     dimnames = list(row_labels, col_labels))
  # Aligning two empty forests costs nothing.
  T_matrix['lambda', 'lambda'] <- 0
  F_map <- hashmap::hashmap('lambda_lambda', 0)
  list(T1 = T1, T2 = T2, cost_matrix = cost_matrix,
       T_matrix = T_matrix, F_map = F_map)
}
# DP base case for T1: cost of aligning every T1 subtree against the empty
# tree ("lambda"). Nodes are visited in postorder (the row order of
# T_matrix), so both children's T[child, lambda] entries already exist.
# F-map keys follow the scheme "1 <node> <s> <p> lambda": the forest made
# of children s..p of <node> in tree 1, aligned against the empty forest.
# NOTE(review): hashmap objects have reference semantics, so writing into
# align_object$F_map mutates the map shared with the caller.
initialize_T1 <- function(align_object) {
T1_node <- rownames(align_object$T_matrix)[-1]
for (node in T1_node) {
children <- align_object$T1_children[node, ]
# T[child, lambda] for the left ([1]) and right ([2]) child; "lambda"
# placeholders pick up the already-initialized 0 entry.
F_node_lambda <- align_object$T_matrix[children, "lambda"]
# NOTE(review): single-child forests are stored as 2 * T[child, lambda]
# while the two-child forest is the plain sum -- the factor of 2 looks
# inconsistent with the usual alignment recurrence; confirm intended.
align_object$F_map[[paste_collapse(1, node, 1, 1, 'lambda')]] <- 2 * F_node_lambda[1]
align_object$F_map[[paste_collapse(1, node, 2, 2, 'lambda')]] <- 2 * F_node_lambda[2]
align_object$F_map[[paste_collapse(1, node, 1, 2, 'lambda')]] <- sum(F_node_lambda)
# T[node, lambda] = children's forest-deletion cost plus the cost of
# substituting the node itself with lambda.
align_object$T_matrix[node, "lambda"] <- sum(F_node_lambda) + align_object$cost_matrix[node, "lambda"]
}
return(align_object)
}
# DP base case for T2: cost of aligning the empty tree ("lambda") against
# every T2 subtree, visited in postorder (the column order of T_matrix).
# F-map keys "lambda 2 <node> <t> <q>" denote the empty forest aligned with
# children t..q of <node> in tree 2.
# NOTE(review): as with the T1 side, single-child forests are stored as
# 2 * T[lambda, child] while the two-child forest is the plain sum; the
# factor of 2 looks inconsistent with the usual recurrence -- confirm.
initialize_T2 <- function(align_object) {
T2_node <- colnames(align_object$T_matrix)[-1]
for (node in T2_node) {
children <- align_object$T2_children[node, ]
# T[lambda, child] for the left ([1]) and right ([2]) child.
lambda_F_node <- align_object$T_matrix["lambda", children]
align_object$F_map[[paste_collapse('lambda', 2, node, 1, 1)]] <- 2 * lambda_F_node[1]
align_object$F_map[[paste_collapse('lambda', 2, node, 2, 2)]] <- 2 * lambda_F_node[2]
align_object$F_map[[paste_collapse('lambda', 2, node, 1, 2)]] <- sum(lambda_F_node)
# T[lambda, node] = children's forest-insertion cost plus the cost of
# inserting the node itself.
align_object$T_matrix["lambda", node] <- sum(lambda_F_node) + align_object$cost_matrix["lambda", node]
}
return(align_object)
}
#' Initialize the T matrix and F map
#'
#' Records each node's ordered children for both trees, then fills in the
#' base cases of the dynamic program: every subtree aligned against the
#' empty tree, and the empty tree aligned against every subtree.
#' @param align_object An alignment object
#' @return An alignment object with T matrix and F map initialized
#' @export
initialize <- function(align_object) {
  align_object$T1_children <- create_children_map(align_object$T1)
  align_object$T2_children <- create_children_map(align_object$T2)
  # Seed the T1-vs-empty base cases first, then the empty-vs-T2 ones.
  initialize_T2(initialize_T1(align_object))
}
# Build a lookup matrix of each node's direct children: one row per node
# (in postorder), columns "left"/"right". Absent children are recorded as
# the sentinel label "lambda".
# The package-level postorder() visitor drives the traversal; the callback
# appends into the enclosing frame's accumulators via <<-.
# NOTE(review): growing `childrens` with rbind() inside the visitor is
# O(n^2); fine for small trees, preallocate if trees get large.
create_children_map <- function(tree) {
childrens <- c()
labels <- c()
postorder(tree, function(x) {
left_children <- ifelse(is.null(x$left), 'lambda', x$left$label)
right_children <- ifelse(is.null(x$right), 'lambda', x$right$label)
childrens <<- rbind(childrens, c(left_children, right_children))
labels <<- c(labels, x$label)
})
rownames(childrens) <- labels
colnames(childrens) <- c('left', 'right')
return(childrens)
}
# Recurrence case "j inserted": cost of pairing T2 node j with lambda while
# T1 subtree i is aligned inside one of j's child subtrees. Starts from
# T(lambda, j) and swaps one child's empty-alignment cost for its cost
# against i, taking the cheaper child.
lambda_T2 <- function(align_obj, i, j) {
  kids <- align_obj$T2_children[j, ]
  swap_cost <- vapply(
    kids,
    function(k) align_obj$T_matrix[i, k] - align_obj$T_matrix['lambda', k],
    numeric(1)
  )
  align_obj$T_matrix['lambda', j] + min(swap_cost)
}
# Recurrence case "i deleted": cost of pairing T1 node i with lambda while
# T2 subtree j is aligned inside one of i's child subtrees. Mirror of
# lambda_T2 with the roles of the two trees exchanged.
T1_lambda <- function(align_obj, i, j) {
  kids <- align_obj$T1_children[i, ]
  swap_cost <- vapply(
    kids,
    function(k) align_obj$T_matrix[k, j] - align_obj$T_matrix[k, 'lambda'],
    numeric(1)
  )
  align_obj$T_matrix[i, 'lambda'] + min(swap_cost)
}
# Flatten all arguments into one vector and join them into a single
# space-separated key string (used for F-map lookups).
paste_collapse <- function(...) paste(c(...), collapse = ' ')
# Recurrence case "match": cost of aligning subtree i (T1) directly with
# subtree j (T2) -- the best alignment of their full child forests plus the
# node-vs-node substitution cost.
T1_T2 <- function(align_obj, i, j) {
  # Key "1 <i> 1 2 2 <j> 1 2": all children of i vs all children of j.
  forest_key <- paste(c(1, i, 1, 2, 2, j, 1, 2), collapse = ' ')
  align_obj$F_map[[forest_key]] + align_obj$cost_matrix[i, j]
}
#' Fill T matrix and F map
#'
#' Main DP loop: visits every (T1 node i, T2 node j) pair in postorder and
#' records both the optimal subtree-alignment cost (T_matrix) and which of
#' the three recurrence cases achieved it (T_choice), for use by traceback():
#'   1 = i aligned inside one of j's child subtrees (j paired with lambda),
#'   2 = j aligned inside one of i's child subtrees (i paired with lambda),
#'   3 = i matched directly with j.
#'
#' @param align_obj An alignment object
#' @return An alignment object with T matrix and F map filled
#' @export
fill_matrix <- function(align_obj) {
align_obj$T_choice <- align_obj$T_matrix
align_obj$T_choice[,] <- NA
for (i in rownames(align_obj$T_matrix)[-1]) {
for (j in colnames(align_obj$T_matrix)[-1]) {
cost <- c()
# Populate the forest distances needed by the three cases below: suffix
# forests of i's children vs all of j's children, and vice versa.
for (s in seq(2)) {
align_obj <- fill_F(align_obj, paste_collapse(1, i, s, 2),
paste_collapse(2, j, 1, 2))
}
for (t in seq(2)) {
align_obj <- fill_F(align_obj, paste_collapse(1, i, 1, 2),
paste_collapse(2, j, t, 2))
}
cost <- c(cost, lambda_T2(align_obj, i, j))
cost <- c(cost, T1_lambda(align_obj, i, j))
cost <- c(cost, T1_T2(align_obj, i, j))
# print(cost)
# which.min's index encodes the winning case (1/2/3) for the traceback.
# NOTE(review): `na.rm = T` uses the reassignable shortcut T; prefer TRUE.
align_obj$T_choice[i, j] <- which.min(cost)
align_obj$T_matrix[i, j] <- min(cost, na.rm = T)
}
}
return(align_obj)
}
# Fill the forest distances F for children s..mi of T1 node i against
# children t..nj of T2 node j, bottom-up: base case, then the pure-deletion
# row and pure-insertion column, then every interior cell.
# Arguments x/y are packed keys "1 <i> <s> <mi>" and "2 <j> <t> <nj>".
# NOTE(review): assumes s <= mi and t <= nj (always true for the (s,2)/(t,2)
# spans used by fill_matrix); seq() would count downward otherwise.
fill_F <- function(align_obj, x, y) {
# Decode the packed keys (drop the leading tree tag).
x <- strsplit(x, split = ' ')[[1]][-1]
i <- x[1]
s <- as.numeric(x[2])
mi <- as.numeric(x[3])
y <- strsplit(y, split = ' ')[[1]][-1]
j <- y[1]
t <- as.numeric(y[2])
nj <- as.numeric(y[3])
# Base case: empty forest vs empty forest.
align_obj$F_map[[paste_collapse(1, i, s, s-1, 2, j, t, t - 1)]] <- 0
# First "row": growing T1 forests vs the empty forest (deletion costs).
for (p in seq(s, mi)) {
align_obj$F_map[[paste_collapse(1, i, s, p, 2, j, t, t - 1)]] <-
align_obj$F_map[[paste_collapse(1, i, s, p - 1, 2, j, t, t - 1)]] +
align_obj$T_matrix[align_obj$T1_children[i, p], 'lambda']
}
# First "column": the empty forest vs growing T2 forests (insertion costs).
for (q in seq(t, nj)) {
align_obj$F_map[[paste_collapse(1, i, s, s - 1, 2, j, t, q)]] <-
align_obj$F_map[[paste_collapse(1, i, s, s - 1, 2, j, t, q - 1)]] +
align_obj$T_matrix['lambda', align_obj$T2_children[j, q]]
}
# Interior cells: full five-case recurrence (see fill_F_helper).
for (p in seq(s, mi)) {
for (q in seq(t, nj)) {
align_obj <- fill_F_helper(align_obj, i, s, p, j, t, q)
}
}
return(align_obj)
}
# Case 4 of the forest recurrence: j's q-th child is paired with lambda
# while a run of i's children (k..p) is aligned inside that child's own
# child forest. Every split point k from s to p is tried.
fill_case_4 <- function(align_obj, i, s, p, j, t, q) {
case_4 <- c()
for (k in seq(s, p)) {
idx1 <- paste_collapse(1, i, k, p)
T2_j_q <- align_obj$T2_children[j, q]
# A "lambda" child has no forest of its own; fall back to the empty key.
if (T2_j_q == 'lambda') idx2 <- 'lambda'
else idx2 <- paste_collapse(2, T2_j_q, 1, 2)
case_4 <- c(case_4, align_obj$F_map[[paste_collapse(1, i, s, k - 1, 2, j, t, q - 1)]] + align_obj$F_map[[paste_collapse(idx1, idx2)]])
}
# Add the cost of inserting the child node itself.
case_4 <- align_obj$cost_matrix['lambda', align_obj$T2_children[j, q]] + min(case_4)
return(case_4)
}
# Case 5 of the forest recurrence, mirror of fill_case_4: i's p-th child is
# paired with lambda while a run of j's children (k..q) is aligned inside
# that child's own child forest. Every split point k from t to q is tried.
fill_case_5 <- function(align_obj, i, s, p, j, t, q) {
case_5 <- c()
for (k in seq(t, q)) {
T1_i_p <- align_obj$T1_children[i, p]
# A "lambda" child has no forest of its own; fall back to the empty key.
if (T1_i_p == 'lambda') idx1 <- 'lambda'
else idx1 <- paste_collapse(1, T1_i_p, 1, 2)
idx2 <- paste_collapse(2, j, k, q)
case_5 <- c(case_5, align_obj$F_map[[paste_collapse(1, i, s, p - 1, 2, j, t, k - 1)]] + align_obj$F_map[[paste_collapse(idx1, idx2)]])
}
# Add the cost of deleting the child node itself.
case_5 <- align_obj$cost_matrix[align_obj$T1_children[i, p], 'lambda'] + min(case_5)
return(case_5)
}
# One interior cell of the forest DP: F(i[s..p], j[t..q]) is the minimum of
# the five standard cases -- delete i's p-th child subtree, insert j's q-th
# child subtree, match the two subtrees, or pair one child with lambda
# while a run of the opposite side's children is absorbed into its forest.
fill_F_helper <- function(align_obj, i, s, p, j, t, q) {
# Case 1: drop i's p-th child subtree entirely.
case_1 <- align_obj$F_map[[paste_collapse(1, i, s, p - 1, 2, j, t, q)]] +
align_obj$T_matrix[align_obj$T1_children[i, p], 'lambda']
# Case 2: drop j's q-th child subtree entirely.
case_2 <- align_obj$F_map[[paste_collapse(1, i, s, p, 2, j, t, q - 1)]] +
align_obj$T_matrix['lambda', align_obj$T2_children[j, q]]
# Case 3: align the two child subtrees against each other.
case_3 <- align_obj$F_map[[paste_collapse(1, i, s, p - 1, 2, j, t, q - 1)]] +
align_obj$T_matrix[align_obj$T1_children[i, p], align_obj$T2_children[j, q]]
# Cases 4/5: one child paired with lambda, a run absorbed into its forest.
case_4 <- fill_case_4(align_obj, i, s, p, j, t, q)
case_5 <- fill_case_5(align_obj, i, s, p, j, t, q)
all <- c(case_1, case_2, case_3, case_4, case_5)
loc <- paste_collapse(1, i, s, p, 2, j, t, q)
align_obj$F_map[[loc]] <- min(all)
return(align_obj)
}
#' Traceback for constructing aligned tree
#'
#' Clears the token accumulator and replays the recorded T_choice decisions
#' from the roots down (via recurse()), leaving a reversed Newick-like token
#' stream in align_obj$alignment for build_tree() to parse.
#' NOTE: this masks base::traceback inside the package namespace.
traceback <- function(align_obj) {
  align_obj$alignment <- c()
  recurse(align_obj, align_obj$T1, align_obj$T2, left = TRUE)
}
# Emit alignment tokens for a whole unmatched subtree: every node is paired
# with "lambda", on the side given by `left`. Punctuation is written in the
# reversed order expected by build_tree().
postorder2 <- function(x, alignment, left) {
  if (is.null(x)) {
    return(alignment)
  }
  pair <- if (left) {
    paste_collapse(x$label, 'lambda')
  } else {
    paste_collapse('lambda', x$label)
  }
  alignment <- c(alignment, pair)
  if (!is.null(x$left)) alignment <- c(alignment, ')')
  alignment <- postorder2(x$left, alignment, left)
  if (!is.null(x$right)) alignment <- c(alignment, ',')
  alignment <- postorder2(x$right, alignment, left)
  if (!is.null(x$left)) alignment <- c(alignment, '(')
  alignment
}
# Recursive traceback over both trees, replaying the choices recorded in
# T_choice and emitting alignment tokens into align_obj$alignment. Tokens
# are label pairs ("x y", with "lambda" standing in for a gap) interleaved
# with Newick punctuation written in REVERSE order; build_tree() reverses
# the stream before parsing. `left` says whether this pair occupies a
# left-child slot, which controls the bracket/comma tokens and which side
# receives the partner subtree on an insert/delete.
# NOTE(review): the DP minimized over WHICH child absorbs the partner, but
# choix 1/2 below pick the side from `left` rather than the argmin child --
# confirm this still reproduces the optimal alignment.
recurse <- function(align_obj, x, y, left) {
x_cond <- is.null(x)
y_cond <- is.null(y)
if (left & !(x_cond & y_cond)) align_obj$alignment <- c(align_obj$alignment, ')')
if (x_cond & !y_cond) {
# Only T2 remains: pair the whole remaining subtree with lambda.
align_obj$alignment <- postorder2(y, align_obj$alignment, left = F)
} else if (y_cond & !x_cond) {
# Only T1 remains.
align_obj$alignment <- postorder2(x, align_obj$alignment, left = T)
} else if (!(x_cond | y_cond)) {
choix <- align_obj$T_choice[x$label, y$label]
if (choix == 3) {
# Case 3: x matched with y; descend both trees in lockstep.
align_obj$alignment <- c(align_obj$alignment, paste_collapse(x$label, y$label))
align_obj <- recurse(align_obj, x$left, y$left, left = T)
align_obj <- recurse(align_obj, x$right, y$right, left = F)
} else if (choix == 2) {
# Case 2: x pairs with lambda; y descends into one of x's children.
align_obj$alignment <- c(align_obj$alignment, paste_collapse(x$label, 'lambda'))
if (left) {
align_obj <- recurse(align_obj, x$left, y, left = T)
align_obj <- recurse(align_obj, x$right, NULL, left = F)
} else {
align_obj <- recurse(align_obj, x$left, NULL, left = T)
align_obj <- recurse(align_obj, x$right, y, left = F)
}
} else if (choix == 1) {
# Case 1: y pairs with lambda; x descends into one of y's children.
align_obj$alignment <- c(align_obj$alignment, paste_collapse('lambda', y$label))
if (left) {
align_obj <- recurse(align_obj, x, y$left, left = T)
align_obj <- recurse(align_obj, NULL, y$right, left = F)
} else {
align_obj <- recurse(align_obj, NULL, y$left, left = T)
align_obj <- recurse(align_obj, x, y$right, left = F)
}
}
}
if (!left & !(x_cond & y_cond)) align_obj$alignment <- c(align_obj$alignment, '(')
if (left & !(x_cond & y_cond)) align_obj$alignment <- c(align_obj$alignment, ',')
return(align_obj)
}
build_tree <- function(align_obj) {
align_obj$alignment <- sapply(align_obj$alignment, function(x) gsub(' ', '_', x))
text <- paste0(paste0(rev(align_obj$alignment[-c(1, length(align_obj$alignment))]), collapse = ''), ';')
align_obj$tree <- as_binary_tree(ape::read.tree(text = text))
return(align_obj)
} | /R/align_functions.R | permissive | asmagen/hierarchicalSingleCell | R | false | false | 10,616 | r | #' Align two binary ordered trees
#' @param T1 A binary tree
#' @param T2 A binary tree
#' @param cost_matrix Cost matrix between each pair of nodes in T1 and T2
#' @return An alignment object
#' @export
align <- function(T1, T2, cost_matrix) {
align_obj <- create_align_object(T1, T2, cost_matrix)
align_obj <- initialize(align_obj)
align_obj <- fill_matrix(align_obj)
align_obj <- traceback(align_obj)
align_obj <- build_tree(align_obj)
return(align_obj)
}
#' Create alignment object
#' @param T1 tree 1
#' @param T2 tree 2
#' @param cost_matrix The cost matrix
#' @return An alignment object
#' @export
create_align_object <- function(T1, T2, cost_matrix) {
ordered_T1 <- postorder_labels(T1)
ordered_T2 <- postorder_labels(T2)
T_matrix <- matrix(NA, nrow = length(ordered_T1) + 1, ncol = length(ordered_T2) + 1)
rownames(T_matrix) <- c('lambda', ordered_T1)
colnames(T_matrix) <- c('lambda', ordered_T2)
T_matrix['lambda', 'lambda'] <- 0
F_map <- hashmap::hashmap('lambda_lambda', 0)
list(T1 = T1, T2 = T2, cost_matrix = cost_matrix, T_matrix = T_matrix, F_map = F_map)
}
initialize_T1 <- function(align_object) {
T1_node <- rownames(align_object$T_matrix)[-1]
for (node in T1_node) {
children <- align_object$T1_children[node, ]
F_node_lambda <- align_object$T_matrix[children, "lambda"]
align_object$F_map[[paste_collapse(1, node, 1, 1, 'lambda')]] <- 2 * F_node_lambda[1]
align_object$F_map[[paste_collapse(1, node, 2, 2, 'lambda')]] <- 2 * F_node_lambda[2]
align_object$F_map[[paste_collapse(1, node, 1, 2, 'lambda')]] <- sum(F_node_lambda)
align_object$T_matrix[node, "lambda"] <- sum(F_node_lambda) + align_object$cost_matrix[node, "lambda"]
}
return(align_object)
}
initialize_T2 <- function(align_object) {
T2_node <- colnames(align_object$T_matrix)[-1]
for (node in T2_node) {
children <- align_object$T2_children[node, ]
lambda_F_node <- align_object$T_matrix["lambda", children]
align_object$F_map[[paste_collapse('lambda', 2, node, 1, 1)]] <- 2 * lambda_F_node[1]
align_object$F_map[[paste_collapse('lambda', 2, node, 2, 2)]] <- 2 * lambda_F_node[2]
align_object$F_map[[paste_collapse('lambda', 2, node, 1, 2)]] <- sum(lambda_F_node)
align_object$T_matrix["lambda", node] <- sum(lambda_F_node) + align_object$cost_matrix["lambda", node]
}
return(align_object)
}
#' Intialize the T matrix and F map
#'
#' @param align_object An alignment object
#' @return An alignment object with T matrix and F map initialized
#' @export
initialize <- function(align_object) {
align_object$T1_children <- create_children_map(align_object$T1)
align_object$T2_children <- create_children_map(align_object$T2)
align_object <- initialize_T1(align_object)
align_object <- initialize_T2(align_object)
return(align_object)
}
create_children_map <- function(tree) {
childrens <- c()
labels <- c()
postorder(tree, function(x) {
left_children <- ifelse(is.null(x$left), 'lambda', x$left$label)
right_children <- ifelse(is.null(x$right), 'lambda', x$right$label)
childrens <<- rbind(childrens, c(left_children, right_children))
labels <<- c(labels, x$label)
})
rownames(childrens) <- labels
colnames(childrens) <- c('left', 'right')
return(childrens)
}
lambda_T2 <- function(align_obj, i, j) {
subtree_cost <- c()
T2_children_j <- align_obj$T2_children[j, ]
for (r in T2_children_j) {
subtree_cost <- c(subtree_cost, align_obj$T_matrix[i, r] - align_obj$T_matrix['lambda', r])
}
align_obj$T_matrix['lambda', j] + min(subtree_cost)
}
T1_lambda <- function(align_obj, i, j) {
subtree_cost <- c()
T1_children_i <- align_obj$T1_children[i, ]
for (r in T1_children_i) {
subtree_cost <- c(subtree_cost, align_obj$T_matrix[r, j] - align_obj$T_matrix[r, 'lambda'])
}
align_obj$T_matrix[i, 'lambda'] + min(subtree_cost)
}
paste_collapse <- function(...) paste0(c(...), collapse = ' ')
T1_T2 <- function(align_obj, i, j) {
F_i_j <- align_obj$F_map[[paste_collapse(1, i, 1, 2, 2, j, 1, 2)]]
F_i_j + align_obj$cost_matrix[i, j]
}
#' Fill T matrix and F map
#'
#' @param align_obj An alignment object
#' @return An alignment object with T matrix and F map filled
#' @export
fill_matrix <- function(align_obj) {
align_obj$T_choice <- align_obj$T_matrix
align_obj$T_choice[,] <- NA
for (i in rownames(align_obj$T_matrix)[-1]) {
for (j in colnames(align_obj$T_matrix)[-1]) {
cost <- c()
for (s in seq(2)) {
align_obj <- fill_F(align_obj, paste_collapse(1, i, s, 2),
paste_collapse(2, j, 1, 2))
}
for (t in seq(2)) {
align_obj <- fill_F(align_obj, paste_collapse(1, i, 1, 2),
paste_collapse(2, j, t, 2))
}
cost <- c(cost, lambda_T2(align_obj, i, j))
cost <- c(cost, T1_lambda(align_obj, i, j))
cost <- c(cost, T1_T2(align_obj, i, j))
# print(cost)
align_obj$T_choice[i, j] <- which.min(cost)
align_obj$T_matrix[i, j] <- min(cost, na.rm = T)
}
}
return(align_obj)
}
fill_F <- function(align_obj, x, y) {
x <- strsplit(x, split = ' ')[[1]][-1]
i <- x[1]
s <- as.numeric(x[2])
mi <- as.numeric(x[3])
y <- strsplit(y, split = ' ')[[1]][-1]
j <- y[1]
t <- as.numeric(y[2])
nj <- as.numeric(y[3])
align_obj$F_map[[paste_collapse(1, i, s, s-1, 2, j, t, t - 1)]] <- 0
for (p in seq(s, mi)) {
align_obj$F_map[[paste_collapse(1, i, s, p, 2, j, t, t - 1)]] <-
align_obj$F_map[[paste_collapse(1, i, s, p - 1, 2, j, t, t - 1)]] +
align_obj$T_matrix[align_obj$T1_children[i, p], 'lambda']
}
for (q in seq(t, nj)) {
align_obj$F_map[[paste_collapse(1, i, s, s - 1, 2, j, t, q)]] <-
align_obj$F_map[[paste_collapse(1, i, s, s - 1, 2, j, t, q - 1)]] +
align_obj$T_matrix['lambda', align_obj$T2_children[j, q]]
}
for (p in seq(s, mi)) {
for (q in seq(t, nj)) {
align_obj <- fill_F_helper(align_obj, i, s, p, j, t, q)
}
}
return(align_obj)
}
fill_case_4 <- function(align_obj, i, s, p, j, t, q) {
case_4 <- c()
for (k in seq(s, p)) {
idx1 <- paste_collapse(1, i, k, p)
T2_j_q <- align_obj$T2_children[j, q]
if (T2_j_q == 'lambda') idx2 <- 'lambda'
else idx2 <- paste_collapse(2, T2_j_q, 1, 2)
case_4 <- c(case_4, align_obj$F_map[[paste_collapse(1, i, s, k - 1, 2, j, t, q - 1)]] + align_obj$F_map[[paste_collapse(idx1, idx2)]])
}
case_4 <- align_obj$cost_matrix['lambda', align_obj$T2_children[j, q]] + min(case_4)
return(case_4)
}
fill_case_5 <- function(align_obj, i, s, p, j, t, q) {
case_5 <- c()
for (k in seq(t, q)) {
T1_i_p <- align_obj$T1_children[i, p]
if (T1_i_p == 'lambda') idx1 <- 'lambda'
else idx1 <- paste_collapse(1, T1_i_p, 1, 2)
idx2 <- paste_collapse(2, j, k, q)
case_5 <- c(case_5, align_obj$F_map[[paste_collapse(1, i, s, p - 1, 2, j, t, k - 1)]] + align_obj$F_map[[paste_collapse(idx1, idx2)]])
}
case_5 <- align_obj$cost_matrix[align_obj$T1_children[i, p], 'lambda'] + min(case_5)
return(case_5)
}
fill_F_helper <- function(align_obj, i, s, p, j, t, q) {
case_1 <- align_obj$F_map[[paste_collapse(1, i, s, p - 1, 2, j, t, q)]] +
align_obj$T_matrix[align_obj$T1_children[i, p], 'lambda']
case_2 <- align_obj$F_map[[paste_collapse(1, i, s, p, 2, j, t, q - 1)]] +
align_obj$T_matrix['lambda', align_obj$T2_children[j, q]]
case_3 <- align_obj$F_map[[paste_collapse(1, i, s, p - 1, 2, j, t, q - 1)]] +
align_obj$T_matrix[align_obj$T1_children[i, p], align_obj$T2_children[j, q]]
case_4 <- fill_case_4(align_obj, i, s, p, j, t, q)
case_5 <- fill_case_5(align_obj, i, s, p, j, t, q)
all <- c(case_1, case_2, case_3, case_4, case_5)
loc <- paste_collapse(1, i, s, p, 2, j, t, q)
align_obj$F_map[[loc]] <- min(all)
return(align_obj)
}
#' Traceback for constructing aligned tree
traceback <- function(align_obj) {
align_obj$alignment <- c()
align_obj <- recurse(align_obj, align_obj$T1, align_obj$T2, left = T)
return(align_obj)
}
postorder2 <- function(x, alignment, left) {
if (is.null(x)) return(alignment)
else {
insert <- ifelse(left, paste_collapse(x$label, 'lambda'), paste_collapse('lambda', x$label))
alignment <- c(alignment, insert)
if (!is.null(x$left)) alignment <- c(alignment, ')')
alignment <- postorder2(x$left, alignment, left)
if (!is.null(x$right)) alignment <- c(alignment, ',')
alignment <- postorder2(x$right, alignment, left)
if (!is.null(x$left)) alignment <- c(alignment, '(')
}
return(alignment)
}
recurse <- function(align_obj, x, y, left) {
x_cond <- is.null(x)
y_cond <- is.null(y)
if (left & !(x_cond & y_cond)) align_obj$alignment <- c(align_obj$alignment, ')')
if (x_cond & !y_cond) {
align_obj$alignment <- postorder2(y, align_obj$alignment, left = F)
} else if (y_cond & !x_cond) {
align_obj$alignment <- postorder2(x, align_obj$alignment, left = T)
} else if (!(x_cond | y_cond)) {
choix <- align_obj$T_choice[x$label, y$label]
if (choix == 3) {
align_obj$alignment <- c(align_obj$alignment, paste_collapse(x$label, y$label))
align_obj <- recurse(align_obj, x$left, y$left, left = T)
align_obj <- recurse(align_obj, x$right, y$right, left = F)
} else if (choix == 2) {
align_obj$alignment <- c(align_obj$alignment, paste_collapse(x$label, 'lambda'))
if (left) {
align_obj <- recurse(align_obj, x$left, y, left = T)
align_obj <- recurse(align_obj, x$right, NULL, left = F)
} else {
align_obj <- recurse(align_obj, x$left, NULL, left = T)
align_obj <- recurse(align_obj, x$right, y, left = F)
}
} else if (choix == 1) {
align_obj$alignment <- c(align_obj$alignment, paste_collapse('lambda', y$label))
if (left) {
align_obj <- recurse(align_obj, x, y$left, left = T)
align_obj <- recurse(align_obj, NULL, y$right, left = F)
} else {
align_obj <- recurse(align_obj, NULL, y$left, left = T)
align_obj <- recurse(align_obj, x, y$right, left = F)
}
}
}
if (!left & !(x_cond & y_cond)) align_obj$alignment <- c(align_obj$alignment, '(')
if (left & !(x_cond & y_cond)) align_obj$alignment <- c(align_obj$alignment, ',')
return(align_obj)
}
build_tree <- function(align_obj) {
align_obj$alignment <- sapply(align_obj$alignment, function(x) gsub(' ', '_', x))
text <- paste0(paste0(rev(align_obj$alignment[-c(1, length(align_obj$alignment))]), collapse = ''), ';')
align_obj$tree <- as_binary_tree(ape::read.tree(text = text))
return(align_obj)
} |
###
### Creator: Yunze Liu (Reed Liu)
### Date: 2019-04-11
### Email: jieandze1314@gmail.com
### Blog: www.jieandze1314.com
### CAAS/AGIS/SDAU
### Update Log:2019-04-1 AnnotationHub & Biostrings
### ---------------
##################################
# AnnotationHub
##################################
library(AnnotationHub)
options()$BioC_mirror ## check which Bioconductor mirror is currently in use
# check whether a newer hub snapshot exists (constructor queries the hub)
ahub <- AnnotationHub()
##################################
# BioStrings
##################################
# prepare the genome files
BiocManager::install("BSgenome", version = "3.8")
library(BSgenome)
available.genomes() # 91 genomes available in total
| /bioconductor/Bioconductor-part5.R | no_license | reedliu/R-code | R | false | false | 671 | r | ###
### Creator: Yunze Liu (Reed Liu)
### Date: 2019-04-11
### Email: jieandze1314@gmail.com
### Blog: www.jieandze1314.com
### CAAS/AGIS/SDAU
### Update Log:2019-04-1 AnnotationHub & Biostrings
### ---------------
##################################
# AnnotationHub
##################################
library(AnnotationHub)
options()$BioC_mirror ##查看使用bioconductor的默认镜像
# 检查是否存在新版本
ahub <- AnnotationHub()
##################################
# BioStrings
##################################
# 准备基因组文件
BiocManager::install("BSgenome", version = "3.8")
library(BSgenome)
available.genomes() #总共91个
|
testlist <- list(data = structure(c(-9.32640852845583e+304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), .Dim = 9:10), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556301-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 423 | r | testlist <- list(data = structure(c(-9.32640852845583e+304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), .Dim = 9:10), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
library(glmnet)
# Training table: column 1 is the response, features start at column 4.
mydata = read.table("./TrainingSet/ReliefF/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the 10-fold CV partition is reproducible.
set.seed(123)
# Elastic net (alpha = 0.2), 10-fold CV selected by mean absolute error;
# features are used as-is (standardize = FALSE).
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.2,family="gaussian",standardize=FALSE)
# Append the fitted regularization path to the model log, then restore
# console output. NOTE(review): if print() errored, sink() would never be
# reached and output stays redirected -- consider on.exit(sink()).
sink('./Model/EN/ReliefF/urinary_tract/urinary_tract_036.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/urinary_tract/urinary_tract_036.R | no_license | leon1003/QSMART | R | false | false | 373 | r | library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.2,family="gaussian",standardize=FALSE)
sink('./Model/EN/ReliefF/urinary_tract/urinary_tract_036.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# NOTE(review): roxygen2 >= 7.0 recommends documenting a package with the
# "_PACKAGE" sentinel instead of NULL + @docType package; keep as-is until
# the roxygen2 version used by the build is confirmed.
#' MAIC: A package for performing matched-adjusted indirect comparisons
#'
#' The MAIC package provides functions to help perform and summarize results
#' from matched-adjusted indirect comparisons
#'
#'
#' @docType package
#' @name MAIC
#' @importFrom magrittr "%>%"
NULL
| /R/MAIC.R | no_license | mirakrotneva/MAIC | R | false | false | 273 | r | #' MAIC: A package for performing matched-adjusted indirect comparisons
#'
#' The MAIC package provides functions to help perform and summarize results
#' from matched-adjusted indirect comparisons
#'
#'
#' @docType package
#' @name MAIC
#' @importFrom magrittr "%>%"
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap_function.R
\name{plot.bootstrap}
\alias{plot.bootstrap}
\title{plot}
\usage{
\method{plot}{bootstrap}(bootstrap, bins = 30)
}
\value{
a two plots of the class object "bootstrap".
}
\description{
plot
}
| /man/plot.bootstrap.Rd | no_license | aumath-advancedr2019/Sampling | R | false | true | 290 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap_function.R
\name{plot.bootstrap}
\alias{plot.bootstrap}
\title{plot}
\usage{
\method{plot}{bootstrap}(bootstrap, bins = 30)
}
\value{
a two plots of the class object "bootstrap".
}
\description{
plot
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coupons.R
\name{stripe_retrieve_coupon}
\alias{stripe_retrieve_coupon}
\title{Retrieve a coupon.}
\usage{
stripe_retrieve_coupon(coupon_id, api_key = NULL)
}
\arguments{
\item{coupon_id}{The coupon you want to retrieve.}
\item{api_key}{Your Stripe API Key}
}
\value{
A data frame with the coupon information
}
\description{
Retrieve the information about a coupon.
}
| /man/stripe_retrieve_coupon.Rd | no_license | muschellij2/RStripe | R | false | true | 446 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coupons.R
\name{stripe_retrieve_coupon}
\alias{stripe_retrieve_coupon}
\title{Retrieve a coupon.}
\usage{
stripe_retrieve_coupon(coupon_id, api_key = NULL)
}
\arguments{
\item{coupon_id}{The coupon you want to retrieve.}
\item{api_key}{Your Stripe API Key}
}
\value{
A data frame with the coupon information
}
\description{
Retrieve the information about a coupon.
}
|
#!/usr/bin/env Rscript
###---PACKAGES---###
# Bootstrap pacman (installing it on first run), then use it to install and
# attach everything else in one shot.
if (!require("pacman")) { install.packages("pacman", repos='http://cran.us.r-project.org') }
library(pacman)
#required packages (CRAN / Bioconductor)
required_packages = c(
"tidyverse",
"grid",
"ggplotify",
"svglite"
)
# GitHub-only packages, as "user/repo" slugs
github_packages = c(
"slowkow/ggrepel"
)
#load packages (install missing ones, trying Bioconductor as a fallback)
pacman::p_load(
char=required_packages,
install=TRUE,
character.only=TRUE,
try.bioconductor=TRUE,
update.bioconductor=TRUE
)
#load github packages
pacman::p_load_gh(
char = github_packages,
update = getOption("pac_update"),
dependencies = TRUE
)
###---GLOBALS---###
# Genes to annotate with text labels on the volcano plots (NULL = no labels).
genesToLabel=NULL
#if you want to label points, just make a list
#i.e. c("MITF")
# Significance cutoffs shared by clean_data() and volcano_plot().
padj_threshold = 0.01
abs_lfc_threshold = 1
###---CLEAN DATA---###
# Input/output locations, relative to this script's working directory.
data.in = "../data/in/dge/featureCounts_deseq2/table/result_lfcShrink/standardized/"
data.out = "../data/out/"
plot.out = "../plot/"
#contrasts
# One sub-directory of data.in per contrast; [2:...] drops the root entry
# that list.dirs() returns first. NOTE(review): the 2:length(...) pattern
# misbehaves if data.in has no sub-directories -- confirm the layout.
contrasts = basename(list.dirs(path = data.in))[2:length(list.dirs(path = data.in))]
#results
# Read the pre-filtered, annotated lfcShrink result table for each contrast.
results=list()
for (i in 1:length(contrasts)) {
results[[i]] = read.csv(file = paste0(data.in,contrasts[i],"/result-lfcShrink_stndrd-filt_anno-basic_padj1_lfc0.csv"), header = TRUE)
}
names(results) = contrasts
# Prepare one DESeq2 result table for plotting.
#
# padj values of exactly 0 (below double precision) are replaced with the
# smallest non-zero padj so -log10(padj) stays finite; every gene is then
# classified into "Up-Regulated Genes" / "Down-Regulated Genes" /
# "Not Significant" using the global padj_threshold / abs_lfc_threshold.
#
# Args:
#   result:       data frame with columns `padj`, `log2FoldChange` and
#                 `external_gene_name`.
#   genesToLabel: optional character vector of gene names; when given, a
#                 `label` column is added (NA for unlisted genes) for ggrepel.
# Returns: the input table with adjusted `padj`, a `color_group` column and,
#   optionally, `label`.
clean_data = function(result, genesToLabel = NULL) {
  nonzero.padj = result$padj[!is.na(result$padj) & result$padj != 0]
  # Guard: min() of an empty vector is Inf (with a warning). If every padj
  # is zero, fall back to NA instead of silently injecting Inf.
  min.padj.value = if (length(nonzero.padj) > 0) min(nonzero.padj) else NA_real_
  ret = result %>%
    dplyr::mutate(padj = ifelse(padj == 0, min.padj.value, padj)) %>%
    dplyr::mutate(color_group = "Not Significant") %>%
    dplyr::mutate(color_group = ifelse(padj < padj_threshold & log2FoldChange < -abs_lfc_threshold, "Down-Regulated Genes", color_group)) %>%
    dplyr::mutate(color_group = ifelse(padj < padj_threshold & log2FoldChange > abs_lfc_threshold, "Up-Regulated Genes", color_group))
  if (!is.null(genesToLabel)) {
    ret = ret %>%
      dplyr::mutate(label = ifelse(external_gene_name %in% genesToLabel, external_gene_name, NA))
  }
  return(ret)
}
# Clean every result table (pad zero padj values, classify genes).
cleaned_inputs = list()
for (i in 1:length(results)) {
cleaned_inputs[[i]] = clean_data(result=results[[i]],genesToLabel = genesToLabel)
}
names(cleaned_inputs) = contrasts
###---PLOT---###
# Axis labels shared by all plots. NOTE(review): the x label renders as
# -log2(Fold Change) although the plotted values are plain log2FoldChange
# (not negated) -- confirm the intended label text.
x_lab=expression(-log[2](Fold~Change))
y_lab=expression(-log[10](italic(q)-value))
# Build one volcano plot (x = log2 fold change, y = -log10 adjusted p) from
# a table produced by clean_data(). Pale rectangles shade the significant
# up-/down-regulated regions, dashed lines mark the cutoffs, and points are
# colored by the precomputed color_group column. If the input carries a
# `label` column (set when genesToLabel is used), those genes get ggrepel
# labels. Uses the globals padj_threshold / abs_lfc_threshold.
# NOTE(review): the defaults `x_lab=x_lab` / `y_lab=y_lab` are
# self-referential, so calling this function WITHOUT those arguments errors
# ("promise already under evaluation"); every call site below passes them.
volcano_plot = function(cleaned_input,x_lab=x_lab,y_lab=y_lab) {
# Point colors per group, and the pale region-background fills.
cols=c("Down-Regulated Genes" = "#234463","Up-Regulated Genes" = "#781e1e", "Not Significant" = "gray50")
cols2=c("Down-Regulated Genes" = "#f1f8ff", "Up-Regulated Genes" = "#fff6f6")
# Symmetric x limits and a padded y limit so no point sits on the border.
max_lfc=max(abs(cleaned_input$log2FoldChange))
max_padj=max(-log10(cleaned_input$padj),na.rm = TRUE)
plot=ggplot(data=cleaned_input, mapping=aes(x=log2FoldChange,y=-log10(padj))) +
#down
# Shaded "significant & down" region: left of -abs_lfc_threshold, above
# the padj cutoff. Constant aesthetics sit outside aes(), so no legend.
geom_rect(
fill = cols2[1],
xmin = -Inf,
xmax = -abs_lfc_threshold,
ymin = -log10(padj_threshold),
ymax = Inf
) +
#up
# Shaded "significant & up" region, mirrored on the right.
geom_rect(
fill = cols2[2],
xmin = abs_lfc_threshold,
xmax = Inf,
ymin = -log10(padj_threshold),
ymax = Inf
) +
geom_point(mapping=aes(color=color_group,fill=color_group),alpha=0.5) +
# Dashed guide lines at the significance and fold-change cutoffs.
geom_hline(yintercept=-log10(padj_threshold), color='black', size=0.5, linetype = "dashed") +
geom_vline(xintercept=abs_lfc_threshold, color='black', size=0.5, linetype = "dashed") +
geom_vline(xintercept=-abs_lfc_threshold, color='black', size=0.5, linetype = "dashed") +
# guide=FALSE hides both legends (older spelling; newer ggplot2 uses "none").
scale_color_manual(values=cols,guide=FALSE) +
scale_fill_manual(values=cols,guide=FALSE) +
scale_x_continuous(limits = c(-max_lfc-1,max_lfc+1), expand = c(0, 0)) +
scale_y_continuous(limits = c(0,(max_padj+1)), expand = c(0, 0)) +
# The {} block conditionally adds the label layer; when no `label` column
# exists it evaluates to NULL, which ggplot2 treats as a no-op.
labs(x=x_lab,y=y_lab) + {
if ("label" %in% names(cleaned_input)) {
ggrepel::geom_label_repel(
min.segment.length = 0,
mapping = aes(label=label)
)
}
} +
theme_classic() +
theme(
axis.title=element_text(size=12),
strip.text=element_text(size=12, color = "white", face="bold"),
axis.text=element_text(size=12),
axis.line = element_blank(),
panel.border = element_rect(color = "black", fill = NA, size = 1.)
)
#aes(fill = as.factor(geneset))
#strip colors
# Dead code below: facet-strip recoloring kept for reference (these plots
# are not faceted, so it stays commented out).
# g = ggplot_gtable(ggplot_build(plot))
# striprt = which( grepl('strip-t', g$layout$name) )
# fills = c("#234463","#781e1e")
# k = 1
# for (i in striprt) {
# j = which(grepl('rect', g$grobs[[i]]$grobs[[1]]$childrenOrder))
# g$grobs[[i]]$grobs[[1]]$children[[j]]$gp$fill = fills[k]
# k = k+1
# }
# return(ggplotify::as.ggplot(g))
return(plot)
}
# Build one volcano plot per contrast; the axis labels are passed explicitly
# because volcano_plot()'s defaults cannot be relied on.
plots = list()
for (i in 1:length(results)) {
plots[[i]] = volcano_plot(cleaned_input = cleaned_inputs[[i]],x_lab = x_lab,y_lab = y_lab)
}
names(plots) = contrasts
# Save each plot as png, svg and pdf (6 x 6 inches, 320 dpi) into plot.out.
for(i in 1:length(plots)) {
ggsave(filename=paste0(plot.out,"volcano_",names(plots)[i],".png"),plot=plots[[i]],device="png",dpi=320,width=6,height=6)
ggsave(filename=paste0(plot.out,"volcano_",names(plots)[i],".svg"),plot=plots[[i]],device="svg",dpi=320,width=6,height=6)
ggsave(filename=paste0(plot.out,"volcano_",names(plots)[i],".pdf"),plot=plots[[i]],device="pdf",dpi=320,width=6,height=6)
} | /figures/(1) Volcano/R/volcano_plot.R | no_license | monovich/giblin-sirt5-melanoma | R | false | false | 5,268 | r | #!/usr/bin/env Rscript
###---PACKAGES---###
if (!require("pacman")) { install.packages("pacman", repos='http://cran.us.r-project.org') }
library(pacman)
#required packages
required_packages = c(
"tidyverse",
"grid",
"ggplotify",
"svglite"
)
github_packages = c(
"slowkow/ggrepel"
)
#load packages
pacman::p_load(
char=required_packages,
install=TRUE,
character.only=TRUE,
try.bioconductor=TRUE,
update.bioconductor=TRUE
)
#load github packages
pacman::p_load_gh(
char = github_packages,
update = getOption("pac_update"),
dependencies = TRUE
)
###---GLOBALS---###
genesToLabel=NULL
#if you want to label points, just make a list
#i.e. c("MITF")
padj_threshold = 0.01
abs_lfc_threshold = 1
###---CLEAN DATA---###
data.in = "../data/in/dge/featureCounts_deseq2/table/result_lfcShrink/standardized/"
data.out = "../data/out/"
plot.out = "../plot/"
#contrasts
contrasts = basename(list.dirs(path = data.in))[2:length(list.dirs(path = data.in))]
#results
results=list()
for (i in 1:length(contrasts)) {
results[[i]] = read.csv(file = paste0(data.in,contrasts[i],"/result-lfcShrink_stndrd-filt_anno-basic_padj1_lfc0.csv"), header = TRUE)
}
names(results) = contrasts
clean_data = function(result,genesToLabel = NULL) {
result.no.zeroes = result %>%
dplyr::filter(padj != 0)
min.padj.value = min(result.no.zeroes$padj)
ret=result %>%
dplyr::mutate(padj=ifelse(padj == 0, min.padj.value, padj)) %>%
dplyr::mutate(color_group="Not Significant") %>%
dplyr::mutate(color_group=ifelse(padj < padj_threshold & log2FoldChange < -abs_lfc_threshold, "Down-Regulated Genes", color_group)) %>%
dplyr::mutate(color_group=ifelse(padj < padj_threshold & log2FoldChange > abs_lfc_threshold, "Up-Regulated Genes", color_group))
if (!is.null(genesToLabel)) {
ret = ret %>%
dplyr::mutate(label=ifelse(external_gene_name %in% genesToLabel, external_gene_name, NA))
}
return(ret)
}
cleaned_inputs = list()
for (i in 1:length(results)) {
cleaned_inputs[[i]] = clean_data(result=results[[i]],genesToLabel = genesToLabel)
}
names(cleaned_inputs) = contrasts
###---PLOT---###
x_lab=expression(-log[2](Fold~Change))
y_lab=expression(-log[10](italic(q)-value))
volcano_plot = function(cleaned_input,x_lab=x_lab,y_lab=y_lab) {
cols=c("Down-Regulated Genes" = "#234463","Up-Regulated Genes" = "#781e1e", "Not Significant" = "gray50")
cols2=c("Down-Regulated Genes" = "#f1f8ff", "Up-Regulated Genes" = "#fff6f6")
max_lfc=max(abs(cleaned_input$log2FoldChange))
max_padj=max(-log10(cleaned_input$padj),na.rm = TRUE)
plot=ggplot(data=cleaned_input, mapping=aes(x=log2FoldChange,y=-log10(padj))) +
#down
geom_rect(
fill = cols2[1],
xmin = -Inf,
xmax = -abs_lfc_threshold,
ymin = -log10(padj_threshold),
ymax = Inf
) +
#up
geom_rect(
fill = cols2[2],
xmin = abs_lfc_threshold,
xmax = Inf,
ymin = -log10(padj_threshold),
ymax = Inf
) +
geom_point(mapping=aes(color=color_group,fill=color_group),alpha=0.5) +
geom_hline(yintercept=-log10(padj_threshold), color='black', size=0.5, linetype = "dashed") +
geom_vline(xintercept=abs_lfc_threshold, color='black', size=0.5, linetype = "dashed") +
geom_vline(xintercept=-abs_lfc_threshold, color='black', size=0.5, linetype = "dashed") +
scale_color_manual(values=cols,guide=FALSE) +
scale_fill_manual(values=cols,guide=FALSE) +
scale_x_continuous(limits = c(-max_lfc-1,max_lfc+1), expand = c(0, 0)) +
scale_y_continuous(limits = c(0,(max_padj+1)), expand = c(0, 0)) +
labs(x=x_lab,y=y_lab) + {
if ("label" %in% names(cleaned_input)) {
ggrepel::geom_label_repel(
min.segment.length = 0,
mapping = aes(label=label)
)
}
} +
theme_classic() +
theme(
axis.title=element_text(size=12),
strip.text=element_text(size=12, color = "white", face="bold"),
axis.text=element_text(size=12),
axis.line = element_blank(),
panel.border = element_rect(color = "black", fill = NA, size = 1.)
)
#aes(fill = as.factor(geneset))
#strip colors
# g = ggplot_gtable(ggplot_build(plot))
# striprt = which( grepl('strip-t', g$layout$name) )
# fills = c("#234463","#781e1e")
# k = 1
# for (i in striprt) {
# j = which(grepl('rect', g$grobs[[i]]$grobs[[1]]$childrenOrder))
# g$grobs[[i]]$grobs[[1]]$children[[j]]$gp$fill = fills[k]
# k = k+1
# }
# return(ggplotify::as.ggplot(g))
return(plot)
}
plots = list()
for (i in 1:length(results)) {
plots[[i]] = volcano_plot(cleaned_input = cleaned_inputs[[i]],x_lab = x_lab,y_lab = y_lab)
}
names(plots) = contrasts
for(i in 1:length(plots)) {
ggsave(filename=paste0(plot.out,"volcano_",names(plots)[i],".png"),plot=plots[[i]],device="png",dpi=320,width=6,height=6)
ggsave(filename=paste0(plot.out,"volcano_",names(plots)[i],".svg"),plot=plots[[i]],device="svg",dpi=320,width=6,height=6)
ggsave(filename=paste0(plot.out,"volcano_",names(plots)[i],".pdf"),plot=plots[[i]],device="pdf",dpi=320,width=6,height=6)
} |
## This is a function that catches the inverse of a Matrix with the use of two
## functions
## This function creates a matrix with a function to set and get different
## values
## Build a "cache matrix": four closures sharing the matrix `x` and a cached
## value (the inverse, once cacheSolve() has stored it). set() replaces the
## matrix and invalidates the cache; get() returns the matrix;
## setsolve()/getsolve() write/read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # new data, so any cached inverse is stale
    },
    get = function() x,
    setsolve = function(solve) cached <<- solve,
    getsolve = function() cached
  )
}
## This function calculates the inverse of the matrix but checks first if it
## has been calculated
## Return the inverse of the special "matrix" x, computing it at most once.
## The first call computes the inverse with solve() and stores it via
## x$setsolve(); later calls announce a cache hit and return the stored value.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
| /cachematrix.R | no_license | damense/ProgrammingAssignment2 | R | false | false | 817 | r | ## This is a function that catches the inverse of a Matrix with the use of two
## functions
## This function creates a matrix with a function to set and get different
## values
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function (y) {
x <<- y
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## This function calculates the inverse of the matrix but checks first if it
## has been calculated
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
|
#' @title Visualize Distribution of NA gapsizes
#'
#' @description Visualize Distribution of NA gapsizes(NAs in a row) in a time series
#'
#' @param x Numeric Vector (\code{\link{vector}}) or Time Series (\code{\link{ts}}) object containing NAs
#'
#' @param limit Specifies how many of the top gapsizes are shown in the plot.
#'
#' @param byTotalNA For byTotalNA = TRUE the top gapsizes according to their overall weight are shown. (occurrence * gapsize)
#' For byTotalNA = FALSE the top gapsizes are shown by their number of occurrences. (occurrence)
#'
#' @param legend If TRUE a legend is shown at the bottom of the plot. A custom legend can be obtained by
#' setting this parameter to FALSE and using \code{\link[graphics]{legend}} function
#'
#' @param col A vector of colors for the bars or bar components.
#' @param xlab Label for x axis of the plot
#' @param ylab Label for y axis of plot
#' @param main Main title for the plot
#'
#' @param cex.names Expansion factor for axis names (bar labels).
#'
#' @param horiz A logical value. If FALSE, the bars are drawn vertically with the first bar to the left. If TRUE, the bars are drawn horizontally with the first at the bottom.
#'
#' @param axes Logical. If TRUE, a vertical (or horizontal, if horiz is true) axis is drawn.
#'
#' @param beside A logical value. If FALSE, the columns of height are portrayed as stacked bars, and if TRUE the columns are portrayed as juxtaposed bars.
#'
#' @param las Numeric in {0,1,2,3}; the style of axis labels. 0:always parallel to the axis, 1:always horizontal, 2:always perpendicular to the axis, 3:always vertical.
#'
#' @param ... Additional graphical parameters that can be passed through to barplot
#'
#' @author Steffen Moritz
#'
#' @details This plotting function can be used to visualize the length of the NA gaps(NAs in a row)
#' in a time series. It shows a ranking of which gapsizes occur most often. This ranking can be ordered by total NAs for this gapsize (occurrence * gap length) or by occurrence of the gapsize.
#' The outcome will be something like: in the time series, 2 NAs in a row occurred 27 times, 4 NAs in a row occurred 11 times,
#' 7 NAs in a row occurred 5 times, 1 NA in a row occurred 3 times, ... .
#'
#' @seealso \code{\link[imputeTS]{plotNA.distribution}},\code{\link[imputeTS]{plotNA.distributionBar}},
#' \code{\link[imputeTS]{plotNA.imputations}}
#'
#' @examples
#' #Prerequisite: Load a time series with missing values
#' x <-tsNH4
#'
#' #Example 1: Visualize the top gapsizes
#' plotNA.gapsize(x)
#'
#' @importFrom graphics lines par plot points barplot
#' @export plotNA.gapsize
plotNA.gapsize <- function(x, limit = 10, byTotalNA = FALSE, legend = TRUE,
                           col = c('indianred', 'steelblue'),
                           xlab = "Ranking of the different gapsizes",
                           ylab = "Number",
                           main = "Occurrence of gapsizes (NAs in a row)",
                           cex.names = 0.7, horiz = FALSE, axes = TRUE,
                           beside = TRUE, las = 1, ...) {
  # Changes vs. original: defaults use TRUE/FALSE instead of T/F (T and F
  # are ordinary variables that can be reassigned); the user-visible
  # "Occurance"/"occurence" typos in the default title and legend labels
  # are fixed; iteration uses seq_along() so empty input is safe.
  data <- x

  # Input validation; precheck() is an internal helper defined elsewhere
  # in the package.
  data <- precheck(data)

  id.na <- which(is.na(data))  # NOTE(review): kept from original, but unused.

  # Save graphical parameters and restore them on exit (oma/fig/mar are
  # modified below when the legend is drawn).
  par.default <- par(no.readonly = TRUE)
  on.exit(par(par.default))

  ## Count consecutive-NA runs: vec[k] = how often a gap of exactly k NAs
  ## occurs. A run that reaches the end of the series is also counted.
  vec <- rep(0, length(data))
  run <- 0
  for (i in seq_along(data)) {   # seq_along() avoids 0:(n-1) on empty input
    if (is.na(data[i])) {
      run <- run + 1
      if (i == length(data)) {
        vec[run] <- vec[run] + 1
      }
    } else if (run > 0) {
      vec[run] <- vec[run] + 1
      run <- 0
    }
  }

  # bars1[j]: occurrence count of the j-th observed gapsize
  # bars2[j]: total NAs it contributes (occurrence * gap length)
  # labels2 stays empty so each side-by-side pair gets a single axis name.
  bars1 <- bars2 <- labels1 <- labels2 <- NULL
  for (i in seq_along(vec)) {
    if (vec[i] > 0) {
      bars1 <- c(bars1, vec[i])
      bars2 <- c(bars2, vec[i] * i)
      labels1 <- c(labels1, paste0(i, " NAs"))
      labels2 <- c(labels2, "")
    }
  }

  ## Rank the gapsizes. NOTE(review): as in the original code, byTotalNA =
  ## TRUE orders by occurrence (bars1) and FALSE orders by total NAs (bars2),
  ## which looks inverted relative to the documentation. Preserved on
  ## purpose; confirm against upstream before changing.
  fooind <- if (byTotalNA) order(bars1) else order(bars2)
  bars1 <- bars1[fooind]
  bars2 <- bars2[fooind]
  labels1 <- labels1[fooind]

  ## Keep only the `limit` highest-ranked gapsizes (they sort to the end).
  if (length(bars1) > limit) {
    keep <- (length(bars1) - limit + 1):length(bars1)
    bars1 <- bars1[keep]
    bars2 <- bars2[keep]
    labels1 <- labels1[keep]
    labels2 <- labels2[keep]
  }

  # Two matrix rows (occurrence / total NAs) so barplot(beside = TRUE)
  # draws a pair of bars per gapsize; names interleave with blanks.
  inp <- matrix(c(bars1, bars2), byrow = TRUE, ncol = length(bars1))
  labels <- as.vector(rbind(labels1, labels2))

  if (legend) { par(oma = c(0.5, 0, 0, 0)) }  # reserve space for the legend

  ## The plot itself
  barplot(inp, names.arg = labels, main = main, las = las, horiz = horiz,
          axes = axes, beside = beside, col = col, cex.names = cex.names,
          xlab = xlab, ylab = ylab, ...)

  if (legend) {
    # Overlay an empty full-device plot so the legend can sit at the bottom.
    # NOTE(review): legend colors are hard-coded and will not follow a
    # custom `col` argument (same as the original).
    par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
    plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n")
    legend("bottom", bty = 'n', xjust = 0.5, horiz = TRUE, cex = 1,
           legend = c("Num occurrence gapsize", "Total NAs for gapsize"),
           col = c("indianred", "steelblue"), pch = 20)
  }
}
| /imputeTS/R/plotNA.gapsize.R | no_license | ingted/R-Examples | R | false | false | 5,262 | r | #' @title Visualize Distribution of NA gapsizes
#'
#' @description Visualize Distribution of NA gapsizes(NAs in a row) in a time series
#'
#' @param x Numeric Vector (\code{\link{vector}}) or Time Series (\code{\link{ts}}) object containing NAs
#'
#' @param limit Specifies how many of the top gapsizes are shown in the plot.
#'
#' @param byTotalNA For byTotalNA = TRUE the top gapsizes according to their overall weight are shown. (occurence * gapsize)
#' For byTotalNA = FALSE the top gapsizes are shown by their number of occurence. (occurence)
#'
#' @param legend If TRUE a legend is shown at the bottom of the plot. A custom legend can be obtained by
#' setting this parameter to FALSE and using \code{\link[graphics]{legend}} function
#'
#' @param col A vector of colors for the bars or bar components.
#' @param xlab Label for x axis of the plot
#' @param ylab Label for y axis of plot
#' @param main Main title for the plot
#'
#' @param cex.names Expansion factor for axis names (bar labels).
#'
#' @param horiz A logical value. If FALSE, the bars are drawn vertically with the first bar to the left. If TRUE, the bars are drawn horizontally with the first at the bottom.
#'
#' @param axes Logical. If TRUE, a vertical (or horizontal, if horiz is true) axis is drawn.
#'
#' @param beside A logical value. If FALSE, the columns of height are portrayed as stacked bars, and if TRUE the columns are portrayed as juxtaposed bars.
#'
#' @param las Numeric in {0,1,2,3}; the style of axis labels. 0:always parallel to the axis, 1:always horizontal, 2:always perpendicular to the axis, 3:always vertical.
#'
#' @param ... Additional graphical parameters that can be passed through to barplot
#'
#' @author Steffen Moritz
#'
#' @details This plotting function can be used to visualize the length of the NA gaps(NAs in a row)
#' in a time series. It shows a ranking of which gapsizes occur most often. This ranking can be ordered by total NAs for this gapsize (occurence * gap length) or by occurence of the gapsize.
#' The outcome will be somethink like in the time series 2NAs in a row occured 27times, 4NAs in a row occured 11 times,
#' 7NAs in a row occured 5 times, 1NA in a row occured 3 times,... .
#'
#' @seealso \code{\link[imputeTS]{plotNA.distribution}},\code{\link[imputeTS]{plotNA.distributionBar}},
#' \code{\link[imputeTS]{plotNA.imputations}}
#'
#' @examples
#' #Prerequisite: Load a time series with missing values
#' x <-tsNH4
#'
#' #Example 1: Visualize the top gapsizes
#' plotNA.gapsize(x)
#'
#' @importFrom graphics lines par plot points barplot
#' @export plotNA.gapsize
plotNA.gapsize <- function(x, limit = 10, byTotalNA = F , legend = T, col = c('indianred','steelblue'), xlab="Ranking of the different gapsizes", ylab="Number",main ="Occurance of gapsizes (NAs in a row)",cex.names = 0.7, horiz = F , axes =T,beside = T,las = 1, ... ) {
data <- x
#Check for wrong input
data <- precheck(data)
id.na <- which(is.na(data))
#save par settings and reset after function
par.default <- par(no.readonly=TRUE)
on.exit(par(par.default))
## Calculation consecutive NA information (results is stored in vec)
vec <- rep(0, length(data))
run <- 0
for (i in 0:(length(data)-1)) {
if(is.na(data[i+1])) {
run <- run + 1
if(i == (length(data)-1)) {
vec[run] <- vec[run] + 1}
}
else {
vec[run] <- vec[run] + 1
run <- 0
}
}
bars1 <- bars2 <- labels1 <- labels2 <- NULL
for (i in 1:length(vec)) {
if(vec[i] > 0) {
bars1 <-c(bars1, vec[i])
bars2 <- c(bars2, vec[i]*i )
labels1 <- c(labels1,paste0(i," NAs"))
labels2 <- c(labels2,paste(""))
}
}
## Sort either by NA
if ( byTotalNA == T) {
#sort accoding to overall NAs
fooind <- order(bars1)
bars1 <- bars1[fooind]
bars2 <- bars2[fooind]
labels1 <- labels1[fooind]
}
else {
#sort accoding to overall NAs
fooind <- order(bars2)
bars1 <- bars1[fooind]
bars2 <- bars2[fooind]
labels1 <- labels1[fooind]
}
##Adjust to show only a limited amount of bars (limit)
if(length(bars1) > limit) {
bars1 <- bars1[(length(bars1)-limit+1):length(bars1)]
bars2 <- bars2[(length(bars2)-limit+1):length(bars2)]
labels1 <- labels1[(length(labels1)-limit+1):length(labels1)]
labels2 <- labels2[(length(labels2)-limit+1):length(labels2)]
}
inp <- matrix(c(bars1,bars2),byrow=TRUE,ncol=length(bars1))
labels <- as.vector(rbind(labels1,labels2))
if (legend == T) { par(oma =c(0.5,0,0,0)) }
##here comes the plot itself
barplot(inp, names.arg = labels,main = main, las = las, horiz = horiz , axes = axes ,beside = beside, col =col ,cex.names= cex.names,xlab =xlab,ylab=ylab, ...)
if (legend == T) {
par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n")
legend("bottom", bty ='n',xjust =0.5, horiz = T , cex=1, legend = c( "Num occurence gapsize", "Total NAs for gapsize"), col = c("indianred", "steelblue"), pch = c(20))
}
}
|
# Simple two-model blend: average the per-row `loss` predictions of an H2O
# blend and an XGBoost submission with equal weights.
submission_1 = read.csv('../Results/h2o_blend.csv')
submission_2 = read.csv('../Results/xgb_starter_v7.sub.csv')
# Row-wise mean of the two predictions. Assumes both files list the same ids
# in the same order (no join/sort is performed) -- TODO confirm.
ensemble_loss = (submission_1$loss + submission_2$loss) / 2
ids = submission_1$id
ensemble = data.frame(id = ids, loss = ensemble_loss)
# Write the blended submission without the row-name column.
write.csv(ensemble, '../Results/ensemble_better.csv', row.names = FALSE)
| /Project4-MachineLearning/Datasaurus/Josh/ensemble.R | no_license | vuchau/bootcamp007_project | R | false | false | 326 | r |
submission_1 = read.csv('../Results/h2o_blend.csv')
submission_2 = read.csv('../Results/xgb_starter_v7.sub.csv')
ensemble_loss = (submission_1$loss + submission_2$loss) / 2
ids = submission_1$id
ensemble = data.frame(id = ids, loss = ensemble_loss)
write.csv(ensemble, '../Results/ensemble_better.csv', row.names = FALSE)
|
\name{find_origin}
\alias{find_origin}
\title{Find the origin.}
\usage{
find_origin(x, binwidth)
}
\arguments{
\item{x}{numeric or integer vector}
\item{binwidth}{binwidth}
}
\description{
Find the origin.
}
\details{
This algorithm implements simple heuristics for
determining the origin of a histogram when only the
binwidth is specified. It:
\itemize{ \item rounds to zero, if relatively close \item
subtracts 0.5 offset, if an x is integer \item ensures
the origin is a multiple of the binwidth }
}
\examples{
find_origin(1:10, 1)
find_origin(1:10, 2)
find_origin(c(1, 1e6), 1)
}
\keyword{internal}
| /man/find_origin.Rd | no_license | trestletech/bigvis | R | false | false | 626 | rd | \name{find_origin}
\alias{find_origin}
\title{Find the origin.}
\usage{
find_origin(x, binwidth)
}
\arguments{
\item{x}{numeric or integer vector}
\item{binwidth}{binwidth}
}
\description{
Find the origin.
}
\details{
This algorithm implements simple heuristics for
determining the origin of a histogram when only the
binwidth is specified. It:
\itemize{ \item rounds to zero, if relatively close \item
subtracts 0.5 offset, if an x is integer \item ensures
the origin is a multiple of the binwidth }
}
\examples{
find_origin(1:10, 1)
find_origin(1:10, 2)
find_origin(c(1, 1e6), 1)
}
\keyword{internal}
|
# ui.R -- Shiny front end: a navbar with four areas (Overview image, a
# full-screen Leaflet map with filter controls, a listings/price-trend tab,
# and external reference links). The `boro` and `room` choice vectors are
# defined elsewhere (e.g. global.R) -- TODO confirm.
library(shinythemes)
shinyUI(
navbarPage(title = "Airbnb Visualization",
id ="nav",
theme = shinytheme("united"), #https://rstudio.github.io/shinythemes/
#### Overview ##########
# Static landing page: a centered hero image.
tabPanel("Overview",
br(),
br(),
br(),
#img(src = "airbnb_overview.jpg", height = 600, weight =700, align="center")
# NOTE(review): inside the HTML string below, the <img> attribute "weight"
# is presumably meant to be "width" -- confirm before changing.
HTML('<center><img src="airbnb_overview.jpg", height = 600, weight =700 ></center>')
),
#### Map ##########
# Full-screen leaflet map; styles.css provides the .outer layout.
tabPanel("NYC map",
div(class="outer",
tags$head(includeCSS("styles.css"),#customized CSS
includeScript("gomap.js")),
leafletOutput(outputId = "map",width="100%", height="100%"),
# Options: borough, Room Type, Price, Rating, Reviews
# Draggable filter panel (top-right); these input ids are read by server.R.
absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE, draggable = TRUE,
top = 80, left = "auto", right = 20, bottom = "auto",
width = 320, height = "auto",
h2("Airbnb in NYC"),
checkboxGroupInput(inputId = "select_boro", label = h4("Borough"),
choices = boro, selected = 'Manhattan'),
checkboxGroupInput(inputId = "select_room", label = h4("Room Type"),
choices = room, selected = room),
sliderInput(inputId = "slider_price", label = h4("Price"), min = 1, max = 300,
pre = "$", sep = ",", value = c(30, 300)), #animate=TRUE
sliderInput(inputId = "slider_rating", label = h4("Rating Score"), min = 20, max = 100,
value = c(60, 100)),
sliderInput(inputId = "slider_review", label = h4("Number of Reviews"), min = 10, max = 450,
value = c(10, 450)),
h6("The map information is based on May 02, 2017 dataset from"),
h6(a("Inside Airbnb",href="http://insideairbnb.com/get-the-data.html", target="_blank"))
),
# Results: count_room, avgprice
# Second draggable panel (left side) holding two small plotly summaries.
absolutePanel(id = "controls", class = "panel panel-default", fixed = FALSE, draggable = TRUE,
top = 320, left = 20, right = "auto" , bottom = "auto",
width = 320, height = "auto",
plotlyOutput(outputId = "count_room",height = 150),
plotlyOutput(outputId = "avgprice", height = 150))
)),
#### Listings ##########
# Filter column (width 3) next to two stacked plotly charts (width 9).
tabPanel("Listings, Boroughs and Price Changes",
fluidRow(
column(3,
h3("Listings by Boroughs and Room Type"),
br(),
br(),
sliderInput(inputId = "tab2_price", h4("Price/Night"), min = 10, max = 500, value = c(10, 500)),
sliderInput(inputId = "tab2_rating", h4("Rating Score"), min = 10, max = 100, value = c(10,100)),
br(),
br(),
h3("Price Changes over Time"),
selectInput("price_option", label = h3("Select Time Type"),
choices = list("Year" = "Year","Month" = "Month"), selected = "Year")
),
column(9,
h3(""),
plotlyOutput(outputId = "graph1",width=1000, height =350),
br(),
plotlyOutput(outputId = "tab_price",width=1000, height =350)
)
)# fluidRow
),# tabPanel 2
#### References ##########
# Links to the data source and background reading.
navbarMenu("References",
tabPanel("Inside Airbnb",
h3("Inside Airbnb", a("Link", href="http://insideairbnb.com/get-the-data.html"))),
tabPanel("Airbnb Business Model",
h3("Airbnb Business Model", a("Link", href="http://bmtoolbox.net/stories/airbnb/")))
) #https://stackoverflow.com/questions/17847764/put-a-html-link-to-the-r-shiny-application
#pdf https://gist.github.com/aagarw30/d5aa49864674aaf74951
#web https://stackoverflow.com/questions/33020558/embed-iframe-inside-shiny-app
))
| /ui.R | no_license | fototo/Airbnb_shinyapp | R | false | false | 4,313 | r | library(shinythemes)
shinyUI(
navbarPage(title = "Airbnb Visualization",
id ="nav",
theme = shinytheme("united"), #https://rstudio.github.io/shinythemes/
#### Overview ##########
tabPanel("Overview",
br(),
br(),
br(),
#img(src = "airbnb_overview.jpg", height = 600, weight =700, align="center")
HTML('<center><img src="airbnb_overview.jpg", height = 600, weight =700 ></center>')
),
#### Map ##########
tabPanel("NYC map",
div(class="outer",
tags$head(includeCSS("styles.css"),#customized CSS
includeScript("gomap.js")),
leafletOutput(outputId = "map",width="100%", height="100%"),
# Options: borough, Room Type, Price, Rating, Reviews
absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE, draggable = TRUE,
top = 80, left = "auto", right = 20, bottom = "auto",
width = 320, height = "auto",
h2("Airbnb in NYC"),
checkboxGroupInput(inputId = "select_boro", label = h4("Borough"),
choices = boro, selected = 'Manhattan'),
checkboxGroupInput(inputId = "select_room", label = h4("Room Type"),
choices = room, selected = room),
sliderInput(inputId = "slider_price", label = h4("Price"), min = 1, max = 300,
pre = "$", sep = ",", value = c(30, 300)), #animate=TRUE
sliderInput(inputId = "slider_rating", label = h4("Rating Score"), min = 20, max = 100,
value = c(60, 100)),
sliderInput(inputId = "slider_review", label = h4("Number of Reviews"), min = 10, max = 450,
value = c(10, 450)),
h6("The map information is based on May 02, 2017 dataset from"),
h6(a("Inside Airbnb",href="http://insideairbnb.com/get-the-data.html", target="_blank"))
),
# Results: count_room, avgprice
absolutePanel(id = "controls", class = "panel panel-default", fixed = FALSE, draggable = TRUE,
top = 320, left = 20, right = "auto" , bottom = "auto",
width = 320, height = "auto",
plotlyOutput(outputId = "count_room",height = 150),
plotlyOutput(outputId = "avgprice", height = 150))
)),
#### Listings ##########
tabPanel("Listings, Boroughs and Price Changes",
fluidRow(
column(3,
h3("Listings by Boroughs and Room Type"),
br(),
br(),
sliderInput(inputId = "tab2_price", h4("Price/Night"), min = 10, max = 500, value = c(10, 500)),
sliderInput(inputId = "tab2_rating", h4("Rating Score"), min = 10, max = 100, value = c(10,100)),
br(),
br(),
h3("Price Changes over Time"),
selectInput("price_option", label = h3("Select Time Type"),
choices = list("Year" = "Year","Month" = "Month"), selected = "Year")
),
column(9,
h3(""),
plotlyOutput(outputId = "graph1",width=1000, height =350),
br(),
plotlyOutput(outputId = "tab_price",width=1000, height =350)
)
)#fuildrow
),#tabpanel2
#### References ##########
navbarMenu("References",
tabPanel("Inside Airbnb",
h3("Inside Airbnb", a("Link", href="http://insideairbnb.com/get-the-data.html"))),
tabPanel("Airbnb Business Model",
h3("Airbnb Business Model", a("Link", href="http://bmtoolbox.net/stories/airbnb/")))
) #https://stackoverflow.com/questions/17847764/put-a-html-link-to-the-r-shiny-application
#pdf https://gist.github.com/aagarw30/d5aa49864674aaf74951
#web https://stackoverflow.com/questions/33020558/embed-iframe-inside-shiny-app
))
|
# Read a Newick tree from file, then remove its root (unroot() collapses
# the basal node, giving an unrooted topology); the result is written back
# out below.
library(ape)
testtree <- read.tree("10105_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10105_0_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/10105_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
testtree <- read.tree("10105_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10105_0_unrooted.txt") |
## Lets a user create a cacheMatrix. Once the cacheSolve function has been run, the inverse of the matrix is cached
## example usuage:
## x <- makeCacheMatrix(1:4, 2, 2)
## cacheSolve(x) ## calculates the inverse
## cacheSolve(x) ## retrieves the inverse from the cach, no calculation needed
## set(x)
## Write a short comment describing this function
## Wrap a matrix in a list of accessor closures so its inverse can be cached.
## The closures share `x` (the data) and `inv` (the cached inverse, or NULL
## while it is still unknown).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # unknown until cacheSolve() stores it
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # data changed: drop the stale inverse
    },
    get = function() x,
    setinverse = function(newInverse) inv <<- newInverse,
    getinverse = function() inv
  )
}
## Returns the inverse of the matrix. If this function has been run earlier, a cached result is returned
## If the matrix has been changed, the cache is cleared and the inverse will have to be computed again
## Return the inverse of the cache matrix `x`, reusing the cached value when
## one exists and computing + storing it otherwise. Extra arguments are
## forwarded to solve(). A message announces cache hits.
cacheSolve <- function(x, ...) {
  hit <- x$getinverse()
  if (is.null(hit)) {
    hit <- solve(x$get(), ...)
    x$setinverse(hit)
  } else {
    message("getting cached data")
  }
  hit
}
| /cachematrix.R | no_license | JelleBrill/ProgrammingAssignment2 | R | false | false | 1,272 | r | ## Lets a user create a cacheMatrix. Once the cacheSolve function has been run, the inverse of the matrix is cached
## example usuage:
## x <- makeCacheMatrix(1:4, 2, 2)
## cacheSolve(x) ## calculates the inverse
## cacheSolve(x) ## retrieves the inverse from the cach, no calculation needed
## set(x)
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()){
inverse <- NULL ## upon creation, the inverse is unknown
set <- function(y){
x <<- y
inverse <<- NULL ##when the data is changed, the inverse becomes unknown
}
get <- function() x ##returns the data
setinverse <- function(newInverse) inverse <<- newInverse ##update the inverse
getinverse <- function() inverse
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Returns the inverse of the matrix. If this function has been run earlier, a cached result is returned
## If the matrix has been changed, the cache is cleared and the inverse will have to be computed again
cacheSolve <- function(x, ...) {
inverse <- x$getinverse()
if (!is.null(inverse)){
message("getting cached data") ## as in the example. Can be deleted
return(inverse)
}
data <- x$get()
inverse <- solve(data, ...)
x$setinverse(inverse)
inverse
}
|
# Point the user library at the pre-baked package cache for this container.
Sys.setenv(R_LIBS_USER = "/home/rstudio/.rpackages")

# On a fresh RStudio session with no active project, locate the first
# *.Rproj file under /home/rstudio and open its directory as the project.
setHook("rstudio.sessionInit", function(newSession) {
  if (newSession) {
    if (is.null(rstudioapi::getActiveProject())) {
      # Find a project file. "\\.rproj$" anchors the match: the original
      # pattern ".rproj" treated "." as a regex wildcard and would also
      # match names like "foo_rprojX".
      projects <- dirname(list.files(pattern = "\\.rproj$",
                                     recursive = TRUE,
                                     ignore.case = TRUE,
                                     full.names = TRUE,
                                     path = "/home/rstudio"))
      # Guard: with no match, projects[1] would be NA and
      # rstudioapi::openProject(NA) would error during session startup.
      if (length(projects) > 0) {
        rstudioapi::openProject(projects[1])
      }
    }
  }
  if (!is.null(rstudioapi::getActiveProject())) {
    #rstudioapi::navigateToFile("README.md")
  }
}, action = "append")
| /.Rprofile | no_license | ljcolling/arm-env | R | false | false | 696 | rprofile | Sys.setenv(R_LIBS_USER = "/home/rstudio/.rpackages")
# When a new RStudio session starts with no active project, search
# /home/rstudio for a project file and open it.
setHook("rstudio.sessionInit", function(newSession) {
if (newSession){
if( is.null(rstudioapi::getActiveProject()) ){
# Find a project file
# NOTE(review): ".rproj" is an unanchored regex in which "." matches any
# character; "\\.rproj$" would match only real .Rproj files -- confirm.
aProject = dirname(list.files(pattern = ".rproj",
recursive = T,
ignore.case = T,
full.names = T,
path = "/home/rstudio"))
# Open that project!
# NOTE(review): if no file is found, aProject[1] is NA and
# openProject(NA) will error -- consider guarding on length(aProject).
rstudioapi::openProject(aProject[1])
}
}
if(!is.null(rstudioapi::getActiveProject())){
#rstudioapi::navigateToFile("README.md")
}
}, action = "append")
|
#' @title ngettext (joke override)
#'
#' @description A prank drop-in replacement that shadows
#'   \code{base::ngettext}: with probability 0.5 it returns a random Polish
#'   rap line, otherwise it returns the normal translated message.
#'
#' @param ... Arguments forwarded to \code{\link[base]{ngettext}}
#'   (\code{n}, \code{msg1}, \code{msg2}, \code{domain}). The previous
#'   signature took a single \code{params} argument, but
#'   \code{base::ngettext(params)} always failed because \code{msg1} and
#'   \code{msg2} were missing; forwarding \code{...} fixes that while
#'   remaining backward compatible for positional calls.
#'
#' @return A length-one character vector: either a rap quote or the result
#'   of \code{base::ngettext(...)}.
#'
#' @export
ngettext<- function(...){
  rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
           "Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
           "Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
           "Przy piwerku boski chillout Gruba toczy sie rozkmina",
           "Wez ziomalku sie nie spinaj DJ Werset znow zabija")
  # Draw the candidate quote up front (this consumes RNG state), then flip
  # a fair coin to decide which branch the caller gets.
  rapek <- sample(rap, 1)
  if(runif(1,0,1) < 0.5){
    rapek
  } else {
    base::ngettext(...)
  }
}
| /R/ngettext.R | no_license | granatb/RapeR | R | false | false | 671 | r |
#' @title ngettext (joke override)
#'
#' @description Shadows \code{base::ngettext}: with probability 0.5 returns
#'   a random Polish rap line instead of the translated message.
#'
#' @param params Argument intended for \code{base::ngettext}.
#'   NOTE(review): \code{base::ngettext(params)} supplies only one argument,
#'   but \code{base::ngettext} requires \code{n}, \code{msg1} and
#'   \code{msg2}, so this branch likely errors -- confirm and consider
#'   forwarding \code{...} instead.
#'
#' @return A length-one character vector.
#'
#' @export
ngettext<- function(params){
# Pool of rap lines used as joke replacements.
rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
"Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
"Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
"Przy piwerku boski chillout Gruba toczy sie rozkmina",
"Wez ziomalku sie nie spinaj DJ Werset znow zabija")
rapek <- sample(rap, 1)  # draw one line up front (consumes RNG state)
if(runif(1,0,1) < 0.5){
rapek
}else{base::ngettext(params)
}
}
|
#' Create a ggplot of empirical item characteristic curves compared to theoretical relationship based on the model
#'
#' The empirical item characteristic curve plots the mean scores on each item for groups with different total scores
#' on the whole test.
#'
#' @param mirtobj An estimated IRT model (of class SingleGroupClass) estimated either using \link[mirt]{mirt} or \link[unimirt]{unimirt}.
#' @param itenum A numeric input denoting which item the plot should be produced for.
#' @param which.items A numeric vector denoting which items should be used to create the total test score. By default all available items are included.
#' @param ngroups The number of groups to split the cohort into in order to produce the empirical points on the curve (the mean total score in each group is plotted against the mean item score). Setting ngroups=1 (the default and the exception to the usual pattern) will create one group for each possible total test score.
#'
#' @return A list with the following elements.
#' \describe{
#' \item{plot1}{A ggplot comparing the model-implied item characteristic curve (line) with the empirical group means (points).}
#' \item{modelchartdat}{A data frame giving the expected relationship between total score and mean item score based on the IRT model}
#' \item{empiricalchartdat}{A data frame giving the empirical results within each group}
#' }
#' @import ggplot2
#' @examples
#' \dontrun{
#' mirt1=mirt(mathsdata,1)
#' EICC=EmpiricalICCfit(mirt1,1)
#' EICC
#' EICC$plot1
#' EmpiricalICCfit(mirt1,1,ngroups=10)$plot1
#' }
#' @export
EmpiricalICCfit=function(mirtobj,itenum,which.items=NULL,ngroups=1){
#Item response data the model was fitted to.
itedata=GetDataFromMirt(mirtobj)
#With missing responses a total over all items is undefined, so bail out
#unless the user explicitly chose which items form the total.
if(is.null(which.items) & sum(is.na(itedata))>0){return(NULL)}
    #Quadrature nodes (thetas) and prior weights (qwts) over ability.
    thetas = mirtobj@Model$Theta
    qwts = mirtobj@Internals$Prior[[1]]
    if (length(qwts) > length(thetas)) {
        qwts = mirtobj@Internals$Prior[[1]][1, ]
    }
    nqpts = dim(thetas)[1]
    coefs = coef(mirtobj, simplify = TRUE)$items
    #Intercept columns (names starting "d", excluding "d0"); the number of
    #non-missing intercepts per item gives that item's maximum score.
    coefsd = coefs[, substr(colnames(coefs), 1, 1) == "d" & !colnames(coefs) ==
        "d0"]
    if (!is.matrix(coefsd)) {
        coefsd = as.matrix(coefsd)
    }
    nites = dim(coefs)[1]
    if (is.null(which.items)) {
        which.items = 1:nites
    }
#if itenum is not in which.items can just use original EmpiricalICCfit and stop there
if(!itenum%in%which.items){return(EmpiricalICCfitV1(mirtobj=mirtobj,itenum=itenum
			,which.items=which.items,ngroups=ngroups))}
#else remove the item itself from which.items and carry on
which.items=which.items[which.items!=itenum]
    #Build, by convolution over items, the score distribution of the "rest"
    #test: paperps[s+1, q] = P(rest score = s | theta at quadrature point q).
    papermax = sum(!is.na(coefsd[which.items, ]))
    paperps = matrix(rep(0, (papermax + 1) * nqpts), ncol = nqpts)
    paperps[1, ] = 1
    for (iiz in which.items) {
        probs = t(mirt::probtrace(mirt::extract.item(mirtobj,
            iiz), thetas))
        nrowp = dim(probs)[1]
        temp1 = 0 * paperps
        for (prow in 1:nrowp) {
            #Shift the running distribution down by the category score and
            #weight by the category probability.
            pscore = prow - 1
            addmat = paperps
            if (pscore > 0) {
                extrarows = matrix(rep(0, pscore * nqpts), nrow = pscore)
                addmat = rbind(extrarows, paperps[1:(papermax +
                  1 - pscore), ])
            }
            temp1 = temp1 + t(t(addmat) * probs[prow, ])
        }
        paperps = temp1
    }
#probability of scores on rest given theta (now quadrature points in rows)
paperps=t(paperps)
#probability of individual item scores for each theta
iprobs = mirt::probtrace(mirt::extract.item(mirtobj,itenum), thetas)
#item maximum
imax=ncol(iprobs)-1
#matrix of associated item scores of same shape
iscoresmat=matrix(rep(0:imax,each=length(thetas)),nrow=length(thetas))
#matrix of ability priors of same shape
qwtsmat=matrix(rep(qwts,(imax+1)),nrow=length(thetas))
#maximum on TOTAL test (including item)
totmax=imax+ncol(paperps)-1
#paperps with extra columns of zeros for where X exceeds total
paperpsaug=cbind(matrix(rep(0,imax*length(thetas)),nrow=length(thetas))
		,paperps
		,matrix(rep(0,imax*length(thetas)),nrow=length(thetas)))
#function to work out E(x|total) for a fixed total
ExFunc=function(total){
	#Pick up relevant column of paperps=P(Rest|x) to give us P(Tot|x)=P(Tot-x|x)
	PxGivenTot=paperpsaug[,total+imax+1-(0:imax)]*iprobs*qwtsmat
	#note that PxGivenTot isn't actually P(x|Total) as various standardising constants
	#have been left out of calculations
	#will divide by a standardising factor sum(PxGivenTot) instead
	sum(PxGivenTot*iscoresmat)/sum(PxGivenTot)
	}
#Model-implied mean item score at every possible total score.
expectedite=sapply(0:totmax,ExFunc)
modelchartdat=data.frame(raw.score=0:totmax,item.score=expectedite)
#Empirical side: total score (including the item itself) and item score,
#keeping only candidates with both available.
scoredata=as.matrix(itedata[,sort(c(itenum,which.items))])
scoretot=rowSums(scoredata)
itescore=itedata[,itenum]
keep=(!is.na(scoretot) & !is.na(itescore))
itescore=itescore[keep]
scoretot=scoretot[keep]
scoregroups=scoretot
#With ngroups>1, band candidates into quantile groups rather than one
#group per distinct raw score.
if(ngroups>1){
	cuts=seq(0,1,length=ngroups+1)[-c(1,ngroups+1)]
	scoregroups=findInterval(scoretot,stats::quantile(scoretot,cuts))
	}
empiricalchartdat=data.frame(raw.score=tapply(scoretot,scoregroups,mean)
	,item.score=tapply(itescore,scoregroups,mean)
	,N=table(scoregroups)
	)
#Model curve (line) overlaid with empirical group means (points sized by N).
plot1=ggplot(data=modelchartdat,aes_string(x="raw.score",y="item.score"))+geom_line()+
	geom_point(data=empiricalchartdat,alpha=0.5,aes_string(size="N.Freq"))+
	scale_size_area()+ylim(0,imax)+xlim(0,totmax)
return(list(plot1=plot1,modelchartdat=modelchartdat,empiricalchartdat=empiricalchartdat))
}
#' Subroutine for empirical item characteristic curves
#'
#' NOTE: THIS IS A SIMPLIFIED VERSION OF THE FUNCTION FOR EmpiricalICCfit
#'
#' The empirical item characteristic curve plots the mean scores on each item for groups with different total scores
#' on the whole test. If the total test scores include the item itself then technically this subroutine
#' calculates the expected relationship against total scores on a parallel test so (for example) a raw total score of zero
#' will not necessarily imply a definite score of zero on the item.
#'
#' Currently just used as a subroutine within the function EmpiricalICCfit
#'
#' @param mirtobj An estimated IRT model (of class SingleGroupClass) estimated either using the function "unimirt"
#' or by applying the function "mirt" directly.
#' @param itenum A numeric input denoting which item the plot should be produced for.
#' @param which.items A numeric vector denoting which items should be used to create the total test score. By default all available items are included.
#' @param ngroups The number of groups to split the cohort into in order to produce the empirical points on the curve (the mean total score in each group is plotted against the mean item score). Setting ngroups=1 (the default and the exception to the usual pattern) will create one group for each possible total test score.
#'
#' @return A list with the following elements.
#' \describe{
#' \item{plot1}{A ggplot comparing the model-implied item characteristic curve (line) with the empirical group means (points).}
#' \item{modelchartdat}{A data frame giving the expected relationship between total score and mean item score based on the IRT model}
#' \item{empiricalchartdat}{A data frame giving the empirical results within each group}
#' }
#' @import ggplot2
EmpiricalICCfitV1=function(mirtobj,itenum,which.items=NULL,ngroups=1){
#Item response data the model was fitted to.
itedata=GetDataFromMirt(mirtobj)
#Totals over all items are undefined with missing data, so bail out
#unless which.items was given explicitly.
if(is.null(which.items) & sum(is.na(itedata))>0){return(NULL)}
    #Quadrature nodes (thetas) and prior weights (qwts) over ability.
    thetas = mirtobj@Model$Theta
    qwts = mirtobj@Internals$Prior[[1]]
    if (length(qwts) > length(thetas)) {
        qwts = mirtobj@Internals$Prior[[1]][1, ]
    }
    nqpts = dim(thetas)[1]
    coefs = coef(mirtobj, simplify = TRUE)$items
    #Intercept columns (names starting "d", excluding "d0"); the number of
    #non-missing intercepts per item gives that item's maximum score.
    coefsd = coefs[, substr(colnames(coefs), 1, 1) == "d" & !colnames(coefs) ==
        "d0"]
    if (!is.matrix(coefsd)) {
        coefsd = as.matrix(coefsd)
    }
    nites = dim(coefs)[1]
    if (is.null(which.items)) {
        which.items = 1:nites
    }
    #Convolution over items building the total-score distribution:
    #paperps[s+1, q] = P(total score = s | theta at quadrature point q).
    papermax = sum(!is.na(coefsd[which.items, ]))
    paperps = matrix(rep(0, (papermax + 1) * nqpts), ncol = nqpts)
    paperps[1, ] = 1
    for (iiz in which.items) {
        probs = t(mirt::probtrace(mirt::extract.item(mirtobj,
            iiz), thetas))
        nrowp = dim(probs)[1]
        temp1 = 0 * paperps
        for (prow in 1:nrowp) {
            #Shift the running distribution down by the category score and
            #weight by the category probability.
            pscore = prow - 1
            addmat = paperps
            if (pscore > 0) {
                extrarows = matrix(rep(0, pscore * nqpts), nrow = pscore)
                addmat = rbind(extrarows, paperps[1:(papermax +
                  1 - pscore), ])
            }
            temp1 = temp1 + t(t(addmat) * probs[prow, ])
        }
        paperps = temp1
    }
#probability of each theta for each raw score (posterior over theta)
thetaps=t(paperps)*qwts
thetaps=t(thetaps)/colSums(thetaps)
#expected theta for each given raw score
expectedtheta=colSums(t(thetaps)*thetas[,1])
sdtheta=sqrt(colSums(t(thetaps)*thetas[,1]*thetas[,1])-expectedtheta^2)
#expected item score for each given theta
expectedite1 <- expected.item(extract.item(mirtobj,itenum), thetas)
expectedite=colSums(t(thetaps)*expectedite1)
#really this shows expected score "if each pupil did ANOTHER item like this one"
modelchartdat=data.frame(raw.score=0:papermax,item.score=expectedite)
#Empirical side: total score over which.items vs score on the item itself,
#keeping only candidates with both available.
scoredata=as.matrix(itedata[,which.items])
scoretot=rowSums(scoredata)
itescore=itedata[,itenum]
keep=(!is.na(scoretot) & !is.na(itescore))
itescore=itescore[keep]
scoretot=scoretot[keep]
scoregroups=scoretot
#ngroups=10
#With ngroups>1, band candidates into quantile groups rather than one
#group per distinct raw score.
if(ngroups>1){
	cuts=seq(0,1,length=ngroups+1)[-c(1,ngroups+1)]
	scoregroups=findInterval(scoretot,stats::quantile(scoretot,cuts))
	}
empiricalchartdat=data.frame(raw.score=tapply(scoretot,scoregroups,mean)
	,item.score=tapply(itescore,scoregroups,mean)
	,N=table(scoregroups)
	)
#Item maxima taken from the number of response categories (K - 1).
maxes=extract.mirt(mirtobj,"K")-1
#Model curve (line) overlaid with empirical group means (points sized by N).
plot1=ggplot(data=modelchartdat,aes_string(x="raw.score",y="item.score"))+geom_line()+
	geom_point(data=empiricalchartdat,alpha=0.5,aes_string(size="N.Freq"))+
	scale_size_area()+ylim(0,maxes[itenum])
return(list(plot1=plot1,modelchartdat=modelchartdat,empiricalchartdat=empiricalchartdat))
}
| /R/EmpiricalICCfit.R | permissive | CambridgeAssessmentResearch/unimirt | R | false | false | 10,503 | r | #' Create a ggplot of empirical item characteristic curves compared to theoretical relationship based on the model
#'
#' The empirical item characteristic curve plots the mean scores on each item for groups with different total scores
#' on the whole test.
#'
#' @param mirtobj An estimated IRT model (of class SingleGroupClass) estimated either using \link[mirt]{mirt} or \link[unimirt]{unimirt}.
#' @param itenum A numeric input denoting which item the plot should be produced for.
#' @param which.items A numeric vector denoting which items should be used to create the total test score. By default all available items are included.
#' @param ngroups The number of groups to split the cohort into in order to produce the empirical points on the curve (the mean total score in each group is plotted against the mean item score). Setting ngroups=1 (the default and the exception to the usual pattern) will create one group for each possible total test score.
#'
#' @return A list with the following elements.
#' \describe{
#' \item{plot1}{A ggplot comparing the model-implied item characteristic curve (line) with the empirical group means (points).}
#' \item{modelchartdat}{A data frame giving the expected relationship between total score and mean item score based on the IRT model}
#' \item{empiricalchartdat}{A data frame giving the empirical results within each group}
#' }
#' @import ggplot2
#' @examples
#' \dontrun{
#' mirt1=mirt(mathsdata,1)
#' EICC=EmpiricalICCfit(mirt1,1)
#' EICC
#' EICC$plot1
#' EmpiricalICCfit(mirt1,1,ngroups=10)$plot1
#' }
#' @export
EmpiricalICCfit=function(mirtobj,itenum,which.items=NULL,ngroups=1){
#Item response data the model was fitted to.
itedata=GetDataFromMirt(mirtobj)
#With missing responses a total over all items is undefined, so bail out
#unless the user explicitly chose which items form the total.
if(is.null(which.items) & sum(is.na(itedata))>0){return(NULL)}
    #Quadrature nodes (thetas) and prior weights (qwts) over ability.
    thetas = mirtobj@Model$Theta
    qwts = mirtobj@Internals$Prior[[1]]
    if (length(qwts) > length(thetas)) {
        qwts = mirtobj@Internals$Prior[[1]][1, ]
    }
    nqpts = dim(thetas)[1]
    coefs = coef(mirtobj, simplify = TRUE)$items
    #Intercept columns (names starting "d", excluding "d0"); the number of
    #non-missing intercepts per item gives that item's maximum score.
    coefsd = coefs[, substr(colnames(coefs), 1, 1) == "d" & !colnames(coefs) ==
        "d0"]
    if (!is.matrix(coefsd)) {
        coefsd = as.matrix(coefsd)
    }
    nites = dim(coefs)[1]
    if (is.null(which.items)) {
        which.items = 1:nites
    }
#if itenum is not in which.items can just use original EmpiricalICCfit and stop there
if(!itenum%in%which.items){return(EmpiricalICCfitV1(mirtobj=mirtobj,itenum=itenum
			,which.items=which.items,ngroups=ngroups))}
#else remove the item itself from which.items and carry on
which.items=which.items[which.items!=itenum]
    #Build, by convolution over items, the score distribution of the "rest"
    #test: paperps[s+1, q] = P(rest score = s | theta at quadrature point q).
    papermax = sum(!is.na(coefsd[which.items, ]))
    paperps = matrix(rep(0, (papermax + 1) * nqpts), ncol = nqpts)
    paperps[1, ] = 1
    for (iiz in which.items) {
        probs = t(mirt::probtrace(mirt::extract.item(mirtobj,
            iiz), thetas))
        nrowp = dim(probs)[1]
        temp1 = 0 * paperps
        for (prow in 1:nrowp) {
            #Shift the running distribution down by the category score and
            #weight by the category probability.
            pscore = prow - 1
            addmat = paperps
            if (pscore > 0) {
                extrarows = matrix(rep(0, pscore * nqpts), nrow = pscore)
                addmat = rbind(extrarows, paperps[1:(papermax +
                  1 - pscore), ])
            }
            temp1 = temp1 + t(t(addmat) * probs[prow, ])
        }
        paperps = temp1
    }
#probability of scores on rest given theta (now quadrature points in rows)
paperps=t(paperps)
#probability of individual item scores for each theta
iprobs = mirt::probtrace(mirt::extract.item(mirtobj,itenum), thetas)
#item maximum
imax=ncol(iprobs)-1
#matrix of associated item scores of same shape
iscoresmat=matrix(rep(0:imax,each=length(thetas)),nrow=length(thetas))
#matrix of ability priors of same shape
qwtsmat=matrix(rep(qwts,(imax+1)),nrow=length(thetas))
#maximum on TOTAL test (including item)
totmax=imax+ncol(paperps)-1
#paperps with extra columns of zeros for where X exceeds total
paperpsaug=cbind(matrix(rep(0,imax*length(thetas)),nrow=length(thetas))
		,paperps
		,matrix(rep(0,imax*length(thetas)),nrow=length(thetas)))
#function to work out E(x|total) for a fixed total
ExFunc=function(total){
	#Pick up relevant column of paperps=P(Rest|x) to give us P(Tot|x)=P(Tot-x|x)
	PxGivenTot=paperpsaug[,total+imax+1-(0:imax)]*iprobs*qwtsmat
	#note that PxGivenTot isn't actually P(x|Total) as various standardising constants
	#have been left out of calculations
	#will divide by a standardising factor sum(PxGivenTot) instead
	sum(PxGivenTot*iscoresmat)/sum(PxGivenTot)
	}
#Model-implied mean item score at every possible total score.
expectedite=sapply(0:totmax,ExFunc)
modelchartdat=data.frame(raw.score=0:totmax,item.score=expectedite)
#Empirical side: total score (including the item itself) and item score,
#keeping only candidates with both available.
scoredata=as.matrix(itedata[,sort(c(itenum,which.items))])
scoretot=rowSums(scoredata)
itescore=itedata[,itenum]
keep=(!is.na(scoretot) & !is.na(itescore))
itescore=itescore[keep]
scoretot=scoretot[keep]
scoregroups=scoretot
#With ngroups>1, band candidates into quantile groups rather than one
#group per distinct raw score.
if(ngroups>1){
	cuts=seq(0,1,length=ngroups+1)[-c(1,ngroups+1)]
	scoregroups=findInterval(scoretot,stats::quantile(scoretot,cuts))
	}
empiricalchartdat=data.frame(raw.score=tapply(scoretot,scoregroups,mean)
	,item.score=tapply(itescore,scoregroups,mean)
	,N=table(scoregroups)
	)
#Model curve (line) overlaid with empirical group means (points sized by N).
plot1=ggplot(data=modelchartdat,aes_string(x="raw.score",y="item.score"))+geom_line()+
	geom_point(data=empiricalchartdat,alpha=0.5,aes_string(size="N.Freq"))+
	scale_size_area()+ylim(0,imax)+xlim(0,totmax)
return(list(plot1=plot1,modelchartdat=modelchartdat,empiricalchartdat=empiricalchartdat))
}
#' Subroutine for empirical item characteristic curves
#'
#' NOTE: THIS IS A SIMPLIFIED VERSION OF THE FUNCTION FOR EmpiricalICCfit
#'
#' The empirical item characteristic curve plots the mean scores on each item for groups with different total scores
#' on the whole test. If the total test scores include the item itself then technically this subroutine
#' calculates the expected relationship against total scores on a parallel test so (for example) a raw total score of zero
#' will not necessarily imply a definite score of zero on the item.
#'
#' Currently just used as a subroutine within the function EmpiricalICCfit
#'
#' @param mirtobj An estimated IRT model (of class SingleGroupClass) estimated either using the function "unimirt"
#' or by applying the function "mirt" directly.
#' @param itenum A numeric input denoting which item the plot should be produced for.
#' @param which.items A numeric vector denoting which items should be used to create the total test score. By default all available items are included.
#' @param ngroups The number of groups to split the cohort into in order to produce the empirical points on the curve (the mean total score in each group is plotted against the mean item score). Setting ngroups=1 (the default and the exception to the usual pattern) will create one group for each possible total test score.
#'
#' @return A list with the following elements.
#' \describe{
#' \item{plot1}{A ggplot comparing the model-implied item characteristic curve (line) with the empirical group means (points).}
#' \item{modelchartdat}{A data frame giving the expected relationship between total score and mean item score based on the IRT model}
#' \item{empiricalchartdat}{A data frame giving the empirical results within each group}
#' }
#' @import ggplot2
EmpiricalICCfitV1=function(mirtobj,itenum,which.items=NULL,ngroups=1){
#Item response data the model was fitted to.
itedata=GetDataFromMirt(mirtobj)
#Totals over all items are undefined with missing data, so bail out
#unless which.items was given explicitly.
if(is.null(which.items) & sum(is.na(itedata))>0){return(NULL)}
    #Quadrature nodes (thetas) and prior weights (qwts) over ability.
    thetas = mirtobj@Model$Theta
    qwts = mirtobj@Internals$Prior[[1]]
    if (length(qwts) > length(thetas)) {
        qwts = mirtobj@Internals$Prior[[1]][1, ]
    }
    nqpts = dim(thetas)[1]
    coefs = coef(mirtobj, simplify = TRUE)$items
    #Intercept columns (names starting "d", excluding "d0"); the number of
    #non-missing intercepts per item gives that item's maximum score.
    coefsd = coefs[, substr(colnames(coefs), 1, 1) == "d" & !colnames(coefs) ==
        "d0"]
    if (!is.matrix(coefsd)) {
        coefsd = as.matrix(coefsd)
    }
    nites = dim(coefs)[1]
    if (is.null(which.items)) {
        which.items = 1:nites
    }
    #Convolution over items building the total-score distribution:
    #paperps[s+1, q] = P(total score = s | theta at quadrature point q).
    papermax = sum(!is.na(coefsd[which.items, ]))
    paperps = matrix(rep(0, (papermax + 1) * nqpts), ncol = nqpts)
    paperps[1, ] = 1
    for (iiz in which.items) {
        probs = t(mirt::probtrace(mirt::extract.item(mirtobj,
            iiz), thetas))
        nrowp = dim(probs)[1]
        temp1 = 0 * paperps
        for (prow in 1:nrowp) {
            #Shift the running distribution down by the category score and
            #weight by the category probability.
            pscore = prow - 1
            addmat = paperps
            if (pscore > 0) {
                extrarows = matrix(rep(0, pscore * nqpts), nrow = pscore)
                addmat = rbind(extrarows, paperps[1:(papermax +
                  1 - pscore), ])
            }
            temp1 = temp1 + t(t(addmat) * probs[prow, ])
        }
        paperps = temp1
    }
#probability of each theta for each raw score (posterior over theta)
thetaps=t(paperps)*qwts
thetaps=t(thetaps)/colSums(thetaps)
#expected theta for each given raw score
expectedtheta=colSums(t(thetaps)*thetas[,1])
sdtheta=sqrt(colSums(t(thetaps)*thetas[,1]*thetas[,1])-expectedtheta^2)
#expected item score for each given theta
expectedite1 <- expected.item(extract.item(mirtobj,itenum), thetas)
expectedite=colSums(t(thetaps)*expectedite1)
#really this shows expected score "if each pupil did ANOTHER item like this one"
modelchartdat=data.frame(raw.score=0:papermax,item.score=expectedite)
#Empirical side: total score over which.items vs score on the item itself,
#keeping only candidates with both available.
scoredata=as.matrix(itedata[,which.items])
scoretot=rowSums(scoredata)
itescore=itedata[,itenum]
keep=(!is.na(scoretot) & !is.na(itescore))
itescore=itescore[keep]
scoretot=scoretot[keep]
scoregroups=scoretot
#ngroups=10
#With ngroups>1, band candidates into quantile groups rather than one
#group per distinct raw score.
if(ngroups>1){
	cuts=seq(0,1,length=ngroups+1)[-c(1,ngroups+1)]
	scoregroups=findInterval(scoretot,stats::quantile(scoretot,cuts))
	}
empiricalchartdat=data.frame(raw.score=tapply(scoretot,scoregroups,mean)
	,item.score=tapply(itescore,scoregroups,mean)
	,N=table(scoregroups)
	)
#Item maxima taken from the number of response categories (K - 1).
maxes=extract.mirt(mirtobj,"K")-1
#Model curve (line) overlaid with empirical group means (points sized by N).
plot1=ggplot(data=modelchartdat,aes_string(x="raw.score",y="item.score"))+geom_line()+
	geom_point(data=empiricalchartdat,alpha=0.5,aes_string(size="N.Freq"))+
	scale_size_area()+ylim(0,maxes[itenum])
return(list(plot1=plot1,modelchartdat=modelchartdat,empiricalchartdat=empiricalchartdat))
}
|
# Concatenate the per-chunk LUAD CNV gene-name tables into a single CSV.
library(data.table)
# Chunk files in the working directory: LUAD_CNV_genenames1, LUAD_CNV_genenames2, ...
chunk_files <- list.files(pattern = "LUAD_CNV_genenames[0-9]+")
# Record which files contributed to the bind, for provenance.
write.csv(chunk_files, "files.csv")
# Read every chunk with fread and stack the rows.
combined <- rbindlist(lapply(chunk_files, fread))
write.csv(combined, "LUAD_CNV_genenames_bind.csv")
| /bind.R | no_license | natalie-stephenson/GRB2-SH2-Screen_WIP | R | false | false | 195 | r | library(data.table)
# Gather the per-chunk LUAD CNV gene-name files from the working directory.
files <- list.files(pattern = "LUAD_CNV_genenames[0-9]+")
# Record which files contributed to the bind, for provenance.
write.csv(files, "files.csv")
# Read every chunk with data.table::fread and stack the rows.
bind <- rbindlist(lapply(files, fread))
write.csv(bind, "LUAD_CNV_genenames_bind.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flowAnalysis.R
\name{fhAnalyze}
\alias{fhAnalyze}
\title{fhAnalyze}
\usage{
fhAnalyze(fh, verbose = TRUE)
}
\arguments{
\item{fh}{a \code{\link{FlowHist}} object}
\item{verbose}{boolean, set to FALSE to turn off logging messages}
}
\value{
a \code{\link{FlowHist}} object with the analysis (nls, counts,
cv, RCS) slots filled.
}
\description{
Complete non-linear regression analysis of FlowHist histogram data
}
\details{
Completes the NLS analysis, and calculates the modelled events and CVs
for the result.
}
\examples{
library(flowPloidyData)
fh1 <- FlowHist(file = flowPloidyFiles()[1], channel = "FL3.INT.LIN")
fh1 <- fhAnalyze(fh1)
}
\seealso{
\code{\link{FlowHist}}
}
\author{
Tyler Smith
}
| /man/fhAnalyze.Rd | no_license | plantarum/flowPloidy | R | false | true | 779 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flowAnalysis.R
\name{fhAnalyze}
\alias{fhAnalyze}
\title{fhAnalyze}
\usage{
fhAnalyze(fh, verbose = TRUE)
}
\arguments{
\item{fh}{a \code{\link{FlowHist}} object}
\item{verbose}{boolean, set to FALSE to turn off logging messages}
}
\value{
a \code{\link{FlowHist}} object with the analysis (nls, counts,
cv, RCS) slots filled.
}
\description{
Complete non-linear regression analysis of FlowHist histogram data
}
\details{
Completes the NLS analysis, and calculates the modelled events and CVs
for the result.
}
\examples{
library(flowPloidyData)
fh1 <- FlowHist(file = flowPloidyFiles()[1], channel = "FL3.INT.LIN")
fh1 <- fhAnalyze(fh1)
}
\seealso{
\code{\link{FlowHist}}
}
\author{
Tyler Smith
}
|
# Cell state annotation of mouse CD8 T cells using the ProjecTILs pipeline.
# Projects a query Seurat object onto a ProjecTILs reference atlas and
# annotates each cell with a predicted functional state.
# Dependencies:
# r/3.6.0
# Seurat/3.2.2
# ProjecTILs 0.5.1
# Atlases downloaded with ProjecTIL pipeline.
# Query mouse object provided.
library(ProjecTILs)
library(Seurat)
library(ggplot2)
# Configuration: these variables must be defined before running the script
# (uncomment and edit as needed; pick ONE atlas).
# path.to.atlas <- "ref_TIL_Atlas_mouse_v1.rds"
# path.to.atlas <- "ref_LCMV_Atlas_mouse_v1.rds"
# path.to.query <- "200918_end_seurat.rds"
# sample.name <- "mouse_CD8_Tcells"
# outdir <- "results_directory"
dir.create(file.path(outdir), showWarnings = FALSE, recursive = TRUE)
source("utils.R")  # plotting helpers (create_UMAP_DimPlot etc., assumed defined there -- confirm)
###############################################################################
# Run projection algorithm: embed the query cells into the reference atlas
# and predict a functional cell state for each projected cell.
ref <- load.reference.map(ref = path.to.atlas)
query.obj <- readRDS(file = path.to.query)
query.projected <- make.projection(query.obj, ref=ref,
                       filter.cells = TRUE,   # TRUE rather than T: T is a reassignable binding
                       query.assay = "RNA",
                       skip.normalize = TRUE) # the query data are already normalised
query.projected <- cellstate.predict(ref=ref, query=query.projected)
###############################################################################
# Map the subset of cells kept in the projected query back to the original
# query set of cells and assign "NA" to cells without a label.
cells.all <- colnames(query.obj)
cells.kept <- colnames(query.projected)
cells.kept <- gsub("^Q_","",cells.kept)   # projection prefixes cell names with "Q_"
# Position of each kept cell within the full cell list.
I <- sapply(cells.kept, function(z) which(cells.all == z))
cell.state.labels <- query.projected@meta.data[["functional.cluster"]]
cell.state.idents <- cells.all
cell.state.idents[I] <- cell.state.labels
cell.state.idents[-I] <- "NA"
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
                         col.name = "cell.state.idents")
# Same mapping for the per-cell prediction confidence.
pred.conf <- query.projected@meta.data[["functional.cluster.conf"]]
conf <- cells.all
conf[I] <- pred.conf
conf[-I] <- "NA"
query.obj <- AddMetaData(object = query.obj, metadata = conf, col.name = "conf")
# NOTE(review): cell.state.labels has one entry per *kept* cell while conf
# has one entry per cell of the full object, so cbind() recycles the shorter
# vector -- confirm whether cell.state.idents was intended here.
res <- cbind(cell.state.labels, conf)
# Fixed: the closing parenthesis of write.table() was missing, and sep="\t"
# was being passed (silently ignored) to paste0() instead of write.table().
write.table(res, file = paste0(outdir, "/", sample.name, "_cell_state_labels_wConf.xls"),
            sep = "\t")
###############################################################################
# Highlight individual cell state labels on original query UMAP (Figure S2A)
create_UMAP_DimPlot(query.obj, with.and.withoutlabels = TRUE,
cell.groups = "cell.state.idents",
outfile = paste0(outdir, "/top_lvl/", sample.name, "_UMAP_cell_state_idents.pdf"))
dir.create(file.path(paste0(outdir, "/per_cell_state_highlights")), showWarnings = FALSE, recursive = TRUE)
# Lump all unprojected cells into a single "Other" level.
cell.state.idents[-I] <- "Other"
cell.state.idents <- as.factor(cell.state.idents)
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents, col.name = "cell.state.idents")
cell.states <- levels(cell.state.idents)
cell.states <- cell.states[!cell.states %in% "Other"]
group.colors <- c("red", "grey")
# One PDF per cell state: the current state in red, everything else in grey.
for (cellstate in cell.states) {
# Collapse every level except the current state into "Other".
levels(query.obj@meta.data[["cell.state.idents"]])[which(levels(query.obj@meta.data[["cell.state.idents"]]) != cellstate)] <- "Other"
names(group.colors) <- c(cellstate, "Other")
# NOTE(review): UMAP_DimPlot (no "create_" prefix) is called here while the
# rest of the script uses create_UMAP_DimPlot -- confirm both exist in utils.R.
UMAP_DimPlot(query.obj, with.and.withoutlabels = FALSE,
cell.groups = "cell.state.idents", group.colors = group.colors,
plot.order = cellstate,
outfile = paste0(outdir, "/per_cell_state_highlights/", sample.name, "_UMAP_cell_state_idents_", cellstate, ".pdf"))
# Restore the full factor so the next iteration sees all levels again.
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
col.name = "cell.state.idents")
}
###############################################################################
# Highlight Tpex and Tex on original Seurat UMAP (Figure 2D)
# TIL atlas labels
I <- sort(union(which(cell.state.idents == "CD8_Tpex"), which(cell.state.idents == "CD8_Tex")))
cell.state.idents[-I] <- "Other"
table(cell.state.idents)
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents, col.name = "cell.labels")
table(query.obj$cell.labels)
ccols <- c("orange","purple","gray") # order of table of idents: CD8_Tex, CD8_Tpex, Other
# Fixed: pdf() was missing its closing parenthesis, which made the script
# unparseable. DimPlot is wrapped in print() so the plot is rendered even
# when the script is source()d, matching print(FeaturePlot(...)) below.
pdf(file = paste0(outdir, "/", sample.name, "_TIL_Tpex_Tex_UMAP_highlight_plots_unordered_UMAP.pdf"))
print(DimPlot(query.obj, reduction = "umap", group.by = "cell.labels", cols = ccols))
dev.off()
# LCMV atlas version (run with the LCMV reference; its labels are "Tpex"/"Tex")
# I <- sort(union(which(cell.state.idents == "Tpex"), which(cell.state.idents == "Tex")))
# cell.state.idents[-I] <- "Other"
# table(cell.state.idents)
# query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
#                         col.name = "cell.labels")
# table(query.obj$cell.labels)
# ccols <- c("orange","purple","gray") # order of table of idents: Tex, Tpex, Other
# pdf(file = paste0(outdir, "/", sample.name, "_LCMV_Tpex_Tex_UMAP_highlight_plots_unordered_UMAP.pdf"))
# print(DimPlot(query.obj, reduction = "umap", group.by = "cell.labels", cols = ccols))
# dev.off()
###############################################################################
# Predicted states overlay on UMAP - showing only labels w/ pred.conf > 0.5.
# Mask out cells whose prediction confidence falls below the 0.5 threshold.
I <- which(pred.conf < 0.5)
pred.conf[I] <- NaN
# Fixed: the reported message previously said "< 0.05", which did not match
# the 0.5 threshold actually applied above.
print(sprintf("Number of cells with a confidence score < 0.5: %d", length(which(is.nan(pred.conf)))))
cell.state.labels <- query.projected@meta.data[["functional.cluster"]]
cell.state.labels[I] <- "NA"
# Map the subset of cells in projected query back to the original query set of cells
# and assign "NaN" to cells without a confidence score.
cells.all <- colnames(query.obj)
cells.kept <- colnames(query.projected)
cells.kept <- gsub("^Q_","",cells.kept)
I.kept <- sapply(cells.kept, function(z) which(cells.all == z)) # named integer
query.pred.conf <- cells.all
query.pred.conf[I.kept] <- pred.conf
query.pred.conf[-I.kept] <- "NaN"
query.pred.conf <- as.numeric(query.pred.conf)
query.obj <- AddMetaData(query.obj, metadata=query.pred.conf, col.name = "functional.cluster.conf")
# Confidence heat overlay on the UMAP (blue = low, red = high).
outfile = paste0(outdir, "/", sample.name, "_", "functional_cluster_conf_UMAP_highlight.pdf")
pdf(outfile)
print( FeaturePlot(query.obj, reduction = "umap",
features = "functional.cluster.conf") &
scale_colour_gradientn(colours = c("#0571B0", "#92C5DE", "#D3D3D3", "#F4A582","#CA0020")) )
dev.off()
# Label overlays: first keeping "filtered" cells distinct, then lumping
# them with low-confidence cells under "NA".
cell.state.idents <- cells.all
cell.state.idents[I.kept] <- cell.state.labels
cell.state.idents[-I.kept] <- "filtered"
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents, col.name = "cell.state.idents")
create_UMAP_DimPlot(query.obj, with.and.withoutlabels = TRUE,
cell.groups = "cell.state.idents",
outfile = paste0(outdir, "/top_lvl/", sample.name, "_UMAP_cell_state_idents_NAs_notlumped.pdf"))
cell.state.idents[-I.kept] <- "NA"
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
col.name = "cell.state.idents")
create_UMAP_DimPlot(query.obj, with.and.withoutlabels = TRUE,
cell.groups = "cell.state.idents",
outfile = paste0(outdir, "/top_lvl/", sample.name, "_UMAP_cell_state_idents_NAs_lumped.pdf"))
dir.create(file.path(paste0(outdir, "/per_cell_state_highlights")), showWarnings = FALSE, recursive = TRUE)
# NOTE(review): I holds indices into the *projected* cells (pred.conf),
# whereas cell.state.idents spans all cells; elsewhere -I.kept is used for
# this purpose -- confirm whether -I.kept was intended here.
cell.state.idents[-I] <- "Other"
cell.state.idents <- as.factor(cell.state.idents)
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents, col.name = "cell.state.idents")
cell.states <- levels(cell.state.idents)
cell.states <- cell.states[!cell.states %in% "Other"]
group.colors <- c("red", "grey")
# One PDF per confident cell state: the state in red, everything else grey.
for (cellstate in cell.states) {
# Collapse every level except the current state into "Other".
levels(query.obj@meta.data[["cell.state.idents"]])[which(levels(query.obj@meta.data[["cell.state.idents"]]) != cellstate)] <- "Other"
names(group.colors) <- c(cellstate, "Other")
create_UMAP_DimPlot(query.obj, with.and.withoutlabels = FALSE,
cell.groups = "cell.state.idents", group.colors = group.colors,
plot.order = cellstate,
outfile = paste0(outdir, "/per_cell_state_highlights/",
sample.name, "_UMAP_cell_state_idents_", cellstate, ".pdf"))
# Restore the full factor so the next iteration sees all levels again.
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
col.name = "cell.state.idents")
}
| /scripts/ProjecTILs_analysis/ProjectTILs_analysis.R | no_license | jackslab/2021_Burger_et_al_scRNAseq | R | false | false | 8,234 | r | # Cell state annotation of mouse CD8 T cells using the ProjecTILs pipeline.
# Dependencies:
# r/3.6.0
# Seurat/3.2.2
# ProjecTILs 0.5.1
# Atlases downloaded with ProjecTIL pipeline.
# Query mouse object provided.
library(ProjecTILs)
library(Seurat)
library(ggplot2)
# Configuration: these variables must be defined before running the script
# (uncomment and edit as needed; pick ONE atlas).
# path.to.atlas <- "ref_TIL_Atlas_mouse_v1.rds"
# path.to.atlas <- "ref_LCMV_Atlas_mouse_v1.rds"
# path.to.query <- "200918_end_seurat.rds"
# sample.name <- "mouse_CD8_Tcells"
# outdir <- "results_directory"
dir.create(file.path(outdir), showWarnings = FALSE, recursive = TRUE)
source("utils.R")  # plotting helpers (create_UMAP_DimPlot etc., assumed defined there -- confirm)
###############################################################################
# Run projection algorithm: embed the query cells into the reference atlas
# and predict a functional cell state for each projected cell.
ref <- load.reference.map(ref = path.to.atlas)
query.obj <- readRDS(file = path.to.query)
# NOTE(review): prefer TRUE over T (T is a reassignable binding).
query.projected <- make.projection(query.obj, ref=ref,
filter.cells = T,
query.assay = "RNA",
skip.normalize = TRUE)
query.projected <- cellstate.predict(ref=ref, query=query.projected)
###############################################################################
# Map the subset of cells kept in the projected query back to the original
# query set of cells and assign "NA" to cells without a label.
cells.all <- colnames(query.obj)
cells.kept <- colnames(query.projected)
cells.kept <- gsub("^Q_","",cells.kept)   # projection prefixes cell names with "Q_"
# Position of each kept cell within the full cell list.
I <- sapply(cells.kept, function(z) which(cells.all == z))
cell.state.labels <- query.projected@meta.data[["functional.cluster"]]
cell.state.idents <- cells.all
cell.state.idents[I] <- cell.state.labels
cell.state.idents[-I] <- "NA"
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
                         col.name = "cell.state.idents")
# Same mapping for the per-cell prediction confidence.
pred.conf <- query.projected@meta.data[["functional.cluster.conf"]]
conf <- cells.all
conf[I] <- pred.conf
conf[-I] <- "NA"
query.obj <- AddMetaData(object = query.obj, metadata = conf, col.name = "conf")
# NOTE(review): cell.state.labels has one entry per *kept* cell while conf
# has one entry per cell of the full object, so cbind() recycles the shorter
# vector -- confirm whether cell.state.idents was intended here.
res <- cbind(cell.state.labels, conf)
# Fixed: the closing parenthesis of write.table() was missing, and sep="\t"
# was being passed (silently ignored) to paste0() instead of write.table().
write.table(res, file = paste0(outdir, "/", sample.name, "_cell_state_labels_wConf.xls"),
            sep = "\t")
###############################################################################
# Highlight individual cell state labels on original query UMAP (Figure S2A)
create_UMAP_DimPlot(query.obj, with.and.withoutlabels = TRUE,
                    cell.groups = "cell.state.idents",
                    outfile = paste0(outdir, "/top_lvl/", sample.name, "_UMAP_cell_state_idents.pdf"))
dir.create(file.path(paste0(outdir, "/per_cell_state_highlights")), showWarnings = FALSE, recursive = TRUE)
# Lump all unlabelled cells into a single "Other" level before plotting.
cell.state.idents[-I] <- "Other"
cell.state.idents <- as.factor(cell.state.idents)
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents, col.name = "cell.state.idents")
cell.states <- levels(cell.state.idents)
cell.states <- cell.states[!cell.states %in% "Other"]
group.colors <- c("red", "grey")
# One UMAP per cell state: the highlighted state in red, everything else grey.
for (cellstate in cell.states) {
  # Collapse every level except the current state into "Other".
  levels(query.obj@meta.data[["cell.state.idents"]])[which(levels(query.obj@meta.data[["cell.state.idents"]]) != cellstate)] <- "Other"
  names(group.colors) <- c(cellstate, "Other")
  # BUGFIX: was UMAP_DimPlot(), which is not defined anywhere; every other
  # call in this script uses create_UMAP_DimPlot() from utils.R.
  create_UMAP_DimPlot(query.obj, with.and.withoutlabels = FALSE,
                      cell.groups = "cell.state.idents", group.colors = group.colors,
                      plot.order = cellstate,
                      outfile = paste0(outdir, "/per_cell_state_highlights/", sample.name, "_UMAP_cell_state_idents_", cellstate, ".pdf"))
  # Restore the full factor so the next iteration starts from all levels.
  query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
                           col.name = "cell.state.idents")
}
###############################################################################
# Highlight Tpex and Tex on original Seurat UMAP (Figure 2D)
# TIL
I <- sort(union(which(cell.state.idents == "CD8_Tpex"), which(cell.state.idents == "CD8_Tex")))
cell.state.idents[-I] <- "Other"
table(cell.state.idents)
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents, col.name = "cell.labels")
table(query.obj$cell.labels)
ccols <- c("orange", "purple", "gray") # order of table of idents: CD8_Tex, CD8_Tpex, Other
# BUGFIX: the pdf() call was missing its closing parenthesis (syntax error).
pdf(file = paste0(outdir, "/", sample.name, "_TIL_Tpex_Tex_UMAP_highlight_plots_unordered_UMAP.pdf"))
# print() ensures the plot is rendered even when this script is source()d
# (top-level auto-printing does not happen under source()).
print(DimPlot(query.obj, reduction = "umap", group.by = "cell.labels", cols = ccols))
dev.off()
# LCMV (same plot for the LCMV atlas labels; fixed the same missing
# parenthesis in the commented-out pdf() call)
# I <- sort(union(which(cell.state.idents == "Tpex"), which(cell.state.idents == "Tex")))
# cell.state.idents[-I] <- "Other"
# table(cell.state.idents)
# query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
#                          col.name = "cell.labels")
# table(query.obj$cell.labels)
# ccols <- c("orange", "purple", "gray") # order of table of idents: Tex, Tpex, Other
# pdf(file = paste0(outdir, "/", sample.name, "_LCMV_Tpex_Tex_UMAP_highlight_plots_unordered_UMAP.pdf"))
# print(DimPlot(query.obj, reduction = "umap", group.by = "cell.labels", cols = ccols))
# dev.off()
###############################################################################
# Predicted states overlay on UMAP - showing only labels w/ pred.conf > 0.5.
I <- which(pred.conf < 0.5)   # low-confidence positions within the KEPT cells
pred.conf[I] <- NaN
# BUGFIX: the message previously said "< 0.05" although the cutoff above is 0.5.
print(sprintf("Number of cells with a confidence score < 0.5: %d", length(which(is.nan(pred.conf)))))
cell.state.labels <- query.projected@meta.data[["functional.cluster"]]
cell.state.labels[I] <- "NA"
# Map the subset of cells in projected query back to the original query set of
# cells and assign "NaN" to cells without a confidence score.
cells.all <- colnames(query.obj)
cells.kept <- colnames(query.projected)
cells.kept <- gsub("^Q_", "", cells.kept)
I.kept <- sapply(cells.kept, function(z) which(cells.all == z)) # named integer
query.pred.conf <- cells.all
query.pred.conf[I.kept] <- pred.conf
query.pred.conf[-I.kept] <- "NaN"
query.pred.conf <- as.numeric(query.pred.conf)
query.obj <- AddMetaData(query.obj, metadata = query.pred.conf, col.name = "functional.cluster.conf")
# CONSISTENCY: use <- for assignment, as everywhere else in this script.
outfile <- paste0(outdir, "/", sample.name, "_", "functional_cluster_conf_UMAP_highlight.pdf")
pdf(outfile)
# Blue (low) -> grey -> red (high) confidence gradient.
print( FeaturePlot(query.obj, reduction = "umap",
                   features = "functional.cluster.conf") &
       scale_colour_gradientn(colours = c("#0571B0", "#92C5DE", "#D3D3D3", "#F4A582","#CA0020")) )
dev.off()
cell.state.idents <- cells.all
cell.state.idents[I.kept] <- cell.state.labels
cell.state.idents[-I.kept] <- "filtered"
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents, col.name = "cell.state.idents")
create_UMAP_DimPlot(query.obj, with.and.withoutlabels = TRUE,
                    cell.groups = "cell.state.idents",
                    outfile = paste0(outdir, "/top_lvl/", sample.name, "_UMAP_cell_state_idents_NAs_notlumped.pdf"))
cell.state.idents[-I.kept] <- "NA"
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
                         col.name = "cell.state.idents")
create_UMAP_DimPlot(query.obj, with.and.withoutlabels = TRUE,
                    cell.groups = "cell.state.idents",
                    outfile = paste0(outdir, "/top_lvl/", sample.name, "_UMAP_cell_state_idents_NAs_lumped.pdf"))
dir.create(file.path(paste0(outdir, "/per_cell_state_highlights")), showWarnings = FALSE, recursive = TRUE)
# BUGFIX: this previously indexed with -I, but I holds positions within the
# kept-cell vector (pred.conf), not within cells.all. The parallel section
# earlier in the script lumps non-kept cells via the cells.all mapping, so
# -I.kept matches that logic. TODO(review): confirm intended behavior.
cell.state.idents[-I.kept] <- "Other"
cell.state.idents <- as.factor(cell.state.idents)
query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents, col.name = "cell.state.idents")
cell.states <- levels(cell.state.idents)
cell.states <- cell.states[!cell.states %in% "Other"]
group.colors <- c("red", "grey")
for (cellstate in cell.states) {
  # Collapse every level except the current state into "Other".
  levels(query.obj@meta.data[["cell.state.idents"]])[which(levels(query.obj@meta.data[["cell.state.idents"]]) != cellstate)] <- "Other"
  names(group.colors) <- c(cellstate, "Other")
  create_UMAP_DimPlot(query.obj, with.and.withoutlabels = FALSE,
                      cell.groups = "cell.state.idents", group.colors = group.colors,
                      plot.order = cellstate,
                      outfile = paste0(outdir, "/per_cell_state_highlights/",
                                       sample.name, "_UMAP_cell_state_idents_", cellstate, ".pdf"))
  # Restore the full factor for the next iteration.
  query.obj <- AddMetaData(object = query.obj, metadata = cell.state.idents,
                           col.name = "cell.state.idents")
}
|
# Extract the API error message from a parsed arXiv result, or return NULL
# when the result does not look like an error response. The arXiv API reports
# failures as a feed containing a single "entry" whose title is "Error" and
# whose author is "arXiv api core"; the message lives in that entry's summary.
arxiv_error_message <- function(listresult)
{
    entry_count <- sum(names(listresult) == "entry")
    if (entry_count != 1) {
        return(NULL)
    }
    entry <- listresult[["entry"]]
    if (!all(c("title", "author", "summary") %in% names(entry))) {
        return(NULL)
    }
    looks_like_api_error <- entry$title == "Error" &&
        "name" %in% names(entry$author) &&
        entry$author$name == "arXiv api core"
    if (looks_like_api_error) entry$summary else NULL
}
| /R/errors.R | permissive | ropensci/aRxiv | R | false | false | 568 | r |
# pull out error message, or return NULL
# The arXiv API reports failures as a feed with a single "entry" whose title
# is "Error" and whose author is "arXiv api core"; the human-readable message
# is carried in that entry's summary field.
arxiv_error_message <-
function(listresult)
{
# number of top-level "entry" elements in the parsed result
nentries <- sum(names(listresult)=="entry")
if(nentries == 1) { # one entry
entry <- listresult[["entry"]]
# single entry with Error as title and "arXiv api core" as author?
if(all(c("title", "author", "summary") %in% names(entry)) &&
entry$title == "Error" &&
"name" %in% names(entry$author) && entry$author$name == "arXiv api core") {
return(entry$summary)
}
}
# ok; return NULL
NULL
}
|
### loading packages
# Attach each required package, installing it first if it is missing.
# require() returns FALSE (instead of erroring) when a package is absent,
# which is what makes this install-on-demand pattern work.
for (pkg in c("ggplot2", "maps", "mapdata", "fields")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, dependencies = TRUE)
    library(pkg, character.only = TRUE)
  }
}
pdf("chloropleth_maps.pdf", height = 7, width = 10)  # all maps share one PDF
### 1) average temperature USA
# 1.1) reading in and processing data
usa_map <- map_data(map = "state")                        # state polygon outlines
usa_temp <- read.csv("usa_temp.csv", comment.char = "#")  # per-state temperatures
usa_data <- merge(usa_temp, usa_map,
                  by.x = "state", by.y = "region") # case sensitive
# merge() scrambles polygon vertex order; re-sort so polygons draw correctly.
# IMPROVEMENT: sort by the column as a vector; order() on a one-column
# data.frame relies on fragile coercion behavior.
usa_sorted <- usa_data[order(usa_data$order), ]
# 1.2) plotting USA chloropleth maps (each print() adds a page to the PDF)
usa_map1 <- ggplot(data = usa_sorted) +
  geom_polygon(aes(x = long, y = lat,
                   group = group, fill = fahrenheit)) +
  ggtitle("USA Map 1")
print(usa_map1)
usa_map2 <- usa_map1 + coord_map("polyconic") +
  ggtitle("USA Map 2 - polyconic")
print(usa_map2)
usa_map3 <- usa_map2 +
  geom_path(aes(x = long, y = lat, group = group),
            color = "black") +
  ggtitle("USA Map 3 - black contours")
print(usa_map3)
usa_map4 <- usa_map3 +
  scale_fill_gradient(low = "yellow", high = "red") +
  ggtitle("USA Map 4 - gradient 1")
print(usa_map4)
# Note: map 5 deliberately builds on map 3 (not map 4) to swap the gradient.
# IMPROVEMENT: mean() of the column vector gives the same scalar midpoint as
# colMeans() on a one-column data.frame, without the named-vector wrapper.
usa_map5 <- usa_map3 +
  scale_fill_gradient2(low = "steelblue", mid = "yellow",
                       high = "red", midpoint = mean(usa_sorted$fahrenheit)) +
  ggtitle("USA Map 5 - gradient 2")
print(usa_map5)
### 2) South American population count
# 2.1) reading in and processing data
south_am_map <- map_data("worldHires",
                         region = c("Argentina", "Bolivia", "Brazil",
                                    "Chile", "Colombia", "Ecuador", "Falkland Islands",
                                    "French Guiana", "Guyana", "Paraguay", "Peru",
                                    "Suriname", "Uruguay", "Venezuela"))
south_am_pop <- read.csv("south_america_pop.csv",
                         comment.char = "#")
south_am_data <- merge(south_am_pop, south_am_map,
                       by.x = "country", by.y = "region")
# Restore polygon vertex order after merge(); use the column as a vector
# (order() on a one-column data.frame relies on fragile coercion behavior).
south_am_sorted <- south_am_data[order(south_am_data$order), ]
# 2.2) creating chloropleth maps
south_am_map1 <- ggplot(data = south_am_sorted) +
  geom_polygon(aes(x = long, y = lat,
                   group = group, fill = population)) +
  geom_path(aes(x = long, y = lat, group = group),
            color = "black") +
  coord_map("polyconic") +
  scale_fill_gradient(low = "lightyellow",
                      high = "red", guide = "legend")
print(south_am_map1)
# CONSISTENCY FIX: use <- for assignment (was =; the rest of the script uses <-).
south_am_map2 <- south_am_map1 +
  theme(panel.background = element_blank(),
        axis.text = element_blank(),
        axis.title = element_blank(),
        axis.ticks = element_blank())
print(south_am_map2)
dev.off()  # close chloropleth_maps.pdf
cat("Heat maps saved as ", getwd(),
    "/chloropleth_maps.pdf", sep = "")
### 3) Volcano contour plot
pdf("contour_plot.pdf", height = 7, width = 10)
data(volcano)                 # built-in elevation matrix (datasets package)
image.plot(volcano)           # colour image with legend (fields package)
contour(volcano, add = TRUE)  # overlay contour lines on the image
dev.off()
# BUGFIX: added a space after "and" so the two paths do not run together
# (sep = "" suppresses cat()'s automatic spacing between arguments).
cat("Heat maps and contour plot saved as ", getwd(),
    "/chloropleth_maps.pdf and ", getwd(), "/contour_plot.pdf", sep = "")
| /book/packt/Instant.Heat.Maps.in.R.How-to/Code_Final/5644OS_04_01.r | no_license | xenron/sandbox-da-r | R | false | false | 3,067 | r | ### loading packages
# Chloropleth (heat) maps of US temperature and South American population,
# plus a volcano contour plot. Output: chloropleth_maps.pdf, contour_plot.pdf.
# Attach each required package, installing it first if it is missing.
for (pkg in c("ggplot2", "maps", "mapdata", "fields")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, dependencies = TRUE)
    library(pkg, character.only = TRUE)
  }
}
pdf("chloropleth_maps.pdf", height = 7, width = 10)
### 1) average temperature USA
# 1.1) reading in and processing data
usa_map <- map_data(map = "state")
usa_temp <- read.csv("usa_temp.csv", comment.char = "#")
usa_data <- merge(usa_temp, usa_map, by.x = "state", by.y = "region")  # case sensitive
usa_sorted <- usa_data[order(usa_data["order"]), ]
# 1.2) plotting USA chloropleth maps (each print() adds a PDF page)
usa_map1 <- ggplot(data = usa_sorted) +
  geom_polygon(aes(x = long, y = lat, group = group, fill = fahrenheit)) +
  ggtitle("USA Map 1")
print(usa_map1)
usa_map2 <- usa_map1 +
  coord_map("polyconic") +
  ggtitle("USA Map 2 - polyconic")
print(usa_map2)
usa_map3 <- usa_map2 +
  geom_path(aes(x = long, y = lat, group = group), color = "black") +
  ggtitle("USA Map 3 - black contours")
print(usa_map3)
usa_map4 <- usa_map3 +
  scale_fill_gradient(low = "yellow", high = "red") +
  ggtitle("USA Map 4 - gradient 1")
print(usa_map4)
# Map 5 builds on map 3 (not map 4) to swap in a diverging gradient.
usa_map5 <- usa_map3 +
  scale_fill_gradient2(low = "steelblue", mid = "yellow", high = "red",
                       midpoint = colMeans(usa_sorted["fahrenheit"])) +
  ggtitle("USA Map 5 - gradient 2")
print(usa_map5)
### 2) South American population count
# 2.1) reading in and processing data
south_am_countries <- c("Argentina", "Bolivia", "Brazil",
                        "Chile", "Colombia", "Ecuador", "Falkland Islands",
                        "French Guiana", "Guyana", "Paraguay", "Peru",
                        "Suriname", "Uruguay", "Venezuela")
south_am_map <- map_data("worldHires", region = south_am_countries)
south_am_pop <- read.csv("south_america_pop.csv", comment.char = "#")
south_am_data <- merge(south_am_pop, south_am_map,
                       by.x = "country", by.y = "region")
south_am_sorted <- south_am_data[order(south_am_data["order"]), ]
# 2.2) creating chloropleth maps
south_am_map1 <- ggplot(data = south_am_sorted) +
  geom_polygon(aes(x = long, y = lat, group = group, fill = population)) +
  geom_path(aes(x = long, y = lat, group = group), color = "black") +
  coord_map("polyconic") +
  scale_fill_gradient(low = "lightyellow", high = "red", guide = "legend")
print(south_am_map1)
south_am_map2 <- south_am_map1 +
  theme(panel.background = element_blank(),
        axis.text = element_blank(),
        axis.title = element_blank(),
        axis.ticks = element_blank())
print(south_am_map2)
dev.off()
cat("Heat maps saved as ", getwd(), "/chloropleth_maps.pdf", sep = "")
### 3) Volcano contour plot
pdf("contour_plot.pdf", height = 7, width = 10)
data(volcano)
image.plot(volcano)
contour(volcano, add = TRUE)
dev.off()
cat("Heat maps and contour plot saved as ", getwd(),
    "/chloropleth_maps.pdf and", getwd(), "/contour_plot.pdf", sep = "")
|
\docType{data}
\name{GeomJoinedSegment}
\alias{GeomJoinedSegment}
\title{Create a geom_joinedsegment which joins together segments with 'round' corners. Looks MUCH nicer.
See https://groups.google.com/forum/?fromgroups#!topic/ggplot2/movv0f_MSuY}
\format{proto object
$ draw :function (., data, scales, coordinates, arrow = NULL, ...)
..- attr(*, "srcref")=Class 'srcref' atomic [1:8] 639 10 657 1 10 1 639 657
.. .. ..- attr(*, "srcfile")=Classes 'srcfilecopy', 'srcfile' <environment: 0x105bb6f78>
$ objname: chr "geom_joinedsegment"
parent: proto object
.. parent: proto object
.. .. parent: proto object}
\usage{
GeomJoinedSegment
}
\description{
Create a geom_joinedsegment which joins together segments
with 'round' corners. Looks MUCH nicer. See
https://groups.google.com/forum/?fromgroups#!topic/ggplot2/movv0f_MSuY
}
\keyword{datasets}
| /man/GeomJoinedSegment.Rd | no_license | gjuggler/ggphylo | R | false | false | 876 | rd | \docType{data}
\name{GeomJoinedSegment}
\alias{GeomJoinedSegment}
\title{Create a geom_joinedsegment which joins together segments with 'round' corners. Looks MUCH nicer.
See https://groups.google.com/forum/?fromgroups#!topic/ggplot2/movv0f_MSuY}
\format{proto object
$ draw :function (., data, scales, coordinates, arrow = NULL, ...)
..- attr(*, "srcref")=Class 'srcref' atomic [1:8] 639 10 657 1 10 1 639 657
.. .. ..- attr(*, "srcfile")=Classes 'srcfilecopy', 'srcfile' <environment: 0x105bb6f78>
$ objname: chr "geom_joinedsegment"
parent: proto object
.. parent: proto object
.. .. parent: proto object}
\usage{
GeomJoinedSegment
}
\description{
Create a geom_joinedsegment which joins together segments
with 'round' corners. Looks MUCH nicer. See
https://groups.google.com/forum/?fromgroups#!topic/ggplot2/movv0f_MSuY
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkgdepends.R
\docType{package}
\name{pkgdepends-package}
\alias{pkgdepends}
\alias{pkgdepends-package}
\title{pkgdepends: Package Dependency Resolution and Downloads}
\description{
pkgdepends is a toolkit for package dependencies, downloads and
installations, to be used in other packages. If you are looking for a
package manager, see \href{https://github.com/r-lib/pak}{pak}.
}
\section{Features}{
\itemize{
\item Look up package dependencies recursively.
\item Visualize package dependencies.
\item Download packages and their dependencies.
\item Install downloaded packages.
\item Includes a dependency solver to find a consistent set of
dependencies.
\item Supports CRAN and Bioconductor packages automatically.
\item Supports packages on GitHub.
\item Supports local package file and trees.
\item Supports the \code{Remotes} entry in the \code{DESCRIPTION} file.
\item Caches metadata and downloaded packages via
\href{https://github.com/r-lib/pkgcache}{pkgcache}
\item Performs all downloads and HTTP queries concurrently.
\item Builds and installs packages in parallel.
}
}
\section{Install}{
Once on CRAN, install the package with:\if{html}{\out{<div class="sourceCode r">}}\preformatted{install.packages("pkgdepends")
}\if{html}{\out{</div>}}
}
\section{Usage}{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(pkgdepends)
}\if{html}{\out{</div>}}
\subsection{Package references}{
A package reference (ref) specifies a location from which an R package
can be obtained from. Examples:\preformatted{devtools
cran::devtools
bioc::Biobase
r-lib/pkgdepends
https://github.com/r-lib/pkgdepends
local::~/works/shiny
}
See \link[=pkg_refs]{“Package references”} for details.
}
\subsection{Package dependencies}{
Dependencies of the development version of the cli package:\if{html}{\out{<div class="sourceCode r">}}\preformatted{pd <- new_pkg_deps("r-lib/pkgcache")
pd$solve()
pd$draw()
}\if{html}{\out{</div>}}\preformatted{#> r-lib/pkgcache 1.1.1.9000 [new][bld][cmp][dl] (unknown size)
#> +-assertthat 0.2.1 [new][dl] (52.47 kB)
#> +-callr 3.5.1 [new]
#> | +-processx 3.4.4 [new]
#> | | +-ps 1.4.0 [new]
#> | | \-R6 2.5.0 [new]
#> | \-R6
#> +-cli 2.1.0 [new]
#> | +-assertthat
#> | +-crayon 1.3.4 [new][dl] (748.25 kB)
#> | +-glue 1.4.2 [new]
#> | \-fansi 0.4.1 [new][dl] (210.40 kB)
#> +-curl 4.3 [new][dl] (741.06 kB)
#> +-digest 0.6.27 [new]
#> +-filelock 1.0.2 [new][dl] (26.67 kB)
#> +-glue
#> +-prettyunits 1.1.1 [new][dl] (34.79 kB)
#> +-R6
#> +-processx
#> +-rappdirs 0.3.1 [new][dl] (145.56 kB)
#> +-rlang 0.4.8 [new]
#> +-tibble 3.0.4 [new]
#> | +-cli
#> | +-crayon
#> | +-ellipsis 0.3.1 [new][dl] (33.48 kB)
#> | | \-rlang
#> | +-fansi
#> | +-lifecycle 0.2.0 [new][dl] (91.64 kB)
#> | | +-glue
#> | | \-rlang
#> | +-magrittr 2.0.1 [new][dl] (unknown size)
#> | +-pillar 1.4.6 [new]
#> | | +-cli
#> | | +-crayon
#> | | +-ellipsis
#> | | +-fansi
#> | | +-lifecycle
#> | | +-rlang
#> | | +-utf8 1.1.4 [new][dl] (195.28 kB)
#> | | \-vctrs 0.3.5 [new][dl] (unknown size)
#> | | +-ellipsis
#> | | +-digest
#> | | +-glue
#> | | \-rlang
#> | +-pkgconfig 2.0.3 [new][dl] (17.63 kB)
#> | +-rlang
#> | \-vctrs
#> \-uuid 0.1-4 [new][dl] (27.75 kB)
#>
#> Key: [new] new | [dl] download | [bld] build | [cmp] compile
}
See the \code{\link{pkg_deps}} class for details.
}
\subsection{Package downloads}{
Downloading all dependencies of a package:\if{html}{\out{<div class="sourceCode r">}}\preformatted{pdl <- new_pkg_download_proposal("r-lib/cli")
pdl$resolve()
pdl$download()
}\if{html}{\out{</div>}}
See the \code{\link{pkg_download_proposal}} class for
details.
}
\subsection{Package installation}{
Installing or updating a set of package:\if{html}{\out{<div class="sourceCode r">}}\preformatted{lib <- tempfile()
pdi <- new_pkg_installation_proposal(
"r-lib/cli",
config = list(library = lib)
)
pdi$solve()
pdi$download()
pdi$install()
}\if{html}{\out{</div>}}
}
\subsection{Dependency resolution}{
\code{\link{pkg_deps}},
\code{\link{pkg_download_proposal}} and
\code{\link{pkg_installation_proposal}} all resolve
their dependencies recursively, to obtain information about all packages
needed for the specified \link[=pkg_refs]{package references}. See
\link[=pkg_resolution]{“Dependency resolution”} for details.
}
\subsection{The dependency solver}{
The dependency solver takes the resolution information, and works out
the exact versions of each package that must be installed, such that
version and other requirements are satisfied. See \link[=pkg_solution]{“The dependency solver”} for details.
}
\subsection{Installation plans}{
\code{\link{pkg_installation_proposal}} can create
installation plans, and then also install them. It is also possible to
import installation plans that were created by other tools. See
\link[=install_plans]{“Installation plans”} for details.
}
\subsection{Configuration}{
The details of \code{\link{pkg_deps}},
\code{\link{pkg_download_proposal}} and
\code{\link{pkg_installation_proposal}} can be tuned
with a list of configuration options. See
\link[=pkg_config]{“Configuration”} for details.
}
}
\section{Related}{
\itemize{
\item \href{https://github.com/r-lib/pak}{pak} – R package manager
\item \href{https://github.com/r-lib/pkgcache}{pkgcache} – Metadata and package
cache
\item \href{https://github.com/r-lib/devtools}{devtools} – Tools for R package
developers
}
}
\section{License}{
MIT (c) RStudio
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/r-lib/pkgdepends#readme}
\item Report bugs at \url{https://github.com/r-lib/pkgdepends/issues}
}
}
| /man/pkgdepends-package.Rd | permissive | isabella232/pkgdepends | R | false | true | 5,680 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkgdepends.R
\docType{package}
\name{pkgdepends-package}
\alias{pkgdepends}
\alias{pkgdepends-package}
\title{pkgdepends: Package Dependency Resolution and Downloads}
\description{
pkgdepends is a toolkit for package dependencies, downloads and
installations, to be used in other packages. If you are looking for a
package manager, see \href{https://github.com/r-lib/pak}{pak}.
}
\section{Features}{
\itemize{
\item Look up package dependencies recursively.
\item Visualize package dependencies.
\item Download packages and their dependencies.
\item Install downloaded packages.
\item Includes a dependency solver to find a consistent set of
dependencies.
\item Supports CRAN and Bioconductor packages automatically.
\item Supports packages on GitHub.
\item Supports local package file and trees.
\item Supports the \code{Remotes} entry in the \code{DESCRIPTION} file.
\item Caches metadata and downloaded packages via
\href{https://github.com/r-lib/pkgcache}{pkgcache}
\item Performs all downloads and HTTP queries concurrently.
\item Builds and installs packages in parallel.
}
}
\section{Install}{
Once on CRAN, install the package with:\if{html}{\out{<div class="sourceCode r">}}\preformatted{install.packages("pkgdepends")
}\if{html}{\out{</div>}}
}
\section{Usage}{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(pkgdepends)
}\if{html}{\out{</div>}}
\subsection{Package references}{
A package reference (ref) specifies a location from which an R package
can be obtained from. Examples:\preformatted{devtools
cran::devtools
bioc::Biobase
r-lib/pkgdepends
https://github.com/r-lib/pkgdepends
local::~/works/shiny
}
See \link[=pkg_refs]{“Package references”} for details.
}
\subsection{Package dependencies}{
Dependencies of the development version of the cli package:\if{html}{\out{<div class="sourceCode r">}}\preformatted{pd <- new_pkg_deps("r-lib/pkgcache")
pd$solve()
pd$draw()
}\if{html}{\out{</div>}}\preformatted{#> r-lib/pkgcache 1.1.1.9000 [new][bld][cmp][dl] (unknown size)
#> +-assertthat 0.2.1 [new][dl] (52.47 kB)
#> +-callr 3.5.1 [new]
#> | +-processx 3.4.4 [new]
#> | | +-ps 1.4.0 [new]
#> | | \-R6 2.5.0 [new]
#> | \-R6
#> +-cli 2.1.0 [new]
#> | +-assertthat
#> | +-crayon 1.3.4 [new][dl] (748.25 kB)
#> | +-glue 1.4.2 [new]
#> | \-fansi 0.4.1 [new][dl] (210.40 kB)
#> +-curl 4.3 [new][dl] (741.06 kB)
#> +-digest 0.6.27 [new]
#> +-filelock 1.0.2 [new][dl] (26.67 kB)
#> +-glue
#> +-prettyunits 1.1.1 [new][dl] (34.79 kB)
#> +-R6
#> +-processx
#> +-rappdirs 0.3.1 [new][dl] (145.56 kB)
#> +-rlang 0.4.8 [new]
#> +-tibble 3.0.4 [new]
#> | +-cli
#> | +-crayon
#> | +-ellipsis 0.3.1 [new][dl] (33.48 kB)
#> | | \-rlang
#> | +-fansi
#> | +-lifecycle 0.2.0 [new][dl] (91.64 kB)
#> | | +-glue
#> | | \-rlang
#> | +-magrittr 2.0.1 [new][dl] (unknown size)
#> | +-pillar 1.4.6 [new]
#> | | +-cli
#> | | +-crayon
#> | | +-ellipsis
#> | | +-fansi
#> | | +-lifecycle
#> | | +-rlang
#> | | +-utf8 1.1.4 [new][dl] (195.28 kB)
#> | | \-vctrs 0.3.5 [new][dl] (unknown size)
#> | | +-ellipsis
#> | | +-digest
#> | | +-glue
#> | | \-rlang
#> | +-pkgconfig 2.0.3 [new][dl] (17.63 kB)
#> | +-rlang
#> | \-vctrs
#> \-uuid 0.1-4 [new][dl] (27.75 kB)
#>
#> Key: [new] new | [dl] download | [bld] build | [cmp] compile
}
See the \code{\link{pkg_deps}} class for details.
}
\subsection{Package downloads}{
Downloading all dependencies of a package:\if{html}{\out{<div class="sourceCode r">}}\preformatted{pdl <- new_pkg_download_proposal("r-lib/cli")
pdl$resolve()
pdl$download()
}\if{html}{\out{</div>}}
See the \code{\link{pkg_download_proposal}} class for
details.
}
\subsection{Package installation}{
Installing or updating a set of package:\if{html}{\out{<div class="sourceCode r">}}\preformatted{lib <- tempfile()
pdi <- new_pkg_installation_proposal(
"r-lib/cli",
config = list(library = lib)
)
pdi$solve()
pdi$download()
pdi$install()
}\if{html}{\out{</div>}}
}
\subsection{Dependency resolution}{
\code{\link{pkg_deps}},
\code{\link{pkg_download_proposal}} and
\code{\link{pkg_installation_proposal}} all resolve
their dependencies recursively, to obtain information about all packages
needed for the specified \link[=pkg_refs]{package references}. See
\link[=pkg_resolution]{“Dependency resolution”} for details.
}
\subsection{The dependency solver}{
The dependency solver takes the resolution information, and works out
the exact versions of each package that must be installed, such that
version and other requirements are satisfied. See \link[=pkg_solution]{“The dependency solver”} for details.
}
\subsection{Installation plans}{
\code{\link{pkg_installation_proposal}} can create
installation plans, and then also install them. It is also possible to
import installation plans that were created by other tools. See
\link[=install_plans]{“Installation plans”} for details.
}
\subsection{Configuration}{
The details of \code{\link{pkg_deps}},
\code{\link{pkg_download_proposal}} and
\code{\link{pkg_installation_proposal}} can be tuned
with a list of configuration options. See
\link[=pkg_config]{“Configuration”} for details.
}
}
\section{Related}{
\itemize{
\item \href{https://github.com/r-lib/pak}{pak} – R package manager
\item \href{https://github.com/r-lib/pkgcache}{pkgcache} – Metadata and package
cache
\item \href{https://github.com/r-lib/devtools}{devtools} – Tools for R package
developers
}
}
\section{License}{
MIT (c) RStudio
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/r-lib/pkgdepends#readme}
\item Report bugs at \url{https://github.com/r-lib/pkgdepends/issues}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxr_extract_clumped_genes.R
\name{mxr_extract_clumped_genes}
\alias{mxr_extract_clumped_genes}
\title{Extract Genes from the Clumps.}
\usage{
mxr_extract_clumped_genes(out_prefix = "", verbose = FALSE)
}
\arguments{
\item{out_prefix}{Path and prefix of the output files.}
\item{verbose}{(Optional) Show verbose output. (DEFAULT=FALSE)}
}
\value{
TRUE if the PLINK run completed successfully. FALSE, otherwise.
}
\description{
\code{mxr_extract_clumped_genes} extracts the genes that overlap from the
clumps.
}
\details{
This function needs to be run after the \code{mxr_clump} function.
}
| /man/mxr_extract_clumped_genes.Rd | no_license | roslen/mxr | R | false | true | 669 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxr_extract_clumped_genes.R
\name{mxr_extract_clumped_genes}
\alias{mxr_extract_clumped_genes}
\title{Extract Genes from the Clumps.}
\usage{
mxr_extract_clumped_genes(out_prefix = "", verbose = FALSE)
}
\arguments{
\item{out_prefix}{Path and prefix of the output files.}
\item{verbose}{(Optional) Show verbose output. (DEFAULT=FALSE)}
}
\value{
TRUE if the PLINK run completed successfully. FALSE, otherwise.
}
\description{
\code{mxr_extract_clumped_genes} extracts the genes that overlap from the
clumps.
}
\details{
This function needs to be run after the \code{mxr_clump} function.
}
|
## read and select date
# "?" marks missing values in the raw file; keep only 1-2 Feb 2007.
x <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
x <- rbind(x[x$Date == "1/2/2007", ],
           x[x$Date == "2/2/2007", ])
## convert to date / time classes
x$DateTime <- as.POSIXct(paste(x$Date, x$Time), format = "%d/%m/%Y %H:%M:%S")
## open png graphic device
png(filename = "Plot1.png", width = 480, height = 480, bg = "transparent")
## Plot
hist(x$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
## close graphic device
dev.off() | /plot1.R | no_license | ChronoVision/ExData_Plotting1 | R | false | false | 563 | r | ## read and select date
# Load the power-consumption data ("?" marks NA) and keep the two target days.
power <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
power <- rbind(power[power$Date == "1/2/2007", ],
               power[power$Date == "2/2/2007", ])
# Combine Date and Time into a single POSIXct timestamp.
power$DateTime <- as.POSIXct(paste(power$Date, power$Time),
                             format = "%d/%m/%Y %H:%M:%S")
# Render the histogram to Plot1.png (480x480, transparent background).
png(filename = "Plot1.png", width = 480, height = 480, bg = "transparent")
hist(power$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
# The graphics device is closed by the dev.off() call that follows.
dev.off() |
source("./word-count.R")
library(testthat)
# all.equal() on lists is order-sensitive. This helper instead checks that
# (a) both lists carry the same set of names and (b) the element stored under
# each name matches, regardless of position.
expect_equal_pairs <- function(object, expected) {
  expect_equal(sort(names(object)),
               sort(names(expected)),
               info = "names in lists differ")
  lapply(names(expected), function(nm) {
    expect_equal(object[nm], expected[nm], info = "list element missing")
  })
  invisible(NULL)
}
# Behavioral tests for word_count(); each case exercises one documented rule.
test_that("count one word", {
  expect_equal_pairs(word_count("word"), list("word" = 1))
})

test_that("count one of each word", {
  expect_equal_pairs(word_count("one of each"),
                     list("one" = 1, "of" = 1, "each" = 1))
})

test_that("multiple occurrences of a word", {
  expect_equal_pairs(word_count("one fish two fish red fish blue fish"),
                     list("one" = 1, "fish" = 4, "two" = 1,
                          "red" = 1, "blue" = 1))
})

test_that("ignore punctuation", {
  expect_equal_pairs(word_count("car : carpet as java : javascript!!&@$%^&"),
                     list("car" = 1, "carpet" = 1, "as" = 1,
                          "java" = 1, "javascript" = 1))
})

test_that("include numbers", {
  expect_equal_pairs(word_count("testing, 1, 2 testing"),
                     list("testing" = 2, "1" = 1, "2" = 1))
})

test_that("normalize case", {
  expect_equal_pairs(word_count("go Go GO Stop stop"),
                     list("go" = 3, "stop" = 2))
})

message("All tests passed for exercise: word-count")
| /Taller06-Strings/exs/word_count/test_word_count.R | no_license | rlabuonora/taller_R | R | false | false | 1,782 | r | source("./word-count.R")
library(testthat)

# Order-insensitive list comparison: same name set, then element-by-element
# equality under each name (all.equal() on lists would demand matching order).
expect_equal_pairs <- function(object, expected) {
  expect_equal(sort(names(object)), sort(names(expected)),
               info = "names in lists differ")
  for (name in names(expected)) {
    expect_equal(object[name], expected[name], info = "list element missing")
  }
}

# One test per documented word_count() rule.
test_that("count one word", {
  expect_equal_pairs(word_count("word"), list("word" = 1))
})

test_that("count one of each word", {
  expect_equal_pairs(word_count("one of each"),
                     list("one" = 1, "of" = 1, "each" = 1))
})

test_that("multiple occurrences of a word", {
  expect_equal_pairs(
    word_count("one fish two fish red fish blue fish"),
    list("one" = 1, "fish" = 4, "two" = 1, "red" = 1, "blue" = 1)
  )
})

test_that("ignore punctuation", {
  expect_equal_pairs(
    word_count("car : carpet as java : javascript!!&@$%^&"),
    list("car" = 1, "carpet" = 1, "as" = 1, "java" = 1, "javascript" = 1)
  )
})

test_that("include numbers", {
  expect_equal_pairs(word_count("testing, 1, 2 testing"),
                     list("testing" = 2, "1" = 1, "2" = 1))
})

test_that("normalize case", {
  expect_equal_pairs(word_count("go Go GO Stop stop"),
                     list("go" = 3, "stop" = 2))
})

message("All tests passed for exercise: word-count")
|
library(kmconfband)
### Name: noe.compute.cgh
### Title: Intermediate Steps in the Noe Recursions for the Exact Coverage
###   Probability of a Nonparametric Confidence Band for the Survivor
###   Function
### Aliases: noe.compute.cgh
### ** Examples
## Check of Noe recursion calculations.
# Lower/upper band limits for n = 4 points; note upper mirrors lower
# (upper = 1 - rev(lower)).
lower <- c(0.001340, 0.028958, 0.114653, 0.335379)
upper <- c(0.664621, 0.885347, 0.971042, 0.998660)
noe.compute.cgh(4, lower, upper)
| /data/genthat_extracted_code/kmconfband/examples/noe.compute.cgh.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 406 | r | library(kmconfband)
### Name: noe.compute.cgh
### Title: Intermediate Steps in the Noe Recursions for the Exact Coverage
###   Probability of a Nonparametric Confidence Band for the Survivor
###   Function
### Aliases: noe.compute.cgh
### ** Examples
## Check of Noe recursion calculations.
# a: presumably the lower band limits at the 4 evaluation points -- confirm
a<-c(0.001340,0.028958,0.114653,0.335379)
# b: presumably the upper band limits; note b equals rev(1 - a) (symmetric band)
b<-c(0.664621,0.885347,0.971042,0.998660)
# Run the recursion intermediate-step computation for a band on 4 points.
noe.compute.cgh(4,a,b)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consolidate.r
\name{consolidate}
\alias{consolidate}
\title{Consolidate a set of similarly-cohorted data sets}
\usage{
consolidate(data_list, on)
}
\arguments{
\item{data_list}{the list of similarly-cohorted data sets.}
\item{on}{the variable to consolidate on.}
}
\description{
Consolidate a set of similarly-cohorted data sets
}
| /man/consolidate.Rd | permissive | kaneplusplus/forceps | R | false | true | 410 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consolidate.r
\name{consolidate}
\alias{consolidate}
\title{Consolidate a set of similarly-cohorted data sets}
\usage{
consolidate(data_list, on)
}
\arguments{
\item{data_list}{the list of similarly-cohorted data sets.}
\item{on}{the variable to consolidate on.}
}
\description{
Consolidate a set of similarly-cohorted data sets
}
|
/code/saopaulo.R | permissive | ErisonBarros/Declividade-Rede-Viaria | R | false | false | 5,098 | r | ||
#' @export mp.read.results
#' @title Read RAMAS (c) *.MP file with results
#' @description
#' \code{mp.read.results} reads in the *.MP file used in the RAMAS Metapop software.
#' The *.MP file is a simple text file containing all of the values necessary
#' to configure a metapopulation simulation. This function is an extension of
#' \link{mp.read} that includes the reading of the results section of a *.MP file.
#'
#' @param mpFile The name of the *.MP file to be read.
#'
#' @return `mp.read` returns a nested list object. The first level includes
#' two elements, the version of the *.MP file and list names "mp.file". The
#' "mp.file" list is composed of 53 elements that provide all of the information
#' found in the *.MP file and a list of all of the results from the *.MP file.
#'
#' @details
#' mp.file 'list' structure elements. Most element names from RAMAS Metapop source code
#' The assignment of each element of the mp.file list is based on the Metapopulation input file
#' type for version 5.0.
#' \enumerate{
#' \item MaxRep: maximum number of replications (integer; 1-10000)
#' \item MaxDur: Duration of simulation (integer; 1-500)
#' \item Demog_Stoch: Use demographic stochasticity? (boolean)
#' \item Stages: Number of stages (integer; 1-50)
#' \item Stages.IgnoreConstraints: Ignore constraints? (boolean - TRUE/FALSE)
#' \item Cat1: Information associated with catastrophe 1
#' This information is subdivided in RAMAS Input, but will not be subdivided
#' in the first version of the sensitivity analysis package
#' \item Cat2: Information associated with catastrophe 2
#' See note associated with Cat1
#' \item DDActing: Information regarding density dependence
#' \item Distrib: Distribution (Normal or Lognormal) to use for Environmental
#' Stochasticity.
#' \item AdvancedStochSetting: Advanced Environmental Stoch. Settings: (0 or PoolVar or
#' NegSurvCorr)
#' \item dispCV: Coefficient of variation for dispersal
#' \item WhenBelow: When below local threshold (count in total or don't count or assume dead)
#' \item corrwith: Within population correlation -
#' 1 - (F, S, K Correlated)
#' 2 - (F and S correlated, K independent)
#' 3 - (F, S, K independent
#' \item DispersalDependsOnTargetPopK: Just like the variable name implies; boolean
#' \item DDBasis: Density basis type (char)
#' \item PopSpecificDD: Density dependence type is population specific (boolean - Yes/No)
#' \item DDforAllPop: Density dependence type for all populations (char)
#' \item UserDllFileName: Filename for user-defined density dependence function
#' \item TimeStepSize: Time step size
#' \item TimeStepUnits: Time step units
#' \item SexStructure: Description of Sex Structure (char)
#' \item FemaleStages: Number of female stages
#' \item MatingSystem: Description of Mating system selected (char)
#' \item FemalesPerMale: Number of females per male as given by user
#' \item MalesPerFemale: Number of males per female as given by user
#' \item CVerror: sampling error for N
#' \item FirstTforRisk: Initial number of time steps to exclude from risk calculations
#' Population Parameters
#' \item PopList: Individual population data - saved as a 'nested list' structure, one 'list' for
#' each population.
#' Dispersal (Migration) Parameters
#' \item UseDispDistFunc: True if dispersal rates are based on dispersal distance function; false if
#' they are specified in the dispersal matrix
#' \item DispDistFunc: Dispersal-distance function parameters - a, b, c, Dmax - Mij = a exp(-Dij^c/b)
#' \item DispMatr: Dispersal matrix, as defined or calculated in 'fill.matrix.df.r'
#' Correlation Parameters
#' \item UseCorrDistFunc: True if correlations between populations is based on correlation distance
#' function; False if they are specified in the correlation matrix
#' \item CorrDistFunc: Correlation-distance function parameters - a, b, c - Cij = a exp(-Dij^c/b)
#' \item CorrMatr: User specified correlation matrix, as defined or calculated in 'fill.matrix.df.r'
#' Stage Matrices Information
#' \item StMatrNumber: number of stage matrices defined by user in *.mp file
#' \item StMatr: a 'list' object containing information about each stage matrix. For each stage
#' matrix the following information is stored:
#' StMatrName: Name of the stage matrix
#' StMatrSurvMult: Survival multiplier for stage matrix (for generating new matrices; not used in simulation)
#' StMatrFecMult: Fecundity multiplier for stage matrix (for generating new matrices; not used in simulation)
#' Matr: a matrix object with numeric values
#' Standard Deviation Matrices Information
#' \item SDMatrNumber: number of st. dev. matrices defined by user in *.mp file
#' \item SDMatr: a 'list' object containing information for each st.dev. matrix. For each st.dev.
#' matrix the following information is stored:
#' SDMatrName: Name of the st.dev. matrix
#' Matr: a matrix object of numeric values
#' \item ConstraintsMatr: Constraints Matrix
#' \item StMig: Stage Relative Migration (Dispersal) Rates
#' \item Cat1EffMat: Catastrophe 1 effects on vital rates; one row per stage, separated by spaces
#' \item Cat1EffNst: Catastrophe 1 effects on abundances
#' \item Cat2EffMat: Catastrophe 2 effects on vital rates; one row per stage, separated by spaces
#' \item Cat2EffNst: Catastrophe 2 effects on abundances
#' \item StInit: Initial Abundance for each stage for each population
#' Stage Properties
#' \item StProp: a 'list' object containing the following information for each stage
#' StName: Name of stage
#' StWeight: Relative weight of stage
#' StExclude: Exclude from total (boolean: TRUE or FALSE)
#' StBasisForDD: Basis for density-dependence (boolean: TRUE or FALSE)
#' StBreeding: Proportion of stage physically capable of breeding
#' Population Management Actions
#' \item NPopManage: Number of population management actions
#' \item PopManageProp: All of the lines associated with population management, unpartitioned
#' \item ExtinctThr: Extinction threshold
#' \item ExplodeThr: Explosion threshold
#' \item stepsize: step size
#' \item PopData_df: Population level information in data.frame format
#'}
mp.read.results <- function(mpFile) {
# Read data from the *.mp file, including the results section.
# Based on Metapop version 5 and 5.1 formats.
#
# Author: Matthew Aiello-Lammens
# Created: 13 December 2011 (created using mp.read.r as basis)
# Update:
#
# Args:
# mpFile: the name and path of the *.mp file to be read. Path can be either from the
# current working directory or from the root directory
#
# Returns:
# A list of two elements: version and mp.file
#
# version: the Metapop version used to create the mp.file
#
# mp.file: a sorted 'list' object containing all of the mp.file input information. It does not
# contain any results information. Elements of the list strucutre are named to conform to naming
# scheme used in Metapop code.
#
###################################################################################################
##### BEGIN MP INPUT PARAMETERS SECTION #####
# Inform the user that the mp.read() function has been called.
print( paste( "Begin mp.read.results function with file: ", mpFile ) )
# Save the file path to mpFile
mpFilePath <- mpFile
# Read *.mp files into a long unsorted 'list' structure, 1 element per line in the file.
mpFile <- readLines(mpFile)
# Clear first line of "map=\xff". This step is needed because the "\x" leads to regex problems :(
mpFile[1] <- sub('map.*','',mpFile[1])
# Find begining of simulation results
res.start <- grep('Simulation results', mpFile, ignore.case=TRUE)
# Check to see that there are results in this *.mp file
if( !length(res.start) ) {
stop( paste('No Simulation results found in *.mp file:', mpFilePath) )
}
# Set values that will be used through out the function
FirstPopLine <- 45 # Line 45 correlates with first population data line
MigrationLine <- which(mpFile == "Migration")
CorrLine <- which(mpFile == "Correlation")
ConstraintsLine <- which(mpFile == "Constraints Matrix")
# Get Metapop Version information using metapopversion() function
metaVer <- metapopversion(mpFile)
# Create mp.file list and initiate with length 0
mp.file <- vector("list",length = 0)
# Create results list and initiate with length 0
results <- vector("list",length = 0)
# MaxRep: Number of replications (integer; 1-100000)
mp.file$MaxRep <- as.numeric(mpFile[7])
# MaxDur: Duration of simulation (integer; 1-500)
mp.file$MaxDur <- as.numeric(mpFile[8])
# Demog_Stoch: Use demographic stochasticity? (boolean)
mp.file$Demog_Stoch <- as.logical(mpFile[9])
# Stages: Number of stages (integer; 1-50)
# Stages.IgnoreConstraints: Ingore constraints? (boolean - TRUE/FALSE)
stageLine <- unlist(strsplit(mpFile[10],' '))
mp.file$Stages <- as.numeric(stageLine[1])
mp.file$Stages.IgnoreConstraints <- as.logical(stageLine[2])
# Cat1: Information associated with catastrophe 1
# This information is subdivided in RAMAS Input, but will not be subdivided
# in the first version of the sensitivity analysis package
mp.file$Cat1 <- mpFile[11:17]
# Cat2: Information associated with catastrophe 2
mp.file$Cat2 <- mpFile[18:25]
# DDActing: Information regarding density dependence
mp.file$DDActing <- mpFile[26]
# Distrib: Distribution (Normal or Lognormal) to use for Environmental
# Stochasticity.
# AdvancedStochSetting: Advanced Environmental Stoch. Settings: (0 or PoolVar or
# NegSurvCorr)
stochLine <- unlist(strsplit(mpFile[27],','))
mp.file$Distrib <- stochLine[1]
mp.file$AdvancedStochSetting <- stochLine[2]
# dispCV: Coefficient of variation for dispersal
mp.file$dispCV <- as.numeric(mpFile[28])
# WhenBelow: When below local threshold (count in total or don't count or assume dead)
mp.file$WhenBelow <- mpFile[29]
# corrwith: Within population correlation -
# 1 - (F, S, K Correlated)
# 2 - (F and S correlated, K independent)
# 3 - (F, S, K independent
mp.file$corrwith <- mpFile[30]
  # DispersalDependsOnTargetPopK: Just like the variable name implies; boolean
mp.file$DispersalDependsOnTargetPopK <- mpFile[31]
# DDBasis: Density basis type (char). Possiblities are 'AllStages', 'SelectedStages', 'FecundityWeighted'
mp.file$DDBasis <- mpFile[32]
# PopSpecificDD: Density dependence type is population specific (boolean - Yes/No)
mp.file$PopSpecificDD <- mpFile[33]
# DDforAllPop: Density dependence type for all populations (char)
mp.file$DDforAllPop <- mpFile[34]
# UserDllFileName: Filename for user-defined density dependence function
mp.file$UserDllFileName <- mpFile[35]
# TimeStepSize: Time step size (integer)
mp.file$TimeStepSize <- mpFile[36]
# TimeStepUnits: Time step units (char)
mp.file$TimeStepUnits <- mpFile[37]
# SexStructure: Description of Sex Structure (char)
mp.file$SexStructure <- mpFile[38]
# FemaleStages: Number of female stages (integer)
mp.file$FemaleStages <- mpFile[39]
# MatingSystem: Description of Mating system selected (char)
mp.file$MatingSystem <- mpFile[40]
# FemalesPerMale: Number of females per male as given by user (number)
mp.file$FemalesPerMale <- as.numeric(mpFile[41])
# MalesPerFemale: Number of males per female as given by user
mp.file$MalesPerFemale <- as.numeric(mpFile[42])
# CVerror: sampling error for N
mp.file$CVerror <- as.numeric(mpFile[43])
# FirstTforRisk: Initial number of time steps to exclude from risk calculations
mp.file$FirstTforRisk <- as.numeric(mpFile[44])
# ----------------------------------------------------------------------------------------------- #
# PopList: Population level information
print( "mp.read: Reading population information")
# First determine the number of populations (popNumber). In version 5.0, population data begins at line 45
# and the 'Migration' section begins immediately after the last population's data
# The population level information is stored in two different structures - 1) a List format that
# corresponds with how information is stored in RAMAS and 2) a data.frame that is convenient
# for functions.
##### TO DO - Below is setup for if there is no user defined variables
PopNumber <- MigrationLine - FirstPopLine
if (PopNumber < 1) {
stop("Error: mp.read: Insufficient number of populations. Check 'first pop. line' value in mp.read.r")
}
PopRawData <- mpFile[FirstPopLine:(FirstPopLine + PopNumber - 1)]
# Create population data list object
PopData <- vector('list',length=0) # Initiate a PopData list
AllPopData <- vector('list',length=0) # Initiate a AllPopData list
PopData_df_rownames <- vector() # Initiate a row names vector
if ( metaVer == 50 ) {
PopData_df_rownames <- c("name","X_coord","Y_coord","InitAbund","DensDep","MaxR","K","Ksdstr","Allee","KchangeSt","DD_Migr","Cat1.Multiplier","Cat1.Prob","IncludeInSum","StageMatType","RelFec","RelSur","localthr","Cat2.Multiplier","Cat2.Prob","SDMatType","TargetPopK","Cat1.TimeSinceLast","Cat2.TimeSinceLast","RelDisp")
for ( pop in 1:PopNumber ) {
popLine <- unlist(strsplit( PopRawData[ pop ], ',' ))
PopData$name <- popLine[1]
PopData$X_coord <- as.numeric(popLine[2])
PopData$Y_coord <- as.numeric(popLine[3])
PopData$InitAbund <- popLine[4]
PopData$DensDep <- popLine[5]
PopData$MaxR <- popLine[6]
PopData$K <- popLine[7]
PopData$Ksdstr <- popLine[8]
PopData$Allee <- popLine[9]
PopData$KchangeSt <- popLine[10]
PopData$DD_Migr <- popLine[11]
PopData$Cat1.Multiplier <- popLine[12]
PopData$Cat1.Prob <- popLine[13]
PopData$IncludeInSum <- popLine[14]
PopData$StageMatType <- popLine[15]
PopData$RelFec <- popLine[16]
PopData$RelSur <- popLine[17]
PopData$localthr <- popLine[18]
PopData$Cat2.Multiplier <- popLine[19]
PopData$Cat2.Prob <- popLine[20]
PopData$SDMatType <- popLine[21]
PopData$TargetPopK <- popLine[22]
PopData$Cat1.TimeSinceLast <- popLine[23]
PopData$Cat2.TimeSinceLast <- popLine[24]
PopData$RelDisp <- popLine[25]
AllPopData[[pop]] <- PopData # Add PopData to full list of population data
}
} else if ( metaVer >= 51 ) {
PopData_df_rownames <- c("name","X_coord","Y_coord","InitAbund","DensDep","MaxR","K","Ksdstr","Allee","KchangeSt","DD_Migr","Cat1.Multiplier","Cat1.Prob","IncludeInSum","StageMatType","RelFec","RelSur","localthr","Cat2.Multiplier","Cat2.Prob","SDMatType","TargetPopK","Cat1.TimeSinceLast","Cat2.TimeSinceLast","RelDisp","RelVarFec","RelVarSurv")
for ( pop in 1:PopNumber ) {
popLine <- unlist(strsplit( PopRawData[ pop ], ',' ))
PopData$name <- popLine[1]
PopData$X_coord <- as.numeric(popLine[2])
PopData$Y_coord <- as.numeric(popLine[3])
PopData$InitAbund <- as.numeric(popLine[4])
PopData$DensDep <- popLine[5]
PopData$MaxR <- as.numeric(popLine[6])
PopData$K <- as.numeric(popLine[7])
PopData$Ksdstr <- as.numeric(popLine[8])
PopData$Allee <- as.numeric(popLine[9])
PopData$KchangeSt <- popLine[10]
PopData$DD_Migr <- popLine[11]
PopData$Cat1.Multiplier <- popLine[12]
PopData$Cat1.Prob <- popLine[13]
PopData$IncludeInSum <- popLine[14]
PopData$StageMatType <- popLine[15]
PopData$RelFec <- popLine[16]
PopData$RelSur <- popLine[17]
PopData$localthr <- popLine[18]
PopData$Cat2.Multiplier <- popLine[19]
PopData$Cat2.Prob <- popLine[20]
PopData$SDMatType <- popLine[21]
PopData$TargetPopK <- popLine[22]
PopData$Cat1.TimeSinceLast <- popLine[23]
PopData$Cat2.TimeSinceLast <- popLine[24]
PopData$RelDisp <- popLine[25]
PopData$RelVarFec <- popLine[26]
PopData$RelVarSurv <- popLine[27]
AllPopData[[pop]] <- PopData # Add PopData to full list of population data
}
}
mp.file$PopList <- AllPopData
# Create population data data.frame
PopData_df <- read.csv( mpFilePath, header=FALSE, skip=44, nrows=PopNumber )
### # Only use as many columns as there are elements in the PopData_df_rownames. The rest of the columns
### # are associated with user defined density dependence parameters, not used at this time.
### PopData_df <- PopData_df[1:length(PopData_df_rownames)]
# Turn NAs into empty strings
PopData_df[is.na(PopData_df)] <- ''
# Check if PopData_df includes user defined d-d values
if (ncol(PopData_df)>27){
# Get the number of userd defined d-d pars
Num_udd_pars <- ncol(PopData_df)-27
udd_names <- paste('udd_',1:Num_udd_pars,sep='')
PopData_df_rownames <- c(PopData_df_rownames,udd_names)
}
# Assign columns names
names(PopData_df) <- PopData_df_rownames
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Dispersal (Migration) Data
print( "mp.read: Reading dispersal (migration) information" )
# UseDispDistFunc: True if dispersal rates are based on dispersal distance function; false if
# they are specified in the dispersal matrix
mp.file$UseDispDistFunc <- as.logical( mpFile[MigrationLine + 1] )
# DispDistFunc: Dispersal-distance function parameters - a, b, c, Dmax - Mij = a exp(-Dij^c/b)
mp.file$DispDistFunc <- as.numeric(unlist(strsplit( mpFile[MigrationLine + 2],',' )))
# DispMatr: User specified dispersal matrix. If the user selected dispersal based on
# disp-dist function then there are no rows for dispersal matrix. The definition of
# DispMatr has non-intuitive indexing to account for the fact that this first two lines
# after "Migration" are not part of the matrix, they are UseDispDistFunc and
# DispDistFunc, respectively.
if ( mp.file$UseDispDistFunc ) {
# migLines is a variable used to identify the number of lines used to define the
# disp-dist func parameters and the disp matrix, if one is defined
migLines <- 1 #Only one line necessary for migration parameters
mp.file$DispMatr <- fill.matrix.df( PopData_df, mp.file$DispDistFunc, 'disp' )
} else {
migLines <- (1 + PopNumber)
DispMatr <- mpFile[ (MigrationLine + 3):(MigrationLine + 1 + migLines) ]
mp.file$DispMatr <- matrix(as.numeric(unlist(strsplit( DispMatr,',' ))), nrow = PopNumber, byrow = TRUE)
}
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Correlation Data
print( "mp.read: Reading correlation information")
# UseCorrDistFunc: True if correlations between populations is based on correlation distance
# function; False if they are specified in the correlation matrix
mp.file$UseCorrDistFunc <- as.logical( mpFile[CorrLine +1] )
# CorrDistFunc: Correlation-distance function parameters - a, b, c - Cij = a exp(-Dij^c/b)
mp.file$CorrDistFunc <- as.numeric(unlist(strsplit( mpFile[CorrLine + 2],',' )))
# CorrMatr: User specified correlation matrix. Similar to dispersl matrix (See notes for DispMatr
# above), however the matrix is a lower triangular matrix, thus it is not saved as a matrix object
if ( mp.file$UseCorrDistFunc ) {
corrLines <- 1
# Create a Correlation Matrix from the Correlation Distance function. If there is only one
# population, then the number 1 is returned
mp.file$CorrMatr <- fill.matrix.df( PopData_df, mp.file$CorrDistFunc, 'corr' )
} else {
print("Using Correlation matrix") ### WARNING LINE
# Define a new function used to read correlation distance matrices
# addZeroes: used to read a correlation distance matrix. This function adds zeroes to each line
# of the lower triangular corr-dist matrix stored in the *.mp file
addZeroes <- function( vect ) {
zeroes <- PopNumber - length(vect)
new_vect <- c( vect, rep(0, zeroes) )
return(new_vect)
}
corrLines <- (1 + PopNumber)
# If user defined, the corr-matr is a lower-triangular matrix. To make comparisons easy, the
# matrix is transformed into a 'matrix' object
#
# Read corrMatr as list from readLines input
corrMatr <- mpFile[ (CorrLine+3):(CorrLine+2+PopNumber) ]
# Split each list element by comma
corrMatr <- strsplit( corrMatr, ',' )
# Make each list element a vector of numeric values
corrMatr <- lapply( corrMatr, as.numeric)
# Apply the addZeroes function
corrMatr <- lapply( corrMatr, addZeroes )
# Make the 'list' into a 'matrix'
corrMatr <- do.call( rbind, corrMatr )
# Set CorrMatr in mp.file 'list'
mp.file$CorrMatr <- corrMatr
}
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Stage Matrices Information
# Determine the number of stage matrix types (i.e., how many stage matrices are defined)
# The number of stage matices (stage matrix types) is the first number of the line that
# contains the phrase "stage matrix" in it.
stgMatrLine <- grep('stage matrix', mpFile, ignore.case=TRUE)
StMatrNumber <- unlist(strsplit( mpFile[stgMatrLine],' ' ))
StMatrNumber <- as.numeric( StMatrNumber[1] )
# StMatrNumber: number of stage matrices defined by user in *.mp file
mp.file$StMatrNumber <- StMatrNumber
#
# StMatr: a list object containing information about each stage matrix.
StMatr <- vector('list',length=0) # Create an empty stage matrix
oneMatr <- 4 + mp.file$Stages # The number of lines of information for one matrix
allMatr <- StMatrNumber * oneMatr # The number of lines of information for all matrices
# Extract from mpFile all of the lines of information for all of the matrices
AllStMatrLines <- mpFile[ (stgMatrLine + 1):(stgMatrLine + 1 + allMatr) ]
#
AllStMatr <- vector('list',length=0) # Create an empty list of stage matrices
for ( matr in 1:mp.file$StMatrNumber ) {
LineAdd <- matr - 1
# StMatrName: Name of the stage matrix
StMatr$StMatrName <- AllStMatrLines[ 1 + (LineAdd*oneMatr) ]
# StMatrSurvMult: Survival multiplier for stage matrix (for generating new matrices; not used in simulation)
StMatr$StMatrSurvMult <- AllStMatrLines[ 2 + (LineAdd*oneMatr) ]
# StMatrFecMult: Fecundity multiplier for stage matrix (for generating new matrices; not used in simulation)
StMatr$StMatrFecMult <- AllStMatrLines[ 3 + (LineAdd*oneMatr) ]
# Matr: a matrix object with numeric values
dumbMatr <- AllStMatrLines[ (5 + (LineAdd*oneMatr)):( (4 + mp.file$Stages) + (LineAdd*oneMatr) ) ]
StMatr$Matr <- matrix(as.numeric(unlist(strsplit( dumbMatr,' ' ))), nrow = mp.file$Stages, byrow = TRUE)
AllStMatr[[matr]] <- StMatr
}
mp.file$StMatr <- AllStMatr
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Standard Deviation Matrices Information
# Determine the number of std. dev. matrix types.
# The number of standard deviation matrices (st dev matrix types) is the first number of the line that
# contains the phrase "st.dev. matrix" in it.
sdMatrLine <- grep('st.dev. matrix', mpFile, ignore.case=TRUE)
sdMatrNumber <- unlist(strsplit( mpFile[sdMatrLine],' ' ))
sdMatrNumber <- as.numeric( sdMatrNumber[1] )
# SDMatrNumber: number of st. dev. matrices defined by user in *.mp file
mp.file$SDMatrNumber <- sdMatrNumber
#
# SDMatr: a list object containing information for each st.dev. matrix.
SDMatr <- vector('list',length=0) # Create an empty st.dev. matrix list object
oneSDMatr <- 1 + mp.file$Stages # The number of lines of information for one st.dev. matrix
allSDMatr <- sdMatrNumber * oneSDMatr # The number of lines of information for all st.dev. matrices
# Extract from mpFile all of the lines of information for all of the matrices
AllSDMatrLines <- mpFile[ (sdMatrLine + 1):(sdMatrLine + 1 + allSDMatr) ]
#
AllSDMatr <- vector('list',length=0) # Create an empty list of stage matrices
for ( matr in 1:mp.file$SDMatrNumber ) {
LineAdd <- matr - 1 # Addition factor to skip to the lines associated with the matrix of interest
# SDMatrName: Name of the st.dev. matrix
SDMatr$SDMatrName <- AllStMatrLines[ 1 + (LineAdd*oneSDMatr) ]
# Matr: a matrix object of numeric values
dumbMatr <- AllSDMatrLines[ (2 + (LineAdd*oneSDMatr)):( (1 + mp.file$Stages) + (LineAdd*oneSDMatr) ) ]
SDMatr$Matr <- matrix(as.numeric(unlist(strsplit( dumbMatr,' ' ))), nrow = mp.file$Stages, byrow = TRUE)
AllSDMatr[[matr]] <- SDMatr
}
mp.file$SDMatr <- AllSDMatr
# ----------------------------------------------------------------------------------------------- #
# ConstraintsMatr: Constraints Matrix
dumbMatr <- mpFile[ (ConstraintsLine + 1):(ConstraintsLine + mp.file$Stages) ]
mp.file$ConstraintsMatr <- matrix(as.numeric(unlist(strsplit( dumbMatr,' '))),nrow = mp.file$Stages, byrow = TRUE)
# StMig: Stage Relative Migration (Dispersal) Rates
mp.file$StMig <- as.numeric(unlist(strsplit( mpFile[ ConstraintsLine + mp.file$Stages + 1 ],' ' )))
# Cat1EffMat: Catastrophe 1 effects on vital rates; one row per stage, seperated by spaces
dumbMatr <- mpFile[ (ConstraintsLine + mp.file$Stages + 2):(ConstraintsLine + 2*mp.file$Stages + 1) ]
mp.file$Cat1EffMat <- matrix(as.numeric(unlist(strsplit( dumbMatr,' ' ))), nrow = mp.file$Stages, byrow = TRUE)
# Cat1EffNst: Catastrophe 1 effects on abundances
mp.file$Cat1EffNst <- as.numeric(unlist(strsplit( mpFile[ ConstraintsLine + 2*mp.file$Stages + 2 ],' ' )))
# Cat2EffMat: Catastrophe 2 effects on vital rates; one row per stage, seperated by spaces
dumbMatr <- mpFile[ (ConstraintsLine + 2*mp.file$Stages + 3):(ConstraintsLine + 3*mp.file$Stages + 2) ]
mp.file$Cat2EffMat <- matrix(as.numeric(unlist(strsplit( dumbMatr,' ' ))), nrow = mp.file$Stages, byrow = TRUE)
# Cat2EffNst: Catastrophe 2 effects on abundances
mp.file$Cat2EffNst <- as.numeric(unlist(strsplit( mpFile[ ConstraintsLine + 3*mp.file$Stages + 3 ],' ' )))
# StInit: Initial Abundance for each stage for each population
InitAbLine <- ConstraintsLine + 3*mp.file$Stages + 4 # First line
InitAb <- mpFile[ InitAbLine:(InitAbLine + PopNumber - 1) ]
mp.file$StInit <- matrix(as.numeric(unlist(strsplit( InitAb,' ' ))), nrow = PopNumber, byrow = TRUE)
# ----------------------------------------------------------------------------------------------- #
# Stage Properties
InitStPropLine <- InitAbLine + PopNumber # First line of stage properties in the *.mp file
# StProp: a list object containing information for each stage
StProp <- vector('list',length=0) # Create an empty stage property list object
# For each stage there are five properties, thus five lines of information
allStProp <- 5 * mp.file$Stages
AllStPropLines <- mpFile[ InitStPropLine:(InitStPropLine + allStProp - 1) ]
#
AllStProp <- vector('list',length=0) # Create an empty list of St.Prop. lists
for ( stg in 1:mp.file$Stages ) {
firstinfo <- (stg - 1)*5 + 1 # First line of info for stage 'stg'
# StName: Name of stage
StProp$StName <- AllStPropLines[ firstinfo ]
# StWeight: Relative weight of stage
StProp$StWeight <- AllStPropLines[ firstinfo + 1 ]
#StExclude: Exclude from total (boolean: TRUE or FALSE)
StProp$StExclude <- AllStPropLines[ firstinfo + 2 ]
#StBasisForDD: Basis for density-dependence (boolean: TRUE or FALSE)
StProp$StBasisForDD <- AllStPropLines[ firstinfo + 3 ]
#StBreeding: ###### WHAT IS THIS????
StProp$StBreeding <- AllStPropLines[ firstinfo + 4 ]
AllStProp[[ stg ]] <- StProp
}
mp.file$StProp <- AllStProp
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Population Management Actions
# NPopManage: The number of population managment actions
mgmntLine <- grep('pop mgmnt',mpFile,ignore.case=TRUE)
NPopManage <- unlist(strsplit(mpFile[mgmntLine],' '))
NPopManage <- as.numeric(NPopManage[1])
mp.file$NPopManage <- NPopManage
if ( NPopManage > 0 ) {
# PopManageProp: All of the lines associated with population management, unpartitioned
####mp.file$PopManageProp <- mpFile[ (mgmntLine + 1):(mgmntLine + NPopManage) ]
PopManageProp.colNames <- c('active', 'mng.type', 'from.pop', 'to.pop',
'begin.time','end.time','period','when','num.or.prop',
'number','proportion',
'from.stage','to.stage','cond.type','cond.abund.low',
'cond.abund.high','from.all.stgs','cond.quant.2','cond.func.N1',
'cond.func.N2','abund.each.stg_div_all.stg',
'abund.each.pop_div_all.pop')
mp.file$PopManageProp <- read.table( mpFilePath , skip=mgmntLine, nrows=NPopManage,
col.names=PopManageProp.colNames)
} else {
mp.file$PopManageProp <- "NA" # Fill with 'NA' value
}
# ----------------------------------------------------------------------------------------------- #
# The next three elements of the list are defined based on the position of the population
# management line. This may change in the future and have to be adjusted
# ExtinctThr: Extinction Threshold
mp.file$ExtinctThr <- as.numeric( mpFile[ mgmntLine + NPopManage + 1 ] )
# ExplodeThr: Explosion Threshold
mp.file$ExplodeThr <- as.numeric( mpFile[ mgmntLine + NPopManage + 2 ] )
# stepsize: Stepsize
mp.file$stepsize <- as.numeric( mpFile[ mgmntLine + NPopManage + 3 ] )
# ----------------------------------------------------------------------------------------------- #
# The last element of the list is the population data data.frame
mp.file$PopData_df <- PopData_df
###################################################################################################
## ******************************************************************** ##
## END MP INPUT PARAMETERS SECTION
## ******************************************************************** ##
###################################################################################################
##### BEGIN MP RESULTS READ SECTION ######
print('mp.read.results: Reading simulation results')
# Get number of replications in simulation
SimRepLine <- unlist( strsplit ( mpFile[ (res.start + 1) ], ' ' ) )
results$SimRep <- as.numeric( SimRepLine[1] )
# Read in Pop. ALL Results
pop.all.line <- grep( 'Pop. ALL', mpFile )
results$PopAll <- read.table( mpFilePath, skip=pop.all.line, nrows=mp.file$MaxDur )
names( results$PopAll ) <- c('Mean', 'StDev', 'Min', 'Max')
# Read in individual population Results
# PopInd variable is a 3-dim Array of size 'Duration of Simulation' x 4 x 'Number of Populations'
# The second dimension (length=4) corresponds to the Mean, StDev, Min, and Max population size
###browser()
PopInd <- vector()
# Calculate start of individual population information
pop.ind.start <- pop.all.line + mp.file$MaxDur + 1
# Calculate end of individual population information
pop.ind.stop <- pop.all.line + mp.file$MaxDur + (mp.file$MaxDur+1)*PopNumber
# Identify where the population ID lines are (i.e., the lines that say Pop. #)
pop.ind.ID.lines <-
seq(from=(pop.all.line+mp.file$MaxDur+1),
to=(pop.all.line+mp.file$MaxDur+((mp.file$MaxDur+1)*PopNumber)),
by=(mp.file$MaxDur+1))
# Make a vector of all lines
pop.ind.lines <- pop.ind.start:pop.ind.stop
# Remove ID lines
pop.ind.lines <- setdiff(pop.ind.lines,pop.ind.ID.lines)
# Get these values
pvals <- mpFile[pop.ind.lines]
# Covert to numeric
pvals.num <- as.numeric(unlist(strsplit(pvals, split=" ")))
# Convert to matrix. There are always four columns in these matrices.
pop.vals <- matrix(pvals.num,ncol=4,byrow=TRUE)
# Make a list of PopNumber matrices
pop.vals.list <- lapply(split(pop.vals,0:(nrow(pop.vals)-1)%/%mp.file$MaxDur),matrix,nrow=mp.file$MaxDur)
# Convert the list to an array
pop.vals.array <- array(unlist(pop.vals.list),c(mp.file$MaxDur,4,PopNumber))
results$PopInd <- pop.vals.array
#
# for ( pop in 1:PopNumber ){
# # Number of lines past Pop. ALL to skip to start 'pop' values. Last '+1' for Pop. # Label
# start.pop <- pop.all.line + pop*(mp.file$MaxDur +1) + 1
# # Number of lines past Pop. ALL to skip to stop 'pop' values.
# stop.pop <- (start.pop-1) + mp.file$MaxDur
# # Get pop values from mpFile. Initially is read as characters
# pvals <- mpFile[start.pop:stop.pop]
# # Covert to numeric
# pvals.num <- as.numeric(unlist(strsplit(pvals, split=" ")))
# # Convert to matrix. There are allways four columns in these matrices.
# pop.vals <- matrix(pvals.num,ncol=4,byrow=TRUE)
# # Combine new matrix with PopInd matrix
# PopInd <- c(PopInd,pop.vals)
# }
# results$PopInd <- array( PopInd, dim=c(mp.file$MaxDur,4,PopNumber) )
# Read in Occupancy Results - a summary stat. of number of patches occupied at each time step during a simulation
occ.line <- grep( '^Occupancy', mpFile ) # Note carrot used to capture line that begins with 'Occupancy'
results$Occupancy <- read.table( mpFilePath, skip=occ.line, nrows=mp.file$MaxDur )
names( results$Occupancy ) <- c('Mean', 'StDev', 'Min', 'Max')
# Read in Local Occupancy Results - a summary stat. for occupancy rate (prop. of time patches remained occupied)
occ.loc.line <- grep( 'Local Occupancy', mpFile )
results$LocOccupancy <- read.table( mpFilePath, skip=occ.loc.line, nrows=PopNumber )
names( results$LocOccupancy ) <- c('Mean', 'StDev', 'Min', 'Max')
# Read Min., Max., and Ter. - the min, max, and final population abundance values for each
# replication of the mp model. Each column is ordered separately
rep.line <- grep( 'Min. Max. Ter.', mpFile )
results$Replications <- read.table( mpFilePath, skip=rep.line, nrows=results$SimRep )
names( results$Replications ) <- c('Min', 'Max', 'Ter')
# Read Time to cross - used to determine quasi-extinction/ -explosion risk. The number of
# rows in the mp file depends on the stepsize. Each row is a time-step and the first col
# is the number of times the pop. abund. crossed the min threshold for the first time in that
# time step and the second is associated with crossing the max threshold
t.cross.line <- grep( 'Time to cross', mpFile )
t.cross.rows <- (mp.file$MaxDur %/% mp.file$stepsize) + (mp.file$MaxDur %% mp.file$stepsize)
results$TimeCross <- read.table( mpFilePath, skip=t.cross.line, nrows=t.cross.rows )
names( results$TimeCross ) <- c('QuasiExtinct','QuasiExpl')
# Read Final stage abundances results
# results$FinalStAb variable is a 3-dim Array of size Numer of Stages (rows) x 4 (col) x Number of Populations (slices)
# to call the results of one populaiton (e.g., Pop. 1) use third index (e.g., results$FinalStAb[,,1] )
# Columns of the matrix are Mean, StDev, Min, Max
fin.stg.ab.line <- grep( 'Final stage abundances', mpFile )
fin.stg.ab.rows <- PopNumber * mp.file$Stages
FinalStAb <- as.matrix( read.table( mpFilePath, skip=fin.stg.ab.line, nrows=fin.stg.ab.rows ) )
# Separate out FinalStAb into the different populations
fsa.first <- 1 # Initial first line for partitioning Final Stage Abundance matrix
fsa.list <- lapply(split(FinalStAb,0:(nrow(FinalStAb)-1)%/%mp.file$Stages),matrix,nrow=mp.file$Stages)
fsa.array <- array(unlist(fsa.list),c(mp.file$Stages,4,PopNumber))
results$FinalStAb <- fsa.array
#
# FinalStAb.vect <- vector()
# for ( pop in 1:PopNumber ){
# fsa.last <- pop*mp.file$Stages
# FinalStAb.vect <- c( FinalStAb.vect, FinalStAb[ fsa.first:fsa.last, ] )
# fsa.first <- fsa.last + 1
# }
# results$FinalStAb <- array( FinalStAb.vect, dim=c(mp.file$Stages, 4, PopNumber) )
# Read LocExtDur results
loc.ext.dur.line <- grep( 'LocExtDur', mpFile )
results$LocExtDur <- read.table( mpFilePath, skip=loc.ext.dur.line, nrow=PopNumber )
names( results$LocExtDur ) <- c('Mean','StDev','Max','Min')
# Read Harvest results
# First line is the total harvest results, the second line is the number of lines dedicated
# to individual time units for harvest
harvest.line <- grep( '^Harvest', mpFile )
results$HarvestTot <- read.table( mpFilePath, skip=harvest.line, nrow=1 )
names( results$HarvestTot ) <- c('Mean','StDev','Min','Max')
# Determine number of time steps with harvest data
harvest.steps <- mpFile[ harvest.line + 2 ]
harvest.steps <- unlist( strsplit( harvest.steps, ' ' ) )
harvest.steps <- as.numeric( harvest.steps[1] )
if ( harvest.steps > 0 ) {
results$HarvestSteps <- read.table( mpFilePath, skip=(harvest.line + 2), nrow=harvest.steps )
names( results$HarvestSteps ) <- c('Time', 'Mean', 'StDev', 'Min', 'Max')
}
# Read RiskOfLowHarvest results
risk.harvest.line <- grep( 'RiskOfLowHarvest', mpFile )
results$RiskLowHarvest <- read.table( mpFilePath, skip=risk.harvest.line, nrow=results$SimRep )
# Read Average stage abundances results
# First line after 'avg.st.ab.line' is the number of populations and number of time
# steps recorded (dependent on maxdur and stepsize)
# After this, there are popnumber * time steps lines by number of stages columns
# The values are the stage abundance values for each stage in each population
avg.st.ab.line <- grep( 'Average stage abundances', mpFile )
ab.steps <- mpFile[ avg.st.ab.line + 1 ]
ab.steps <- unlist( strsplit( ab.steps, ' ' ) )
ab.pops <- as.numeric( ab.steps[1] )
ab.steps <- as.numeric( ab.steps[2] )
avg.st.ab.rows <- ab.pops * ab.steps
AvgStAb <- as.matrix( read.table( mpFilePath, skip=(avg.st.ab.line + 1), nrow=avg.st.ab.rows ) )
# Separate out AvgStAb into different populations
asb.first <- 1
AvgStAb.vect <- vector()
for ( pop in 1:ab.pops ){
asb.last <- pop*ab.steps
AvgStAb.vect <- c( AvgStAb.vect, AvgStAb[ asb.first:asb.last, ] )
asb.first <- asb.last + 1
}
results$AvgStAb <- array( AvgStAb.vect, dim=c(ab.steps, mp.file$Stages, ab.pops) )
mp.file$results <- results
return( list( version = metaVer, mp.file = mp.file) )
} # End mp.read function
| /R/mp.read.results.r | no_license | Akcakaya/demgsa | R | false | false | 38,762 | r | #' @export mp.read.results
#' @title Read RAMAS (c) *.MP file with results
#' @description
#' \code{mp.read.results} reads in the *.MP file used in the RAMAS Metapop software.
#' The *.MP file is a simple text file containing all of the values necessary
#' to configure a metapopulation simulation. This function is an extension of
#' \link{mp.read} that includes the reading of the results section of a *.MP file.
#'
#' @param mpFile The name of the *.MP file to be read.
#'
#' @return `mp.read.results` returns a nested list object. The first level includes
#' two elements: the version of the *.MP file and a list named "mp.file". The
#' "mp.file" list is composed of 53 elements that provide all of the information
#' found in the *.MP file and a list of all of the results from the *.MP file.
#'
#' @details
#' mp.file 'list' structure elements. Most element names from RAMAS Metapop source code
#' The assignment of each element of the mp.file list is based on the Metapopulation input file
#' type for version 5.0.
#' \enumerate{
#' \item MaxRep: maximum number of replications (integer; 1-10000)
#' \item MaxDur: Duration of simulation (integer; 1-500)
#' \item Demog_Stoch: Use demographic stochasticity? (boolean)
#' \item Stages: Number of stages (integer; 1-50)
#' \item Stages.IgnoreConstraints: Ingore constraints? (boolean - TRUE/FALSE)
#' \item Cat1: Information associated with catastrophe 1
#' This information is subdivided in RAMAS Input, but will not be subdivided
#' in the first version of the sensitivity analysis package
#' \item Cat2: Information associated with catastrophe 2
#' See note associated with Cat1
#' \item DDActing: Information regarding density dependence
#' \item Distrib: Distribution (Normal or Lognormal) to use for Environmental
#' Stochasticity.
#' \item AdvancedStochSetting: Advanced Environmental Stoch. Settings: (0 or PoolVar or
#' NegSurvCorr)
#' \item dispCV: Coefficient of variation for dispersal
#' \item WhenBelow: When below local threshold (count in total or don't count or assume dead)
#' \item corrwith: Within population correlation -
#' 1 - (F, S, K Correlated)
#' 2 - (F and S correlated, K independent)
#' 3 - (F, S, K independent
#' \item DispersalDependsOnTargetPopK: Just like the variable name implies; boolean
#' \item DDBasis: Density basis type (char)
#' \item PopSpecificDD: Density dependence type is population specific (boolean - Yes/No)
#' \item DDforAllPop: Density dependence type for all populations (char)
#' \item UserDllFileName: Filename for user-defined density dependence function
#' \item TimeStepSize: Time step size
#' \item TimeStepUnits: Time step units
#' \item SexStructure: Description of Sex Structure (char)
#' \item FemaleStages: Number of female stages
#' \item MatingSystem: Description of Mating system selected (char)
#' \item FemalesPerMale: Number of females per male as given by user
#' \item MalesPerFemale: Number of males per female as given by user
#' \item CVerror: sampling error for N
#' \item FirstTforRisk: Initial number of time steps to exclude from risk calculations
#' Population Parameters
#' \item PopList: Individual population data - saved as a 'nested list' structure, one 'list' for
#' each population.
#' Dispersal (Migration) Parameters
#' \item UseDispDistFunc: True if dispersal rates are based on dispersal distance function; false if
#' they are specified in the dispersal matrix
#' \item DispDistFunc: Dispersal-distance function parameters - a, b, c, Dmax - Mij = a exp(-Dij^c/b)
#' \item DispMatr: Dispersal matrix, as defined or calculated in 'fill.matrix.df.r'
#' Correlation Parameters
#' \item UseCorrDistFunc: True if correlations between populations is based on correlation distance
#' function; False if they are specified in the correlation matrix
#' \item CorrDistFunc: Correlation-distance function parameters - a, b, c - Cij = a exp(-Dij^c/b)
#' \item CorrMatr: User specified correlation matrix, as defined or calculated in 'fill.matrix.df.r'
#' Stage Matrices Information
#' \item StMatrNumber: number of stage matrices defined by user in *.mp file
#' \item StMatr: a 'list' object containing information about each stage matrix. For each stage
#' matrix the following information is stored:
#' StMatrName: Name of the stage matrix
#' StMatrSurvMult: Survival multiplier for stage matrix (for generating new matrices; not used in simulation)
#' StMatrFecMult: Fecundity multiplier for stage matrix (for generating new matrices; not used in simulation)
#' Matr: a matrix object with numeric values
#' Standard Deviation Matrices Information
#' \item SDMatrNumber: number of st. dev. matrices defined by user in *.mp file
#' \item SDMatr: a 'list' object containing information for each st.dev. matrix. For each st.dev.
#' matrix the following information is stored:
#' SDMatrName: Name of the st.dev. matrix
#' Matr: a matrix object of numeric values
#' \item ConstraintsMatr: Constraints Matrix
#' \item StMig: Stage Relative Migration (Dispersal) Rates
#' \item Cat1EffMat: Catastrophe 1 effects on vital rates; one row per stage, separated by spaces
#' \item Cat1EffNst: Catastrophe 1 effects on abundances
#' \item Cat2EffMat: Catastrophe 2 effects on vital rates; one row per stage, separated by spaces
#' \item Cat2EffNst: Catastrophe 2 effects on abundances
#' \item StInit: Initial Abundance for each stage for each population
#' Stage Properties
#' \item StProp: a 'list' object containing the following information for each stage
#' StName: Name of stage
#' StWeight: Relative weight of stage
#' StExclude: Exclude from total (boolean: TRUE or FALSE)
#' StBasisForDD: Basis for density-dependence (boolean: TRUE or FALSE)
#' StBreeding: Proportion of stage physically capable of breeding
#' Population Management Actions
#' \item NPopManage: Number of population managment actions
#' \item PopManageProp: All of the lines associated with population management, unpartitioned
#' \item ExtinctThr: Extinction threshold
#' \item ExplodeThr: Explosion threshold
#' \item stepsize: step size
#' \item PopData_df: Population level information in data.frame format
#'}
mp.read.results <- function(mpFile) {
# Read data from the *.mp file, including the results section.
# Based on Metapop version 5 and 5.1 formats.
#
# Author: Matthew Aiello-Lammens
# Created: 13 December 2011 (created using mp.read.r as basis)
# Update:
#
# Args:
# mpFile: the name and path of the *.mp file to be read. Path can be either from the
# current working directory or from the root directory
#
# Returns:
# A list of two elements: version and mp.file
#
# version: the Metapop version used to create the mp.file
#
# mp.file: a sorted 'list' object containing all of the mp.file input information. It does not
# contain any results information. Elements of the list strucutre are named to conform to naming
# scheme used in Metapop code.
#
###################################################################################################
##### BEGIN MP INPUT PARAMETERS SECTION #####
# Inform the user that the mp.read() function has been called.
print( paste( "Begin mp.read.results function with file: ", mpFile ) )
# Save the file path to mpFile
mpFilePath <- mpFile
# Read *.mp files into a long unsorted 'list' structure, 1 element per line in the file.
mpFile <- readLines(mpFile)
# Clear first line of "map=\xff". This step is needed because the "\x" leads to regex problems :(
mpFile[1] <- sub('map.*','',mpFile[1])
# Find begining of simulation results
res.start <- grep('Simulation results', mpFile, ignore.case=TRUE)
# Check to see that there are results in this *.mp file
if( !length(res.start) ) {
stop( paste('No Simulation results found in *.mp file:', mpFilePath) )
}
# Set values that will be used through out the function
FirstPopLine <- 45 # Line 45 correlates with first population data line
MigrationLine <- which(mpFile == "Migration")
CorrLine <- which(mpFile == "Correlation")
ConstraintsLine <- which(mpFile == "Constraints Matrix")
# Get Metapop Version information using metapopversion() function
metaVer <- metapopversion(mpFile)
# Create mp.file list and initiate with length 0
mp.file <- vector("list",length = 0)
# Create results list and initiate with length 0
results <- vector("list",length = 0)
# MaxRep: Number of replications (integer; 1-100000)
mp.file$MaxRep <- as.numeric(mpFile[7])
# MaxDur: Duration of simulation (integer; 1-500)
mp.file$MaxDur <- as.numeric(mpFile[8])
# Demog_Stoch: Use demographic stochasticity? (boolean)
mp.file$Demog_Stoch <- as.logical(mpFile[9])
# Stages: Number of stages (integer; 1-50)
# Stages.IgnoreConstraints: Ingore constraints? (boolean - TRUE/FALSE)
stageLine <- unlist(strsplit(mpFile[10],' '))
mp.file$Stages <- as.numeric(stageLine[1])
mp.file$Stages.IgnoreConstraints <- as.logical(stageLine[2])
# Cat1: Information associated with catastrophe 1
# This information is subdivided in RAMAS Input, but will not be subdivided
# in the first version of the sensitivity analysis package
mp.file$Cat1 <- mpFile[11:17]
# Cat2: Information associated with catastrophe 2
mp.file$Cat2 <- mpFile[18:25]
# DDActing: Information regarding density dependence
mp.file$DDActing <- mpFile[26]
# Distrib: Distribution (Normal or Lognormal) to use for Environmental
# Stochasticity.
# AdvancedStochSetting: Advanced Environmental Stoch. Settings: (0 or PoolVar or
# NegSurvCorr)
stochLine <- unlist(strsplit(mpFile[27],','))
mp.file$Distrib <- stochLine[1]
mp.file$AdvancedStochSetting <- stochLine[2]
# dispCV: Coefficient of variation for dispersal
mp.file$dispCV <- as.numeric(mpFile[28])
# WhenBelow: When below local threshold (count in total or don't count or assume dead)
mp.file$WhenBelow <- mpFile[29]
# corrwith: Within population correlation -
# 1 - (F, S, K Correlated)
# 2 - (F and S correlated, K independent)
# 3 - (F, S, K independent
mp.file$corrwith <- mpFile[30]
# DispersalDependsOnTargetPopK: Just like the variable name implie; boolean
mp.file$DispersalDependsOnTargetPopK <- mpFile[31]
# DDBasis: Density basis type (char). Possiblities are 'AllStages', 'SelectedStages', 'FecundityWeighted'
mp.file$DDBasis <- mpFile[32]
# PopSpecificDD: Density dependence type is population specific (boolean - Yes/No)
mp.file$PopSpecificDD <- mpFile[33]
# DDforAllPop: Density dependence type for all populations (char)
mp.file$DDforAllPop <- mpFile[34]
# UserDllFileName: Filename for user-defined density dependence function
mp.file$UserDllFileName <- mpFile[35]
# TimeStepSize: Time step size (integer)
mp.file$TimeStepSize <- mpFile[36]
# TimeStepUnits: Time step units (char)
mp.file$TimeStepUnits <- mpFile[37]
# SexStructure: Description of Sex Structure (char)
mp.file$SexStructure <- mpFile[38]
# FemaleStages: Number of female stages (integer)
mp.file$FemaleStages <- mpFile[39]
# MatingSystem: Description of Mating system selected (char)
mp.file$MatingSystem <- mpFile[40]
# FemalesPerMale: Number of females per male as given by user (number)
mp.file$FemalesPerMale <- as.numeric(mpFile[41])
# MalesPerFemale: Number of males per female as given by user
mp.file$MalesPerFemale <- as.numeric(mpFile[42])
# CVerror: sampling error for N
mp.file$CVerror <- as.numeric(mpFile[43])
# FirstTforRisk: Initial number of time steps to exclude from risk calculations
mp.file$FirstTforRisk <- as.numeric(mpFile[44])
# ----------------------------------------------------------------------------------------------- #
# PopList: Population level information
print( "mp.read: Reading population information")
# First determine the number of populations (popNumber). In version 5.0, population data begins at line 45
# and the 'Migration' section begins immediately after the last population's data
# The population level information is stored in two different structures - 1) a List format that
# corresponds with how information is stored in RAMAS and 2) a data.frame that is convenient
# for functions.
##### TO DO - Below is setup for if there is no user defined variables
PopNumber <- MigrationLine - FirstPopLine
if (PopNumber < 1) {
stop("Error: mp.read: Insufficient number of populations. Check 'first pop. line' value in mp.read.r")
}
PopRawData <- mpFile[FirstPopLine:(FirstPopLine + PopNumber - 1)]
# Create population data list object
PopData <- vector('list',length=0) # Initiate a PopData list
AllPopData <- vector('list',length=0) # Initiate a AllPopData list
PopData_df_rownames <- vector() # Initiate a row names vector
if ( metaVer == 50 ) {
PopData_df_rownames <- c("name","X_coord","Y_coord","InitAbund","DensDep","MaxR","K","Ksdstr","Allee","KchangeSt","DD_Migr","Cat1.Multiplier","Cat1.Prob","IncludeInSum","StageMatType","RelFec","RelSur","localthr","Cat2.Multiplier","Cat2.Prob","SDMatType","TargetPopK","Cat1.TimeSinceLast","Cat2.TimeSinceLast","RelDisp")
for ( pop in 1:PopNumber ) {
popLine <- unlist(strsplit( PopRawData[ pop ], ',' ))
PopData$name <- popLine[1]
PopData$X_coord <- as.numeric(popLine[2])
PopData$Y_coord <- as.numeric(popLine[3])
PopData$InitAbund <- popLine[4]
PopData$DensDep <- popLine[5]
PopData$MaxR <- popLine[6]
PopData$K <- popLine[7]
PopData$Ksdstr <- popLine[8]
PopData$Allee <- popLine[9]
PopData$KchangeSt <- popLine[10]
PopData$DD_Migr <- popLine[11]
PopData$Cat1.Multiplier <- popLine[12]
PopData$Cat1.Prob <- popLine[13]
PopData$IncludeInSum <- popLine[14]
PopData$StageMatType <- popLine[15]
PopData$RelFec <- popLine[16]
PopData$RelSur <- popLine[17]
PopData$localthr <- popLine[18]
PopData$Cat2.Multiplier <- popLine[19]
PopData$Cat2.Prob <- popLine[20]
PopData$SDMatType <- popLine[21]
PopData$TargetPopK <- popLine[22]
PopData$Cat1.TimeSinceLast <- popLine[23]
PopData$Cat2.TimeSinceLast <- popLine[24]
PopData$RelDisp <- popLine[25]
AllPopData[[pop]] <- PopData # Add PopData to full list of population data
}
} else if ( metaVer >= 51 ) {
PopData_df_rownames <- c("name","X_coord","Y_coord","InitAbund","DensDep","MaxR","K","Ksdstr","Allee","KchangeSt","DD_Migr","Cat1.Multiplier","Cat1.Prob","IncludeInSum","StageMatType","RelFec","RelSur","localthr","Cat2.Multiplier","Cat2.Prob","SDMatType","TargetPopK","Cat1.TimeSinceLast","Cat2.TimeSinceLast","RelDisp","RelVarFec","RelVarSurv")
for ( pop in 1:PopNumber ) {
popLine <- unlist(strsplit( PopRawData[ pop ], ',' ))
PopData$name <- popLine[1]
PopData$X_coord <- as.numeric(popLine[2])
PopData$Y_coord <- as.numeric(popLine[3])
PopData$InitAbund <- as.numeric(popLine[4])
PopData$DensDep <- popLine[5]
PopData$MaxR <- as.numeric(popLine[6])
PopData$K <- as.numeric(popLine[7])
PopData$Ksdstr <- as.numeric(popLine[8])
PopData$Allee <- as.numeric(popLine[9])
PopData$KchangeSt <- popLine[10]
PopData$DD_Migr <- popLine[11]
PopData$Cat1.Multiplier <- popLine[12]
PopData$Cat1.Prob <- popLine[13]
PopData$IncludeInSum <- popLine[14]
PopData$StageMatType <- popLine[15]
PopData$RelFec <- popLine[16]
PopData$RelSur <- popLine[17]
PopData$localthr <- popLine[18]
PopData$Cat2.Multiplier <- popLine[19]
PopData$Cat2.Prob <- popLine[20]
PopData$SDMatType <- popLine[21]
PopData$TargetPopK <- popLine[22]
PopData$Cat1.TimeSinceLast <- popLine[23]
PopData$Cat2.TimeSinceLast <- popLine[24]
PopData$RelDisp <- popLine[25]
PopData$RelVarFec <- popLine[26]
PopData$RelVarSurv <- popLine[27]
AllPopData[[pop]] <- PopData # Add PopData to full list of population data
}
}
mp.file$PopList <- AllPopData
# Create population data data.frame
PopData_df <- read.csv( mpFilePath, header=FALSE, skip=44, nrows=PopNumber )
### # Only use as many columns as there are elements in the PopData_df_rownames. The rest of the columns
### # are associated with user defined density dependence parameters, not used at this time.
### PopData_df <- PopData_df[1:length(PopData_df_rownames)]
# Turn NAs into empty strings
PopData_df[is.na(PopData_df)] <- ''
# Check if PopData_df includes user defined d-d values
if (ncol(PopData_df)>27){
# Get the number of userd defined d-d pars
Num_udd_pars <- ncol(PopData_df)-27
udd_names <- paste('udd_',1:Num_udd_pars,sep='')
PopData_df_rownames <- c(PopData_df_rownames,udd_names)
}
# Assign columns names
names(PopData_df) <- PopData_df_rownames
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Dispersal (Migration) Data
print( "mp.read: Reading dispersal (migration) information" )
# UseDispDistFunc: True if dispersal rates are based on dispersal distance function; false if
# they are specified in the dispersal matrix
mp.file$UseDispDistFunc <- as.logical( mpFile[MigrationLine + 1] )
# DispDistFunc: Dispersal-distance function parameters - a, b, c, Dmax - Mij = a exp(-Dij^c/b)
mp.file$DispDistFunc <- as.numeric(unlist(strsplit( mpFile[MigrationLine + 2],',' )))
# DispMatr: User specified dispersal matrix. If the user selected dispersal based on
# disp-dist function then there are no rows for dispersal matrix. The definition of
# DispMatr has non-intuitive indexing to account for the fact that this first two lines
# after "Migration" are not part of the matrix, they are UseDispDistFunc and
# DispDistFunc, respectively.
if ( mp.file$UseDispDistFunc ) {
# migLines is a variable used to identify the number of lines used to define the
# disp-dist func parameters and the disp matrix, if one is defined
migLines <- 1 #Only one line necessary for migration parameters
mp.file$DispMatr <- fill.matrix.df( PopData_df, mp.file$DispDistFunc, 'disp' )
} else {
migLines <- (1 + PopNumber)
DispMatr <- mpFile[ (MigrationLine + 3):(MigrationLine + 1 + migLines) ]
mp.file$DispMatr <- matrix(as.numeric(unlist(strsplit( DispMatr,',' ))), nrow = PopNumber, byrow = TRUE)
}
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Correlation Data
print( "mp.read: Reading correlation information")
# UseCorrDistFunc: True if correlations between populations is based on correlation distance
# function; False if they are specified in the correlation matrix
mp.file$UseCorrDistFunc <- as.logical( mpFile[CorrLine +1] )
# CorrDistFunc: Correlation-distance function parameters - a, b, c - Cij = a exp(-Dij^c/b)
mp.file$CorrDistFunc <- as.numeric(unlist(strsplit( mpFile[CorrLine + 2],',' )))
# CorrMatr: User specified correlation matrix. Similar to dispersl matrix (See notes for DispMatr
# above), however the matrix is a lower triangular matrix, thus it is not saved as a matrix object
if ( mp.file$UseCorrDistFunc ) {
corrLines <- 1
# Create a Correlation Matrix from the Correlation Distance function. If there is only one
# population, then the number 1 is returned
mp.file$CorrMatr <- fill.matrix.df( PopData_df, mp.file$CorrDistFunc, 'corr' )
} else {
print("Using Correlation matrix") ### WARNING LINE
# Define a new function used to read correlation distance matrices
# addZeroes: used to read a correlation distance matrix. This function adds zeroes to each line
# of the lower triangular corr-dist matrix stored in the *.mp file
# addZeroes: pad one row of the lower-triangular correlation matrix (as read
# from the *.mp file) with trailing zeroes so every row has the same length.
#
# Args:
#   vect:       numeric vector holding one row of the lower-triangular matrix
#   target_len: desired output length. Defaults to PopNumber from the
#               enclosing scope (R evaluates defaults lazily, so existing
#               one-argument calls behave exactly as before), but the target
#               length can now also be supplied explicitly.
#
# Returns: 'vect' extended with zeroes to length 'target_len'
addZeroes <- function( vect, target_len = PopNumber ) {
  zeroes <- target_len - length(vect)
  new_vect <- c( vect, rep(0, zeroes) )
  return(new_vect)
}
corrLines <- (1 + PopNumber)
# If user defined, the corr-matr is a lower-triangular matrix. To make comparisons easy, the
# matrix is transformed into a 'matrix' object
#
# Read corrMatr as list from readLines input
corrMatr <- mpFile[ (CorrLine+3):(CorrLine+2+PopNumber) ]
# Split each list element by comma
corrMatr <- strsplit( corrMatr, ',' )
# Make each list element a vector of numeric values
corrMatr <- lapply( corrMatr, as.numeric)
# Apply the addZeroes function
corrMatr <- lapply( corrMatr, addZeroes )
# Make the 'list' into a 'matrix'
corrMatr <- do.call( rbind, corrMatr )
# Set CorrMatr in mp.file 'list'
mp.file$CorrMatr <- corrMatr
}
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Stage Matrices Information
# Determine the number of stage matrix types (i.e., how many stage matrices are defined)
# The number of stage matices (stage matrix types) is the first number of the line that
# contains the phrase "stage matrix" in it.
stgMatrLine <- grep('stage matrix', mpFile, ignore.case=TRUE)
StMatrNumber <- unlist(strsplit( mpFile[stgMatrLine],' ' ))
StMatrNumber <- as.numeric( StMatrNumber[1] )
# StMatrNumber: number of stage matrices defined by user in *.mp file
mp.file$StMatrNumber <- StMatrNumber
#
# StMatr: a list object containing information about each stage matrix.
StMatr <- vector('list',length=0) # Create an empty stage matrix
oneMatr <- 4 + mp.file$Stages # The number of lines of information for one matrix
allMatr <- StMatrNumber * oneMatr # The number of lines of information for all matrices
# Extract from mpFile all of the lines of information for all of the matrices
AllStMatrLines <- mpFile[ (stgMatrLine + 1):(stgMatrLine + 1 + allMatr) ]
#
AllStMatr <- vector('list',length=0) # Create an empty list of stage matrices
for ( matr in 1:mp.file$StMatrNumber ) {
LineAdd <- matr - 1
# StMatrName: Name of the stage matrix
StMatr$StMatrName <- AllStMatrLines[ 1 + (LineAdd*oneMatr) ]
# StMatrSurvMult: Survival multiplier for stage matrix (for generating new matrices; not used in simulation)
StMatr$StMatrSurvMult <- AllStMatrLines[ 2 + (LineAdd*oneMatr) ]
# StMatrFecMult: Fecundity multiplier for stage matrix (for generating new matrices; not used in simulation)
StMatr$StMatrFecMult <- AllStMatrLines[ 3 + (LineAdd*oneMatr) ]
# Matr: a matrix object with numeric values
dumbMatr <- AllStMatrLines[ (5 + (LineAdd*oneMatr)):( (4 + mp.file$Stages) + (LineAdd*oneMatr) ) ]
StMatr$Matr <- matrix(as.numeric(unlist(strsplit( dumbMatr,' ' ))), nrow = mp.file$Stages, byrow = TRUE)
AllStMatr[[matr]] <- StMatr
}
mp.file$StMatr <- AllStMatr
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Standard Deviation Matrices Information
# Determine the number of std. dev. matrix types.
# The number of standard deviation matrices (st dev matrix types) is the first number of the line that
# contains the phrase "st.dev. matrix" in it.
sdMatrLine <- grep('st.dev. matrix', mpFile, ignore.case=TRUE)
sdMatrNumber <- unlist(strsplit( mpFile[sdMatrLine],' ' ))
sdMatrNumber <- as.numeric( sdMatrNumber[1] )
# SDMatrNumber: number of st. dev. matrices defined by user in *.mp file
mp.file$SDMatrNumber <- sdMatrNumber
#
# SDMatr: a list object containing information for each st.dev. matrix.
SDMatr <- vector('list',length=0) # Create an empty st.dev. matrix list object
oneSDMatr <- 1 + mp.file$Stages # The number of lines of information for one st.dev. matrix
allSDMatr <- sdMatrNumber * oneSDMatr # The number of lines of information for all st.dev. matrices
# Extract from mpFile all of the lines of information for all of the matrices
AllSDMatrLines <- mpFile[ (sdMatrLine + 1):(sdMatrLine + 1 + allSDMatr) ]
#
AllSDMatr <- vector('list',length=0) # Create an empty list of stage matrices
for ( matr in 1:mp.file$SDMatrNumber ) {
LineAdd <- matr - 1 # Addition factor to skip to the lines associated with the matrix of interest
# SDMatrName: Name of the st.dev. matrix
SDMatr$SDMatrName <- AllStMatrLines[ 1 + (LineAdd*oneSDMatr) ]
# Matr: a matrix object of numeric values
dumbMatr <- AllSDMatrLines[ (2 + (LineAdd*oneSDMatr)):( (1 + mp.file$Stages) + (LineAdd*oneSDMatr) ) ]
SDMatr$Matr <- matrix(as.numeric(unlist(strsplit( dumbMatr,' ' ))), nrow = mp.file$Stages, byrow = TRUE)
AllSDMatr[[matr]] <- SDMatr
}
mp.file$SDMatr <- AllSDMatr
# ----------------------------------------------------------------------------------------------- #
# ConstraintsMatr: Constraints Matrix
dumbMatr <- mpFile[ (ConstraintsLine + 1):(ConstraintsLine + mp.file$Stages) ]
mp.file$ConstraintsMatr <- matrix(as.numeric(unlist(strsplit( dumbMatr,' '))),nrow = mp.file$Stages, byrow = TRUE)
# StMig: Stage Relative Migration (Dispersal) Rates
mp.file$StMig <- as.numeric(unlist(strsplit( mpFile[ ConstraintsLine + mp.file$Stages + 1 ],' ' )))
# Cat1EffMat: Catastrophe 1 effects on vital rates; one row per stage, seperated by spaces
dumbMatr <- mpFile[ (ConstraintsLine + mp.file$Stages + 2):(ConstraintsLine + 2*mp.file$Stages + 1) ]
mp.file$Cat1EffMat <- matrix(as.numeric(unlist(strsplit( dumbMatr,' ' ))), nrow = mp.file$Stages, byrow = TRUE)
# Cat1EffNst: Catastrophe 1 effects on abundances
mp.file$Cat1EffNst <- as.numeric(unlist(strsplit( mpFile[ ConstraintsLine + 2*mp.file$Stages + 2 ],' ' )))
# Cat2EffMat: Catastrophe 2 effects on vital rates; one row per stage, seperated by spaces
dumbMatr <- mpFile[ (ConstraintsLine + 2*mp.file$Stages + 3):(ConstraintsLine + 3*mp.file$Stages + 2) ]
mp.file$Cat2EffMat <- matrix(as.numeric(unlist(strsplit( dumbMatr,' ' ))), nrow = mp.file$Stages, byrow = TRUE)
# Cat2EffNst: Catastrophe 2 effects on abundances
mp.file$Cat2EffNst <- as.numeric(unlist(strsplit( mpFile[ ConstraintsLine + 3*mp.file$Stages + 3 ],' ' )))
# StInit: Initial Abundance for each stage for each population
InitAbLine <- ConstraintsLine + 3*mp.file$Stages + 4 # First line
InitAb <- mpFile[ InitAbLine:(InitAbLine + PopNumber - 1) ]
mp.file$StInit <- matrix(as.numeric(unlist(strsplit( InitAb,' ' ))), nrow = PopNumber, byrow = TRUE)
# ----------------------------------------------------------------------------------------------- #
# Stage Properties
InitStPropLine <- InitAbLine + PopNumber # First line of stage properties in the *.mp file
# StProp: a list object containing information for each stage
StProp <- vector('list',length=0) # Create an empty stage property list object
# For each stage there are five properties, thus five lines of information
allStProp <- 5 * mp.file$Stages
AllStPropLines <- mpFile[ InitStPropLine:(InitStPropLine + allStProp - 1) ]
#
AllStProp <- vector('list',length=0) # Create an empty list of St.Prop. lists
for ( stg in 1:mp.file$Stages ) {
firstinfo <- (stg - 1)*5 + 1 # First line of info for stage 'stg'
# StName: Name of stage
StProp$StName <- AllStPropLines[ firstinfo ]
# StWeight: Relative weight of stage
StProp$StWeight <- AllStPropLines[ firstinfo + 1 ]
#StExclude: Exclude from total (boolean: TRUE or FALSE)
StProp$StExclude <- AllStPropLines[ firstinfo + 2 ]
#StBasisForDD: Basis for density-dependence (boolean: TRUE or FALSE)
StProp$StBasisForDD <- AllStPropLines[ firstinfo + 3 ]
#StBreeding: ###### WHAT IS THIS????
StProp$StBreeding <- AllStPropLines[ firstinfo + 4 ]
AllStProp[[ stg ]] <- StProp
}
mp.file$StProp <- AllStProp
# ----------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------------------------- #
# Population Management Actions
# NPopManage: The number of population managment actions
mgmntLine <- grep('pop mgmnt',mpFile,ignore.case=TRUE)
NPopManage <- unlist(strsplit(mpFile[mgmntLine],' '))
NPopManage <- as.numeric(NPopManage[1])
mp.file$NPopManage <- NPopManage
if ( NPopManage > 0 ) {
# PopManageProp: All of the lines associated with population management, unpartitioned
####mp.file$PopManageProp <- mpFile[ (mgmntLine + 1):(mgmntLine + NPopManage) ]
PopManageProp.colNames <- c('active', 'mng.type', 'from.pop', 'to.pop',
'begin.time','end.time','period','when','num.or.prop',
'number','proportion',
'from.stage','to.stage','cond.type','cond.abund.low',
'cond.abund.high','from.all.stgs','cond.quant.2','cond.func.N1',
'cond.func.N2','abund.each.stg_div_all.stg',
'abund.each.pop_div_all.pop')
mp.file$PopManageProp <- read.table( mpFilePath , skip=mgmntLine, nrows=NPopManage,
col.names=PopManageProp.colNames)
} else {
mp.file$PopManageProp <- "NA" # Fill with 'NA' value
}
# ----------------------------------------------------------------------------------------------- #
# The next three elements of the list are defined based on the position of the population
# management line. This may change in the future and have to be adjusted
# ExtinctThr: Extinction Threshold
mp.file$ExtinctThr <- as.numeric( mpFile[ mgmntLine + NPopManage + 1 ] )
# ExplodeThr: Explosion Threshold
mp.file$ExplodeThr <- as.numeric( mpFile[ mgmntLine + NPopManage + 2 ] )
# stepsize: Stepsize
mp.file$stepsize <- as.numeric( mpFile[ mgmntLine + NPopManage + 3 ] )
# ----------------------------------------------------------------------------------------------- #
# The last element of the list is the population data data.frame
mp.file$PopData_df <- PopData_df
###################################################################################################
## ******************************************************************** ##
## END MP INPUT PARAMETERS SECTION
## ******************************************************************** ##
###################################################################################################
##### BEGIN MP RESULTS READ SECTION ######
print('mp.read.results: Reading simulation results')
# Get number of replications in simulation
SimRepLine <- unlist( strsplit ( mpFile[ (res.start + 1) ], ' ' ) )
results$SimRep <- as.numeric( SimRepLine[1] )
# Read in Pop. ALL Results
pop.all.line <- grep( 'Pop. ALL', mpFile )
results$PopAll <- read.table( mpFilePath, skip=pop.all.line, nrows=mp.file$MaxDur )
names( results$PopAll ) <- c('Mean', 'StDev', 'Min', 'Max')
# Read in individual population Results
# PopInd variable is a 3-dim Array of size 'Duration of Simulation' x 4 x 'Number of Populations'
# The second dimension (length=4) corresponds to the Mean, StDev, Min, and Max population size
###browser()
PopInd <- vector()
# Calculate start of individual population information
pop.ind.start <- pop.all.line + mp.file$MaxDur + 1
# Calculate end of individual population information
pop.ind.stop <- pop.all.line + mp.file$MaxDur + (mp.file$MaxDur+1)*PopNumber
# Identify where the population ID lines are (i.e., the lines that say Pop. #)
pop.ind.ID.lines <-
seq(from=(pop.all.line+mp.file$MaxDur+1),
to=(pop.all.line+mp.file$MaxDur+((mp.file$MaxDur+1)*PopNumber)),
by=(mp.file$MaxDur+1))
# Make a vector of all lines
pop.ind.lines <- pop.ind.start:pop.ind.stop
# Remove ID lines
pop.ind.lines <- setdiff(pop.ind.lines,pop.ind.ID.lines)
# Get these values
pvals <- mpFile[pop.ind.lines]
# Covert to numeric
pvals.num <- as.numeric(unlist(strsplit(pvals, split=" ")))
# Convert to matrix. There are allways four columns in these matrices.
pop.vals <- matrix(pvals.num,ncol=4,byrow=TRUE)
# Make a lits of PopNumber matrices
pop.vals.list <- lapply(split(pop.vals,0:(nrow(pop.vals)-1)%/%mp.file$MaxDur),matrix,nrow=mp.file$MaxDur)
# Convert the list to an array
pop.vals.array <- array(unlist(pop.vals.list),c(mp.file$MaxDur,4,PopNumber))
results$PopInd <- pop.vals.array
#
# for ( pop in 1:PopNumber ){
# # Number of lines past Pop. ALL to skip to start 'pop' values. Last '+1' for Pop. # Label
# start.pop <- pop.all.line + pop*(mp.file$MaxDur +1) + 1
# # Number of lines past Pop. ALL to skip to stop 'pop' values.
# stop.pop <- (start.pop-1) + mp.file$MaxDur
# # Get pop values from mpFile. Initially is read as characters
# pvals <- mpFile[start.pop:stop.pop]
# # Covert to numeric
# pvals.num <- as.numeric(unlist(strsplit(pvals, split=" ")))
# # Convert to matrix. There are allways four columns in these matrices.
# pop.vals <- matrix(pvals.num,ncol=4,byrow=TRUE)
# # Combine new matrix with PopInd matrix
# PopInd <- c(PopInd,pop.vals)
# }
# results$PopInd <- array( PopInd, dim=c(mp.file$MaxDur,4,PopNumber) )
# Read in Occupancy Results - a summary stat. of number of patches occupied at each time step during a simulation
occ.line <- grep( '^Occupancy', mpFile ) # Note carrot used to capture line that begins with 'Occupancy'
results$Occupancy <- read.table( mpFilePath, skip=occ.line, nrows=mp.file$MaxDur )
names( results$Occupancy ) <- c('Mean', 'StDev', 'Min', 'Max')
# Read in Local Occupancy Results - a summary stat. for occupancy rate (prop. of time patches remained occupied)
occ.loc.line <- grep( 'Local Occupancy', mpFile )
results$LocOccupancy <- read.table( mpFilePath, skip=occ.loc.line, nrows=PopNumber )
names( results$LocOccupancy ) <- c('Mean', 'StDev', 'Min', 'Max')
# Read Min., Max., and Ter. - the min, max, and final population abundance values for each
# replication of the mp model. each column is ordered seperately
rep.line <- grep( 'Min. Max. Ter.', mpFile )
results$Replications <- read.table( mpFilePath, skip=rep.line, nrows=results$SimRep )
names( results$Replications ) <- c('Min', 'Max', 'Ter')
# Read Time to cross - used to determine quasi-extinction/ -explosion risk. The number of
# rows in the mp file depends on the stepsize. Each row is a time-step and the first col
# is the number of times the pop. abund. crossed the min threshold for the first time in that
# time step and the second is associated with crossing the max threshold
t.cross.line <- grep( 'Time to cross', mpFile )
t.cross.rows <- (mp.file$MaxDur %/% mp.file$stepsize) + (mp.file$MaxDur %% mp.file$stepsize)
results$TimeCross <- read.table( mpFilePath, skip=t.cross.line, nrows=t.cross.rows )
names( results$TimeCross ) <- c('QuasiExtinct','QuasiExpl')
# Read Final stage abundances results
# results$FinalStAb variable is a 3-dim Array of size Numer of Stages (rows) x 4 (col) x Number of Populations (slices)
# to call the results of one populaiton (e.g., Pop. 1) use third index (e.g., results$FinalStAb[,,1] )
# Columns of the matrix are Mean, StDev, Min, Max
fin.stg.ab.line <- grep( 'Final stage abundances', mpFile )
fin.stg.ab.rows <- PopNumber * mp.file$Stages
FinalStAb <- as.matrix( read.table( mpFilePath, skip=fin.stg.ab.line, nrows=fin.stg.ab.rows ) )
# Seperate out FinalStAb into the different populations
fsa.first <- 1 # Initial first line for partitioning Final Stage Abundance matrix
fsa.list <- lapply(split(FinalStAb,0:(nrow(FinalStAb)-1)%/%mp.file$Stages),matrix,nrow=mp.file$Stages)
fsa.array <- array(unlist(fsa.list),c(mp.file$Stages,4,PopNumber))
results$FinalStAb <- fsa.array
#
# FinalStAb.vect <- vector()
# for ( pop in 1:PopNumber ){
# fsa.last <- pop*mp.file$Stages
# FinalStAb.vect <- c( FinalStAb.vect, FinalStAb[ fsa.first:fsa.last, ] )
# fsa.first <- fsa.last + 1
# }
# results$FinalStAb <- array( FinalStAb.vect, dim=c(mp.file$Stages, 4, PopNumber) )
# Read LocExtDur results
loc.ext.dur.line <- grep( 'LocExtDur', mpFile )
results$LocExtDur <- read.table( mpFilePath, skip=loc.ext.dur.line, nrow=PopNumber )
names( results$LocExtDur ) <- c('Mean','StDev','Max','Min')
# Read Harvest results
# First line is the total harvest results, the second line is the number of lines dedicated
# to individual time units for harvest
harvest.line <- grep( '^Harvest', mpFile )
results$HarvestTot <- read.table( mpFilePath, skip=harvest.line, nrow=1 )
names( results$HarvestTot ) <- c('Mean','StDev','Min','Max')
# Determine number of time steps with harvest data
harvest.steps <- mpFile[ harvest.line + 2 ]
harvest.steps <- unlist( strsplit( harvest.steps, ' ' ) )
harvest.steps <- as.numeric( harvest.steps[1] )
if ( harvest.steps > 0 ) {
results$HarvestSteps <- read.table( mpFilePath, skip=(harvest.line + 2), nrow=harvest.steps )
names( results$HarvestSteps ) <- c('Time', 'Mean', 'StDev', 'Min', 'Max')
}
# Read RiskOfLowHarvest results
risk.harvest.line <- grep( 'RiskOfLowHarvest', mpFile )
results$RiskLowHarvest <- read.table( mpFilePath, skip=risk.harvest.line, nrow=results$SimRep )
# Read Average stage abundances results
# First line after 'avg.st.ab.line' is the number of populations and number of time
# steps recorded (dependent on maxdur and stepsize)
# After this, there are popnumber * time steps lines by number of stages columns
# The values are the stage abundance values for each stage in each population
avg.st.ab.line <- grep( 'Average stage abundances', mpFile )
ab.steps <- mpFile[ avg.st.ab.line + 1 ]
ab.steps <- unlist( strsplit( ab.steps, ' ' ) )
ab.pops <- as.numeric( ab.steps[1] )
ab.steps <- as.numeric( ab.steps[2] )
avg.st.ab.rows <- ab.pops * ab.steps
AvgStAb <- as.matrix( read.table( mpFilePath, skip=(avg.st.ab.line + 1), nrow=avg.st.ab.rows ) )
# Seperate out AvgStAb into different populations
asb.first <- 1
AvgStAb.vect <- vector()
for ( pop in 1:ab.pops ){
asb.last <- pop*ab.steps
AvgStAb.vect <- c( AvgStAb.vect, AvgStAb[ asb.first:asb.last, ] )
asb.first <- asb.last + 1
}
results$AvgStAb <- array( AvgStAb.vect, dim=c(ab.steps, mp.file$Stages, ab.pops) )
mp.file$results <- results
return( list( version = metaVer, mp.file = mp.file) )
} # End mp.read function
|
# --- Simulation settings -----------------------------------------------------
miss='mar'          # missingness mechanism label used in column/weight names
vers='_v5'          # version suffix used in column/weight names
misspct = c(2,5,10,20,30)   # missingness percentages to analyze
seed = 100217       # passed to foo() below; foo() itself never uses it
namefile = 'mar.under.bin.pois.ip.RData'   # output file name
nsim = 1000         # number of simulated data sets
library(survey)
library(data.table)
# --- Cluster paths -----------------------------------------------------------
dirdata = '/pine/scr/b/a/baldoni/Cai/Visit2/Manuscript_MissingData/Data/IPW/'
dirwts = '/pine/scr/b/a/baldoni/Cai/Visit2/Manuscript_MissingData/Data/IPW_Underspec/'
dirwork = '/pine/scr/b/a/baldoni/Cai/Visit2/Manuscript_MissingData/Codes/'
diroutp = '/pine/scr/b/a/baldoni/Cai/Visit2/Manuscript_MissingData/Output/'
setwd(dirwork)
# Per-simulation input files: sampled data and (mis-specified) IP weights
files = paste0(dirdata,'widewt_samp_mar2017_ip_',1:nsim,'.csv')
files.wts = paste0(dirwts,'widewt_samp_mar2017_ipwtsmis_',1:nsim,'.csv')
# foo: fit survey-weighted quasi-Poisson models across simulated data sets.
#
# For each simulation i (files[i] / files.wts[i]) and each missingness
# percentage in `misspct`, this:
#   1. reconstructs the observed outcome / offset among non-missing rows,
#   2. merges in the corresponding inverse-probability weights,
#   3. fits svyglm(response ~ x13 + x15 + offset(log(x6imp))) among
#      subjects with baseline == 0, and
#   4. collects coefficients, SEs and confidence limits.
#
# Arguments:
#   miss    - missingness mechanism label used in column names (e.g. 'mar')
#   vers    - version suffix used in column names (e.g. '_v5')
#   cut     - outcome cut label ('low' or 'hi')
#   misspct - vector of missingness percentages
#   seed    - kept for interface compatibility; not used in this function
#             (no random number generation happens here)
# Value:
#   data.frame with columns sim, misspct, par, beta, se, lb, ub, missinfo.
# Uses globals `files` and `files.wts` defined at the top of this script.
foo <- function(miss, vers, cut, misspct, seed){
  label <- paste0('bin_pois_', cut, '_', miss)   # kept for reference; not used below
  df <- data.frame()
  # Column-name vectors, one entry per missingness percentage
  ymiss <- paste0('y_', miss, vers, '_', misspct)                  # missingness indicator
  yimp <- paste0('y_bin_', cut, '_', miss, '_', misspct, '_imp')   # observed outcome
  timp <- paste0('x6_', miss, '_', misspct, '_imp')                # observed offset variable
  allcomp <- paste0('allcomp_', misspct)                           # complete-observation flag
  for(i in seq_along(files)){
    dat <- read.csv(file = files[i], header = TRUE)
    dat.wts <- read.csv(file = files.wts[i], header = TRUE)
    simnum <- i
    cat(simnum)   # progress indicator
    # Stratum indicator variables
    dat$strat1 <- 1*(dat$strat == 1)
    dat$strat2 <- 1*(dat$strat == 2)
    dat$strat3 <- 1*(dat$strat == 3)
    dat$strat4 <- 1*(dat$strat == 4)
    for(j in seq_along(ymiss)){
      dat.anal <- dat
      # Keep outcome / offset only where the missingness indicator is 0
      dat.anal[[yimp[j]]] <- ifelse(dat.anal[[ymiss[j]]] == 0, dat.anal[[paste0('y_bin_gfr_', cut, '_v3')]], NA)
      dat.anal[[timp[j]]] <- ifelse(dat.anal[[ymiss[j]]] == 0, dat.anal[['x6']], NA)
      dat.anal[[allcomp[j]]] <- ifelse(dat.anal[[ymiss[j]]] == 0, 1, 0)
      # Subset to the variables needed for this model
      subvar <- c('BGid','strat','bghhsub_s2','subid',   # design variables
                  'strat1','strat2','strat3','age_base','x2','x8','x12','x13','x14','x15',   # covariates of the under-specified models
                  timp[j], yimp[j],                      # partially observed variables
                  paste0('y1_bin_gfr_', cut, '_v3'))
      subdat <- subset(dat.anal, select = subvar)
      # Merge in the IP weight column for this missingness percentage
      subdat <- merge(x = subdat,
                      y = subset(dat.wts, select = c('subid', paste0('W_ip_', miss, vers, '_', misspct[j]))),
                      by = 'subid', all.x = TRUE)
      # Convenience aliases used in the model formula / design below
      subdat$response <- subdat[[yimp[j]]]
      subdat$baseline <- subdat[[paste0('y1_bin_gfr_', cut, '_v3')]]
      subdat$x6imp <- subdat[[timp[j]]]
      subdat$wts <- subdat[[paste0('W_ip_', miss, vers, '_', misspct[j])]]
      # Survey design with IP weights; fit restricted to baseline == 0
      design <- svydesign(id = ~BGid, strata = ~strat, weights = ~wts, data = subdat)
      model <- svyglm(response ~ x13 + x15 + offset(log(x6imp)), subset = (baseline == 0),
                      family = quasipoisson(link = "log"), design = design)
      df <- rbind(df, data.frame(sim = simnum, missing = misspct[j],
                                 par = c('Int','x13','x15'),
                                 results = model$coefficients, se = sqrt(diag(model$cov.unscaled)),
                                 X.lower = confint(model)[,1], upper. = confint(model)[,2], missInfo = '0'))
    }
  }
  colnames(df) <- c('sim','misspct','par','beta','se','lb','ub','missinfo')
  rownames(df) <- NULL
  df
}
# Run both outcome cuts and save the combined results
df.low = foo(miss=miss,vers=vers,cut='low',misspct=misspct,seed=seed)
df.hi = foo(miss=miss,vers=vers,cut='hi',misspct=misspct,seed=seed)
save(df.low,df.hi,file=paste0(diroutp,namefile))
| /ip/ip_under_bin_pois_mar.R | no_license | plbaldoni/HCHSattrition | R | false | false | 3,236 | r | miss='mar'
# (duplicate transcription of the settings block above; `miss` is defined
# on the preceding line)
vers='_v5'
misspct = c(2,5,10,20,30)
seed = 100217
namefile = 'mar.under.bin.pois.ip.RData'
nsim = 1000
library(survey)
library(data.table)
# Cluster paths for input data, weights, code, and output
dirdata = '/pine/scr/b/a/baldoni/Cai/Visit2/Manuscript_MissingData/Data/IPW/'
dirwts = '/pine/scr/b/a/baldoni/Cai/Visit2/Manuscript_MissingData/Data/IPW_Underspec/'
dirwork = '/pine/scr/b/a/baldoni/Cai/Visit2/Manuscript_MissingData/Codes/'
diroutp = '/pine/scr/b/a/baldoni/Cai/Visit2/Manuscript_MissingData/Output/'
setwd(dirwork)
# Per-simulation input files: sampled data and IP weights
files = paste0(dirdata,'widewt_samp_mar2017_ip_',1:nsim,'.csv')
files.wts = paste0(dirwts,'widewt_samp_mar2017_ipwtsmis_',1:nsim,'.csv')
# foo: fits survey-weighted quasi-Poisson models
# (response ~ x13 + x15 + offset(log(x6imp))) for every simulated data set
# and every missingness percentage, returning a data.frame of coefficients,
# SEs and confidence limits. Uses globals `files` and `files.wts`.
# Note: `seed` is accepted but never used inside the function.
foo = function(miss,vers,cut,misspct,seed){
label = paste0('bin_pois_',cut,'_',miss)
df = data.frame()
# Column-name vectors, one entry per missingness percentage
ymiss = paste0('y_',miss,vers,'_',misspct)
yimp = paste0('y_bin_',cut,'_',miss,'_',misspct,'_imp')
timp = paste0('x6_',miss,'_',misspct,'_imp')
allcomp = paste0('allcomp_',misspct)
for(i in 1:length(files)){
dat = read.csv(file=files[i],header=T)
dat.wts = read.csv(file=files.wts[i],header=T)
simnum = i
cat(simnum)
###################s
#Creating variables
# Stratum indicator variables
dat$strat1 = 1*(dat$strat==1)
dat$strat2 = 1*(dat$strat==2)
dat$strat3 = 1*(dat$strat==3)
dat$strat4 = 1*(dat$strat==4)
for(j in 1:length(ymiss)){
dat.anal = dat
# Keep outcome/offset only where the missingness indicator is 0
dat.anal[[yimp[j]]] = ifelse(dat.anal[[ymiss[j]]]==0,dat.anal[[paste0('y_bin_gfr_',cut,'_v3')]],NA)
dat.anal[[timp[j]]] = ifelse(dat.anal[[ymiss[j]]]==0,dat.anal[['x6']],NA)
dat.anal[[allcomp[j]]] = ifelse(dat.anal[[ymiss[j]]]==0,1,0)
### Running Multiple Imputation
subvar = c('BGid','strat','bghhsub_s2','subid', #Design variables
'strat1','strat2','strat3','age_base','x2','x8','x12','x13','x14','x15', #Variables that Poulami is using in her 'Under'-specified models
timp[j],yimp[j], #Missing variables
paste0('y1_bin_gfr_',cut,'_v3'))
subdat = subset(dat.anal,select=subvar)
# Merge in the inverse-probability weight for this missingness percentage
subdat = merge(x=subdat,y=subset(dat.wts,select=c('subid',paste0('W_ip_',miss,vers,'_',misspct[j]))),by='subid',all.x=T)
subdat$response = subdat[[yimp[j]]]
subdat$baseline = subdat[[paste0('y1_bin_gfr_',cut,'_v3')]]
subdat$x6imp = subdat[[timp[j]]]
subdat$wts = subdat[[paste0('W_ip_',miss,vers,'_',misspct[j])]]
### Analyzing data ###
# Survey design with IP weights; fit restricted to baseline == 0
design = svydesign(id=~BGid, strata=~strat, weights=~wts, data=subdat)
model = svyglm(response~x13+x15+offset(log(x6imp)),subset=(baseline==0),
family=quasipoisson(link = "log"),design=design)
df = rbind(df,data.frame(sim=simnum,missing=misspct[j],
par=c('Int','x13','x15'),
results=model$coefficients,se=sqrt(diag(model$cov.unscaled)),
X.lower=confint(model)[,1],upper.=confint(model)[,2],missInfo='0'))
}
}
colnames(df) = c('sim','misspct','par','beta','se','lb','ub','missinfo')
rownames(df) = NULL
return(df)
}
# Run both outcome cuts and save the combined results
df.low = foo(miss=miss,vers=vers,cut='low',misspct=misspct,seed=seed)
df.hi = foo(miss=miss,vers=vers,cut='hi',misspct=misspct,seed=seed)
save(df.low,df.hi,file=paste0(diroutp,namefile))
|
/r/Lecture60_pca_r.r | permissive | praveentn/hgwxx7 | R | false | false | 1,524 | r | ||
#'@title Get lake average turbidity
#'
#'@inheritParams get_kd_avg
#'
#'@return data.frame with one row per requested site id (input order is
#'  preserved) and columns \code{site_id} and \code{turbidity_avg}.
#'
#'@author Luke Winslow
#'
#'@export
get_turbidity_avg <- function(ids, src='in-situ'){
  # Match sites case-insensitively by normalizing the input to upper case
  ids <- toupper(ids)
  # Restrict to the requested sites / source up front so later steps
  # operate on less data
  site_obs <- filter(turbidity, site_id %in% ids, source == src)
  # Per-site mean turbidity; the right join back onto `ids` keeps the
  # output rows in the same order as the input (sites without matching
  # observations come back as NA rows)
  site_obs %>%
    group_by(site_id) %>%
    summarise(turbidity_avg = mean(turbidity_ntu)) %>%
    right_join(data.frame(site_id = ids, stringsAsFactors = FALSE))
}
| /R/get_turbidity_avg.R | permissive | mhines-usgs/lakeattributes | R | false | false | 497 | r | #'@title Get lake average turbidity
#'
#'@inheritParams get_kd_avg
#'
#'
#'@author Luke Winslow
#'
#'
#'
#'@export
# Mean turbidity per requested lake/site. Site ids are matched
# case-insensitively by upper-casing the input; reads a `turbidity`
# data frame assumed in scope (likely package data -- confirm).
get_turbidity_avg = function(ids, src='in-situ'){
ids = toupper(ids)
#first filter by site ids so we're using a smaller dataset
tmp = filter(turbidity, site_id %in% ids, source==src)
# average per site, then right-join back onto the input ids so the output
# rows follow the input order (sites with no data appear as NA rows)
tmp = group_by(tmp, site_id) %>%
summarise(turbidity_avg=mean(turbidity_ntu)) %>%
right_join(data.frame(site_id=ids, stringsAsFactors=FALSE)) #this maintains order
return(tmp)
}
|
MoM_Value_Size_monthly_returns <- read_excel("MoM_Value_Size_monthly_returns.xlsx",
+ col_types = c("date", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric"))
round(MoM_Value_Size_monthly_returns[-1],2)
colnames(MoM_Value_Size_monthly_returns)[13]="Avg 7-Hi Prior"
MoM_Value_Size_monthly_returns = as.xts(MoM_Value_Size_monthly_returns[-1], order.by = MoM_Value_Size_monthly_returns$Date) %>% ./100
factor.returns = MoM_Value_Size_monthly_returns[,c(11,12,14,16,32,34),]
factor.returns.table = data.frame(t(table.AnnualizedReturns(factor.returns)))
levels(factor.returns.table) = colnames(factor.returns.table)
factor.returns.table$factors = factor(rownames(factor.returns.table))
library(plotly)
dat1 <- data.frame(
sex = factor(c("Female","Female","Male","Male")),
time = factor(c("Lunch","Dinner","Lunch","Dinner"), levels=c("Lunch","Dinner")),
total_bill = c(13.53, 16.81, 16.24, 17.42)
)
# Bar graph, time on x-axis, color fill grouped by sex -- use position_dodge()
ggplot(data=factor.returns.table, aes(x=factors, y=Annualized.Std.Dev, group=factors)) +
geom_bar(colour="black", stat="identity",
position=position_dodge(),
size=.3) + # Thinner lines
xlab("Time of day") + ylab("Total bill") + # Set axis labels
ggtitle("Average bill for 2 people") + # Set title
theme_bw()
ggplotly()
| /R/MoM_value_size.R | no_license | ebna/momentum | R | false | false | 3,175 | r | MoM_Value_Size_monthly_returns <- read_excel("MoM_Value_Size_monthly_returns.xlsx",
+ col_types = c("date", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric", "numeric",
+ "numeric", "numeric"))
# Rounded preview of the numeric columns (result is not stored anywhere)
round(MoM_Value_Size_monthly_returns[-1],2)
# Give column 13 a readable name
colnames(MoM_Value_Size_monthly_returns)[13]="Avg 7-Hi Prior"
# Convert to an xts series indexed by Date; divide by 100 (percent -> decimal)
MoM_Value_Size_monthly_returns = as.xts(MoM_Value_Size_monthly_returns[-1], order.by = MoM_Value_Size_monthly_returns$Date) %>% ./100
# Select the factor-return columns of interest
factor.returns = MoM_Value_Size_monthly_returns[,c(11,12,14,16,32,34),]
factor.returns.table = data.frame(t(table.AnnualizedReturns(factor.returns)))
# NOTE(review): levels<- on a data.frame only sets a "levels" attribute;
# likely a no-op -- confirm intent.
levels(factor.returns.table) = colnames(factor.returns.table)
factor.returns.table$factors = factor(rownames(factor.returns.table))
library(plotly)
# Example data frame from the ggplot2 docs; not used by the plot below
dat1 <- data.frame(
sex = factor(c("Female","Female","Male","Male")),
time = factor(c("Lunch","Dinner","Lunch","Dinner"), levels=c("Lunch","Dinner")),
total_bill = c(13.53, 16.81, 16.24, 17.42)
)
# Bar graph, time on x-axis, color fill grouped by sex -- use position_dodge()
ggplot(data=factor.returns.table, aes(x=factors, y=Annualized.Std.Dev, group=factors)) +
geom_bar(colour="black", stat="identity",
position=position_dodge(),
size=.3) + # Thinner lines
# NOTE(review): axis labels/title below are leftovers from the ggplot2
# example and do not describe the plotted data
xlab("Time of day") + ylab("Total bill") + # Set axis labels
ggtitle("Average bill for 2 people") + # Set title
theme_bw()
ggplotly()
|
# HUGO_map_prep.R
#
# Purpose: Sample processing of gene expression studies with RNA seq and
# microarray platforms
# Version: 1.2
# Date: 2018 02 16
# Author: Gregory Huang <gregory.huang@mail.utoronto.ca>
#
#
# ToDo:
# Notes: R Code adapted from RPR-GEO2R ABC learning unit.
#
# Quantile normalization needs to cite:
# Bolstad, B. M., Irizarry R. A., Astrand, M, and Speed, T. P. (2003)
# A Comparison of Normalization Methods for High Density
# Oligonucleotide Array Data Based on Bias and Variance.
# Bioinformatics 19(2) ,pp 185-193.
#
# Should we calculate DE instead and normalize that? There is otherwise
# a danger of ML fitting the noise more than the otherwise sparse
# signal. We could use (test - control) * pDE and set all insignificant
# changes to 0.?
#
# ==============================================================================
# ==== PACKAGES  ==============================================================
# Install-if-missing bootstrap for the Bioconductor packages used below.
# NOTE(review): biocLite()/biocLite.R is the legacy (pre-BiocManager)
# Bioconductor installer -- confirm which R/Bioconductor version this is
# meant to run on.
# Biobase: provides ExpressionSet and exprs() used throughout
if (! require(Biobase, quietly=TRUE)) {
if (! exists("biocLite")) {
source("https://bioconductor.org/biocLite.R")
}
biocLite("Biobase")
library(Biobase)
}
# GEOquery: provides getGEO() used to download the GSE series below
if (! require(GEOquery, quietly=TRUE)) {
if (! exists("biocLite")) {
source("https://bioconductor.org/biocLite.R")
}
biocLite("GEOquery")
library(GEOquery)
}
# for quantile normalization ...
if (! require(preprocessCore, quietly=TRUE)) {
if (! exists("biocLite")) {
source("https://bioconductor.org/biocLite.R")
}
biocLite("preprocessCore")
library(preprocessCore)
}
# ==============================================================================
#
# ==== Microarray data =========
#
# ==============================================================================
# Load in GSE gene expression data from GEO (getGEO returns a list; the
# first element is the ExpressionSet)
GSE54017<- getGEO("GSE54017", GSEMatrix =TRUE, AnnotGPL=TRUE)
GSE54017 <- GSE54017[[1]]
# Save so there's no need to go to GEO every time
save(GSE54017, file="GSE54017.RData")
load("GSE54017.RData")
# ==== What data do we have?
nrow(exprs(GSE54017)) # 22215 rows
ncol(exprs(GSE54017)) # 8 (4 control replicates, 4 treatment replicates)
colnames(exprs(GSE54017)) # sample names, in the order of c1-t1-c2-t2-c3-t3-c4-t4
# Assess distributions
# define one color per group; colorRampPalette(...)(1) evaluates the ramp
# at a single point, so c1/c2 are single colors, not palettes
c1 <- colorRampPalette(c("#C27E7E", "#816EBA"))(1) # test
c2 <- colorRampPalette(c("#758AC9", "#82C9B6"))(1) # ctrl
# a color vector for the 8 samples (4 ctrl + 4 test) ...
myArrayCols <- c(rep(c2[1], 4), # ctrl 4 reps
rep(c1[1], 4)) # test 4 reps
#reorder - 1,3,5,7 are control, and 2,4,6,8 are treatment
iReorder <- c(1,3,5,7,2,4,6,8)
# Boxplot of log expression values, controls grouped first
boxplot(log(exprs(GSE54017)[ , iReorder]),
boxwex = 0.6,
notch = TRUE,
main = "GSE54017",
outline = FALSE,
col = myArrayCols)
# ==== extract columns in new order (ca..cd = control, ta..td = treatment)
myEx <- exprs(GSE54017)[ , iReorder]
colnames(myEx) <- c("ca", "cb", "cc", "cd",
"ta", "tb", "tc", "td")
boxplot(log(myEx),
boxwex = 0.6,
notch = TRUE,
main = "GSE54017",
outline = FALSE,
col = myArrayCols)
# How to normalize? One way is to quantile normalize all replicate sets
# individually, but keep the trend differences between them.
# (normalize.quantiles with copy = FALSE modifies myEx in place)
myEx[ , 1:4] <- normalize.quantiles(myEx[ , 1:4], copy = FALSE)
myEx[ , 5:8] <- normalize.quantiles(myEx[ , 5:8], copy = FALSE)
boxplot(log(myEx),
boxwex = 0.6,
notch = TRUE,
main = "GSE54017-normQuant",
outline = FALSE,
col = myArrayCols)
# ==== Prepare annotation data
# Extract probe IDs and gene symbols from the ExpressionSet's feature data
str(GSE54017@featureData@data) # Have annotations been properly loaded ?
myAnnot <- GSE54017@featureData@data[ , c("ID", "Gene symbol")]
str(myAnnot) # confirm
colnames(myAnnot) <- c("probeIDs", "symbols") # rename
myAnnot$probeIDs <- as.character(myAnnot$probeIDs) # convert to character
myAnnot$symbols <- as.character(myAnnot$symbols) # convert to character
any(is.na(myAnnot$symbols)) # FALSE
sum(myAnnot$symbols == "") # 1069 probes are not annotated with a
# HUGO symbol. We will throw them out.
sum(grepl("/", myAnnot$symbols)) # 1223 probes are annotated with more than
# one symbol. We will throw them out too.
#just to keep track of number of rows
original_myEx <- nrow(myEx)
original_myAnnot <- nrow(myAnnot)
# Drop multi-symbol probes ("a///b") from both the expression matrix and
# the annotation. (`==` binds before `!`, so !remove_dup == TRUE is
# !(remove_dup == TRUE), i.e. keep rows without "/" in the symbol.)
remove_dup <- grepl("/", myAnnot$symbols) #get index where there are dups (abc///abd)
myEx <- myEx[!remove_dup == TRUE,] # get rid of rows with dups in myEx
myAnnot <- myAnnot[!(remove_dup) == TRUE,] # same for myAnnot
# Drop probes without any symbol annotation
remove_empty <- myAnnot$symbols == "" # get index where there are no symbols
myEx <- myEx[!(remove_empty)==TRUE,] # get rid of rows with no symbols in myEx
myAnnot <- myAnnot[!(remove_empty)==TRUE,] # same for myAnnot
#double check again, should be both zero
sum(myAnnot$symbols == "") # 0
sum(grepl("/", myAnnot$symbols)) # 0
sum(duplicated(myAnnot$symbols[myAnnot$symbols != ""])) # 7421 duplicated
# symbols (not counting the un-annotated rows).
# How many unique symbols do these contain?
x <- unique(myAnnot$symbols[duplicated(myAnnot$symbols)]) # 4499 ...
# 7421 duplicated symbols, 4499 unique ones in the set of 7421
# ==================================================
# === More considerations of the symbol annotations. How do the existing
# symbols compare to our target vector of gene symbols?
# Loads HUGOsymbols (reference symbol vector) and synMap (symbol/synonym map)
load("./inst/extdata/HUGOsymbols.RData")
load("./inst/extdata/synMap.RData")
# How many target symbols do we have covered?
# (note: `$symbol` relies on data.frame partial matching to reach the
# "symbols" column)
sum(HUGOsymbols %in% unique(myAnnot$symbol)) # 11920; 11920/20347 = ~58%
# make a vector of missing symbols
missingSymbols <- HUGOsymbols[!(HUGOsymbols %in% unique(myAnnot$symbol))]
# What annotation symbols do we have that are NOT in our symbol list?
x <- unique(myAnnot$symbol) #12502
extraSymbols <- x[!(x %in% HUGOsymbols)] #582
head(extraSymbols, 50)
#let's check for symbols we have that don't match HUGO
matched_symbols <- match(myAnnot$symbols,HUGOsymbols)
#and check symbols we have that match with synonyms (precautionary)
# NOTE(review): "synoyms" looks like a typo for "synonyms"; `$` will not
# partial-match a misspelling, so unless the synMap column really is named
# "synoyms" this yields all-NA matches and no synonym substitution -- verify.
matched_synonyms <- match(myAnnot$symbols, synMap$synoyms)
#get the locations of where the myAnnot$symbols match with synMap$synonyms
need_substitute <- !is.na(matched_synonyms)
#keep the synonym matches (replace synonyms with their canonical symbols)
myAnnot$symbols[need_substitute] <- synMap$symbols[matched_synonyms[need_substitute]]
#now we deal with the HUGO Symbol mismatches. There are 758 of them.
sum(is.na(match(myAnnot$symbols, HUGOsymbols)))
#remove the non-matches (rows whose symbol is not a current HUGO symbol)
ignore <- (is.na(match(myAnnot$symbols, HUGOsymbols)))
myEx <- myEx[!ignore,]
myAnnot <- myAnnot[!ignore,]
nrow(myEx)
nrow(myAnnot) #19165 lines left for both myEx and myAnnot
#check if there are any NAs left; 0
sum(is.na(match(myAnnot$symbols,HUGOsymbols)))
#Time to deal with the HUGO duplicates
#Use two duplicated() functions combined with an OR statement
#to make sure that every duplicated item is selected
from_beginning <- duplicated(myAnnot$symbols)
from_behind <- duplicated(myAnnot$symbols, fromLast = TRUE)
combined <- (from_beginning | from_behind)
all_duplicates <- myAnnot[combined, ] #all values with duplicates
all_uniques <- myAnnot[!combined, ] #quick sanity check; all unique values
# add up to 19165
#now that we have a key for duplicates, remove them by the control group medians.
# Strategy: for each duplicated symbol, keep only the probe with the
# highest median control-group expression.
dfCombined <- myEx[combined,]
nrow(dfCombined) #11614 rows of symbols that have duplicates
#use this data frame to deal with the duplicate controls so medians are calculated
dfCombined_control <- data.frame(dfCombined[,1:4])
#medians for the duplicates are calculated (row-wise over the 4 control columns)
med <- apply(dfCombined_control, 1, median)
#bind the medians and symbol names for the controls to the df
dfCombined_control <- cbind(dfCombined_control, median = med)
dfCombined_control <- cbind(dfCombined_control, symbol = all_duplicates$symbols)
#Order the df by the medians from largest to smallest
dfCombined_control <- dfCombined_control[order(dfCombined_control$median, decreasing = TRUE),]
#Top-down duplicate search, this way, the duplicates with smaller medians get removed
dfCombined_control <- dfCombined_control[!duplicated(dfCombined_control$symbol),]
nrow(dfCombined_control) #4369 rows remain; that means we deleted 7245 rows of duplicates
# list of probeIDs that will be in our resulting data frame
# (kept duplicate representatives + rows that were unique to begin with)
final_rownames <- append(rownames(dfCombined_control), rownames(all_uniques))
#prepping for final data frame
result <- myEx[final_rownames,]
result_myAnnot <- myAnnot[final_rownames,]
#Calculate the control and treatment group means (row-wise)
ctrl_mean <- apply(result[,1:4], 1, mean)
treatment_mean <- apply(result[,5:8], 1, mean)
#===Final product===
#setup data frames
HUGO_result <-data.frame(symbol = HUGOsymbols, stringsAsFactors = FALSE)
setup <- data.frame(symbol = result_myAnnot$symbols,ctrl_mean,treatment_mean)
#add ctrl and treatment averages to the full list of HUGO symbols
HUGO_result$GSE54017.ctrl <- setup$ctrl_mean[match(HUGO_result$symbol,
setup$symbol)]
HUGO_result$GSE54017.treatment <- setup$treatment_mean[match(HUGO_result$symbol,
setup$symbol)]
coverage = (nrow(setup)/nrow(HUGO_result))*100
coverage #~58% coverage, same with earlier result; indeed, we kept the unique entries
# [END]
| /HUGO_map_prep.R | no_license | greghuang8/BCB420_Bioinformatics_Symbols | R | false | false | 9,368 | r | # HUGO_map_prep.R
#
# Purpose: Sample processing of gene expression studies with RNA seq and
# microarray platforms
# Version: 1.2
# Date: 2018 02 16
# Author: Gregory Huang <gregory.huang@mail.utoronto.ca>
#
#
# ToDo:
# Notes: R Code adapted from RPR-GEO2R ABC learning unit.
#
# Quantile normalization needs to cite:
# Bolstad, B. M., Irizarry R. A., Astrand, M, and Speed, T. P. (2003)
# A Comparison of Normalization Methods for High Density
# Oligonucleotide Array Data Based on Bias and Variance.
# Bioinformatics 19(2) ,pp 185-193.
#
# Should we calculate DE instead and normalize that? There is otherwise
# a danger of ML fitting the noise more than the otherwise sparse
# signal. We could use (test - control) * pDE and set all insignificant
# changes to 0.?
#
# ==============================================================================
# ==== PACKAGES ==============================================================
# Install (via Bioconductor's legacy biocLite installer) and attach every
# package this script needs. Same behaviour as the original per-package
# blocks: try to attach quietly; on failure, fetch biocLite once, install
# the package, then attach it. preprocessCore is needed for quantile
# normalization (Bolstad et al. 2003, cited in the header).
invisible(lapply(c("Biobase", "GEOquery", "preprocessCore"), function(pkg) {
  if (! require(pkg, character.only = TRUE, quietly = TRUE)) {
    if (! exists("biocLite")) {
      source("https://bioconductor.org/biocLite.R")
    }
    biocLite(pkg)
    library(pkg, character.only = TRUE)
  }
}))
# ==============================================================================
#
# ==== Microarray data =========
#
# ==============================================================================
# Load in GSE gene expression data from GEO
# ---- Load expression data from GEO ----
# Download series GSE54017 with GPL-based gene annotations attached.
# getGEO() returns a list of ExpressionSets; this series has exactly one.
GSE54017<- getGEO("GSE54017", GSEMatrix =TRUE, AnnotGPL=TRUE)
GSE54017 <- GSE54017[[1]]
# Save so there's no need to go to GEO every time
save(GSE54017, file="GSE54017.RData")
load("GSE54017.RData")
# ==== What data do we have?
nrow(exprs(GSE54017)) # 22215 rows
ncol(exprs(GSE54017)) # 8 (4 control replicates, 4 treatment replicates)
colnames(exprs(GSE54017)) # sample names, in the order of c1-t1-c2-t2-c3-t3-c4-t4
# ---- Assess distributions and quantile-normalize ----
# Colour scheme: take the first colour of each ramp for the test and
# control groups respectively.
c1 <- colorRampPalette(c("#C27E7E", "#816EBA"))(1) # test
c2 <- colorRampPalette(c("#758AC9", "#82C9B6"))(1) # ctrl
# One colour per sample: four control replicates, then four test replicates.
myArrayCols <- c(rep(c2[1], 4), rep(c1[1], 4))
# Reorder columns so all controls (1,3,5,7) precede all treatments (2,4,6,8).
iReorder <- c(1, 3, 5, 7, 2, 4, 6, 8)
# Helper so the three diagnostic boxplots share identical settings.
plotLogBox <- function(m, title) {
  boxplot(log(m),
          boxwex = 0.6,
          notch = TRUE,
          main = title,
          outline = FALSE,
          col = myArrayCols)
}
plotLogBox(exprs(GSE54017)[ , iReorder], "GSE54017")
# ==== Extract the reordered columns and give them short ctrl/test names.
myEx <- exprs(GSE54017)[ , iReorder]
colnames(myEx) <- c("ca", "cb", "cc", "cd", "ta", "tb", "tc", "td")
plotLogBox(myEx, "GSE54017")
# How to normalize? One way is to quantile normalize all replicate sets
# individually, but keep the trend differences between them.
myEx[ , 1:4] <- normalize.quantiles(myEx[ , 1:4], copy = FALSE)
myEx[ , 5:8] <- normalize.quantiles(myEx[ , 5:8], copy = FALSE)
plotLogBox(myEx, "GSE54017-normQuant")
# ==== Prepare annotation data
# featureData holds the probe annotations shipped with the GPL platform.
str(GSE54017@featureData@data) # Have annotations been properly loaded ?
myAnnot <- GSE54017@featureData@data[ , c("ID", "Gene symbol")]
str(myAnnot) # confirm
colnames(myAnnot) <- c("probeIDs", "symbols") # rename
myAnnot$probeIDs <- as.character(myAnnot$probeIDs) # convert to character
myAnnot$symbols <- as.character(myAnnot$symbols) # convert to character
any(is.na(myAnnot$symbols)) # FALSE
sum(myAnnot$symbols == "") # 1069 probes are not annotated with a
# HUGO symbol. We will throw them out.
sum(grepl("/", myAnnot$symbols)) # 1223 probes are annotated with more than
# one symbol. We will throw them out too.
#just to keep track of number of rows
# NOTE(review): despite the names, these two variables hold the original
# row COUNTS, not copies of the data frames.
original_myEx <- nrow(myEx)
original_myAnnot <- nrow(myAnnot)
remove_dup <- grepl("/", myAnnot$symbols) #get index where there are dups (abc///abd)
# "!" binds more loosely than "==" in R, so !remove_dup == TRUE parses as
# !(remove_dup == TRUE) -- equivalent to !remove_dup for this logical vector.
myEx <- myEx[!remove_dup == TRUE,] # get rid of rows with dups in myEx
myAnnot <- myAnnot[!(remove_dup) == TRUE,] # same for myAnnot
remove_empty <- myAnnot$symbols == "" # get index where there are no symbols
myEx <- myEx[!(remove_empty)==TRUE,] # get rid of rows with no symbols in myEx
myAnnot <- myAnnot[!(remove_empty)==TRUE,] # same for myAnnot
#double check again, should be both zero
sum(myAnnot$symbols == "") # 0
sum(grepl("/", myAnnot$symbols)) # 0
sum(duplicated(myAnnot$symbols[myAnnot$symbols != ""])) # 7421 duplicated
# symbols (not counting the un-annotated rows).
# How many unique symbols do these contain?
x <- unique(myAnnot$symbols[duplicated(myAnnot$symbols)]) # 4499 ...
# 7421 duplicated symbols, 4499 unique ones in the set of 7421
# ==================================================
# === More considerations of the symbol annotations. How do the existing
# symbols compare to our target vector of gene symbols?
# HUGOsymbols: the target vector of symbols; synMap: a synonym -> symbol map.
load("./inst/extdata/HUGOsymbols.RData")
load("./inst/extdata/synMap.RData")
# How many target symbols do we have covered?
# NOTE(review): myAnnot$symbol (singular) only works through R's partial
# matching of "$" on data frames -- the column is actually called "symbols".
sum(HUGOsymbols %in% unique(myAnnot$symbol)) # 11920; 11920/20347 = ~58%
# make a vector of missing symbols
missingSymbols <- HUGOsymbols[!(HUGOsymbols %in% unique(myAnnot$symbol))]
# What annotation symbols do we have that are NOT in our symbol list?
x <- unique(myAnnot$symbol) #12502
extraSymbols <- x[!(x %in% HUGOsymbols)] #582
head(extraSymbols, 50)
#let's check for symbols we have that don't match HUGO
# NOTE(review): matched_symbols is never used afterwards.
matched_symbols <- match(myAnnot$symbols,HUGOsymbols)
#and check symbols we have that match with synonyms (precautionary)
# NOTE(review): "synoyms" looks like a typo for "synonyms". If synMap has no
# column literally named "synoyms", then synMap$synoyms is NULL ("synoyms"
# is not a prefix of "synonyms", so "$" partial matching cannot rescue it),
# match() against NULL returns all NA, and every synonym substitution below
# is silently skipped. Confirm the actual column name inside synMap.RData.
matched_synonyms <- match(myAnnot$symbols, synMap$synoyms)
#get the locations of where the myAnnot$symbols match with synMap$synonyms
need_substitute <- !is.na(matched_synonyms)
#keep the synonym matches
myAnnot$symbols[need_substitute] <- synMap$symbols[matched_synonyms[need_substitute]]
#now we deal with the HUGO Symbol mismatches. There are 758 of them.
sum(is.na(match(myAnnot$symbols, HUGOsymbols)))
#remove the non-matches
ignore <- (is.na(match(myAnnot$symbols, HUGOsymbols)))
myEx <- myEx[!ignore,]
myAnnot <- myAnnot[!ignore,]
nrow(myEx)
nrow(myAnnot) #19165 lines left for both myEx and myAnnot
#check if there are any NAs left; 0
sum(is.na(match(myAnnot$symbols,HUGOsymbols)))
#Time to deal with the HUGO duplicates
#Use two duplicated() functions combined with an OR statement
#to make sure that every duplicated item is selected
# (duplicated() alone misses the FIRST occurrence of each duplicate;
# scanning from both ends flags every member of a duplicate group.)
from_beginning <- duplicated(myAnnot$symbols)
from_behind <- duplicated(myAnnot$symbols, fromLast = TRUE)
combined <- (from_beginning | from_behind)
all_duplicates <- myAnnot[combined, ] #all values with duplicates
all_uniques <- myAnnot[!combined, ] #quick sanity check; all unique values
# add up to 19165
#now that we have a key for duplicates, remove them by the control group medians.
dfCombined <- myEx[combined,]
nrow(dfCombined) #11614 rows of symbols that have duplicates
#use this data frame to deal with the duplicate controls so medians are calculated
# (columns 1:4 are the four control replicates)
dfCombined_control <- data.frame(dfCombined[,1:4])
#medians for the duplicates are calculated
med <- apply(dfCombined_control, 1, median)
#bind the medians and symbol names for the controls to the df
dfCombined_control <- cbind(dfCombined_control, median = med)
dfCombined_control <- cbind(dfCombined_control, symbol = all_duplicates$symbols)
#Order the df by the medians from largest to smallest
dfCombined_control <- dfCombined_control[order(dfCombined_control$median, decreasing = TRUE),]
#Top-down duplicate search, this way, the duplicates with smaller medians get removed
# (for each symbol only the probe with the HIGHEST control median survives)
dfCombined_control <- dfCombined_control[!duplicated(dfCombined_control$symbol),]
nrow(dfCombined_control) #4369 rows remain; that means we deleted 7245 rows of duplicates
# list of probeIDs that will be in our resulting data frame
final_rownames <- append(rownames(dfCombined_control), rownames(all_uniques))
#prepping for final data frame
result <- myEx[final_rownames,]
result_myAnnot <- myAnnot[final_rownames,]
#Calculate the control and treatment group means
ctrl_mean <- apply(result[,1:4], 1, mean)
treatment_mean <- apply(result[,5:8], 1, mean)
#===Final product===
#setup data frames
HUGO_result <-data.frame(symbol = HUGOsymbols, stringsAsFactors = FALSE)
# NOTE(review): 'setup' does not pass stringsAsFactors = FALSE; under R < 4.0
# its symbol column becomes a factor (match() below still works via coercion).
setup <- data.frame(symbol = result_myAnnot$symbols,ctrl_mean,treatment_mean)
# ---- Transfer the ctrl / treatment averages onto the full HUGO list ----
# Symbols without a surviving probe are left as NA.
hugoRows <- match(HUGO_result$symbol, setup$symbol)
HUGO_result$GSE54017.ctrl <- setup$ctrl_mean[hugoRows]
HUGO_result$GSE54017.treatment <- setup$treatment_mean[hugoRows]
coverage <- (nrow(setup) / nrow(HUGO_result)) * 100
coverage # ~58% coverage, same as the earlier result; we kept the unique entries
# [END]
|
#' Build the "$S" (set) membership table for one set-valued variable
#'
#' For every distinct category of \code{variableName} in the SQLite table
#' \code{main.dataTable}, records (0/1) whether each symbolic object in
#' \code{main.symObjTable} contains that category. Categories are fetched
#' in groups of 64 -- presumably because of SQLite's 64-table join limit.
#'
#' @param variableName quoted column name; the surrounding quote characters
#'   are stripped for the output column header
#' @param concept character vector of column names whose '.'-separated
#'   concatenation identifies a symbolic object
#' @param sym.obj.names names of the symbolic objects (one output row each)
#'
#' @return data.frame with a leading "$S" marker column, a category-count
#'   column, and one 0/1 membership column per category
#' @keywords internal
process.set.variable <- function(variableName, concept, sym.obj.names) {
  # Index the variable column so the DISTINCT / membership queries are fast.
  suppressWarnings(sqldf(paste0(
    "CREATE INDEX IF NOT EXISTS main.", variableName,
    " ON dataTable (", variableName, ")"
  )))
  conceptConcatenation <- paste(concept, collapse = "||'.'||")
  categories <- sqldf(paste0(
    "SELECT DISTINCT ", variableName, " FROM main.dataTable ORDER BY ",
    variableName
  ))[[1]]
  # First two columns: the "$S" type marker and the number of categories.
  result <- data.frame(rep("$S", length(sym.obj.names)), length(categories), check.names = FALSE)
  colnames(result) <- c("$S", substr(variableName, 2, nchar(variableName) - 1))
  # Process at most 64 categories per statement.
  for (i in seq(from = 1, to = length(categories), by = 64)) {
    categoryGroup <- categories[i:min(i + 63, length(categories))]
    queries <- vapply(categoryGroup, function(category) {
      paste0(
        "(SELECT SymObjNames, SymObjNames IN (SELECT DISTINCT ",
        conceptConcatenation, " FROM main.dataTable WHERE ", variableName,
        " = '", category, "') AS '", category, "' FROM main.symObjTable)"
      )
    }, character(1))
    queries <- paste(queries, collapse = " NATURAL JOIN ")
    # Drop the SymObjNames key column ([-1]) before binding the 0/1 columns.
    result <- cbind(result, sqldf(paste0("SELECT * FROM ", queries))[-1])
  }
  return(result)
}
| /R/process.set.variable.R | no_license | Frenchyy1/RSDA | R | false | false | 1,390 | r | #' process.set.variable
#' @keywords internal
process.set.variable <- function(variableName, concept, sym.obj.names) {
suppressWarnings(sqldf(paste0(
"CREATE INDEX IF NOT EXISTS main.", variableName,
" ON dataTable (", variableName, ")"
)))
conceptColumns <- paste(concept, collapse = ", ")
conceptConcatenation <- paste(concept, collapse = "||'.'||")
categories <- sqldf(paste0(
"SELECT DISTINCT ", variableName, " FROM main.dataTable ORDER BY ",
variableName
))[[1]]
result <- data.frame(rep("$S", length(sym.obj.names)), length(categories), check.names = F)
colnames(result) <- c("$S", substr(variableName, 2, nchar(variableName) - 1))
for (i in seq(from = 1, to = length(categories), by = 64)) {
if (length(categories) - i + 1 >= 64) {
categoryGroup <- categories[i:(i + 63)]
} else {
categoryGroup <- categories[i:length(categories)]
}
queries <- character()
for (category in categoryGroup) {
queries <- c(queries, paste0(
"(SELECT SymObjNames, SymObjNames IN (SELECT DISTINCT ",
conceptConcatenation, " FROM main.dataTable WHERE ", variableName,
" = '", category, "') AS '", category, "' FROM main.symObjTable)"
))
}
queries <- paste(queries, collapse = " NATURAL JOIN ")
result <- cbind(result, sqldf(paste0("SELECT * FROM ", queries))[-1])
}
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/searchconsole_objects.R
\name{RunMobileFriendlyTestRequest}
\alias{RunMobileFriendlyTestRequest}
\title{RunMobileFriendlyTestRequest Object}
\usage{
RunMobileFriendlyTestRequest(requestScreenshot = NULL, url = NULL)
}
\arguments{
\item{requestScreenshot}{Whether or not screenshot is requested}
\item{url}{URL for inspection}
}
\value{
RunMobileFriendlyTestRequest object
}
\description{
RunMobileFriendlyTestRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Mobile-friendly test request.
}
\seealso{
Other RunMobileFriendlyTestRequest functions: \code{\link{urlTestingTools.mobileFriendlyTest.run}}
}
| /googlesearchconsolev1.auto/man/RunMobileFriendlyTestRequest.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 726 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/searchconsole_objects.R
\name{RunMobileFriendlyTestRequest}
\alias{RunMobileFriendlyTestRequest}
\title{RunMobileFriendlyTestRequest Object}
\usage{
RunMobileFriendlyTestRequest(requestScreenshot = NULL, url = NULL)
}
\arguments{
\item{requestScreenshot}{Whether or not screenshot is requested}
\item{url}{URL for inspection}
}
\value{
RunMobileFriendlyTestRequest object
}
\description{
RunMobileFriendlyTestRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Mobile-friendly test request.
}
\seealso{
Other RunMobileFriendlyTestRequest functions: \code{\link{urlTestingTools.mobileFriendlyTest.run}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BasicFunctions.R
\name{getstr}
\alias{getstr}
\title{String extraction function}
\usage{
getstr(mystring, initial.character = "_", final.character = "_")
}
\arguments{
\item{mystring}{Character vector to extract from.}
\item{initial.character}{Character determining the starting point of extractions}
\item{final.character}{Character determining the end point of extractions}
}
\value{
snippet
}
\description{
Function extracting the string found between two specified characters; a minor
customization of the approach described at
http://www.r-bloggers.com/how-to-extract-a-string-between-2-characters-in-r-and-sas/
}
| /man/getstr.Rd | no_license | PointProcess/SealPupProduction-JRSSC-code | R | false | true | 669 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BasicFunctions.R
\name{getstr}
\alias{getstr}
\title{String extraction function}
\usage{
getstr(mystring, initial.character = "_", final.character = "_")
}
\arguments{
\item{mystring}{Character vector to extract from.}
\item{initial.character}{Character determining the starting point of extractions}
\item{final.character}{Character determining the end point of extractions}
}
\value{
snippet
}
\description{
Function extracting the string found between two specified characters; a minor
customization of the approach described at
http://www.r-bloggers.com/how-to-extract-a-string-between-2-characters-in-r-and-sas/
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxmaps.R
\docType{package}
\name{mxmaps}
\alias{mxmaps}
\alias{mxmaps-package}
\title{\code{mxmaps} package}
\description{
Tools for static and interactive choropleths of Mexico. Includes functions to
manipulate INEGI state and municipio codes along with options to download data
from the INEGI API.
}
\details{
See the website on
\href{https://www.diegovalle.net/mxmaps}{mxmaps}
}
| /man/mxmaps.Rd | permissive | oscaramtz/mxmaps | R | false | true | 460 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxmaps.R
\docType{package}
\name{mxmaps}
\alias{mxmaps}
\alias{mxmaps-package}
\title{\code{mxmaps} package}
\description{
Tools for static and interactive choropleths of Mexico. Includes functions to
manipulate INEGI state and municipio codes along with options to download data
from the INEGI API.
}
\details{
See the website on
\href{https://www.diegovalle.net/mxmaps}{mxmaps}
}
|
# Scratch analysis of lattice's `singer` data: put every voice part on a
# common set of quantile positions so the parts can be compared pairwise.
# 1 -- size of the largest voice part (currently unused below)
fmax <- max(table(singer$voice.part))
# 2 -- sort heights within each voice part, attach their f-values, and pad
#      the shorter groups with NA rows (complete) so all groups line up
singer2 <- singer %>%
  group_by(voice.part) %>%
  arrange(height) %>%
  mutate(gid = seq_along(height),
         f.org = (seq_along(height) - 0.5) / n()) %>%
  ungroup() %>%
  complete(voice.part, gid)
# 3 -- interpolate each group's heights at the shared f-values, drop the
#      blank from the part names, and spread to one column per voice part
library(stringr)
singer3 <- singer2 %>%
  group_by(voice.part) %>%
  mutate(f.val = (seq_along(height) - 0.5) / n(),
         hgt2 = approx(f.org, height, f.val)$y) %>%
  ungroup() %>%
  mutate(voice.part = str_replace_all(voice.part, " ", "")) %>%
  select(f.val, voice.part, hgt2) %>%
  spread(key = voice.part, value = hgt2) %>%
  na.omit %>%
  as.data.frame()
# Height plotted against itself, faceted by voice-part pairs -- presumably a
# placeholder for a QQ-style pairwise comparison.
ggplot(singer2, aes(x = height, y = height)) + geom_point() + facet_grid(voice.part ~ voice.part)
library(GGally)
# Pairwise scatterplot matrix of the aligned quantiles (columns 2:9)
ggscatmat(singer3, columns = 2:9)
| /scratch.r | no_license | rlugojr/ES218 | R | false | false | 911 | r | # 1
fmax <- max(table(singer$voice.part))
# 2
singer2 <- singer %>%
group_by(voice.part) %>%
arrange(height) %>%
mutate(gid = 1:length(height),
f.org = (1:length(height) - 0.5 ) / n()) %>%
ungroup() %>%
complete(voice.part, gid )
# 3
library(stringr)
singer3 <- singer2 %>%
group_by(voice.part) %>%
mutate( f.val = (1:length(height) - 0.5 ) / n(),
hgt2 = approx(f.org, height, f.val)$y ) %>%
ungroup() %>%
mutate(voice.part = str_replace_all(voice.part," ", "")) %>%
select(f.val, voice.part, hgt2) %>%
spread( key=voice.part, value=hgt2) %>%
na.omit %>% as.data.frame()
ggplot(singer2, aes(x=height, y=height)) + geom_point() + facet_grid(voice.part~voice.part)
library(GGally)
ggscatmat(singer3, columns=2:9)
|
r=359.43
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d73w2r/media/images/d73w2r-008/svc:tesseract/full/full/359.43/default.jpg Accept:application/hocr+xml
| /tesseract/rotate/d73w2r-008.r | permissive | ucd-library/wine-price-extraction | R | false | false | 199 | r | r=359.43
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d73w2r/media/images/d73w2r-008/svc:tesseract/full/full/359.43/default.jpg Accept:application/hocr+xml
|
# linting
# make sure working directory is set to package "./cspp"
# NOTE(review): setwd(".") is a no-op -- it sets the working directory to
# itself. If the intent is to work from the package root, pass that path
# explicitly instead.
setwd(".")
devtools::install_github("shaylafolson/cspp") | /install.R | permissive | colbrydi/cspp | R | false | false | 123 | r | # linting
# make sure working directory is set to package "./cspp"
setwd(".")
devtools::install_github("shaylafolson/cspp") |
library(Matrix.utils)
### Name: aggregate.Matrix
### Title: Compute summary statistics of a Matrix
### Aliases: aggregate.Matrix
### ** Examples
skus<-Matrix(as.matrix(data.frame(
orderNum=sample(1000,10000,TRUE),
sku=sample(1000,10000,TRUE),
amount=runif(10000))),sparse=TRUE)
#Calculate sums for each sku
a<-aggregate.Matrix(skus[,'amount'],skus[,'sku',drop=FALSE],fun='sum')
#Calculate counts for each sku
b<-aggregate.Matrix(skus[,'amount'],skus[,'sku',drop=FALSE],fun='count')
#Calculate mean for each sku
c<-aggregate.Matrix(skus[,'amount'],skus[,'sku',drop=FALSE],fun='mean')
m<-rsparsematrix(1000000,100,.001)
labels<-as.factor(sample(1e4,1e6,TRUE))
b<-aggregate.Matrix(m,labels)
## Not run:
##D orders<-data.frame(orderNum=as.factor(sample(1e6, 1e7, TRUE)),
##D sku=as.factor(sample(1e3, 1e7, TRUE)),
##D customer=as.factor(sample(1e4,1e7,TRUE)),
##D state = sample(letters, 1e7, TRUE), amount=runif(1e7))
##D system.time(d<-aggregate.Matrix(orders[,'amount',drop=FALSE],orders$orderNum))
##D system.time(e<-aggregate.Matrix(orders[,'amount',drop=FALSE],orders[,c('customer','state')]))
## End(Not run)
| /data/genthat_extracted_code/Matrix.utils/examples/aggregate.Matrix.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,140 | r | library(Matrix.utils)
### Name: aggregate.Matrix
### Title: Compute summary statistics of a Matrix
### Aliases: aggregate.Matrix
### ** Examples
skus<-Matrix(as.matrix(data.frame(
orderNum=sample(1000,10000,TRUE),
sku=sample(1000,10000,TRUE),
amount=runif(10000))),sparse=TRUE)
#Calculate sums for each sku
a<-aggregate.Matrix(skus[,'amount'],skus[,'sku',drop=FALSE],fun='sum')
#Calculate counts for each sku
b<-aggregate.Matrix(skus[,'amount'],skus[,'sku',drop=FALSE],fun='count')
#Calculate mean for each sku
c<-aggregate.Matrix(skus[,'amount'],skus[,'sku',drop=FALSE],fun='mean')
m<-rsparsematrix(1000000,100,.001)
labels<-as.factor(sample(1e4,1e6,TRUE))
b<-aggregate.Matrix(m,labels)
## Not run:
##D orders<-data.frame(orderNum=as.factor(sample(1e6, 1e7, TRUE)),
##D sku=as.factor(sample(1e3, 1e7, TRUE)),
##D customer=as.factor(sample(1e4,1e7,TRUE)),
##D state = sample(letters, 1e7, TRUE), amount=runif(1e7))
##D system.time(d<-aggregate.Matrix(orders[,'amount',drop=FALSE],orders$orderNum))
##D system.time(e<-aggregate.Matrix(orders[,'amount',drop=FALSE],orders[,c('customer','state')]))
## End(Not run)
|
\name{oc.gentwostage.bdry}
\title{Two-stage boundary operating characteristics}
\alias{oc.gentwostage.bdry}
\keyword{design}
\description{
Calculates the operating characteristics of a two-stage boundary based
on the R function oc.twostage.bdry.
}
\usage{
oc.gentwostage.bdry(pu, pa, r1, n1, r, n)
}
\arguments{
\item{pu}{unacceptable response rate}
\item{pa}{response rate that is desirable}
\item{r1}{first stage threshold to declare treatment undesirable}
\item{n1}{first stage sample size}
\item{r}{overall threshold to declare treatment undesirable}
\item{n}{total sample size}
}
\value{
oc.gentwostage.bdry returns the type I and II error rates as well as the
probability of early termination and expected sample size under pu for
a specific boundary.
}
\seealso{
\code{\link{gen2simon}}
}
\examples{
# Optimal two-stage safety design with pu (p0) = 0.33 vs. pa (p1) = 0.20
oc.gentwostage.bdry(0.33, 0.20, 8, 26, 22, 85)
# Optimal two-stage efficacy design with pu (p0) = 0.67 vs. pa (p1) = 0.80
oc.gentwostage.bdry(0.67, 0.80, 18, 26, 63, 85)
}
\references{
Kim S and Wong WK. Phase II Two-Stage Single-Arm Clinical Trials for Testing Toxicity Levels. \emph{Commun Stat Appl Methods. 2019 Mar;26(2):163-173.} \url{https://www.ncbi.nlm.nih.gov/pubmed/31106162}.
}
\keyword{design}
| /man/oc.gentwostage.bdry.Rd | no_license | cran/gen2stage | R | false | false | 1,363 | rd | \name{oc.gentwostage.bdry}
\title{Two-stage boundary operating characteristics}
\alias{oc.gentwostage.bdry}
\keyword{design}
\description{
Calculates the operating characteristics of a two-stage boundary based
on the R function oc.twostage.bdry.
}
\usage{
oc.gentwostage.bdry(pu, pa, r1, n1, r, n)
}
\arguments{
\item{pu}{unacceptable response rate}
\item{pa}{response rate that is desirable}
\item{r1}{first stage threshold to declare treatment undesirable}
\item{n1}{first stage sample size}
\item{r}{overall threshold to declare treatment undesirable}
\item{n}{total sample size}
}
\value{
oc.gentwostage.bdry returns the type I and II error rates as well as the
probability of early termination and expected sample size under pu for
a specific boundary.
}
\seealso{
\code{\link{gen2simon}}
}
\examples{
# Optimal two-stage safety design with pu (p0) = 0.33 vs. pa (p1) = 0.20
oc.gentwostage.bdry(0.33, 0.20, 8, 26, 22, 85)
# Optimal two-stage efficacy design with pu (p0) = 0.67 vs. pa (p1) = 0.80
oc.gentwostage.bdry(0.67, 0.80, 18, 26, 63, 85)
}
\references{
Kim S and Wong WK. Phase II Two-Stage Single-Arm Clinical Trials for Testing Toxicity Levels. \emph{Commun Stat Appl Methods. 2019 Mar;26(2):163-173.} \url{https://www.ncbi.nlm.nih.gov/pubmed/31106162}.
}
\keyword{design}
|
\name{mhglm.control}
\alias{mhglm.control}
\title{
Auxiliary for Controlling Moment Hierarchical GLM Fitting
}
\description{
Auxiliary function for \code{\link{mhglm}} fitting. Typically only used
internally by \code{\link{mhglm.fit}}, but may be used to construct a
control argument to either function.
}
\usage{
mhglm.control(standardize = TRUE, steps = 1, parallel = FALSE, diagcov = FALSE,
fit.method = "firthglm.fit", fit.control = list(...), ...)
}
\arguments{
\item{standardize}{
logical indicating whether predictors should be standardized before
moment-based fitting
}
\item{steps}{
number of refinement steps
}
\item{parallel}{
fit the group-specific estimates in parallel rather than sequentially
}
\item{diagcov}{
estimate the random effect covariance matrix with a diagonal approximation
}
\item{fit.method}{
method for obtaining group-specific effect estimates
}
\item{fit.control}{
control parameters for \code{fit.method}
}
\item{\dots}{
arguments to be used to form the \code{fit.control} argument if
it is not supplied directly.
}
}
\details{
Setting \code{standardize = TRUE} ensures that the procedure is equivariant,
and generally leads to better estimation performance.
The \code{steps} argument gives the number of refinement steps for the moment
based parameters. In each step, the previous fixed effect and random effect
covariance matrix estimates are used to weight the subpopulation-specific
effect estimates. In principle, higher values of \code{steps} could lead to
more accurate estimates, but in simulations, the differences are negligible.
}
\value{
A list with components named as the arguments.
}
\seealso{
\code{\link{mhglm.fit}}, the fitting procedure used by
\code{\link{mhglm}}.
\code{\link{firthglm.fit}}, the default subpopulation-specific fitting method.
}
\examples{
library(lme4) # for cbpp data
# The default fitting method uses Firth's bias-corrected estimates
(gm.firth <- mhglm(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp, family = binomial,
control=mhglm.control(fit.method="firthglm.fit")))
# Using maximum likelihood estimates is less reliable
(gm.ml <- mhglm(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp, family = binomial,
control=mhglm.control(fit.method="glm.fit")))
}
\keyword{optimize}
\keyword{models}
| /man/mhglm.control.Rd | permissive | zhangns07/r-mbest-multilevel | R | false | false | 2,441 | rd | \name{mhglm.control}
\alias{mhglm.control}
\title{
Auxiliary for Controlling Moment Hierarchical GLM Fitting
}
\description{
Auxiliary function for \code{\link{mhglm}} fitting. Typically only used
internally by \code{\link{mhglm.fit}}, but may be used to construct a
control argument to either function.
}
\usage{
mhglm.control(standardize = TRUE, steps = 1, parallel = FALSE, diagcov = FALSE,
fit.method = "firthglm.fit", fit.control = list(...), ...)
}
\arguments{
\item{standardize}{
logical indicating whether predictors should be standardized before
moment-based fitting
}
\item{steps}{
number of refinement steps
}
\item{parallel}{
fit the group-specific estimates in parallel rather than sequentially
}
\item{diagcov}{
estimate the random effect covariance matrix with a diagonal approximation
}
\item{fit.method}{
method for obtaining group-specific effect estimates
}
\item{fit.control}{
control parameters for \code{fit.method}
}
\item{\dots}{
arguments to be used to form the \code{fit.control} argument if
it is not supplied directly.
}
}
\details{
Setting \code{standardize = TRUE} ensures that the procedure is equivariant,
and generally leads to better estimation performance.
The \code{steps} argument gives the number of refinement steps for the moment
based parameters. In each step, the previous fixed effect and random effect
covariance matrix estimates are used to weight the subpopulation-specific
effect estimates. In principle, higher values of \code{steps} could lead to
more accurate estimates, but in simulations, the differences are negligible.
}
\value{
A list with components named as the arguments.
}
\seealso{
\code{\link{mhglm.fit}}, the fitting procedure used by
\code{\link{mhglm}}.
\code{\link{firthglm.fit}}, the default subpopulation-specific fitting method.
}
\examples{
library(lme4) # for cbpp data
# The default fitting method uses Firth's bias-corrected estimates
(gm.firth <- mhglm(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp, family = binomial,
control=mhglm.control(fit.method="firthglm.fit")))
# Using maximum likelihood estimates is less reliable
(gm.ml <- mhglm(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp, family = binomial,
control=mhglm.control(fit.method="glm.fit")))
}
\keyword{optimize}
\keyword{models}
|
# Lookup tables for bit packing, indexed by 'nbits' (1..15).
# base[[n]]   : integer with the lowest n bits set (2^n - 1); masks one value.
# shifts[[n]] : bit offset of each n-bit slot inside the 31 usable bits of a
#               signed 32-bit integer (the sign bit is never used).
base <- lapply(1:15, function(nbits) {
  # 2^0 + 2^1 + ... + 2^(nbits-1) == 2^nbits - 1
  as.integer(2 ^ nbits - 1)
})
shifts <- lapply(1:15, function(nbits) {
  nbits * (seq_len(31L %/% nbits) - 1L)
})
create_masks <- function(n) { # nocov start
  n <- as.integer(n)
  low_bits <- base[[n]]
  # One mask per slot: the low-bit mask moved up to that slot's offset.
  vapply(
    shifts[[n]],
    function(offset) {
      bitwShiftL(low_bits, offset)
    },
    integer(1)
  )
} # nocov end
masks <- lapply(1:15, create_masks)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Pack small integer values into a single 32-bit integer
#'
#' Each value is masked to its lowest 'nbits' bits and placed at its own
#' bit offset within one integer. Unused slots are left as zero.
#'
#' @param small_ints vector of small integer values; at most
#'   floor(31 / nbits) of them
#' @param nbits number of bits reserved for each value
#'
#' @return single integer value holding all packed inputs
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pack_into_single_int <- function(small_ints, nbits) {
  maxlen <- 31L %/% nbits
  stopifnot(length(small_ints) <= maxlen)
  # Use only as many shift offsets as there are inputs. Previously the full
  # shift vector was always used, so a short final chunk was silently
  # recycled and duplicated into the unused high-bit slots; those slots now
  # stay 0, making the packed representation canonical.
  offsets <- shifts[[nbits]][seq_along(small_ints)]
  shifted_small_ints <-
    bitwShiftL(
      bitwAnd(small_ints, base[[nbits]]), # keep each value within 'nbits' bits
      offsets
    )
  sum(shifted_small_ints)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Unpack a single integer into a vector of small integers of 'nbits' each
#'
#' @param int single integer value produced by pack_into_single_int()
#' @inheritParams pack_into_single_int
#'
#' @return vector of small integers, one per 'nbits'-wide slot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unpack_from_single_int <- function(int, nbits) {
  # Isolate every 'nbits'-wide slot, then move each slot down to bit 0.
  slot_bits <- bitwAnd(int, masks[[nbits]])
  bitwShiftR(slot_bits, shifts[[nbits]])
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Pack an unbounded vector of small integers into 'nbits'-wide slots inside
#' a vector of 32-bit integers
#'
#' @inheritParams pack_into_single_int
#'
#' @return vector of integer values with attribute 'N' recording
#'   length(small_ints)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pack_ints <- function(small_ints, nbits) {
  per_int <- 31L %/% nbits  # values that fit into one 31-bit payload
  packed <- vapply(
    chunk(small_ints, per_int),
    pack_into_single_int,
    integer(1),
    nbits = nbits
  )
  # Record the true element count so unpack_ints() can drop the padding
  # slots of a partially-filled final integer.
  attr(packed, 'N') <- length(small_ints)
  packed
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Unpack a 32-bit integer values into a vector of small integers of 'nbits' each
#'
#' @param ints integer vector
#' @inheritParams unpack_from_single_int
#'
#' @return vector of small integers
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unpack_ints <- function(ints, nbits) {
res <- unlist(lapply(ints, unpack_from_single_int, nbits = nbits))
N <- attr(ints, 'N', exact = TRUE)
if (!is.null(N)) {
res <- res[seq.int(N)]
}
res
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Split a vector in chunks of size 'n'
#'
#' @param x vector
#' @param n chunk size
#'
#' @return list of vectors
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
chunk <- function(x, n) {
N <- length(x)
S <- seq.int(from=1L, to=N, by=n)
mapply(
function(a, b) {x[a:b]},
S,
pmin(S + (n - 1L), N),
SIMPLIFY = FALSE
)
}
| /R/pack.R | permissive | coolbutuseless/smallfactor | R | false | false | 3,341 | r |
# Bit-twiddling lookup tables for packing small integers into one 31-bit
# signed integer. base[[nbits]] has the lowest 'nbits' bits set (base[[3]] = 7).
base <- lapply(1:15, function(nbits) {
  bitwShiftL(1L, nbits) - 1L
})
# shifts[[nbits]]: bit offset of every nbits-wide slot that fits in 31 bits.
shifts <- lapply(1:15, function(nbits) {
  nbits * (seq.int(31 %/% nbits) - 1L)
})
# Build the per-slot selection masks for an nbits-wide packing. # nocov start
create_masks <- function(n) {
  n <- as.integer(n)
  vapply(
    shifts[[n]],
    function(offset) bitwShiftL(base[[n]], offset),
    integer(1)
  )
} # nocov end
# masks[[nbits]][k] selects the k-th nbits-wide slot of a packed integer.
masks <- lapply(1:15, create_masks)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Pack integer values into a single integer
#'
#' @param small_ints vector of small integer values. These are recycled if necessary
#' @param nbits number of bits we want to pack these into
#'
#' @return single integer value
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pack_into_single_int <- function(small_ints, nbits) {
  # Number of nbits-wide slots available inside a 31-bit signed integer.
  slots <- floor(31/nbits)
  stopifnot(length(small_ints) <= slots)
  # Clamp every value to 'nbits' bits, then shift each one into its slot.
  # Values are recycled across the slots when fewer than 'slots' are
  # supplied (documented behaviour of this packer).
  in_range <- bitwAnd(small_ints, base[[nbits]])
  sum(bitwShiftL(in_range, shifts[[nbits]]))
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Unpack an integer value into a vector of small integers of 'nbits' each
#'
#' @param int single integer value
#' @inheritParams pack_into_single_int
#'
#' @return vector of small integers
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unpack_from_single_int <- function(int, nbits) {
  # Select every slot with its mask, then shift it back down to the low bits.
  bitwShiftR(bitwAnd(int, masks[[nbits]]), shifts[[nbits]])
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Pack an unbounded vector of small integers into 'nbit' values inside a a vector of 32bit integers
#'
#' @inheritParams pack_into_single_int
#'
#' @return vector of integer values
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pack_ints <- function(small_ints, nbits) {
  per_int <- 31L %/% nbits
  packed <- vapply(chunk(small_ints, per_int), pack_into_single_int,
                   integer(1), nbits = nbits)
  # Record the original length so unpack_ints() can drop unused slots.
  attr(packed, 'N') <- length(small_ints)
  packed
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Unpack a 32-bit integer values into a vector of small integers of 'nbits' each
#'
#' @param ints integer vector
#' @inheritParams unpack_from_single_int
#'
#' @return vector of small integers
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unpack_ints <- function(ints, nbits) {
  values <- unlist(lapply(ints, unpack_from_single_int, nbits = nbits))
  # Trim trailing slots that were never occupied, when the packer recorded
  # the original element count in attribute 'N'.
  n_orig <- attr(ints, 'N', exact = TRUE)
  if (is.null(n_orig)) {
    return(values)
  }
  values[seq.int(n_orig)]
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Split a vector in chunks of size 'n'
#'
#' @param x vector
#' @param n chunk size
#'
#' @return list of vectors
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
chunk <- function(x, n) {
  # Split vector 'x' into consecutive chunks of length 'n'; the final chunk
  # may be shorter. Returns a list of vectors.
  N <- length(x)
  # Guard the empty case explicitly: seq.int(from = 1, to = 0, by = n)
  # would error with "wrong sign in 'by' argument".
  if (N == 0L) {
    return(list())
  }
  starts <- seq.int(from = 1L, to = N, by = n)
  ends <- pmin(starts + (n - 1L), N)
  Map(function(a, b) x[a:b], starts, ends)
}
|
# reads a QIIME otu/metadata/taxon/distance table.
# Support legacy formats, where
# the header may or may not start with '#',
# and comment lines can be anywhere in the file.
# return value is a matrix unless as.data.frame is TRUE
"read.qiime.table" <- function(filepath, as.data.frame=FALSE){
    # Locate the header row: it may or may not start with '#', and comment
    # lines may precede it (legacy QIIME formats).
    header.index <- get.header.index(filepath)
    # Read only the header row; comment='' keeps a leading '#' intact.
    # (Fixed: a file connection was previously opened here but never used --
    # scan() reads straight from the file path.)
    header <- scan(filepath, what='character', sep='\t', comment='',
        skip=header.index-1, quote='"', nlines=1, quiet=TRUE)
    # Read the body, skipping everything up to and including the header;
    # any remaining '#' lines are treated as comments.
    datatable <- read.table(filepath, sep='\t', skip=header.index, comment='#',
        quote='"', head=F, row.names=1, check=FALSE, strip.white=TRUE)
    # The first header entry labels the row-name column, so drop it.
    colnames(datatable) <- header[-1]
    if(!as.data.frame) datatable <- as.matrix(datatable)
    return(datatable)
}
"load.qiime.mapping.file" <- function(filepath){
    # A QIIME mapping file is an ordinary QIIME table kept as a data.frame.
    read.qiime.table(filepath, as.data.frame=TRUE)
}
"load.qiime.otu.table" <- function(filepath,include.lineages=FALSE){
    # Load an OTU table; rows in the file are OTUs, columns are samples.
    otus <- read.qiime.table(filepath, as.data.frame=TRUE)
    # Split off the "Consensus Lineage"/taxonomy column if present.
    if(otu.table.has.metadata(colnames(otus))){
        C <- ncol(otus)
        lineages <- as.character(otus[,C])
        # drop=FALSE: keep a data.frame even when a single sample column remains
        otus <- otus[,-C,drop=FALSE]
    } else {
        lineages <- NULL
    }
    # Transpose so rows are samples and columns are OTUs.
    otus <- as.matrix(t(otus))
    if(include.lineages){
        return(list(otus=otus,lineages=lineages))
    }
    # Fixed: was return(otus=otus); return() takes a single unnamed value.
    return(otus)
}
# TRUE if last column is "Consensus Lineage" or "OTU Metadata"
# TRUE when the final column header flags OTU metadata
# ("Consensus Lineage", "OTU Metadata" or "taxonomy", case-insensitively).
"otu.table.has.metadata" <- function(headers){
    last.header <- headers[length(headers)]
    grepl('consensus[ ]lineage|otu[ ]*metadata|taxonomy',
        last.header, ignore.case=TRUE)
}
# returns the index of the header line
# note: lines after the header may be comments with '#'
# read.table should therefore be called with (skip=header.index, comment='#')
"get.header.index" <- function(filepath){
    # Scans the top of the file for the header row. Collects, for every line
    # up to and including the first non-'#' line, the number of non-empty
    # tab-delimited fields; the comment line whose field count matches the
    # first data line is taken to be the header.
    ncolumns.per.line <- NULL
    # read lines until the first line without a '#'
    # for each line, obtain the number of tab-delimited columns
    linecount <- 0
    start.character <- '#'
    while(start.character == '#'){
        linecount <- linecount + 1
        # NOTE(review): the file is reopened and rescanned from the top on
        # every iteration -- quadratic in the number of leading comment
        # lines, but header blocks are short so this is acceptable.
        f <- file(filepath,'r') # open file in read mode
        line <- scan(f,what='character',skip=linecount-1,nlines=1, sep='\t',
            quote='"', quiet=TRUE)
        close(f)
        # ncolumns is the number of entries in this line
        # not including trailing empties
        ncolumns <- max(which(sapply(line,nchar) > 0))
        ncolumns.per.line <- c(ncolumns.per.line, ncolumns)
        start.character <- substring(line[1],1,1)
    }
    # first non-comment line gives the number of columns
    C <- ncolumns.per.line[linecount]
    if(linecount == 1){
        # if there are no comment lines, then the first line is the header
        header.index <- 1
    } else {
        if(any(ncolumns.per.line[-linecount] == C)){
            # if there is a comment line with the correct number of columns,
            # it is the header
            # (max() picks the last such comment line when several qualify)
            header.index <- max(which(ncolumns.per.line[-linecount] == C))
        } else {
            # if there is no comment line with the correct number of columns,
            # the first non-comment line is the header
            header.index <- linecount
        }
    }
    return(header.index)
}
"load.qiime.taxon.table" <- function(filepath){
    # Read taxa-by-sample counts, then transpose to samples-by-taxa.
    raw <- read.table(filepath, sep='\t', header=TRUE, row.names=1,
        check.names=FALSE, quote='"')
    as.matrix(t(raw))
}
"load.qiime.distance.matrix" <- function(filepath){
    # Square sample-by-sample matrix; row and column names are sample IDs.
    as.matrix(read.table(filepath, sep='\t', header=TRUE, row.names=1,
        check.names=FALSE, quote='"'))
}
# ensure map, data table, etc., contain the same samples in the same order
# ensure map, data table, etc., contain the same samples in the same order
"remove.nonoverlapping.samples" <- function(map=NULL,otus=NULL,taxa=NULL,distmat=NULL){
    objects <- list(map=map,otus=otus,taxa=taxa,distmat=distmat)
    # Intersect row names across all non-NULL tables: the shared sample IDs.
    IDs <- NULL
    for(obj in objects){
        if(!is.null(obj)) {
            if(is.null(IDs)){
                IDs <- rownames(obj)
            } else {
                IDs <- intersect(rownames(obj), IDs)
            }
        }
    }
    # Subset every supplied table to the shared samples, in the same order.
    # (seq_along replaces 1:length(objects); drop=FALSE keeps data frames.)
    for(i in seq_along(objects)){
        if(!is.null(objects[[i]])) {
            objects[[i]] <- objects[[i]][IDs, , drop=FALSE]
            # mapping file only: drop factor levels orphaned by the
            # removed samples
            if(i == 1) objects[[i]] <- droplevels(objects[[i]])
            # distance matrix is square: subset its columns as well
            if(i == 4) objects[[i]] <- objects[[i]][, IDs]
        }
    }
    return(objects)
}
| /lib/util.load.r | no_license | pvangay/mwas | R | false | false | 4,840 | r | # reads a QIIME otu/metadata/taxon/distance table.
# Support legacy formats, where
# the header may or may not start with '#',
# and comment lines can be anywhere in the file.
# return value is a matrix unless as.data.frame is TRUE
"read.qiime.table" <- function(filepath, as.data.frame=FALSE){
header.index <- get.header.index(filepath)
# read the header
f <- file(filepath,'r')
header <- scan(filepath, what='character', sep='\t',comment='',skip=header.index-1,quote='"',
nlines=1,quiet=TRUE)
close(f)
# read the rest of the table
datatable <- read.table(filepath,sep='\t',skip=header.index, comment='#',quote='"',
head=F,row.names=1,check=FALSE,strip.white=TRUE)
# set column names using header
colnames(datatable) <- header[-1]
if(!as.data.frame) datatable <- as.matrix(datatable)
return(datatable)
}
"load.qiime.mapping.file" <- function(filepath){
return(read.qiime.table(filepath, as.data.frame=TRUE))
}
"load.qiime.otu.table" <- function(filepath,include.lineages=FALSE){
otus <- read.qiime.table(filepath, as.data.frame=TRUE)
# drop "Consensus Lineage" column if present
if(otu.table.has.metadata(colnames(otus))){
C <- ncol(otus)
lineages <- as.character(otus[,C])
otus <- otus[,-C]
} else {
lineages <- NULL
}
otus <- as.matrix(t(otus))
if(include.lineages){
return(list(otus=otus,lineages=lineages))
} else {
return(otus=otus)
}
}
# TRUE if last column is "Consensus Lineage" or "OTU Metadata"
"otu.table.has.metadata" <- function(headers){
C <- length(headers)
has.metadata <- grepl('consensus[ ]lineage|otu[ ]*metadata|taxonomy',
headers[C], ignore.case=TRUE)
return(has.metadata)
}
# returns the index of the header line
# note: lines after the header may be comments with '#'
# read.table should therefore be called with (skip=header.index, comment='#')
"get.header.index" <- function(filepath){
ncolumns.per.line <- NULL
# read lines until the first line without a '#'
# for each line, obtain the number of tab-delimited columns
linecount <- 0
start.character <- '#'
while(start.character == '#'){
linecount <- linecount + 1
f <- file(filepath,'r') # open file in read mode
line <- scan(f,what='character',skip=linecount-1,nlines=1, sep='\t',
quote='"', quiet=TRUE)
close(f)
# ncolumns is the number of entries in this line
# not including trailing empties
ncolumns <- max(which(sapply(line,nchar) > 0))
ncolumns.per.line <- c(ncolumns.per.line, ncolumns)
start.character <- substring(line[1],1,1)
}
# first non-comment line gives the number of columns
C <- ncolumns.per.line[linecount]
if(linecount == 1){
# if there are no comment lines, then the first line is the header
header.index <- 1
} else {
if(any(ncolumns.per.line[-linecount] == C)){
# if there is a comment line with the correct number of columns,
# it is the header
header.index <- max(which(ncolumns.per.line[-linecount] == C))
} else {
# if there is no comment line with the correct number of columns,
# the first non-comment line is the header
header.index <- linecount
}
}
return(header.index)
}
"load.qiime.taxon.table" <- function(filepath){
taxa <- as.matrix(t(read.table(filepath,sep='\t',head=T,row.names=1,check=FALSE,quote='"')))
return(taxa)
}
"load.qiime.distance.matrix" <- function(filepath){
d <- as.matrix(read.table(filepath,sep='\t',head=T,row.names=1,check=FALSE,quote='"'))
return(d)
}
# ensure map, data table, etc., contain the same samples in the same order
"remove.nonoverlapping.samples" <- function(map=NULL,otus=NULL,taxa=NULL,distmat=NULL){
IDs <- NULL
objects <- list(map=map,otus=otus,taxa=taxa,distmat=distmat)
# find overlapping samples in all tables
for(obj in objects){
if(!is.null(obj)) {
if(is.null(IDs)){
IDs <- rownames(obj)
} else {
IDs <- intersect(rownames(obj), IDs)
}
}
}
# drop non-overlapping samples
for(i in 1:length(objects)){
if(!is.null(objects[[i]])) {
objects[[i]] <- objects[[i]][IDs,,drop=F]
# for mapping file, drop any empty levels from factors that might
# have occurred due to dropped samples
if(i == 1) objects[[i]] <- droplevels(objects[[i]])
# for distance matrix, get subset of columns too
if(i == 4) objects[[i]] <- objects[[i]][,IDs]
}
}
return(objects)
}
|
\name{SelectResult}
\docType{class}
\alias{SelectResult}
\alias{SelectResult-class}
\alias{SelectResult,character,character,list,list-method}
\alias{show,SelectResult-method}
\title{Container for Storing Feature Selection Results}
\description{
Contains a list of ranked indices or names of features, from most discriminative to least discriminative,
and a list of indices of features selected for use in classification. The names or indices will be in a data frame
if the input dataset is a \code{\link{MultiAssayExperiment}}, with the first column containing the name of the
data table the feature is from and the second column the index or name of the feature. Each vector or data frame element
in the list corresponds to a particular iteration of classifier training. Nested lists will be present if
the permutation and folding cross-validation scheme was used. This class is not intended to be created by the user,
but could be used in another software package.
}
\section{Constructor}{
\describe{
\item{}{
\code{SelectResult(datasetName, selectionName, rankedFeatures, chosenFeatures)}}
}
\describe{
\item{\code{datasetName}}{A name associated with the dataset used.}
\item{\code{selectionName}}{A name associated with the classification.}
\item{\code{rankedFeatures}}{Indices or names of all features, from most to least
discriminative.}
\item{\code{chosenFeatures}}{Indices or names of features selected at each fold.}
}
}
\section{Summary}{
A method which summarises the results is available.
\code{result} is a \code{SelectResult} object.
\describe{
\item{}{
\code{show(result)}{ Prints a short summary of what \code{result} contains.}
}}
}
\author{Dario Strbenac}
\examples{
SelectResult("Melanoma", "Moderated t-test", list(1:50), list(1:10))
}
| /man/SelectResult-class.Rd | no_license | garthtarr/ClassifyR | R | false | false | 1,918 | rd | \name{SelectResult}
\docType{class}
\alias{SelectResult}
\alias{SelectResult-class}
\alias{SelectResult,character,character,list,list-method}
\alias{show,SelectResult-method}
\title{Container for Storing Feature Selection Results}
\description{
Contains a list of ranked indices or names of features, from most discriminative to least discriminative,
and a list of indices of features selected for use in classification. The names or indices will be in a data frame
if the input dataset is a \code{\link{MultiAssayExperiment}}, with the first column containing the name of the
data table the feature is from and the second column the index or name of the feature. Each vector or data frame element
in the list corresponds to a particular iteration of classifier training. Nested lists will be present if
the permutation and folding cross-validation scheme was used. This class is not intended to be created by the user,
but could be used in another software package.
}
\section{Constructor}{
\describe{
\item{}{
\code{SelectResult(datasetName, selectionName, rankedFeatures, chosenFeatures)}}
}
\describe{
\item{\code{datasetName}}{A name associated with the dataset used.}
\item{\code{selectionName}}{A name associated with the classification.}
\item{\code{rankedFeatures}}{Indices or names of all features, from most to least
discriminative.}
\item{\code{chosenFeatures}}{Indices or names of features selected at each fold.}
}
}
\section{Summary}{
A method which summarises the results is available.
\code{result} is a \code{SelectResult} object.
\describe{
\item{}{
\code{show(result)}{ Prints a short summary of what \code{result} contains.}
}}
}
\author{Dario Strbenac}
\examples{
SelectResult("Melanoma", "Moderated t-test", list(1:50), list(1:10))
}
|
# ---
# title: "MoRph functions"
# author: "Duncan Golicher"
# date: "01/12/2016"
# output: html_document
# ---
#
#
# # PostGIS database functions.
#
# All functions that work on the database begin with the suffix Pg. The naming convention thereafter is to use two capitalised. words for each function.
#
# ## Creating a new data base
# Save this with the name .pgpass to the home directory.
#
# New databases can be added directly from the command line, but a simple R function can be used.
# The function drops the database if it is not in use and then creates it. So use with care if the database already exists! It only needs to be called once!
#
# ```{r}
PgMakeDb<-function(dbname="brant"){
  # Recreate 'dbname' from scratch: drop any existing database of that name
  # (destructive -- existing data is lost), then create it empty.
  system(paste0("dropdb -h postgis -U docker ", dbname))
  system(paste0("createdb -h postgis -U docker ", dbname))
}
# ```
#
# ### Allowing connections to the database using RODBC
#
# Every database being used needs an entry in the odbc.ini file that is placed in /etc/odbc.ini.
# As this is only directly editable with root privileges the best strategy is to edit an odbc.ini file in the home directory and then copy it into place by opening a shell.
#
# #### odbc.ini example
#
# **Make sure that the connection name (in this case brant) matches the database name, as this convention will be used in the subsequent functions.**
#
# ```{bash, eval=FALSE}
# [brant]
# Driver = /usr/lib/x86_64-linux-gnu/odbc/psqlodbcw.so
# Database = brant
# Servername = postgis
# Username = docker
# Password = docker
# Protocol = 8.2.5
# ReadOnly = 0
# ```
#
# Then open a shell and run
#
# ```{bash,eval=FALSE}
# sudo cp odbc.ini /etc/odbc.ini
# ```
#
# A new entry that follows this format should be added to the odbc.ini file ever time a new data base is created. It is envisaged that a new database would be used for each model, with all tables being placed in the public schema. In some cases it may be useful to use more schemas within the database for separate sites, but this may not be necessary. The concept is to allow storage and backup of all the relevant information by dumping the database to a single file.
#
# ### Adding extensions to the database
#
# ```{r}
PgInit<-function(dbname="brant"){
  require(RODBC)
  # The ODBC connection name must match the database name.
  channel<-odbcConnect(dbname)
  # Enable spatial types and the PL/R procedural language.
  odbcQuery(channel,"create extension postgis")
  odbcQuery(channel,"create extension plr")
}
# ```
#
#
# ### Adding PLR statistical functions to the database
#
# Many useful statistical functions from R can be added as functions to the database. Again this can be done directly through R by using the open connection. The general format of all the functions involves coercing the arguments to numeric(to be on the safe side if characters are passed) and calculating stats after removing NAs. A float is returned. It is easy to add more to this function if required.
#
# ```{r}
PgPlr<-function(dbname="brant"){
  # Install PL/R statistical helper functions into the database so plain SQL
  # can call R: median, q10/q25/q75/q90 (quantiles), minimum/maximum, mean,
  # sd, se, length, and PSuitable. Every numeric wrapper coerces its float[]
  # argument to numeric and strips NAs before computing, so columns
  # containing NULLs are handled safely.
  require(RODBC)
  con<-odbcConnect(dbname)
  # PSuitable(x, q, depth, ht, tide): given percentile heights x at the
  # percentile positions q, returns the percentile span of the cell that
  # lies between depth+tide and ht (0 when nothing falls in range).
query<-"CREATE OR REPLACE FUNCTION median (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  median(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION q10 (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  quantile(x,0.1,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION q90 (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  quantile(x,0.9,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION q75 (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  quantile(x,0.75,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION q25 (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  quantile(x,0.25,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION minimum (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  min(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION maximum (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  max(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION mean (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  mean(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION sd (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  sd(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION se (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  sd(x,na.rm=TRUE)/sqrt(length(x))'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION length (float[]) RETURNS float AS '
  x<-arg1
  x<-as.numeric(as.character(x))
  x<-na.omit(x)
  length(x)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION PSuitable (float[],float[],float,float,float) RETURNS float AS '
  x<-arg1
  q<-arg2
  depth<-arg3
  ht<-arg4
  tide<-arg5
  depth<-depth+tide
  x2<-q[x>=depth&x<=ht]
  x2<-max(x2)-min(x2)
  if(is.na(x2))x2<-0
  if(x2==-Inf)x2<-0
  x2'
LANGUAGE 'plr' STRICT;
"
odbcQuery(con,query)
}
# ```
#
#
# ## Loading raster layers
#
# Raster layers uses a few more arguments. The layers are loaded in database (as referenced rasters can't be transfered). The tiles are usually square, but the x and y width can be set. Arguments are thus
#
# 1. flnm: name of file
# 2. x: Number of pixels in tile x dimension
# 3. y: Number of pixels in tile y dimension
# 4. tabnm: Name of table to hold data
# 5. db: Database name
# 6. srid: If this is 0 the srid will be taken from the file if it is included. It is usually safer to set this if known.
# 7. path: Path to file with trailing /
#
#
# ```{r}
PgLoadRaster<-function(flnm="cold_bay_3857_clip.tiff",x=10,y=10,tabnm="dem",db="brant",srid=3857,path="/home/rstudio/shiny_morph/"){
  flnm<-paste(path,flnm,sep="")
  # raster2pgsql flags: -d drop/recreate the table, -I spatial index,
  # -M vacuum analyze, -F add a filename column, -t tile size in pixels.
  # Fixed: the original paste() dropped the space between srid and -I,
  # producing e.g. "raster2pgsql -s 3857-I ..." which is not a valid command.
  command <- sprintf("raster2pgsql -s %s -I -d -M %s -F -t %sx%s %s|psql -h postgis -U docker -d %s",
                     srid, flnm, x, y, tabnm, db)
  system(command)
}
#
# ## Loading vector layers
#
# Vector layers can be loaded directly from the canvas of QQGIS after logging into the database through the dbmanager interface. However if they are stored on the server this command will load them into the data base from a shapefile. There is no need to specify the .shp extension for the name of the file.
#
# Arguments are:
#
# 1. flnm: name of file
# 2. tabnm: Name of table to hold data
# 3. db: Database name
# 4. srid: If this is 0 the srid will be taken from the file if it is included. It is usually safer to set this if known.
# 5. path: Path to file with trailing /
#
#
# ```{r}
PgLoadVector<-function(flnm="tide_regime",tabnm="tide_regime",db="brant",srid=3857,path="/home/rstudio/shiny_morph/"){
  flnm<-paste(path,flnm,sep="")
  # shp2pgsql flags: -d drop/recreate the table, -I build a spatial index.
  command <- sprintf("shp2pgsql -s %s -d -I %s %s |psql -h postgis -U docker -d %s",srid,flnm,tabnm,db)
  # (Removed a stray bare 'command' expression: inside a function it does
  # not auto-print and had no effect.)
  system(command)
}
#
#
#
# ## Setting up graticule from dem
#
# In the MoRph application patches will be either square or rectangular polygons that are derived from vectorising the raster dem that is first added to the data base. Statistics are calulated from the pixel values of the dem and held as attributes. The function can drop graticules where the minimum value is below a certain level and those with a maximum above a certain level, as this may be useful along coastlines.
#
#
# ```{r}
PgMakeGrat<-function(dem="dem",minht=-10,maxht=10,db="brant")
  {
  # Vectorise raster table 'dem' into a graticule of patch polygons ('grat'):
  # one envelope rectangle per raster tile, carrying per-tile height
  # statistics (min/q10/q25/median/mean/q75/q90/max) computed with the PL/R
  # functions installed by PgPlr(). Tiles whose minimum is below 'minht' or
  # whose maximum exceeds 'maxht' are discarded (useful along coastlines);
  # the 'min <1000000000000' clause presumably filters nodata tiles --
  # TODO confirm against the raster's nodata value.
  require(RODBC)
  con<-odbcConnect(db)
query<-paste("
drop table if exists grat;
create table grat as
select s.* from
(select rid, st_envelope(rast) geom,
minimum((st_dumpvalues(rast)).valarray) min,
q10((st_dumpvalues(rast)).valarray) q10,
q25((st_dumpvalues(rast)).valarray) q25,
median((st_dumpvalues(rast)).valarray) median,
mean((st_dumpvalues(rast)).valarray) mean,
q75((st_dumpvalues(rast)).valarray) q75,
q90((st_dumpvalues(rast)).valarray) q90,
maximum((st_dumpvalues(rast)).valarray) max
from ",dem,") s
where min>",minht," and max < ",maxht," and min <1000000000000;
CREATE INDEX grat_gix ON grat USING GIST (geom);",sep="")
odbcQuery(con,query)
  # Placeholder column, filled in later by PgPSuitable().
query<-"
ALTER TABLE grat ADD COLUMN psuitable numeric(3);
"
odbcQuery(con,query)
}
## Calculate the proportion of each graticule within a suitable height range,
PgPSuitable<-function(db="brant",depth=-2,height=5)
  {
  # For every graticule patch, store in 'psuitable' the percentile span of
  # the patch that lies between 'depth' and 'height' (tide offset fixed at
  # 0), using the PL/R PSuitable() function installed by PgPlr().
  require(RODBC)
  con<-odbcConnect(db)
  query<-sprintf("update grat set psuitable = PSuitable(array[min,q10,q25,median,q75,q90,max],array[0,10,25,50,75,90,100],%s,%s,0);",depth,height)
  odbcQuery(con,query)
  }
#
# ## Getting vector layer from the data base
#
# ```{r}
PgGetQuery <- function(query="select * from grat",db="brant") {
  require(RODBC)
  require(rgdal)
  con<-odbcConnect(db)
  # Expose the query as a temporary view so GDAL/OGR can read it as a layer.
  query <- paste("create view temp_view as ", query, sep = "")
  odbcQuery(con, query)
  # Drop the view even if readOGR() fails, so an aborted call does not leave
  # temp_view behind and break the next PgGetQuery().
  on.exit(odbcQuery(con, "drop view temp_view"), add = TRUE)
  dsn<-paste("PG:dbname='",db,"' host='postgis' port=5432 user= 'docker'",sep="")
  readOGR(dsn, "temp_view")
}
#
# ## Adding mean and median from resource layer
#
# This query works by overlaying the graticule onto any raster layer that has been first uploaded into the data base using PgLoadRaster. A temporary table is formed, then renamed grat and re-indexed. This is a more robust method than adding columns to grat.
#
# ```{r}
PgAddResource<-function(db="brant",resource="dem")
  {
  # Overlay the graticule on raster table 'resource' and append per-patch
  # median_<resource> and mean_<resource> columns. The result is built as a
  # new table 'tmp' that then replaces 'grat' (more robust than ALTER-ing
  # grat in place) and is re-indexed.
  require(RODBC)
  con<-odbcConnect(db)
  query<-sprintf("create table tmp as
select g.*,
med median_%s,
mn mean_%s
from
grat g,
(select
t2.rid,
median((st_dumpvalues(st_union(st_clip(rast,geom)))).valarray) med,
mean((st_dumpvalues(st_union(st_clip(rast,geom)))).valarray) mn
from %s t,
(select * from grat) t2
where st_intersects(rast,geom)
group by t2.rid) s
where s.rid=g.rid;
drop table grat;
ALTER TABLE tmp RENAME TO grat;
CREATE INDEX grat_gix ON grat USING GIST (geom);",resource,resource,resource)
odbcQuery(con,query)
}
#
# ## Extracting attribute to grat from vector polygon layer
#
# # The idea behind this function is quite specific to MoRph, but can be adapted. Assuming that there is a polygon layer loaded using PgLoadVector. The example is a layer with codes representing the tide regime in each bay. Some of the graticule patches may possibly overlap the boundary between tide regimes but only one of the values is needed. The function simply chooses (arbitrarily) the minimum value. This is not going to be a problem in most cases as the boundary is also fairly arbitrary.
#
# ```{r}
PgAddVector<-function(db="brant",l1="grat",l2="tide_regime",col="tide"){
  # Transfer attribute 'col' from polygon layer 'l2' onto graticule table
  # 'l1'. A patch straddling a polygon boundary intersects several polygons;
  # min(col) arbitrarily keeps a single value per patch. As in
  # PgAddResource(), the result is built as 'tmp' and swapped in for 'l1'.
  require(RODBC)
  con<-odbcConnect(db)
  query<-sprintf("
drop table if exists tmp;
create table tmp as
select gg.*,s.%s from
%s gg,
(select g.rid,min(%s) %s from
grat g,
%s t
where st_intersects(t.geom,g.geom)
group by rid,g.geom) s
where s.rid=gg.rid;
drop table %s;
ALTER TABLE tmp RENAME TO %s;
CREATE INDEX %s_gix ON %s USING GIST (geom);",col,l1,col,col,l2,l1,l1,l1,l1)
odbcQuery(con,query)
}
| /scripts/MoRPh_Functions.R | no_license | dgolicher/morph | R | false | false | 11,008 | r | # ---
# title: "MoRph functions"
# author: "Duncan Golicher"
# date: "01/12/2016"
# output: html_document
# ---
#
#
# # PostGIS database functions.
#
# All functions that work on the database begin with the suffix Pg. The naming convention thereafter is to use two capitalised. words for each function.
#
# ## Creating a new data base
# Save this with the name .pgpass to the home directory.
#
# New databases can be added directly from the command line, but a simple R function can be used.
# The function drops the database if it is not in use and then creates it. So use with care if the database already exists! It only needs to be called once!
#
# ```{r}
PgMakeDb<-function(dbname="brant"){
com<-paste("dropdb -h postgis -U docker ",dbname,sep="")
system(com)
com<-paste("createdb -h postgis -U docker ",dbname,sep="")
system(com)
}
# ```
#
# ### Allowing connections to the database using RODBC
#
# Every database being used needs an entry in the odbc.ini file that is placed in /etc/odbc.ini.
# As this is only directly editable with root priviledges the best strategy is to edit an odbc.ini file in the home directory and then copy it into place by opening a shell.
#
# #### odbc.ini example
#
# **Make sure that the connection name (in this case brant) matches the database name, as this convention will be used in the subsequent functions.**
#
# ```{bash, eval=FALSE}
# [brant]
# Driver = /usr/lib/x86_64-linux-gnu/odbc/psqlodbcw.so
# Database = brant
# Servername = postgis
# Username = docker
# Password = docker
# Protocol = 8.2.5
# ReadOnly = 0
# ```
#
# Then open a shell and run
#
# ```{bash,eval=FALSE}
# sudo cp odbc.ini /etc/odbc.ini
# ```
#
# A new entry that follows this format should be added to the odbc.ini file ever time a new data base is created. It is envisaged that a new database would be used for each model, with all tables being placed in the public schema. In some cases it may be useful to use more schemas within the database for separate sites, but this may not be necessary. The concept is to allow storage and backup of all the relevant information by dumping the database to a single file.
#
# ### Adding extensions to the database
#
# ```{r}
PgInit<-function(dbname="brant"){
require(RODBC)
con<-odbcConnect(dbname) ## Note the use of the connection name. IT MUST MATCH THE DATABASE!
odbcQuery(con,"create extension postgis")
odbcQuery(con,"create extension plr")
}
# ```
#
#
# ### Adding PLR statistical functions to the database
#
# Many useful statistical functions from R can be added as functions to the database. Again this can be done directly through R by using the open connection. The general format of all the functions involves coercing the arguments to numeric(to be on the safe side if characters are passed) and calculating stats after removing NAs. A float is returned. It is easy to add more to this function if required.
#
# ```{r}
PgPlr<-function(dbname="brant"){
require(RODBC)
con<-odbcConnect(dbname)
query<-"CREATE OR REPLACE FUNCTION median (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
median(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION q10 (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
quantile(x,0.1,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION q90 (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
quantile(x,0.9,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION q75 (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
quantile(x,0.75,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION q25 (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
quantile(x,0.25,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION minimum (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
min(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION maximum (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
max(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION mean (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
mean(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION sd (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
sd(x,na.rm=TRUE)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION se (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
sd(x,na.rm=TRUE)/sqrt(length(x))'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION length (float[]) RETURNS float AS '
x<-arg1
x<-as.numeric(as.character(x))
x<-na.omit(x)
length(x)'
LANGUAGE 'plr' STRICT;
CREATE OR REPLACE FUNCTION PSuitable (float[],float[],float,float,float) RETURNS float AS '
x<-arg1
q<-arg2
depth<-arg3
ht<-arg4
tide<-arg5
depth<-depth+tide
x2<-q[x>=depth&x<=ht]
x2<-max(x2)-min(x2)
if(is.na(x2))x2<-0
if(x2==-Inf)x2<-0
x2'
LANGUAGE 'plr' STRICT;
"
odbcQuery(con,query)
}
# ```
#
#
# ## Loading raster layers
#
# Raster layers uses a few more arguments. The layers are loaded in database (as referenced rasters can't be transfered). The tiles are usually square, but the x and y width can be set. Arguments are thus
#
# 1. flnm: name of file
# 2. x: Number of pixels in tile x dimension
# 3. y: Number of pixels in tile y dimension
# 4. tabnm: Name of table to hold data
# 5. db: Database name
# 6. srid: If this is 0 the srid will be taken from the file if it is included. It is usually safer to set this if known.
# 7. path: Path to file with trailing /
#
#
# ```{r}
PgLoadRaster<-function(flnm="cold_bay_3857_clip.tiff",x=10,y=10,tabnm="dem",db="brant",srid=3857,path="/home/rstudio/shiny_morph/"){
flnm<-paste(path,flnm,sep="")
command <- paste("raster2pgsql -s ",srid, "-I -d -M ",flnm, " -F -t ",x,"x",y," ",tabnm,"|psql -h postgis -U docker -d ",db,sep="")
system(command)
}
#
# ## Loading vector layers
#
# Vector layers can be loaded directly from the canvas of QQGIS after logging into the database through the dbmanager interface. However if they are stored on the server this command will load them into the data base from a shapefile. There is no need to specify the .shp extension for the name of the file.
#
# Arguments are:
#
# 1. flnm: name of file
# 2. tabnm: Name of table to hold data
# 3. db: Database name
# 4. srid: If this is 0 the srid will be taken from the file if it is included. It is usually safer to set this if known.
# 5. path: Path to file with trailing /
#
#
# ```{r}
PgLoadVector<-function(flnm="tide_regime",tabnm="tide_regime",db="brant",srid=3857,path="/home/rstudio/shiny_morph/"){
flnm<-paste(path,flnm,sep="")
command <- sprintf("shp2pgsql -s %s -d -I %s %s |psql -h postgis -U docker -d %s",srid,flnm,tabnm,db)
command
system(command)
}
#
#
#
# ## Setting up graticule from dem
#
# In the MoRph application patches will be either square or rectangular polygons that are derived from vectorising the raster dem that is first added to the data base. Statistics are calulated from the pixel values of the dem and held as attributes. The function can drop graticules where the minimum value is below a certain level and those with a maximum above a certain level, as this may be useful along coastlines.
#
#
# ```{r}
## Build the analysis graticule by vectorising a DEM raster table.
##
## Creates table "grat" with one polygon per raster tile (the tile's
## envelope) and, as attributes, summary statistics of the tile's pixel
## values: min, q10, q25, median, mean, q75, q90, max.  (minimum/q10/...
## are presumably user-defined SQL aggregates installed in the database --
## TODO confirm.)  Tiles are kept only when min > minht and max < maxht;
## the extra `min < 1000000000000` test looks like a nodata-sentinel
## filter -- TODO confirm.  A GiST spatial index is built on the result,
## and an empty `psuitable` column is added for PgPSuitable() to fill.
##
## Args:
##   dem:   name of the raster table to vectorise.
##   minht: drop tiles whose minimum value is at or below this height.
##   maxht: drop tiles whose maximum value is at or above this height.
##   db:    ODBC data source name of the target database.
PgMakeGrat<-function(dem="dem",minht=-10,maxht=10,db="brant")
{
    require(RODBC)
    con<-odbcConnect(db)
    ## NB: the SQL below is assembled from multi-line string literals; the
    ## layout of those lines is part of the query text and is left as-is.
    query<-paste("
drop table if exists grat;
create table grat as
select s.* from
(select rid, st_envelope(rast) geom,
minimum((st_dumpvalues(rast)).valarray) min,
q10((st_dumpvalues(rast)).valarray) q10,
q25((st_dumpvalues(rast)).valarray) q25,
median((st_dumpvalues(rast)).valarray) median,
mean((st_dumpvalues(rast)).valarray) mean,
q75((st_dumpvalues(rast)).valarray) q75,
q90((st_dumpvalues(rast)).valarray) q90,
maximum((st_dumpvalues(rast)).valarray) max
from ",dem,") s
where min>",minht," and max < ",maxht," and min <1000000000000;
CREATE INDEX grat_gix ON grat USING GIST (geom);",sep="")
    odbcQuery(con,query)
    ## Placeholder column, later populated by PgPSuitable().
    query<-"
ALTER TABLE grat ADD COLUMN psuitable numeric(3);
"
    odbcQuery(con,query)
}
## Calculate the proportion of each graticule within a suitable height range.
## Populate the `psuitable` column of the graticule table.  For every
## patch, the server-side PSuitable() function receives the stored value
## quantiles (min, q10, q25, median, q75, q90, max) paired with their
## percentile positions (0..100) plus the depth/height limits, and writes
## back the resulting suitability score.
##
## Args:
##   db:     ODBC data source name of the target database.
##   depth:  lower height limit handed to PSuitable().
##   height: upper height limit handed to PSuitable().
PgPSuitable <- function(db = "brant", depth = -2, height = 5) {
    require(RODBC)
    channel <- odbcConnect(db)
    sql <- sprintf(
        "update grat set psuitable = PSuitable(array[min,q10,q25,median,q75,q90,max],array[0,10,25,50,75,90,100],%s,%s,0);",
        depth, height)
    odbcQuery(channel, sql)
}
#
# ## Getting vector layer from the data base
#
# ```{r}
## Run an arbitrary SELECT in the database and return the result as a
## spatial object.  The query is wrapped in a scratch view (temp_view)
## which readOGR() then reads through the GDAL PostgreSQL driver.
##
## Args:
##   query: SQL SELECT statement producing a geometry column.
##   db:    database name (also used as the ODBC DSN).
##
## Returns the Spatial*DataFrame produced by rgdal::readOGR().
PgGetQuery <- function(query = "select * from grat", db = "brant") {
    require(RODBC)
    require(rgdal)
    con <- odbcConnect(db)
    query <- paste("create view temp_view as ", query, sep = "")
    odbcQuery(con, query)
    ## BUG FIX: drop the scratch view even when readOGR() fails.  The
    ## original dropped it only after a successful read, so an error left
    ## temp_view behind and every later call then failed at "create view".
    on.exit(odbcQuery(con, "drop view temp_view"), add = TRUE)
    dsn <- paste("PG:dbname='", db, "' host='postgis' port=5432 user= 'docker'", sep = "")
    readOGR(dsn, "temp_view")
}
#
# ## Adding mean and median from resource layer
#
# This query works by overlaying the graticule onto any raster layer that has been first uploaded into the data base using PgLoadRaster. A temporary table is formed, then renamed grat and re-indexed. This is a more robust method than adding columns to grat.
#
# ```{r}
## Attach per-patch statistics from a raster resource layer to the
## graticule.  For every graticule patch, the raster tiles intersecting
## it are clipped to the patch, unioned, and summarised (median and mean
## of the pixel values); the results are added as columns
## median_<resource> and mean_<resource>.  The new table is built as
## "tmp" and then swapped in for "grat" (more robust than ALTER-ing
## columns onto grat in place), and the spatial index is recreated.
##
## Args:
##   db:       ODBC data source name of the target database.
##   resource: name of the raster table to summarise (also used in the
##             generated column names).
PgAddResource<-function(db="brant",resource="dem")
{
    require(RODBC)
    con<-odbcConnect(db)
    ## NB: the sprintf() format string below is a multi-line SQL literal;
    ## its layout is part of the query text and is left as-is.
    query<-sprintf("create table tmp as
select g.*,
med median_%s,
mn mean_%s
from
grat g,
(select
t2.rid,
median((st_dumpvalues(st_union(st_clip(rast,geom)))).valarray) med,
mean((st_dumpvalues(st_union(st_clip(rast,geom)))).valarray) mn
from %s t,
(select * from grat) t2
where st_intersects(rast,geom)
group by t2.rid) s
where s.rid=g.rid;
drop table grat;
ALTER TABLE tmp RENAME TO grat;
CREATE INDEX grat_gix ON grat USING GIST (geom);",resource,resource,resource)
    odbcQuery(con,query)
}
#
# ## Extracting attribute to grat from vector polygon layer
#
# The idea behind this function is quite specific to MoRph, but can be adapted. It assumes that a polygon layer has been loaded using PgLoadVector. The example is a layer with codes representing the tide regime in each bay. Some of the graticule patches may possibly overlap the boundary between tide regimes, but only one of the values is needed. The function simply chooses (arbitrarily) the minimum value. This is not a problem in most cases, as the boundary itself is also fairly arbitrary.
#
# ```{r}
## Attach an attribute from a vector polygon layer to the graticule.
## Each graticule patch takes the value of column `col` from the polygons
## of layer `l2` that intersect it; where a patch straddles a polygon
## boundary, the minimum value is chosen (arbitrarily, but deterministic).
## As in PgAddResource(), the result is built as "tmp", swapped in for
## the target table, and re-indexed.
##
## Args:
##   db:  ODBC data source name of the target database.
##   l1:  name of the table to extend (normally "grat").
##   l2:  name of the polygon layer supplying the attribute.
##   col: column of l2 to copy onto each patch.
PgAddVector<-function(db="brant",l1="grat",l2="tide_regime",col="tide"){
    require(RODBC)
    con<-odbcConnect(db)
    ## NB: multi-line SQL literal; layout is part of the query text.
    query<-sprintf("
drop table if exists tmp;
create table tmp as
select gg.*,s.%s from
%s gg,
(select g.rid,min(%s) %s from
grat g,
%s t
where st_intersects(t.geom,g.geom)
group by rid,g.geom) s
where s.rid=gg.rid;
drop table %s;
ALTER TABLE tmp RENAME TO %s;
CREATE INDEX %s_gix ON %s USING GIST (geom);",col,l1,col,col,l2,l1,l1,l1,l1)
    odbcQuery(con,query)
}
|
## Information from the submitted job:
##   arg 1 (nw):     number of MPI workers / data shards
##   arg 2 (fracid): percent of workers active per DEM iteration (0-100)
##   arg 3 (id):     cross-validation fold id; selects the training file
cmdArgs <- commandArgs(trailingOnly = TRUE)
nw <- as.integer(cmdArgs[1])
fracid <- as.integer(cmdArgs[2])
id <- as.integer(cmdArgs[3])
train <- readRDS(paste0("../data/dem_ml_train_cv_", id, ".rds"))
## Reorganise each of the nw shards into per-group lists for the
## distributed E-step: for every grouping level keep its random-effects
## design (z), fixed-effects design (x), response (y), and an identity
## residual-covariance matrix (r).
trainPart <- vector("list", nw)
for (ll in 1:nw) {
    group <- train[[ll]]$group
    grpLbl <- sort(unique(group))
    ngroup <- length(grpLbl)
    ranefList0 <- list()
    fixefList0 <- list()
    rmatList0 <- list()
    ylist0 <- list()
    grpIdx0 <- list()
    for (gg in 1:ngroup) {
        ## rows of shard ll belonging to group gg
        grpIdx0[[gg]] <- which(group == grpLbl[gg])
        ranefList0[[gg]] <- train[[ll]]$z[grpIdx0[[gg]], , drop = FALSE]
        fixefList0[[gg]] <- train[[ll]]$x[grpIdx0[[gg]], , drop = FALSE]
        ylist0[[gg]] <- train[[ll]]$y[grpIdx0[[gg]]]
        rmatList0[[gg]] <- diag(1, length(grpIdx0[[gg]]))
    }
    trainPart[[ll]] <- list(z = ranefList0,
                            x = fixefList0,
                            y = ylist0,
                            r = rmatList0)
}
## no. of individuals (total number of groups across all shards)
nsample0 <- sum(sapply(sapply(lapply(trainPart, function(X) lapply(X$x, function(Y) nrow(Y))), unlist), length))
## no. of observations (total number of rows across all shards)
nobs0 <- sum(sapply(sapply(lapply(trainPart, function(X) lapply(X$x, function(Y) nrow(Y))), unlist), sum))
cat("( m, n ): (", nsample0, ", ", nobs0, ")\n")
## MPI bring-up: attach Rmpi and confirm every spawned worker responds.
library(Rmpi)
cat("Workers reporting:\n")
mpi.remote.exec(mpi.comm.rank())
## recvData() and distEstep() are presumably defined by this sourced
## file -- TODO confirm against dem_estep_sync.R.
source("~/dem/ml/code/dem_estep_sync.R")
## Clear each worker's global environment, then push the two helper
## functions to all workers (the ls() calls are just visibility checks).
mpi.remote.exec(rm(list = ls()))
mpi.remote.exec(ls())
mpi.bcast.Robj2slave(recvData)
mpi.bcast.Robj2slave(distEstep)
mpi.remote.exec(ls())
## Start recvData() everywhere; it will accept the four tagged data
## messages sent in the loop below.
mpi.bcast.cmd(recvData())
## Ship shard ww of the training data to worker ww, one component per
## tagged message (tag 1: z, 2: x, 3: r, 4: y).
for (ww in 1:nw) {
    ranefListPart <- trainPart[[ww]]$z
    fixefListPart <- trainPart[[ww]]$x
    rmatListPart <- trainPart[[ww]]$r
    ylistPart <- trainPart[[ww]]$y
    mpi.send.Robj(list(ranefListPart = ranefListPart), ww, 1)
    mpi.send.Robj(list(fixefListPart = fixefListPart), ww, 2)
    mpi.send.Robj(list(rmatListPart = rmatListPart), ww, 3)
    mpi.send.Robj(list(ylistPart = ylistPart), ww, 4)
    cat("send data to worker: ", ww, "\n")
}
## Sanity check: print a small corner of each worker's copy of the data.
cat("snapshot of workers GlobalEnv:\n")
mpi.remote.exec(head(ranefListPart[[1]])[ , 1:5])
mpi.remote.exec(head(fixefListPart[[1]])[ , 1:5])
mpi.remote.exec(head(rmatListPart[[1]])[ , 1:5])
mpi.remote.exec(sum(sapply(ylistPart, sum)))
## Master-side M-step updates (masterUpdateFixPost, masterUpdateDmat,
## masterUpdateErrVar) are presumably defined by this sourced file --
## TODO confirm against dem_mstep_sync.R.
source("~/dem/ml/code/dem_mstep_sync.R")
## MPI setup
nworkers <- nw # same as mpi.comm.size() - 1
workerTasks <- rep(1, nworkers)  # 1 = idle, 0 = busy (see notes below)
returnWorkers <- 0
closedWorkers <- 0
## EM specs
niter <- 1000  # maximum number of DEM iterations
## Per-worker holders for sufficient statistics and log-likelihood terms.
emats <- vector("list", nworkers)
bmats <- vector("list", nworkers)
fvecs <- vector("list", nworkers)
quads <- numeric(nworkers)
logLiks <- numeric(nworkers)
logLikVec <- numeric(niter)
## fracid is a percentage; nactv workers are sampled as "active" in each
## DEM iteration.
frac <- fracid / 100
nactv <- floor(frac * nworkers)
## EM parameter initialization
library(MCMCpack)
nranef0 <- ncol(trainPart[[1]]$z[[1]]); nfixef0 <- ncol(trainPart[[1]]$x[[1]])
## Hyperparameters handed to the M-step updates.
muBeta0 <- rep(0, nfixef0); sigBetaInv0 <- diag(0, nfixef0); nu0 <- -(2 + nfixef0); sig0 <- 0
eta0 <- -(nranef0 + 1); tmat0 <- diag(0, nranef0);
## Random starting values: D from an inverse-Wishart draw, error
## variance from U(1, 10), fixed-effects covariance factor = identity.
dmat0 <- riwish(2 * nranef0, diag(1, nranef0))
errVar0 <- runif(1, 1, 10)
emat0 <- diag(1, nfixef0)
parEst <- list(dmat = dmat0, errVar = errVar0, fixCovDivErr = emat0, fixMean = muBeta0)
workerTrack <- matrix(0, nworkers, niter) ## track which workers were active, per iteration
## Call the distEstep on all the workers to get them ready to undertake tasks
mpi.bcast.cmd(distEstep())
## record keeping
## 1. workerTask[ww] = 0 if work has been assigned to worker 'ww'
##                   = 1 if worker 'ww' is waiting/idle
## 2. workerTrack[ww, iter] = 0 if worker 'ww' didn't return
##                              sufficient statistics at iteration 'iter'
##                          = 1 if worker 'ww' returned sufficient
##                              statistics at iteration 'iter'
## run some initializations
## Handshake: each worker signals readiness on tag 0, then receives the
## initial parameter estimates on tag 1.
for (ww in 1:nworkers) {
    msg <- mpi.recv.Robj(ww, 0)
    cat("initial msg recvd from worker: ", ww, "\n")
    mpi.send.Robj(parEst, ww, 1)
    workerTasks[ww] <- 0 # task has been assigned
    cat("DEM setp started at worker: ", ww, "\n")  # ("setp" typo is only in the log string)
}
## Collect the beta sufficient statistics (emat, fvec) each worker
## computed from the initial parameters (reply on tag 1).
for (ww in 1:nworkers) {
    msg <- mpi.recv.Robj(ww, 1)
    workerTasks[ww] <- 1 # worker is idle
    ## sufficient stats for updating beta estimate
    emats[[ww]] <- msg$emat
    fvecs[[ww]] <- msg$fvec
    cat("recvd beta suff. stat. from worker: ", ww, "\n")
}
## Proceed only once EVERY worker has returned its beta sufficient
## statistics.  BUG FIX: the original condition,
##   all(sum(sapply(emats, function(x) !(is.null(x))))),
## takes all() of a single count, which is TRUE whenever AT LEAST ONE
## entry is non-NULL; it now genuinely checks that all entries arrived.
if (all(!vapply(emats, is.null, logical(1)))) {
    fixPost <- masterUpdateFixPost(emats, fvecs, parEst$errVar, muBeta0, sigBetaInv0)
    parEst$fixCovDivErr <- fixPost$fixCovDivErr
    parEst$fixMean <- fixPost$fixMean
    ## Broadcast the refreshed estimates; tag 2 requests the covariance
    ## sufficient statistics next.
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 2)
        workerTasks[ww] <- 0 # task has been assigned
    }
    cat("sent parameter estimates to workers \n")
} else {
    stop("problem in receiving tag=1 \n")
}
## Collect the random-effects covariance sufficient statistic (bmat)
## from every worker (reply on tag 2).
for (ww in 1:nworkers) {
    msg <- mpi.recv.Robj(ww, 2)
    workerTasks[ww] <- 1 # worker is idle
    bmats[[ww]] <- msg$bmat
    cat("recvd covariance suff. stat. from worker: ", ww, "\n")
}
## Update D only once EVERY worker has returned its bmat.  BUG FIX: as
## with the emats check above, the original all(sum(sapply(...))) was
## TRUE whenever at least one bmat arrived; it now requires all of them.
if (all(!vapply(bmats, is.null, logical(1)))) {
    parEst$dmat <- masterUpdateDmat(bmats, parEst$errVar, nsample0, eta0, tmat0)
    ## Broadcast; tag 1 requests fresh beta sufficient statistics.
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 1)
        workerTasks[ww] <- 0 # task has been assigned
    }
    cat("sent updated Dmat to workers \n")
} else {
    stop("problem in receiving tag=2 \n")
}
## Collect beta sufficient statistics again (reply on tag 1), this time
## as input to the error-variance update.
for (ww in 1:nworkers) {
    msg <- mpi.recv.Robj(ww, 1)
    workerTasks[ww] <- 1 # worker is idle
    ## sufficient stats for updating beta estimate
    emats[[ww]] <- msg$emat
    fvecs[[ww]] <- msg$fvec
    cat("recvd beta suff. stat. from worker to update err. var.: ", ww, "\n")
}
## Same completeness check as before the first fixed-effects update.
## BUG FIX: replaced all(sum(sapply(...))) -- effectively "any non-NULL"
## -- with a genuine "all non-NULL" test.
if (all(!vapply(emats, is.null, logical(1)))) {
    fixPost <- masterUpdateFixPost(emats, fvecs, parEst$errVar, muBeta0, sigBetaInv0)
    parEst$fixCovDivErr <- fixPost$fixCovDivErr
    parEst$fixMean <- fixPost$fixMean
    ## Broadcast; tag 3 requests the quadratic-form statistic used by
    ## the error-variance update.
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 3)
        workerTasks[ww] <- 0 # task has been assigned
    }
    cat("sent parameter estimates to workers to finish an EM iteration \n")
} else {
    stop("problem in receiving tag=1, before updating error variance \n")
}
## Collect the quadratic-form statistic from every worker (tag 3).
for (ww in 1:nworkers) {
    msg <- mpi.recv.Robj(ww, 3)
    workerTasks[ww] <- 1 # worker is idle
    ## sufficient stats for updating beta estimate
    quads[ww] <- msg$quad
}
## update error variance and finish first round of EM
parEst$errVar <- masterUpdateErrVar(quads, parEst$fixMean, nobs0, muBeta0, sigBetaInv0, sig0, nu0)
## update fixed effects posterior covariance matrix to account for the
## correct error variance
fixPost$fixCov <- parEst$errVar * parEst$fixCovDivErr
## send the current parameter estimates to all the workers for
## estimating the log likelihood at the end of every iteration
for (ww in 1:nworkers) {
    mpi.send.Robj(parEst, ww, 4)
    workerTasks[ww] <- 0 # task has been assigned
}
for (ww in 1:nworkers) {
    msg <- mpi.recv.Robj(ww, 4)
    workerTasks[ww] <- 1 # worker is idle
    ## log likelihood contribution of worker "ww"
    logLiks[ww] <- msg$logLik
}
## begin DEM iterations now!
## rcrdDmat <- vector("list", niter)
## rcrdErrVar <- numeric(niter)
## rcrdFixPost <- vector("list", niter)
## Sentinel so the first convergence test in the loop never triggers.
llk0 <- 1e7
## Added by CL
source("dem_ll.R")
startTime <- proc.time()
## Main DEM loop.  Each iteration runs three synchronized exchanges
## (tags 2, 1, 3) to update, in order, the random-effects covariance D,
## the fixed-effects posterior, and the error variance, then a full
## log-likelihood evaluation on tag 4.  Only a random subset of `nactv`
## workers ("active") contributes fresh sufficient statistics; replies
## from inactive workers are received but discarded, so their slots in
## bmats/emats/fvecs/quads retain the previous iteration's values.
for (its in 1:niter) {
    ## if (its %% 5 == 0) cat("DEM iteration: ", its, "\n")
    ## RANDOM EFFECTS COVARIANCE
    ## 1. send to all workers
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 2)
        workerTasks[ww] <- 0 # task has been assigned
    }
    ## active & inactive workers (resampled every iteration)
    actv <- sort(sample(1:nworkers, nactv, replace = FALSE))
    inactv <- setdiff(1:nworkers, actv)
    workerTrack[actv, its] <- 1
    ## 2. recv from active set and ignore the inactive set
    for (ww in actv) {
        msg <- mpi.recv.Robj(ww, 2)
        workerTasks[ww] <- 1 # worker is idle
        bmats[[ww]] <- msg$bmat
    }
    for (ww in inactv) {
        ## ignore message (still received so it does not linger in the
        ## MPI queue)
        msg <- mpi.recv.Robj(ww, 2)
    }
    ## update
    parEst$dmat <- masterUpdateDmat(bmats, parEst$errVar, nsample0, eta0, tmat0)
    ## rcrdDmat[[its]] <- parEst$dmat
    ## FIXED EFFECTS MEAN
    ## 1. send to all workers
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 1)
        workerTasks[ww] <- 0 # task has been assigned
    }
    ## 2. recv from active set and ignore the inactive set
    for (ww in actv) {
        msg <- mpi.recv.Robj(ww, 1)
        workerTasks[ww] <- 1 # worker is idle
        ## sufficient stats for updating beta estimate
        emats[[ww]] <- msg$emat
        fvecs[[ww]] <- msg$fvec
    }
    for (ww in inactv) {
        ## ignore message
        msg <- mpi.recv.Robj(ww, 1)
    }
    ## update
    fixPost <- masterUpdateFixPost(emats, fvecs, parEst$errVar, muBeta0, sigBetaInv0)
    parEst$fixCovDivErr <- fixPost$fixCovDivErr
    parEst$fixMean <- fixPost$fixMean
    ## ERROR VARIANCE
    ## 1. send to all workers
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 3)
        workerTasks[ww] <- 0 # task has been assigned
    }
    ## 2. recv from active set and ignore the inactive set
    for (ww in actv) {
        msg <- mpi.recv.Robj(ww, 3)
        workerTasks[ww] <- 1 # worker is idle
        ## sufficient stats for updating beta estimate
        quads[ww] <- msg$quad
    }
    for (ww in inactv) {
        ## ignore message
        msg <- mpi.recv.Robj(ww, 3)
    }
    ## update
    parEst$errVar <- masterUpdateErrVar(quads, parEst$fixMean, nobs0, muBeta0, sigBetaInv0, sig0, nu0)
    fixPost$fixCov <- parEst$errVar * parEst$fixCovDivErr
    ## rcrdErrVar[its] <- parEst$errVar
    ## rcrdFixPost[[its]] <- fixPost[1:2]
    ## LOG LIKELIHOOD
    ## 1. send parameter estimates to all workers
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 4)
        workerTasks[ww] <- 0 # task has been assigned
    }
    ## 2. recv from ALL the workers
    for (ww in 1:nworkers) {
        msg <- mpi.recv.Robj(ww, 4)
        workerTasks[ww] <- 1 # worker is idle
        ## log likelihood contribution of worker "ww"
        logLiks[ww] <- msg$logLik
    }
    ## calc log-likelihood (sum of the per-worker contributions)
    logLikVec[its] <- Reduce("+", logLiks)
    ## Stop once the log-likelihood changes by 1e-7 or less.
    if (abs(llk0 - logLikVec[its]) > 1e-7) {
        llk0 <- logLikVec[its]
    } else {
        break()
    }
}
endTime <- proc.time()
demTime <- endTime - startTime
finalCnt <- its  # number of DEM iterations actually run
## rcrdDmat <- rcrdDmat[1:finalCnt]
## rcrdErrVar <- rcrdErrVar[1:finalCnt]
## rcrdFixPost <- rcrdFixPost[1:finalCnt]
## Bundle the results: final parameter estimates, per-iteration
## active-worker mask, log-likelihood trace, iteration count and timing.
res <- list(
    pars = parEst,
    track = workerTrack[ , 1:finalCnt],
    logLik = logLikVec[1:finalCnt],
    niters = finalCnt,
    time = demTime
)
fname <- paste0("/Shared/ssrivastva/dem/ml/result/dem/ml_dem_cv_", id, "_frac_", fracid, ".rds")
saveRDS(res, fname)
## Shutdown protocol: tag 666 tells each worker to stop; each replies
## with a summary object on the same tag.
for (ww in 1:nworkers) {
    cat("terminating job on worker: ", ww, "\n")
    mpi.send.Robj(0, ww, 666)
    closedWorkers <- closedWorkers + 1
}
workerSumm <- list()
for (ww in 1:nworkers) {
    workerSumm[[ww]] <- mpi.recv.Robj(ww, 666)
}
cat("Closing workers \n")
mpi.close.Rslaves()
mpi.quit()
| /ml/code/ml_dem_mpi.R | no_license | snoweye/DEM | R | false | false | 11,035 | r | ## Information from the submitted job
cmdArgs <- commandArgs(trailingOnly = TRUE)
nw <- as.integer(cmdArgs[1])
fracid <- as.integer(cmdArgs[2])
id <- as.integer(cmdArgs[3])
train <- readRDS(paste0("../data/dem_ml_train_cv_", id, ".rds"))
trainPart <- vector("list", nw)
for (ll in 1:nw) {
group <- train[[ll]]$group
grpLbl <- sort(unique(group))
ngroup <- length(grpLbl)
ranefList0 <- list()
fixefList0 <- list()
rmatList0 <- list()
ylist0 <- list()
grpIdx0 <- list()
for (gg in 1:ngroup) {
grpIdx0[[gg]] <- which(group == grpLbl[gg])
ranefList0[[gg]] <- train[[ll]]$z[grpIdx0[[gg]], , drop = FALSE]
fixefList0[[gg]] <- train[[ll]]$x[grpIdx0[[gg]], , drop = FALSE]
ylist0[[gg]] <- train[[ll]]$y[grpIdx0[[gg]]]
rmatList0[[gg]] <- diag(1, length(grpIdx0[[gg]]))
}
trainPart[[ll]] <- list(z = ranefList0,
x = fixefList0,
y = ylist0,
r = rmatList0)
}
## no. of individuals
nsample0 <- sum(sapply(sapply(lapply(trainPart, function(X) lapply(X$x, function(Y) nrow(Y))), unlist), length))
## no. of observations
nobs0 <- sum(sapply(sapply(lapply(trainPart, function(X) lapply(X$x, function(Y) nrow(Y))), unlist), sum))
cat("( m, n ): (", nsample0, ", ", nobs0, ")\n")
library(Rmpi)
cat("Workers reporting:\n")
mpi.remote.exec(mpi.comm.rank())
source("~/dem/ml/code/dem_estep_sync.R")
mpi.remote.exec(rm(list = ls()))
mpi.remote.exec(ls())
mpi.bcast.Robj2slave(recvData)
mpi.bcast.Robj2slave(distEstep)
mpi.remote.exec(ls())
mpi.bcast.cmd(recvData())
for (ww in 1:nw) {
ranefListPart <- trainPart[[ww]]$z
fixefListPart <- trainPart[[ww]]$x
rmatListPart <- trainPart[[ww]]$r
ylistPart <- trainPart[[ww]]$y
mpi.send.Robj(list(ranefListPart = ranefListPart), ww, 1)
mpi.send.Robj(list(fixefListPart = fixefListPart), ww, 2)
mpi.send.Robj(list(rmatListPart = rmatListPart), ww, 3)
mpi.send.Robj(list(ylistPart = ylistPart), ww, 4)
cat("send data to worker: ", ww, "\n")
}
cat("snapshot of workers GlobalEnv:\n")
mpi.remote.exec(head(ranefListPart[[1]])[ , 1:5])
mpi.remote.exec(head(fixefListPart[[1]])[ , 1:5])
mpi.remote.exec(head(rmatListPart[[1]])[ , 1:5])
mpi.remote.exec(sum(sapply(ylistPart, sum)))
source("~/dem/ml/code/dem_mstep_sync.R")
## MPI setup
nworkers <- nw # same as mpi.comm.size() - 1
workerTasks <- rep(1, nworkers)
returnWorkers <- 0
closedWorkers <- 0
## EM specs
niter <- 1000
emats <- vector("list", nworkers)
bmats <- vector("list", nworkers)
fvecs <- vector("list", nworkers)
quads <- numeric(nworkers)
logLiks <- numeric(nworkers)
logLikVec <- numeric(niter)
frac <- fracid / 100
nactv <- floor(frac * nworkers)
## EM parameter initialization
library(MCMCpack)
nranef0 <- ncol(trainPart[[1]]$z[[1]]); nfixef0 <- ncol(trainPart[[1]]$x[[1]])
muBeta0 <- rep(0, nfixef0); sigBetaInv0 <- diag(0, nfixef0); nu0 <- -(2 + nfixef0); sig0 <- 0
eta0 <- -(nranef0 + 1); tmat0 <- diag(0, nranef0);
dmat0 <- riwish(2 * nranef0, diag(1, nranef0))
errVar0 <- runif(1, 1, 10)
emat0 <- diag(1, nfixef0)
parEst <- list(dmat = dmat0, errVar = errVar0, fixCovDivErr = emat0, fixMean = muBeta0)
workerTrack <- matrix(0, nworkers, niter) ## track
## Call the distEstep on all the workers to get them ready to undertake tasks
mpi.bcast.cmd(distEstep())
## record keeping
## 1. workerTask[ww] = 0 if work has been assigned to worker 'ww'
## = 1 if worker 'ww' is waiting/idle
## 2. workerTrack[ww, iter] = 0 if worker 'ww' didn't return
## sufficient statistics at iteration 'iter'
## = 1 if worker 'ww' returned sufficient
## statistics at iteration 'iter'
## run some initializations
for (ww in 1:nworkers) {
msg <- mpi.recv.Robj(ww, 0)
cat("initial msg recvd from worker: ", ww, "\n")
mpi.send.Robj(parEst, ww, 1)
workerTasks[ww] <- 0 # task has been assigned
cat("DEM setp started at worker: ", ww, "\n")
}
for (ww in 1:nworkers) {
msg <- mpi.recv.Robj(ww, 1)
workerTasks[ww] <- 1 # worker is idle
## sufficient stats for updating beta estimate
emats[[ww]] <- msg$emat
fvecs[[ww]] <- msg$fvec
cat("recvd beta suff. stat. from worker: ", ww, "\n")
}
## Proceed only once EVERY worker has returned its beta sufficient
## statistics.  BUG FIX: the original condition,
##   all(sum(sapply(emats, function(x) !(is.null(x))))),
## takes all() of a single count, which is TRUE whenever AT LEAST ONE
## entry is non-NULL; it now genuinely checks that all entries arrived.
if (all(!vapply(emats, is.null, logical(1)))) {
    fixPost <- masterUpdateFixPost(emats, fvecs, parEst$errVar, muBeta0, sigBetaInv0)
    parEst$fixCovDivErr <- fixPost$fixCovDivErr
    parEst$fixMean <- fixPost$fixMean
    ## Broadcast the refreshed estimates; tag 2 requests the covariance
    ## sufficient statistics next.
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 2)
        workerTasks[ww] <- 0 # task has been assigned
    }
    cat("sent parameter estimates to workers \n")
} else {
    stop("problem in receiving tag=1 \n")
}
for (ww in 1:nworkers) {
msg <- mpi.recv.Robj(ww, 2)
workerTasks[ww] <- 1 # worker is idle
bmats[[ww]] <- msg$bmat
cat("recvd covariance suff. stat. from worker: ", ww, "\n")
}
## Update D only once EVERY worker has returned its bmat.  BUG FIX: as
## with the emats check above, the original all(sum(sapply(...))) was
## TRUE whenever at least one bmat arrived; it now requires all of them.
if (all(!vapply(bmats, is.null, logical(1)))) {
    parEst$dmat <- masterUpdateDmat(bmats, parEst$errVar, nsample0, eta0, tmat0)
    ## Broadcast; tag 1 requests fresh beta sufficient statistics.
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 1)
        workerTasks[ww] <- 0 # task has been assigned
    }
    cat("sent updated Dmat to workers \n")
} else {
    stop("problem in receiving tag=2 \n")
}
for (ww in 1:nworkers) {
msg <- mpi.recv.Robj(ww, 1)
workerTasks[ww] <- 1 # worker is idle
## sufficient stats for updating beta estimate
emats[[ww]] <- msg$emat
fvecs[[ww]] <- msg$fvec
cat("recvd beta suff. stat. from worker to update err. var.: ", ww, "\n")
}
## Same completeness check as before the first fixed-effects update.
## BUG FIX: replaced all(sum(sapply(...))) -- effectively "any non-NULL"
## -- with a genuine "all non-NULL" test.
if (all(!vapply(emats, is.null, logical(1)))) {
    fixPost <- masterUpdateFixPost(emats, fvecs, parEst$errVar, muBeta0, sigBetaInv0)
    parEst$fixCovDivErr <- fixPost$fixCovDivErr
    parEst$fixMean <- fixPost$fixMean
    ## Broadcast; tag 3 requests the quadratic-form statistic used by
    ## the error-variance update.
    for (ww in 1:nworkers) {
        mpi.send.Robj(parEst, ww, 3)
        workerTasks[ww] <- 0 # task has been assigned
    }
    cat("sent parameter estimates to workers to finish an EM iteration \n")
} else {
    stop("problem in receiving tag=1, before updating error variance \n")
}
for (ww in 1:nworkers) {
msg <- mpi.recv.Robj(ww, 3)
workerTasks[ww] <- 1 # worker is idle
## sufficient stats for updating beta estimate
quads[ww] <- msg$quad
}
## update error variance and finish first round of EM
parEst$errVar <- masterUpdateErrVar(quads, parEst$fixMean, nobs0, muBeta0, sigBetaInv0, sig0, nu0)
## update fixed effects posterior covariance matrix to account for the
## correct error variance
fixPost$fixCov <- parEst$errVar * parEst$fixCovDivErr
## send the current parameter estimates to all the workers for
## estimating the log likelihood at the end of every iteration
for (ww in 1:nworkers) {
mpi.send.Robj(parEst, ww, 4)
workerTasks[ww] <- 0 # task has been assigned
}
for (ww in 1:nworkers) {
msg <- mpi.recv.Robj(ww, 4)
workerTasks[ww] <- 1 # worker is idle
## log likelihood contribution of worker "ww"
logLiks[ww] <- msg$logLik
}
## begin DEM iterations now!
## rcrdDmat <- vector("list", niter)
## rcrdErrVar <- numeric(niter)
## rcrdFixPost <- vector("list", niter)
llk0 <- 1e7
## Added by CL
source("dem_ll.R")
startTime <- proc.time()
for (its in 1:niter) {
## if (its %% 5 == 0) cat("DEM iteration: ", its, "\n")
## RANDOM EFFECTS COVARIANCE
## 1. send to all workers
for (ww in 1:nworkers) {
mpi.send.Robj(parEst, ww, 2)
workerTasks[ww] <- 0 # task has been assigned
}
## active & inactive workers
actv <- sort(sample(1:nworkers, nactv, replace = FALSE))
inactv <- setdiff(1:nworkers, actv)
workerTrack[actv, its] <- 1
## 2. recv from active set and ignore the inactive set
for (ww in actv) {
msg <- mpi.recv.Robj(ww, 2)
workerTasks[ww] <- 1 # worker is idle
bmats[[ww]] <- msg$bmat
}
for (ww in inactv) {
## ignore message
msg <- mpi.recv.Robj(ww, 2)
}
## update
parEst$dmat <- masterUpdateDmat(bmats, parEst$errVar, nsample0, eta0, tmat0)
## rcrdDmat[[its]] <- parEst$dmat
## FIXED EFFECTS MEAN
## 1. send to all workers
for (ww in 1:nworkers) {
mpi.send.Robj(parEst, ww, 1)
workerTasks[ww] <- 0 # task has been assigned
}
## 2. recv from active set and ignore the inactive set
for (ww in actv) {
msg <- mpi.recv.Robj(ww, 1)
workerTasks[ww] <- 1 # worker is idle
## sufficient stats for updating beta estimate
emats[[ww]] <- msg$emat
fvecs[[ww]] <- msg$fvec
}
for (ww in inactv) {
## ignore message
msg <- mpi.recv.Robj(ww, 1)
}
## update
fixPost <- masterUpdateFixPost(emats, fvecs, parEst$errVar, muBeta0, sigBetaInv0)
parEst$fixCovDivErr <- fixPost$fixCovDivErr
parEst$fixMean <- fixPost$fixMean
## ERROR VARIANCE
## 1. send to all workers
for (ww in 1:nworkers) {
mpi.send.Robj(parEst, ww, 3)
workerTasks[ww] <- 0 # task has been assigned
}
## 2. recv from active set and ignore the inactive set
for (ww in actv) {
msg <- mpi.recv.Robj(ww, 3)
workerTasks[ww] <- 1 # worker is idle
## sufficient stats for updating beta estimate
quads[ww] <- msg$quad
}
for (ww in inactv) {
## ignore message
msg <- mpi.recv.Robj(ww, 3)
}
## update
parEst$errVar <- masterUpdateErrVar(quads, parEst$fixMean, nobs0, muBeta0, sigBetaInv0, sig0, nu0)
fixPost$fixCov <- parEst$errVar * parEst$fixCovDivErr
## rcrdErrVar[its] <- parEst$errVar
## rcrdFixPost[[its]] <- fixPost[1:2]
## LOG LIKELIHOOD
## 1. send parameter estimates to all workers
for (ww in 1:nworkers) {
mpi.send.Robj(parEst, ww, 4)
workerTasks[ww] <- 0 # task has been assigned
}
## 2. recv from ALL the workers
for (ww in 1:nworkers) {
msg <- mpi.recv.Robj(ww, 4)
workerTasks[ww] <- 1 # worker is idle
## log likelihood contribution of worker "ww"
logLiks[ww] <- msg$logLik
}
## calc log-likelihood
logLikVec[its] <- Reduce("+", logLiks)
if (abs(llk0 - logLikVec[its]) > 1e-7) {
llk0 <- logLikVec[its]
} else {
break()
}
}
endTime <- proc.time()
demTime <- endTime - startTime
finalCnt <- its
## rcrdDmat <- rcrdDmat[1:finalCnt]
## rcrdErrVar <- rcrdErrVar[1:finalCnt]
## rcrdFixPost <- rcrdFixPost[1:finalCnt]
res <- list(
pars = parEst,
track = workerTrack[ , 1:finalCnt],
logLik = logLikVec[1:finalCnt],
niters = finalCnt,
time = demTime
)
fname <- paste0("/Shared/ssrivastva/dem/ml/result/dem/ml_dem_cv_", id, "_frac_", fracid, ".rds")
saveRDS(res, fname)
for (ww in 1:nworkers) {
cat("terminating job on worker: ", ww, "\n")
mpi.send.Robj(0, ww, 666)
closedWorkers <- closedWorkers + 1
}
workerSumm <- list()
for (ww in 1:nworkers) {
workerSumm[[ww]] <- mpi.recv.Robj(ww, 666)
}
cat("Closing workers \n")
mpi.close.Rslaves()
mpi.quit()
|
library(IAPWS95)
### Name: SigmaT
### Title: Surface Tension, Function of Temperature
### Aliases: SigmaT
### ** Examples
## Avoid `T` as a variable name: it shadows the built-in alias for TRUE.
Temp <- 500.  # temperature argument for SigmaT (kelvin per IAPWS95 convention -- TODO confirm)
Sig <- SigmaT(Temp)
Sig
| /data/genthat_extracted_code/IAPWS95/examples/SigmaT.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 161 | r | library(IAPWS95)
### Name: SigmaT
### Title: Surface Tension, Function of Temperature
### Aliases: SigmaT
### ** Examples
## Avoid `T` as a variable name: it shadows the built-in alias for TRUE.
Temp <- 500.  # temperature argument for SigmaT (kelvin per IAPWS95 convention -- TODO confirm)
Sig <- SigmaT(Temp)
Sig
|
testlist <- list(gamma = 4.33898108723878e-234, lambda = 4.50663906728526e+263, response = c(-3.10997619669227e+174, 2.68366358032973e+173, 5.15195773836667e-297, -9.98955817860994e-306, -15813060.3125093, 2.62422363199186e+164, 7.72212087023076e+230, 6.94358129422499e-131, 3.6246747289559e+162, -2.70278760263864e+149, 5.1412334143614e-241, 4.84286343504886e+234, -1.11423674122116e-160, 0.000107391576692519, 1.33842905674072e-283, 2.72234064527406e+68, -4.64205608080478e-166, 2.81838805941482e-90, 5.28580625866325e-204, 3.72523379378187e-306, Inf, 2.85328739922986e-224, -6.252498318038e-263, NaN, 2.30304914252814e-45, -1.96823084222614e+170, 3.36869230592869e+100, 1.69642778221737e-05, NaN, -1.4713658749351e-208, -5.19202704423807e+270, 5.71162804532999e+285, -6.49341257119801e-95, -2.25816401880415e-57, -4.50789090762003e+160, -5.929875658582e-235, -1.02501759395824e+148, 5.15195773836667e-297, -4.95747144672084e-192, -8.69218299797688e+204, 5.11348879704569e+87, -3.91573170902646e+175, -7.03152761844427e-298, -4.88838460346896e-45, -5.21117814657566e+300, 122095113435215, 1.75256263089425e-244, 3.3575758931684e+86, -1.33323081120411e-17, 9.43105188045337e-261, -1.20417288376912e+59, -1.07864445635295e+207, -8.60539838493147e+127, 1.6192991288469e+184, 3.63071840791214e-120, -0.00313329417076933, 0), weight = c(-6.45693453935003e-172, 2.72290364868967e-256, -1.26957421636344e-92, Inf, -3.65803203272529e+248, -5.95564310836751e-46, 1.14838128329549e+157, 1.62871088774944e+222, -7.20123318785917e+223, 1.30151060115348e+77, 4.08812298515408e+163, -7.5324999901496e+230, 1.2255764813417e-280, -2.40757700887417e-06, 1.63462840555053e-43, -9.1577828539383e+276, 2.11586913114236e-116, 9.38134416442235e+206, -1.13088568336565e-124, 4.25652755407588e-110, -1.63876627833511e-76, Inf, 5.14315258492333e+81, 1.04197300486636e-240, 5.79744067983513e+30, -1.77021559109993e+105, -1.95534985565611e-293, 3.32646684753535e+167, 2.22865956354546e-62, 3.99809370946795e+78, 
7.05372502174313e+91, -60808.2045876794, -5.18201023944126e+42, 2.4899793182544e-214, 1.84233747584812e-205, -1.59548059958246e-179, -3.02547548968509e+94, 1.25635487843838e-294, 2.07127531581826e-190, 5.41738154840269e-31, NaN, -5.95517132147917e-205, 3.1825695636902e-171, 4.58395047619845e-228, -2.69358746806744e-204, -8.8272381369111e+227, -1.13458888838333e-79, 7.18719140803302e-129, 3.95094967307021e+296, -1.04842749954331e-296, 2.11573272242714e+122, 1.53904483064049e-179, -1.61057575294615e+50, -4.74294978593362e-73, -2.52740931840868e-295, 4.34463588567088e-69, 3.46145241642054e+289, -9.21485914778244e+137, Inf, 2.81608422709759e-145, 8.03983729061744e+104, 0))
result <- do.call(CatReg:::DoBlock,testlist)
str(result) | /issuestests/CatReg/inst/testfiles/DoBlock/DoBlock_output/log_616069abf2c10905a3176fa7492562fc46caf65e/DoBlock-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 2,911 | r | testlist <- list(gamma = 4.33898108723878e-234, lambda = 4.50663906728526e+263, response = c(-3.10997619669227e+174, 2.68366358032973e+173, 5.15195773836667e-297, -9.98955817860994e-306, -15813060.3125093, 2.62422363199186e+164, 7.72212087023076e+230, 6.94358129422499e-131, 3.6246747289559e+162, -2.70278760263864e+149, 5.1412334143614e-241, 4.84286343504886e+234, -1.11423674122116e-160, 0.000107391576692519, 1.33842905674072e-283, 2.72234064527406e+68, -4.64205608080478e-166, 2.81838805941482e-90, 5.28580625866325e-204, 3.72523379378187e-306, Inf, 2.85328739922986e-224, -6.252498318038e-263, NaN, 2.30304914252814e-45, -1.96823084222614e+170, 3.36869230592869e+100, 1.69642778221737e-05, NaN, -1.4713658749351e-208, -5.19202704423807e+270, 5.71162804532999e+285, -6.49341257119801e-95, -2.25816401880415e-57, -4.50789090762003e+160, -5.929875658582e-235, -1.02501759395824e+148, 5.15195773836667e-297, -4.95747144672084e-192, -8.69218299797688e+204, 5.11348879704569e+87, -3.91573170902646e+175, -7.03152761844427e-298, -4.88838460346896e-45, -5.21117814657566e+300, 122095113435215, 1.75256263089425e-244, 3.3575758931684e+86, -1.33323081120411e-17, 9.43105188045337e-261, -1.20417288376912e+59, -1.07864445635295e+207, -8.60539838493147e+127, 1.6192991288469e+184, 3.63071840791214e-120, -0.00313329417076933, 0), weight = c(-6.45693453935003e-172, 2.72290364868967e-256, -1.26957421636344e-92, Inf, -3.65803203272529e+248, -5.95564310836751e-46, 1.14838128329549e+157, 1.62871088774944e+222, -7.20123318785917e+223, 1.30151060115348e+77, 4.08812298515408e+163, -7.5324999901496e+230, 1.2255764813417e-280, -2.40757700887417e-06, 1.63462840555053e-43, -9.1577828539383e+276, 2.11586913114236e-116, 9.38134416442235e+206, -1.13088568336565e-124, 4.25652755407588e-110, 
-1.63876627833511e-76, Inf, 5.14315258492333e+81, 1.04197300486636e-240, 5.79744067983513e+30, -1.77021559109993e+105, -1.95534985565611e-293, 3.32646684753535e+167, 2.22865956354546e-62, 3.99809370946795e+78, 7.05372502174313e+91, -60808.2045876794, -5.18201023944126e+42, 2.4899793182544e-214, 1.84233747584812e-205, -1.59548059958246e-179, -3.02547548968509e+94, 1.25635487843838e-294, 2.07127531581826e-190, 5.41738154840269e-31, NaN, -5.95517132147917e-205, 3.1825695636902e-171, 4.58395047619845e-228, -2.69358746806744e-204, -8.8272381369111e+227, -1.13458888838333e-79, 7.18719140803302e-129, 3.95094967307021e+296, -1.04842749954331e-296, 2.11573272242714e+122, 1.53904483064049e-179, -1.61057575294615e+50, -4.74294978593362e-73, -2.52740931840868e-295, 4.34463588567088e-69, 3.46145241642054e+289, -9.21485914778244e+137, Inf, 2.81608422709759e-145, 8.03983729061744e+104, 0))
result <- do.call(CatReg:::DoBlock,testlist)
str(result) |
## Draw the course-project histogram of Global Active Power and save it
## to plot1.png (480x480).
plot1 <- function() {
    ## Get the dataframe to work with
    dataSet <- getData()
    ## Convert the values in column Global_active_power to numeric values
    ## (as.character first, in case the column was read as a factor).
    gap <- with(dataSet, as.numeric(as.character(Global_active_power)))
    ## Open png device with given width and height; guarantee it is
    ## closed again even if plotting fails part-way.
    png("plot1.png", width = 480, height = 480)
    on.exit(dev.off())
    ## Plot the graph.  BUG FIX: dropped the original bg = "transparent"
    ## argument -- bg is not a hist()/plot() parameter and only produced
    ## "not a graphical parameter" warnings.
    hist(gap, col = "red", main = "Global Active Power",
         xlab = "Global Active Power (kilowatts)")
}
getData <- function() {
## Location of the input zip file
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
## Download the file in working directory, if the file does not already
## exist, else use the zip file from the working directory
if(!file.exists("household_power_consumption.zip")) {
srcFile <- download.file(fileUrl, "household_power_consumption.zip")
} else {
srcFile <- "household_power_consumption.zip"
}
## Unzip the downloaded file
inputFile <- unz(srcFile, "household_power_consumption.txt")
## Read the dataset from the input file into a data frame
## Account for the separator character, which is ";"
## Account for missing values which are indicated by "?"
hpc <- read.table(inputFile, header=TRUE,
sep=";", na.strings = "?")
## Subset the dataset for the requied dates
subhpc <- hpc[as.character(hpc$Date) %in% c("1/2/2007", "2/2/2007"),]
## Get the values in Date Column and convert them to be of class Date
dates <- with(subhpc, as.Date(Date, format="%d/%m/%Y"))
## Get the values in Date and Time coloumns
## Concatenate these values and convert into Date/Time class
dateAndTime <- with(subhpc,
strptime(paste(Date, Time),
"%d/%m/%Y %H:%M:%S"))
## Use the vectors of dates and dateAndTime created above along with
## other columns from original dataset to create a new dataframe. This
## dataframe contains the subset of data required with Date/Time columns
## in appropriate format. The separator and na character has also been
## taken care of.
inphpc <- data.frame(Date=dates, Time=dateAndTime, subhpc[3], subhpc[4],
subhpc[5], subhpc[6], subhpc[7], subhpc[8], subhpc[9])
## Return the dataframe
inphpc
} | /plot1.R | no_license | ahujarv/ExData_Plotting1 | R | false | false | 2,739 | r | plot1 <- function() {
## Get the dataframe to work with
dataSet <- getData()
## Convert the values in column Global_active_power to numeric values
gap <- with(dataSet, as.numeric(as.character(Global_active_power)))
## Open png device with given width and height
png("plot1.png", width=480, height=480)
## Plot the graph
hist(gap, col = "red", bg="transparent", main="Global Active Power",
xlab = "Global Active Power (kilowatts)")
## Turn off the png device
dev.off()
}
## Download (if needed), read, subset and type-convert the household
## power consumption data set.
##
## Returns a data frame restricted to 1-2 Feb 2007 with a Date column of
## class Date, a Time column holding the full date-time, and the seven
## measurement columns from the raw file.
getData <- function() {
    ## Location of the input zip file
    fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
    srcFile <- "household_power_consumption.zip"
    ## Download into the working directory only if not already present.
    ## BUG FIX: the original assigned the return value of download.file()
    ## (an integer status code, 0 on success) to srcFile and then passed
    ## that number to unz(), which broke the first-run path entirely.
    if (!file.exists(srcFile)) {
        download.file(fileUrl, srcFile)
    }
    ## Unzip the downloaded file
    inputFile <- unz(srcFile, "household_power_consumption.txt")
    ## Read the dataset: ";"-separated, missing values encoded as "?"
    hpc <- read.table(inputFile, header = TRUE,
                      sep = ";", na.strings = "?")
    ## Subset the dataset for the required dates
    subhpc <- hpc[as.character(hpc$Date) %in% c("1/2/2007", "2/2/2007"), ]
    ## Convert the Date column to class Date
    dates <- with(subhpc, as.Date(Date, format = "%d/%m/%Y"))
    ## Concatenate Date and Time and parse into a date-time object
    dateAndTime <- with(subhpc,
                        strptime(paste(Date, Time),
                                 "%d/%m/%Y %H:%M:%S"))
    ## Reassemble: converted Date/Time plus the remaining raw columns
    ## (columns 3..9 of the subset, kept positionally as in the original).
    inphpc <- data.frame(Date = dates, Time = dateAndTime, subhpc[3], subhpc[4],
                         subhpc[5], subhpc[6], subhpc[7], subhpc[8], subhpc[9])
    ## Return the dataframe
    inphpc
}
#' Use the NHTSA API to Decode VINs
#'
#' @param vin either a single vehicle identification number in a character
#'   string, or multiple vehicle identification numbers in a character vector.
#' @param ... additional arguments passed to the url builder functions.
#'
#' @return a data frame with the VIN, Make, Model, Model Year, Fuel Type, and
#'   Gross Vehicle Weight Rating (GVWR) for the specified VINs. In the base-R
#'   code path, a field missing from the API response yields `NA` in the
#'   corresponding cell.
#' @export
#'
#' @examples
#' \dontrun{
#' # Decode a single VIN:
#' decode_vin("JHLRD68404C018253")
#'
#' # Decode multiple VINs:
#' decode_vin(c("JHLRD68404C018253", "JH4DA9450MS001229"))
#' }
decode_vin <- function(vin, ...) {
  # A single VIN goes through the GET endpoint; multiple VINs are POSTed
  # as one semicolon-separated batch.
  if (length(vin) == 1) {
    response <- httr::GET(build_vin_url(vin, ...))
  } else {
    vins <- paste(vin, collapse = ";")
    response <- httr::POST(build_vin_batch_url(vins, ...))
  }
  if (response$status_code != 200) {
    msg <- paste("API responded with status code", response$status_code)
    stop(msg)
  }
  con <- httr::content(response)$Results
  if (requireNamespace("purrr", quietly = TRUE)) {
    VIN <- purrr::map_chr(con, "VIN")
    make <- purrr::map_chr(con, "Make")
    model <- purrr::map_chr(con, "Model")
    model_year <- purrr::map_chr(con, "ModelYear")
    fuel_type <- purrr::map_chr(con, "FuelTypePrimary")
    GVWR <- purrr::map_chr(con, "GVWR")
  } else {
    # Base-R fallback. The previous implementation grew each column with
    # append() inside a loop: besides being O(n^2), append(x, NULL) silently
    # skips missing fields, which could misalign the columns across records.
    # vapply() preallocates and maps a missing field to NA instead.
    pluck_chr <- function(records, field) {
      vapply(records, function(rec) {
        value <- rec[[field]]
        if (is.null(value)) NA_character_ else as.character(value)
      }, character(1))
    }
    VIN <- pluck_chr(con, "VIN")
    make <- pluck_chr(con, "Make")
    model <- pluck_chr(con, "Model")
    model_year <- pluck_chr(con, "ModelYear")
    fuel_type <- pluck_chr(con, "FuelTypePrimary")
    GVWR <- pluck_chr(con, "GVWR")
  }
  # stringsAsFactors keeps the columns as character on R < 4.0 as well
  # (it is already the default from R 4.0 onward).
  data.frame(VIN, make, model, model_year, fuel_type, GVWR,
             stringsAsFactors = FALSE)
}
| /R/decode_vin.R | permissive | burch-cm/vindecodr | R | false | false | 2,064 | r | #' Use the NHTSA API to Decode VINs
#'
#' @param vin either a single vehicle identification number in a character
#' string, or multiple vehicle identification numbers in a character vector.
#' @param ... additional arguments passed to the url builder functions.
#'
#' @return a data frame with the VIN, Make, Model, Model Year, Fuel Type, and
#' Gross Vehicle Weight Rating (GVWR) for the specified VINs.
#' @export
#'
#' @examples
#' \dontrun{
#' # Decode a single VIN:
#' decode_vin("JHLRD68404C018253")
#'
#' # Decode multiple VINs:
#' decode_vin(c("JHLRD68404C018253", "JH4DA9450MS001229"))
#' }
decode_vin <- function(vin, ...) {
  # Route the request: one VIN uses the GET endpoint, several VINs are
  # POSTed as a single semicolon-joined batch.
  if (length(vin) == 1) {
    response <- httr::GET(build_vin_url(vin, ...))
  } else {
    response <- httr::POST(build_vin_batch_url(paste(vin, collapse = ";"), ...))
  }
  # Anything other than HTTP 200 is treated as a hard failure.
  if (response$status_code != 200) {
    stop(paste("API responded with status code", response$status_code))
  }
  results <- httr::content(response)$Results
  if (requireNamespace("purrr", quietly = TRUE)) {
    # purrr path: extract each named field across all result records.
    VIN <- purrr::map_chr(results, "VIN")
    make <- purrr::map_chr(results, "Make")
    model <- purrr::map_chr(results, "Model")
    model_year <- purrr::map_chr(results, "ModelYear")
    fuel_type <- purrr::map_chr(results, "FuelTypePrimary")
    GVWR <- purrr::map_chr(results, "GVWR")
  } else {
    # Base-R path: pull one named field out of every record.
    # NOTE(review): like the append() loop this replaces, unlist() drops
    # NULL entries, so a record with a missing field shortens that column.
    field_values <- function(field) {
      unlist(lapply(results, function(record) record[[field]]))
    }
    VIN <- field_values("VIN")
    make <- field_values("Make")
    model <- field_values("Model")
    model_year <- field_values("ModelYear")
    fuel_type <- field_values("FuelTypePrimary")
    GVWR <- field_values("GVWR")
  }
  data.frame(VIN, make, model, model_year, fuel_type, GVWR)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadoc.R
\docType{data}
\name{flogcheby}
\alias{flogcheby}
\title{f vector for the Log Chebyshev Approximation Problem}
\format{A vector with length 20}
\usage{
data(flogcheby)
}
\description{
f vector for the Log Chebyshev Approximation Problem
}
\keyword{datasets}
| /man/flogcheby.Rd | no_license | cran/sdpt3r | R | false | true | 360 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadoc.R
\docType{data}
\name{flogcheby}
\alias{flogcheby}
\title{f vector for the Log Chebyshev Approximation Problem}
\format{A vector with length 20}
\usage{
data(flogcheby)
}
\description{
f vector for the Log Chebyshev Approximation Problem
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{tsbplot}
\alias{tsbplot}
\title{CCAM TSB plot}
\usage{
tsbplot(x, ...)
}
\arguments{
\item{x}{the object(s) returned from ccam.fit}
\item{...}{extra arguments transferred to plotit}
}
\description{
CCAM TSB plot
}
\details{
Plot of total stock biomass
}
| /man/tsbplot.Rd | no_license | elisvb/CCAM | R | false | true | 346 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{tsbplot}
\alias{tsbplot}
\title{CCAM TSB plot}
\usage{
tsbplot(x, ...)
}
\arguments{
\item{x}{the object(s) returned from ccam.fit}
\item{...}{extra arguments transferred to plotit}
}
\description{
CCAM TSB plot
}
\details{
Plot of total stock biomass
}
|
# coordProf_UQ function
#' @name coordProf_UQ
#' @author Dario Azzimonti
#' @title Coordinate profiles UQ from a kriging model
#' @description The function coordProf_UQ computes the profile extrema functions for posterior realizations of a Gaussian process and its confidence bounds
#' @param object either a \link[DiceKriging]{km} model or a list containing partial results. If \code{object} is a km model then all computations are carried out. If \code{object} is a list, then the function carries out all computations to complete the results list.
#' @param threshold the threshold of interest
#' @param allResMean a list resulting from \code{getAllMaxMin} or \code{approxMaxMin} for the profile extrema on the mean. If NULL the median from the observations is plotted
#' @param quantiles_uq a vector containing the quantiles to be computed
#' @param options_approx an optional list of options for approxMaxMin, see \link{approxMaxMin} for details.
#' @param options_full_sims an optional list of options for getAllMaxMin, see \link{getAllMaxMin} for details. If NULL the full computations are not executed. NOTE: these computations might be very expensive!
#' @param options_sims an optional list of options for the posterior simulations.
#' \itemize{
#' \item{\code{algorithm:}} string choice of the algorithm to select the pilot points ("A" or "B", default "B");
#' \item{\code{lower:}} \eqn{d} dimensional vector with lower bounds for pilot points, default \code{rep(0,d)};
#' \item{\code{upper:}} \eqn{d} dimensional vector with upper bounds for pilot points, default \code{rep(1,d)};
#' \item{\code{batchsize:}} number of pilot points, default \code{120};
#' \item{\code{optimcontrol:}} list containing the options for optimization, see \link[pGPx]{optim_dist_measure};
#' \item{\code{integcontrol:}} list containing the options for numerical integration of the criterion, see \link[pGPx]{optim_dist_measure};
#' \item{\code{integration.param:}} list containing the integration design, obtained with the function \link[KrigInv]{integration_design};
#' \item{\code{nsim:}} number of approximate GP simulations, default \code{300}.
#' }
#' @param options_bound an optional list containing \code{beta} the confidence level for the approximation and \code{alpha} the confidence level for the bound. Note that \code{alpha > 2*beta}. If \code{NULL}, the bound is not computed.
#' @param plot_level an integer to select the plots to return (0=no plots, 1=basic plots, 2= all plots)
#' @param plot_options an optional list of parameters for plots. See \link{setPlotOptions} for currently available options.
#' @param return_level an integer to select the amount of details returned
#' @return If return_level=1 a list containing \itemize{
#' \item{\code{profSups:}}{an array \code{dxfullDesignSizexnsims} containing the profile sup for each coordinate for each realization.}
#' \item{\code{profInfs:}}{an array \code{dxfullDesignSizexnsims} containing the profile inf for each coordinate for each realization.}
#' \item{\code{prof_quantiles_approx:}}{a list containing the quantiles (levels set by \code{quantiles_uq}) of the profile extrema functions.}
#' } if return_level=2 the same list as above but also including \code{more:} a list containing \itemize{
#' \item{\code{times:}}{a list containing
#' \itemize{
#' \item{\code{tSpts:} }{computational time for selecting pilot points.}
#' \item{\code{tApprox1ord:}}{vector containing the computational time required for profile extrema computation for each realization}
#' }}
#' \item{\code{simuls:}}{ a matrix containing the value of the field simulated at the pilot points}
#' \item{\code{sPts:}}{the pilot points}
#' }
#' @examples
#' if (!requireNamespace("DiceKriging", quietly = TRUE)) {
#' stop("DiceKriging needed for this example to work. Please install it.",
#' call. = FALSE)
#' }
#' # Compute a kriging model from 50 evaluations of the Branin function
#' # Define the function
#' g<-function(x){
#' return(-branin(x))
#' }
#' gp_des<-lhs::maximinLHS(20,2)
#' reals<-apply(gp_des,1,g)
#' kmModel<-km(design = gp_des,response = reals,covtype = "matern3_2")
#'
#' threshold=-10
#' d<-2
#'
#' # Compute coordinate profiles UQ starting from GP model
#' # define simulation options
#' options_sims<-list(algorithm="B", lower=rep(0,d), upper=rep(1,d),
#' batchsize=80, optimcontrol = list(method="genoud",pop.size=100,print.level=0),
#' integcontrol = list(distrib="sobol",n.points=1000), nsim=150)
#' # define 1 order approximation options
#' init_des<-lhs::maximinLHS(15,d)
#' options_approx<- list(multistart=4,heavyReturn=TRUE,
#' initDesign=init_des,fullDesignSize=100,
#' smoother="1order")
#' # define plot options
#' options_plots<-list(save=FALSE, titleProf = "Coordinate profiles",
#' title2d = "Posterior mean",qq_fill=TRUE)
#' \dontrun{
#' # profile UQ on approximate coordinate profiles
#' cProfiles_UQ<-coordProf_UQ(object = kmModel,threshold = threshold,allResMean = NULL,
#' quantiles_uq = c(0.05,0.95),options_approx = options_approx,
#' options_full_sims = NULL,options_sims = options_sims,
#' options_bound = NULL,plot_level = 3,
#' plot_options = options_plots,return_level = 3)
#' # profile UQ on full optim coordinate profiles
#' options_full_sims<-list(multistart=4,heavyReturn=TRUE)
#' cProfiles_UQ_full<-coordProf_UQ(object = cProfiles_UQ,threshold = threshold,allResMean = NULL,
#' quantiles_uq = c(0.05,0.95),options_approx = options_approx,
#' options_full_sims = options_full_sims,options_sims = options_sims,
#' options_bound = NULL,plot_level = 3,
#' plot_options = options_plots,return_level = 3)
#'
#' # profile UQ on full optim coordinate profiles with bound
#' cProfiles_UQ_full_bound<-coordProf_UQ(object = cProfiles_UQ_full,threshold = threshold,
#' allResMean = NULL, quantiles_uq = c(0.05,0.95),
#' options_approx = options_approx,
#' options_full_sims = options_full_sims,
#' options_sims = options_sims,
#' options_bound = list(beta=0.024,alpha=0.05),
#' plot_level = 3, plot_options = options_plots,
#' return_level = 3)
#' }
#' @export
coordProf_UQ <- function(object, threshold, allResMean = NULL, quantiles_uq = c(0.05, 0.95),
                         options_approx = NULL, options_full_sims = NULL, options_sims = NULL,
                         options_bound = NULL, plot_level = 0, plot_options = NULL,
                         return_level = 1) {
  # number of thresholds
  num_T <- length(threshold)
  # Accept either a km model (wrap it in a list) or a partial-results list.
  if (is(object, "km")) {
    object <- list(kmModel = object)
  } else if (!is.list(object)) {
    stop("object must be either a list or a km object")
  }
  # input space dimension
  d <- object$kmModel@d
  # Default options for the first-order approximation.
  # FIX: the default initial design previously used maximinLHS(10, 2),
  # hard-coding dimension 2 even though the function is generic in d.
  if (is.null(options_approx)) {
    init_des <- maximinLHS(10, d)
    options_approx <- list(multistart = 8, heavyReturn = TRUE, initDesign = init_des, fullDesignSize = 100)
  }
  # Fill in plotting defaults
  plot_options <- setPlotOptions(plot_options = plot_options, d = d, num_T = num_T, kmModel = object$kmModel)
  # Default posterior-simulation options (pilot-point selection + sims)
  if (is.null(options_sims)) {
    options_sims <- list(algorithm = "B", lower = rep(0, d), upper = rep(1, d),
                         batchsize = 120, optimcontrol = list(method = "genoud", pop.size = 100, print.level = 0),
                         integcontrol = list(distrib = "sobol", n.points = 1000),
                         nsim = 300)
    options_sims$integration.param = integration_design(options_sims$integcontrol, d, options_sims$lower, options_sims$upper, object$kmModel, threshold)
    options_sims$integration.param$alpha <- 0.5
  }
  # If no profile-extrema-on-the-mean result is supplied, also track the
  # median (0.5 quantile) of the simulated profiles; otherwise precompute
  # the change points of the mean profiles for later plotting.
  if (is.null(allResMean)) {
    quantiles_uq <- c(quantiles_uq, 0.5)
  } else {
    changePP <- getChangePoints(threshold = threshold, allRes = allResMean)
  }
  ##### Pilot points ----
  # Select the pilot points unless the partial-results object has them.
  if (is.null(object$sPts)) {
    timeIn <- get_nanotime()
    object$sPts <- optim_dist_measure(model = object$kmModel, threshold = threshold[1],
                                      lower = options_sims$lower, upper = options_sims$upper, batchsize = options_sims$batchsize,
                                      algorithm = options_sims$algorithm, verb = 1, optimcontrol = options_sims$optimcontrol, integration.param = options_sims$integration.param)
    timeMdist <- (get_nanotime() - timeIn) * 1e-9  # seconds
  } else {
    # Pilot points were supplied: make sure the timing slot exists
    # (NA marks "not measured in this call").
    if (is.null(object$more$times$tSpts)) {
      times <- list(tSpts = NA)
      if (is.null(object$more)) {
        object$more <- list(times = times)
      } else {
        object$more$times = times
      }
    }
  }
  # Diagnostic plot of the optimized pilot-point criterion values
  if (plot_level > 0) {
    if (plot_options$save)
      cairo_pdf(filename = paste(plot_options$folderPlots, "sPtsCritVal", plot_options$id_save, ".pdf", sep = ""), width = 12, height = 12)
    plot(object$sPts$value, type = 'o', main = "Optimized criterion", ylab = "y", xlab = "Iteration")
    if (plot_options$save)
      dev.off()
  }
  ### Posterior field reconstructed from the pilot points ----
  nugget.sim = 1e-6
  simu_points <- object$sPts$par
  # Simulate the posterior field at the pilot points (reused if present)
  if (is.null(object$more$simuls)) {
    some.simu <- simulate(object = object$kmModel, nsim = options_sims$nsim, newdata = simu_points, nugget.sim = nugget.sim,
                          cond = TRUE, checkNames = FALSE)
  } else {
    some.simu <- object$more$simuls
  }
  # Approximate posterior realization at x: kriging mean plus the kriging
  # weights applied to the simulated pilot-point values.
  g_uq <- function(x, realization, kmModel, simupoints, F.mat = NULL, T.mat = NULL) {
    x <- matrix(x, ncol = kmModel@d)
    colnames(x) <- colnames(kmModel@X)
    obj <- krig_weight_GPsimu(object = kmModel, simu_points = simupoints, krig_points = x, T.mat = T.mat, F.mat = F.mat)
    krig.mean.init <- matrix(obj$krig.mean.init, ncol = 1)
    weights <- t(obj$Lambda.end)
    return(krig.mean.init + tcrossprod(weights, matrix(realization, nrow = 1)))
  }
  # Gradient of the approximate realization (same weighting scheme)
  g_uq_deriv <- function(x, realization, kmModel, simupoints, T.mat = NULL, F.mat = NULL) {
    x <- matrix(x, ncol = kmModel@d)
    colnames(x) <- colnames(kmModel@X)
    obj_deriv <- grad_kweights(object = kmModel, simu_points = simupoints, krig_points = matrix(x, ncol = kmModel@d), T.mat = T.mat, F.mat = F.mat)
    krig_mean_init <- matrix(obj_deriv$krig.mean.init, ncol = kmModel@d)
    weights <- t(obj_deriv$Lambda.end)
    return(krig_mean_init + tcrossprod(matrix(realization, nrow = 1), weights))
  }
  # One-time matrices shared by all realizations
  F.mat <- model.matrix(object = object$kmModel@trend.formula, data = data.frame(rbind(object$kmModel@X, simu_points)))
  K <- covMatrix(object = object$kmModel@covariance, X = rbind(object$kmModel@X, simu_points))$C
  T.mat <- chol(K)
  ### Profile extrema per realization ----
  # Allocate result arrays (d x fullDesignSize x nsim) unless the object
  # already carries (possibly partial) results; NA marks "not computed yet".
  if (is.null(object$profSups) || is.null(object$profInfs)) {
    object$profSups <- array(NA, dim = c(d, options_approx$fullDesignSize, options_sims$nsim))
    object$profInfs <- array(NA, dim = c(d, options_approx$fullDesignSize, options_sims$nsim))
    tApprox1ord <- rep(NA, options_sims$nsim)
  }
  if (!is.null(options_full_sims) && is.null(object$profSups_full)) {
    object$profSups_full <- array(NA, dim = c(d, options_approx$fullDesignSize, options_sims$nsim))
    object$profInfs_full <- array(NA, dim = c(d, options_approx$fullDesignSize, options_sims$nsim))
    tFull <- rep(NA, options_sims$nsim)
  }
  for (i in seq(options_sims$nsim)) {
    # Field and gradient for realization i
    g_uq_spec <- function(x) {
      return(g_uq(x = x, realization = some.simu[i, ], kmModel = object$kmModel, simupoints = simu_points, F.mat = F.mat, T.mat = T.mat))
    }
    g_uq_der_spec <- function(x) {
      return(g_uq_deriv(x = x, realization = some.simu[i, ], kmModel = object$kmModel, simupoints = simu_points, T.mat = T.mat, F.mat = F.mat))
    }
    # Full optimization (expensive), only for realizations not yet done
    if (!is.null(options_full_sims) && is.na(object$profSups_full[1, 1, i])) {
      if (i %% 10 == 0) {
        cat("Full_sims.Realization ", i, "\n")
      }
      timeIn <- get_nanotime()
      temp_full <- getAllMaxMin(f = g_uq_spec, fprime = g_uq_der_spec, d = d, options = options_full_sims)
      tFull[i] <- (get_nanotime() - timeIn) * 1e-9
      object$profSups_full[, , i] <- t(temp_full$res$max)
      object$profInfs_full[, , i] <- t(temp_full$res$min)
    }
    # First-order approximation of the profile extrema
    if (is.na(object$profSups[1, 1, i]) || is.na(object$profInfs[1, 1, i])) {
      if (i %% 10 == 0) {
        cat("Approx_sims. Realization ", i, "\n")
      }
      timeIn <- get_nanotime()
      temp_1o <- approxMaxMin(f = g_uq_spec, fprime = g_uq_der_spec, d = d, opts = options_approx)
      tApprox1ord[i] <- (get_nanotime() - timeIn) * 1e-9
      object$profSups[, , i] <- t(temp_1o$res$max)
      object$profInfs[, , i] <- t(temp_1o$res$min)
    }
  }
  # Pointwise quantiles of the approximated profile extrema ----
  object$prof_quantiles_approx <- list()
  for (i in seq(length(quantiles_uq))) {
    object$prof_quantiles_approx[[i]] <- list(res = list(min = matrix(NA, nrow = options_approx$fullDesignSize, ncol = d),
                                                         max = matrix(NA, nrow = options_approx$fullDesignSize, ncol = d)))
  }
  names(object$prof_quantiles_approx) <- quantiles_uq
  ccPP <- list()
  for (j in seq(length(quantiles_uq))) {
    for (coord in seq(d)) {
      object$prof_quantiles_approx[[j]]$res$max[, coord] <- apply(object$profSups[coord, , ], 1, function(x) { return(quantile(x, quantiles_uq[j])) })
      object$prof_quantiles_approx[[j]]$res$min[, coord] <- apply(object$profInfs[coord, , ], 1, function(x) { return(quantile(x, quantiles_uq[j])) })
    }
    ccPP[[j]] <- getChangePoints(threshold = threshold, allRes = object$prof_quantiles_approx[[j]])
  }
  names(ccPP) <- quantiles_uq
  # Pointwise quantiles of the fully optimized profile extrema
  if (!is.null(options_full_sims)) {
    object$prof_quantiles_full <- list()
    for (i in seq(length(quantiles_uq))) {
      object$prof_quantiles_full[[i]] <- list(res = list(min = matrix(NA, nrow = options_approx$fullDesignSize, ncol = d),
                                                         max = matrix(NA, nrow = options_approx$fullDesignSize, ncol = d)))
    }
    names(object$prof_quantiles_full) <- quantiles_uq
    ccPP_full <- list()
    for (j in seq(length(quantiles_uq))) {
      for (coord in seq(d)) {
        object$prof_quantiles_full[[j]]$res$max[, coord] <- apply(object$profSups_full[coord, , ], 1, function(x) { return(quantile(x, quantiles_uq[j])) })
        object$prof_quantiles_full[[j]]$res$min[, coord] <- apply(object$profInfs_full[coord, , ], 1, function(x) { return(quantile(x, quantiles_uq[j])) })
      }
      ccPP_full[[j]] <- getChangePoints(threshold = threshold, allRes = object$prof_quantiles_full[[j]])
    }
    names(ccPP_full) <- quantiles_uq
  }
  ## Plot profiles with uncertainty ----
  if (is.null(allResMean))
    changePP <- ccPP$`0.5`
  # d == 2 only: posterior mean image plus the excursion / non-excursion
  # regions implied by the profile-extrema change points.
  if (plot_level >= 2 && d == 2) {
    newdata <- expand.grid(seq(0, 1, , 100), seq(0, 1, , 100))
    colnames(newdata) <- colnames(object$kmModel@X)
    pred2d <- predict.km(object$kmModel, newdata = newdata, type = "UK", light.return = TRUE, se.compute = FALSE)
    if (plot_options$save)
      cairo_pdf(filename = paste(plot_options$folderPlots, "profMean_UQ", plot_options$id_save, ".pdf", sep = ""), width = 12, height = 12)
    par(mar = c(5, 5, 4, 2) + 0.1)
    image(matrix(pred2d$mean, nrow = 100), col = gray.colors(20), main = plot_options$title2d, xlab = "", ylab = "",
          cex.main = 3, cex.axis = 1.8, cex.lab = 2.8)
    contour(matrix(pred2d$mean, nrow = 100), add = TRUE, nlevels = 10, lwd = 1.5, labcex = 1.2)
    contour(matrix(pred2d$mean, nrow = 100), add = TRUE, levels = threshold, col = plot_options$col_thresh, lwd = 3, labcex = 1.5)
    # Change points of the mean (or median) profiles: solid lines
    for (tt in seq(num_T)) {
      abline(v = changePP$neverEx[[tt]][[1]], col = plot_options$col_CCPthresh_nev[tt], lwd = 2.5)
      abline(h = changePP$neverEx[[tt]][[2]], col = plot_options$col_CCPthresh_nev[tt], lwd = 2.5)
      abline(v = changePP$alwaysEx[[tt]][[1]], col = plot_options$col_CCPthresh_alw[tt], lwd = 2.5)
      abline(h = changePP$alwaysEx[[tt]][[2]], col = plot_options$col_CCPthresh_alw[tt], lwd = 2.5)
    }
    # Change points of the quantile profiles: dashed lines (full optim
    # results preferred when available)
    if (!is.null(options_full_sims)) {
      for (j in seq(length(quantiles_uq))) {
        for (tt in seq(num_T)) {
          abline(v = ccPP_full[[j]]$neverEx[[tt]][[1]], col = plot_options$col_CCPthresh_nev[tt], lwd = 2, lty = 2)
          abline(h = ccPP_full[[j]]$neverEx[[tt]][[2]], col = plot_options$col_CCPthresh_nev[tt], lwd = 2, lty = 2)
          abline(v = ccPP_full[[j]]$alwaysEx[[tt]][[1]], col = plot_options$col_CCPthresh_alw[tt], lwd = 2, lty = 2)
          abline(h = ccPP_full[[j]]$alwaysEx[[tt]][[2]], col = plot_options$col_CCPthresh_alw[tt], lwd = 2, lty = 2)
        }
      }
    } else {
      for (j in seq(length(quantiles_uq))) {
        for (tt in seq(num_T)) {
          abline(v = ccPP[[j]]$neverEx[[tt]][[1]], col = plot_options$col_CCPthresh_nev[tt], lwd = 2, lty = 2)
          abline(h = ccPP[[j]]$neverEx[[tt]][[2]], col = plot_options$col_CCPthresh_nev[tt], lwd = 2, lty = 2)
          abline(v = ccPP[[j]]$alwaysEx[[tt]][[1]], col = plot_options$col_CCPthresh_alw[tt], lwd = 2, lty = 2)
          abline(h = ccPP[[j]]$alwaysEx[[tt]][[2]], col = plot_options$col_CCPthresh_alw[tt], lwd = 2, lty = 2)
        }
      }
    }
    if (plot_options$fun_evals > 0) {
      points(object$kmModel@X, pch = 17, cex = 1.6)
    }
    if (plot_options$save)
      dev.off()
  }
  # Univariate profile plots with uncertainty bands
  if (plot_level >= 1) {
    object$bound$bound <- NULL
    plot_univariate_profiles_UQ(objectUQ = object, plot_options = plot_options, nsims = options_sims$nsim, quantiles_uq = quantiles_uq,
                                threshold = threshold, nameFile = "prof_UQ_approx", profMean = allResMean, typeProf = "approx")
    if (!is.null(options_full_sims))
      plot_univariate_profiles_UQ(objectUQ = object, plot_options = plot_options, nsims = options_sims$nsim, quantiles_uq = quantiles_uq,
                                  threshold = threshold, nameFile = "prof_UQ_full", profMean = allResMean, typeProf = "full")
  }
  # Optional bound correction ----
  if (!is.null(options_bound)) {
    object$bound <- bound_profiles(objectUQ = object, mean_var_delta = object$bound$mean_var_D, beta = options_bound$beta, alpha = options_bound$alpha,
                                   options_approx = options_approx, options_full_sims = options_full_sims)
    if (plot_level >= 1) {
      plot_univariate_profiles_UQ(objectUQ = object, plot_options = plot_options, nsims = options_sims$nsim, quantiles_uq = quantiles_uq,
                                  threshold = threshold, nameFile = "prof_UQ_bound_approx", profMean = allResMean, typeProf = "approx")
      if (!is.null(options_full_sims))
        plot_univariate_profiles_UQ(objectUQ = object, plot_options = plot_options, nsims = options_sims$nsim, quantiles_uq = quantiles_uq,
                                    threshold = threshold, nameFile = "prof_UQ_bound_full", profMean = allResMean, typeProf = "full")
    }
  }
  # Assemble return value
  if (return_level == 1) {
    return(object)
  } else {
    # NOTE(review): timeMdist / tApprox1ord (and tFull) exist only when the
    # corresponding computations ran in this call; object$more is NULL only
    # in exactly that situation, so this branch is safe — confirm if the
    # partial-results contract ever changes.
    if (is.null(object$more)) {
      times <- list(tSpts = timeMdist, tApprox1ord = tApprox1ord)
      if (!is.null(options_full_sims)) {
        times$tFull <- tFull
      }
      object$more <- list(simuls = some.simu, times = times)
    }
    return(object)
  }
}
| /R/coordProf_UQ.R | no_license | cran/profExtrema | R | false | false | 19,626 | r | # coordProf_UQ function
#' @name coordProf_UQ
#' @author Dario Azzimonti
#' @title Coordinate profiles UQ from a kriging model
#' @description The function coordProf_UQ computes the profile extrema functions for posterior realizations of a Gaussian process and its confidence bounds
#' @param object either a \link[DiceKriging]{km} model or a list containing partial results. If \code{object} is a km model then all computations are carried out. If \code{object} is a list, then the function carries out all computations to complete the results list.
#' @param threshold the threshold of interest
#' @param allResMean a list resulting from \code{getAllMaxMin} or \code{approxMaxMin} for the profile extrema on the mean. If NULL the median from the observations is plotted
#' @param quantiles_uq a vector containing the quantiles to be computed
#' @param options_approx an optional list of options for approxMaxMin, see \link{approxMaxMin} for details.
#' @param options_full_sims an optional list of options for getAllMaxMin, see \link{getAllMaxMin} for details. If NULL the full computations are not executed. NOTE: these computations might be very expensive!
#' @param options_sims an optional list of options for the posterior simulations.
#' \itemize{
#' \item{\code{algorithm:}} string choice of the algorithm to select the pilot points ("A" or "B", default "B");
#' \item{\code{lower:}} \eqn{d} dimensional vector with lower bounds for pilot points, default \code{rep(0,d)};
#' \item{\code{upper:}} \eqn{d} dimensional vector with upper bounds for pilot points, default \code{rep(1,d)};
#' \item{\code{batchsize:}} number of pilot points, default \code{120};
#' \item{\code{optimcontrol:}} list containing the options for optimization, see \link[pGPx]{optim_dist_measure};
#' \item{\code{integcontrol:}} list containing the options for numerical integration of the criterion, see \link[pGPx]{optim_dist_measure};
#' \item{\code{integration.param:}} list containing the integration design, obtained with the function \link[KrigInv]{integration_design};
#' \item{\code{nsim:}} number of approximate GP simulations, default \code{300}.
#' }
#' @param options_bound an optional list containing \code{beta} the confidence level for the approximation and \code{alpha} the confidence level for the bound. Note that \code{alpha > 2*beta}. If \code{NULL}, the bound is not computed.
#' @param plot_level an integer to select the plots to return (0=no plots, 1=basic plots, 2= all plots)
#' @param plot_options an optional list of parameters for plots. See \link{setPlotOptions} for currently available options.
#' @param return_level an integer to select the amount of details returned
#' @return If return_level=1 a list containing \itemize{
#' \item{\code{profSups:}}{an array \code{dxfullDesignSizexnsims} containing the profile sup for each coordinate for each realization.}
#' \item{\code{profInfs:}}{an array \code{dxfullDesignSizexnsims} containing the profile inf for each coordinate for each realization.}
#' \item{\code{prof_quantiles_approx:}}{a list containing the quantiles (levels set by \code{quantiles_uq}) of the profile extrema functions.}
#' } if return_level=2 the same list as above but also including \code{more:} a list containing \itemize{
#' \item{\code{times:}}{a list containing
#' \itemize{
#' \item{\code{tSpts:} }{computational time for selecting pilot points.}
#' \item{\code{tApprox1ord:}}{vector containing the computational time required for profile extrema computation for each realization}
#' }}
#' \item{\code{simuls:}}{ a matrix containing the value of the field simulated at the pilot points}
#' \item{\code{sPts:}}{the pilot points}
#' }
#' @examples
#' if (!requireNamespace("DiceKriging", quietly = TRUE)) {
#' stop("DiceKriging needed for this example to work. Please install it.",
#' call. = FALSE)
#' }
#' # Compute a kriging model from 50 evaluations of the Branin function
#' # Define the function
#' g<-function(x){
#' return(-branin(x))
#' }
#' gp_des<-lhs::maximinLHS(20,2)
#' reals<-apply(gp_des,1,g)
#' kmModel<-km(design = gp_des,response = reals,covtype = "matern3_2")
#'
#' threshold=-10
#' d<-2
#'
#' # Compute coordinate profiles UQ starting from GP model
#' # define simulation options
#' options_sims<-list(algorithm="B", lower=rep(0,d), upper=rep(1,d),
#' batchsize=80, optimcontrol = list(method="genoud",pop.size=100,print.level=0),
#' integcontrol = list(distrib="sobol",n.points=1000), nsim=150)
#' # define 1 order approximation options
#' init_des<-lhs::maximinLHS(15,d)
#' options_approx<- list(multistart=4,heavyReturn=TRUE,
#' initDesign=init_des,fullDesignSize=100,
#' smoother="1order")
#' # define plot options
#' options_plots<-list(save=FALSE, titleProf = "Coordinate profiles",
#' title2d = "Posterior mean",qq_fill=TRUE)
#' \dontrun{
#' # profile UQ on approximate coordinate profiles
#' cProfiles_UQ<-coordProf_UQ(object = kmModel,threshold = threshold,allResMean = NULL,
#' quantiles_uq = c(0.05,0.95),options_approx = options_approx,
#' options_full_sims = NULL,options_sims = options_sims,
#' options_bound = NULL,plot_level = 3,
#' plot_options = options_plots,return_level = 3)
#' # profile UQ on full optim coordinate profiles
#' options_full_sims<-list(multistart=4,heavyReturn=TRUE)
#' cProfiles_UQ_full<-coordProf_UQ(object = cProfiles_UQ,threshold = threshold,allResMean = NULL,
#' quantiles_uq = c(0.05,0.95),options_approx = options_approx,
#' options_full_sims = options_full_sims,options_sims = options_sims,
#' options_bound = NULL,plot_level = 3,
#' plot_options = options_plots,return_level = 3)
#'
#' # profile UQ on full optim coordinate profiles with bound
#' cProfiles_UQ_full_bound<-coordProf_UQ(object = cProfiles_UQ_full,threshold = threshold,
#' allResMean = NULL, quantiles_uq = c(0.05,0.95),
#' options_approx = options_approx,
#' options_full_sims = options_full_sims,
#' options_sims = options_sims,
#' options_bound = list(beta=0.024,alpha=0.05),
#' plot_level = 3, plot_options = options_plots,
#' return_level = 3)
#' }
#' @export
coordProf_UQ = function(object,threshold,allResMean=NULL,quantiles_uq=c(0.05,0.95),options_approx=NULL,options_full_sims=NULL,options_sims=NULL,options_bound=NULL,plot_level=0,plot_options=NULL,return_level=1){
# Uncertainty quantification for coordinate profile extrema of a Gaussian
# process (km) model: selects pilot points, draws conditional posterior
# realizations there, computes profile sup/inf functions for each
# realization, and summarizes them as pointwise quantiles (optionally with
# a bound correction and diagnostic plots).
#
# object: a km model, or a list with component $kmModel and optional cached
#   results ($sPts pilot points, $profSups/$profInfs per-realization
#   profiles, $more simulations/timings, $bound).
# threshold: numeric vector of excursion thresholds.
# allResMean: precomputed profile extrema of the posterior mean; when NULL
#   the 0.5 quantile of the simulated profiles is used in its place.
# quantiles_uq: probabilities at which profile quantiles are computed.
# options_approx / options_full_sims / options_sims / options_bound:
#   control lists for the approximation step, the full optimization step,
#   the simulation step and the bound correction, respectively.
# plot_level / plot_options: how much to plot and with what settings.
# return_level: 1 returns only the enriched object; any other value also
#   attaches simulations and timings under object$more.
# number of thresholds
num_T<-length(threshold)
# Check object
if(is(object,"km")){
object<-list(kmModel=object)
}else if(!is.list(object)){
stop("object must be either a list or a km object")
}
# set up dimension
d<-object$kmModel@d
# Options setup
if(is.null(options_approx)){
# NOTE(review): the default initial design is hard-coded to 2 columns;
# this looks wrong when d != 2 -- confirm intended behavior.
init_des<-maximinLHS(10,2)
options_approx<- list(multistart=8,heavyReturn=TRUE,initDesign=init_des,fullDesignSize=100)
}
# Set up plot options
plot_options<-setPlotOptions(plot_options = plot_options,d=d,num_T=num_T,kmModel=object$kmModel)
# Set-up simulation options
if(is.null(options_sims)){
options_sims<-list(algorithm="B", lower=rep(0,d), upper=rep(1,d),
batchsize=120, optimcontrol = list(method="genoud",pop.size=100,print.level=0),
integcontrol = list(distrib="sobol",n.points=1000),
nsim=300)
options_sims$integration.param = integration_design(options_sims$integcontrol,d,options_sims$lower,options_sims$upper,object$kmModel,threshold)
options_sims$integration.param$alpha <- 0.5
}
# With no precomputed mean profile, also compute the median of the
# simulated profiles so it can stand in for the mean profile below.
if(is.null(allResMean)){
quantiles_uq<-c(quantiles_uq,0.5)
}else{
changePP<-getChangePoints(threshold = threshold,allRes = allResMean)
}
##### Get the pilot points
# If not already in object, obtain the pilot points
if(is.null(object$sPts)){
timeIn<-get_nanotime()
object$sPts<-optim_dist_measure(model = object$kmModel,threshold = threshold[1],
lower = options_sims$lower,upper = options_sims$upper,batchsize = options_sims$batchsize,
algorithm = options_sims$algorithm,verb=1,optimcontrol = options_sims$optimcontrol,integration.param = options_sims$integration.param)
timeMdist<-(get_nanotime()-timeIn)*1e-9
}else{
# Pilot points were supplied by the caller: ensure a timing slot exists.
if(is.null(object$more$times$tSpts)){
times<-list(tSpts=NA)
if(is.null(object$more)){
object$more<-list(times=times)
}else{
# NOTE(review): this replaces the whole object$more$times list, not just
# the tSpts entry -- any previously stored timings are discarded; confirm.
object$more$times=times
}
}
}
#cairo_pdf(paste(plot_options$folderPlots,"critVal_tt",index_exp,".pdf",sep=""),width = 14,height = 14)
if(plot_level>0){
# NOTE(review): oldpar is captured but never restored with par(oldpar);
# also par() with no arguments warns about read-only entries -- confirm.
oldpar<-par()
if(plot_options$save)
cairo_pdf(filename = paste(plot_options$folderPlots,"sPtsCritVal",plot_options$id_save,".pdf",sep=""),width = 12,height = 12)
plot(object$sPts$value,type='o',main="Optimized criterion",ylab="y",xlab="Iteration")
if(plot_options$save)
dev.off()
}
###
# Prepare the functions for UQ profiles
nugget.sim=1e-6
# NOTE(review): `type` is assigned here but never used below.
type="UK"
simu_points<-object$sPts$par
# Conditional posterior realizations at the pilot points (cached if present).
if(is.null(object$more$simuls)){
some.simu <- simulate(object=object$kmModel,nsim=options_sims$nsim,newdata=simu_points,nugget.sim=nugget.sim,
cond=TRUE,checkNames = FALSE)
}else{
some.simu<-object$more$simuls
}
# Interpolator: evaluates one posterior realization at arbitrary points x
# via kriging weights computed from the pilot-point simulations.
g_uq<-function(x,realization,kmModel,simupoints,F.mat=NULL,T.mat=NULL){
x<-matrix(x,ncol=kmModel@d)
colnames(x)<-colnames(kmModel@X)
obj <- krig_weight_GPsimu(object=kmModel,simu_points=simupoints,krig_points=x,T.mat = T.mat,F.mat = F.mat)
krig.mean.init <- matrix(obj$krig.mean.init,ncol=1)
weights <- t(obj$Lambda.end)
return(krig.mean.init + tcrossprod(weights,matrix(realization,nrow=1)))
}
# Gradient of g_uq with respect to x, for gradient-based profile optimization.
g_uq_deriv<-function(x,realization,kmModel,simupoints,T.mat=NULL,F.mat=NULL){
x<-matrix(x,ncol=kmModel@d)
colnames(x)<-colnames(kmModel@X)
obj_deriv<-grad_kweights(object = kmModel,simu_points = simupoints,krig_points = matrix(x,ncol=kmModel@d),T.mat = T.mat,F.mat = F.mat)
krig_mean_init <- matrix(obj_deriv$krig.mean.init,ncol=kmModel@d)
weights <- t(obj_deriv$Lambda.end)
return(krig_mean_init + tcrossprod(matrix(realization,nrow=1),weights))
}
# Useful one-time computations
F.mat <- model.matrix(object=object$kmModel@trend.formula, data = data.frame(rbind(object$kmModel@X,simu_points)))
K <- covMatrix(object=object$kmModel@covariance,X=rbind(object$kmModel@X,simu_points))$C
T.mat <- chol(K)
### Lets compute the profile extrema for this realization
# if the profSups and profInfs are not already there, compute them
if(is.null(object$profSups) || is.null(object$profInfs)){
# choose size of full design
object$profSups<-array(NA,dim = c(d,options_approx$fullDesignSize,options_sims$nsim))
object$profInfs<-array(NA,dim = c(d,options_approx$fullDesignSize,options_sims$nsim))
tApprox1ord<-rep(NA,options_sims$nsim)
}
if(!is.null(options_full_sims) && is.null(object$profSups_full)){
object$profSups_full<-array(NA,dim = c(d,options_approx$fullDesignSize,options_sims$nsim))
object$profInfs_full<-array(NA,dim = c(d,options_approx$fullDesignSize,options_sims$nsim))
tFull<-rep(NA,options_sims$nsim)
}
# Per-realization profile sup/inf: full optimization (when requested) and
# the first-order approximation; entries already cached (non-NA) are skipped.
for(i in seq(options_sims$nsim)){
g_uq_spec<-function(x){
return(g_uq(x=x,realization=some.simu[i,],kmModel = object$kmModel,simupoints = simu_points,F.mat = F.mat,T.mat = T.mat))
}
g_uq_der_spec<-function(x){
return(g_uq_deriv(x=x,realization=some.simu[i,],kmModel = object$kmModel,simupoints = simu_points,T.mat = T.mat,F.mat = F.mat))
}
if(!is.null(options_full_sims) && is.na(object$profSups_full[1,1,i])){
if(i%%10==0){
cat("Full_sims.Realization ",i,"\n")
}
timeIn<-get_nanotime()
temp_full<-getAllMaxMin(f = g_uq_spec,fprime = g_uq_der_spec,d = d,options = options_full_sims)
tFull[i]<-(get_nanotime()-timeIn)*1e-9
object$profSups_full[,,i]<-t(temp_full$res$max)
object$profInfs_full[,,i]<-t(temp_full$res$min)
}
if(is.na(object$profSups[1,1,i]) || is.na(object$profInfs[1,1,i])){
if(i%%10==0){
cat("Approx_sims. Realization ",i,"\n")
}
timeIn<-get_nanotime()
temp_1o<-approxMaxMin(f = g_uq_spec,fprime = g_uq_der_spec,d = d,opts = options_approx)
tApprox1ord[i]<-(get_nanotime()-timeIn)*1e-9
# temp<-getAllMaxMin(f=g_uq_spec,fprime = NULL,d=2,options = list(multistart=2,heavyReturn=TRUE))
object$profSups[,,i]<-t(temp_1o$res$max)
object$profInfs[,,i]<-t(temp_1o$res$min)
}
}
# save quantiles for approximations
object$prof_quantiles_approx<-list()
for(i in seq(length(quantiles_uq))){
object$prof_quantiles_approx[[i]]<-list(res=list(min=matrix(NA,nrow = options_approx$fullDesignSize,ncol = d),
max=matrix(NA,nrow = options_approx$fullDesignSize,ncol = d)))
}
names(object$prof_quantiles_approx)<-quantiles_uq
ccPP<-list()
# Pointwise quantiles of the simulated sup/inf profiles, per coordinate,
# plus the induced change points at each threshold.
for(j in seq(length(quantiles_uq))){
for(coord in seq(d)){
object$prof_quantiles_approx[[j]]$res$max[,coord]<-apply(object$profSups[coord,,],1,function(x){return(quantile(x,quantiles_uq[j]))})
object$prof_quantiles_approx[[j]]$res$min[,coord]<-apply(object$profInfs[coord,,],1,function(x){return(quantile(x,quantiles_uq[j]))})
}
ccPP[[j]]<-getChangePoints(threshold = threshold,allRes = object$prof_quantiles_approx[[j]])
}
names(ccPP)<-quantiles_uq
# save quantiles for full optim
if(!is.null(options_full_sims)){
object$prof_quantiles_full<-list()
for(i in seq(length(quantiles_uq))){
object$prof_quantiles_full[[i]]<-list(res=list(min=matrix(NA,nrow = options_approx$fullDesignSize,ncol = d),
max=matrix(NA,nrow = options_approx$fullDesignSize,ncol = d)))
}
names(object$prof_quantiles_full)<-quantiles_uq
ccPP_full<-list()
for(j in seq(length(quantiles_uq))){
for(coord in seq(d)){
object$prof_quantiles_full[[j]]$res$max[,coord]<-apply(object$profSups_full[coord,,],1,function(x){return(quantile(x,quantiles_uq[j]))})
object$prof_quantiles_full[[j]]$res$min[,coord]<-apply(object$profInfs_full[coord,,],1,function(x){return(quantile(x,quantiles_uq[j]))})
}
ccPP_full[[j]]<-getChangePoints(threshold = threshold,allRes = object$prof_quantiles_full[[j]])
}
names(ccPP_full)<-quantiles_uq
}
## Plot profiles with Uncertainty
# NOTE(review): `dd` is computed but never used below -- confirm.
dd<-seq(0,1,,length.out = options_approx$fullDesignSize)
if(is.null(allResMean))
changePP<-ccPP$`0.5`
# Plot the posterior mean and visualize the actual excursion set and the regions of no-excursion according to the profile extrema functions.
if(plot_level>=2 && d==2){
# since dimension==2 we can plot the posterior mean
newdata<-expand.grid(seq(0,1,,100),seq(0,1,,100))
colnames(newdata)<-colnames(object$kmModel@X)
pred2d<-predict.km(object$kmModel,newdata = newdata,type = "UK",light.return = TRUE,se.compute = FALSE)
if(plot_options$save)
cairo_pdf(filename = paste(plot_options$folderPlots,"profMean_UQ",plot_options$id_save,".pdf",sep=""),width = 12,height = 12)
par(mar = c(5, 5, 4, 2) + 0.1)
image(matrix(pred2d$mean,nrow = 100),col=gray.colors(20), main=plot_options$title2d,xlab = "", ylab = "", #colnames(object$kmModel@X)[1],ylab= colnames(object$kmModel@X)[2],
cex.main=3,cex.axis=1.8,cex.lab=2.8)
contour(matrix(pred2d$mean,nrow = 100),add=T,nlevels = 10,lwd=1.5,labcex=1.2)
contour(matrix(pred2d$mean,nrow = 100),add=T,levels = threshold,col=plot_options$col_thresh,lwd=3,labcex=1.5)
# Solid lines: change points from the mean (or median) profiles.
for(tt in seq(num_T)){
abline(v = changePP$neverEx[[tt]][[1]],col=plot_options$col_CCPthresh_nev[tt],lwd=2.5)
abline(h = changePP$neverEx[[tt]][[2]],col=plot_options$col_CCPthresh_nev[tt],lwd=2.5)
abline(v = changePP$alwaysEx[[tt]][[1]],col=plot_options$col_CCPthresh_alw[tt],lwd=2.5)
abline(h = changePP$alwaysEx[[tt]][[2]],col=plot_options$col_CCPthresh_alw[tt],lwd=2.5)
}
# Dashed lines: change points of the quantile profiles (full optimization
# results when available, otherwise the approximation).
if(!is.null(options_full_sims)){
for(j in seq(length(quantiles_uq))){
for(tt in seq(num_T)){
abline(v = ccPP_full[[j]]$neverEx[[tt]][[1]],col=plot_options$col_CCPthresh_nev[tt],lwd=2,lty=2)
abline(h = ccPP_full[[j]]$neverEx[[tt]][[2]],col=plot_options$col_CCPthresh_nev[tt],lwd=2,lty=2)
abline(v = ccPP_full[[j]]$alwaysEx[[tt]][[1]],col=plot_options$col_CCPthresh_alw[tt],lwd=2,lty=2)
abline(h = ccPP_full[[j]]$alwaysEx[[tt]][[2]],col=plot_options$col_CCPthresh_alw[tt],lwd=2,lty=2)
}
}
}else{
for(j in seq(length(quantiles_uq))){
for(tt in seq(num_T)){
abline(v = ccPP[[j]]$neverEx[[tt]][[1]],col=plot_options$col_CCPthresh_nev[tt],lwd=2,lty=2)
abline(h = ccPP[[j]]$neverEx[[tt]][[2]],col=plot_options$col_CCPthresh_nev[tt],lwd=2,lty=2)
abline(v = ccPP[[j]]$alwaysEx[[tt]][[1]],col=plot_options$col_CCPthresh_alw[tt],lwd=2,lty=2)
abline(h = ccPP[[j]]$alwaysEx[[tt]][[2]],col=plot_options$col_CCPthresh_alw[tt],lwd=2,lty=2)
}
}
}
if(plot_options$fun_evals>0){
points(object$kmModel@X,pch=17,cex=1.6)
}
if(plot_options$save)
dev.off()
}
if(plot_level>=1){
object$bound$bound <- NULL
plot_univariate_profiles_UQ(objectUQ = object, plot_options = plot_options,nsims = options_sims$nsim,quantiles_uq=quantiles_uq,
threshold = threshold,nameFile ="prof_UQ_approx", profMean = allResMean,typeProf = "approx")
if(!is.null(options_full_sims))
plot_univariate_profiles_UQ(objectUQ = object, plot_options = plot_options,nsims = options_sims$nsim,quantiles_uq=quantiles_uq,
threshold = threshold,nameFile ="prof_UQ_full", profMean = allResMean,typeProf = "full")
}
# object$profSups=profSups
# object$profInfs=profInfs
# object$prof_quantiles_approx=prof_quantiles_approx
# object$sPts=m_dist
# Compute the bound correction
if(!is.null(options_bound)){
object$bound<-bound_profiles(objectUQ = object,mean_var_delta = object$bound$mean_var_D,beta = options_bound$beta,alpha = options_bound$alpha,
options_approx = options_approx,options_full_sims = options_full_sims)
if(plot_level>=1){
plot_univariate_profiles_UQ(objectUQ = object, plot_options = plot_options,nsims = options_sims$nsim,quantiles_uq=quantiles_uq,
threshold = threshold,nameFile ="prof_UQ_bound_approx", profMean = allResMean,typeProf = "approx")
if(!is.null(options_full_sims))
plot_univariate_profiles_UQ(objectUQ = object, plot_options = plot_options,nsims = options_sims$nsim,quantiles_uq=quantiles_uq,
threshold = threshold,nameFile ="prof_UQ_bound_full", profMean = allResMean,typeProf = "full")
}
}
if(return_level==1){
return(object)
}else{
# Richer return: attach simulations and timings under object$more.
# NOTE(review): timeMdist/tApprox1ord are only defined on the code paths
# that recomputed them; some combinations of cached inputs could leave
# them undefined here -- confirm those paths are unreachable.
if(is.null(object$more)){
times<-list(tSpts=timeMdist,tApprox1ord=tApprox1ord)
if(!is.null(options_full_sims)){
times$tFull<-tFull
}
object$more<-list(simuls=some.simu,times=times)
}
return(object)
}
}
|
appendStateToDf <- function(df){
  # Tally geolocated tweets by US state.
  #
  # df: data frame with numeric `lat` and `lng` columns; rows with NA lat
  #     are dropped before reverse-geocoding.
  # Returns: data frame with columns `state` (lower-case state name, 51 rows
  #     including the District of Columbia) and `Freq` (tweet count for that
  #     state, 0 when no tweet fell inside it).
  #
  # NOTE(review): the original body ran source("find_append.R") -- the file
  # that defines this very function -- on every call; that self-source was
  # removed since latlong2state() is defined alongside this function.
  newFrame <- df[!is.na(df$lat), ]
  # Map each (lng, lat) point onto a state polygon; NA means outside all states.
  StatesList <- latlong2state(data.frame(x = c(newFrame$lng), y = c(newFrame$lat)))
  StatesList <- StatesList[!is.na(StatesList)]
  # Named counts of tweets per observed state.
  tweetFreq <- table(StatesList)
  listedStates <- c("alabama", "alaska", "arizona",
                    "arkansas", "california", "colorado",
                    "connecticut", "delaware", "district of columbia",
                    "florida", "georgia", "hawaii",
                    "idaho", "illinois", "indiana",
                    "iowa", "kansas", "kentucky",
                    "louisiana", "maine", "maryland",
                    "massachusetts", "michigan", "minnesota",
                    "mississippi", "missouri", "montana",
                    "nebraska", "nevada", "new hampshire",
                    "new jersey", "new mexico", "new york",
                    "north carolina", "north dakota", "ohio",
                    "oklahoma", "oregon", "pennsylvania",
                    "rhode island", "south carolina", "south dakota",
                    "tennessee", "texas", "utah",
                    "vermont", "virginia", "washington",
                    "west virginia", "wisconsin", "wyoming")
  Statesdf <- data.frame(state = listedStates, Freq = rep(0, length(listedStates)))
  # Assign counts by state NAME: the original positional assignment relied on
  # both vectors sharing the same alphabetical order and would silently
  # misalign counts if the orders ever differed.
  hit <- match(names(tweetFreq), Statesdf$state)
  Statesdf$Freq[hit[!is.na(hit)]] <- as.vector(tweetFreq)[!is.na(hit)]
  return(Statesdf)
}
latlong2state <- function(dataframe) {
  # Reverse-geocode points to US state names.
  #
  # dataframe: two columns interpreted as (x = longitude, y = latitude).
  # Returns a character vector of lower-case state names, NA for points
  # that fall outside every state polygon.
  # Requires the maps / sp / maptools functions map(), map2SpatialPolygons(),
  # SpatialPoints(), CRS() and over() to be available.
  state_map <- map('state', fill = TRUE, col = "transparent", plot = FALSE)
  # Polygon names look like "new york:manhattan"; keep only the state part.
  ids <- sapply(strsplit(state_map$names, ":"), function(parts) parts[1])
  state_polys <- map2SpatialPolygons(state_map, IDs = ids,
                                     proj4string = CRS("+proj=longlat +datum=WGS84"))
  # Points must carry the same CRS as the polygons for the overlay to work.
  pts <- SpatialPoints(dataframe, proj4string = CRS("+proj=longlat +datum=WGS84"))
  # For each point, the index of the containing state polygon (NA if none).
  poly_index <- over(pts, state_polys)
  all_ids <- sapply(state_polys@polygons, function(p) p@ID)
  all_ids[poly_index]
}
addColToData <- function(df1, df_data, variable2){
  # Copy the column named `variable2` from `df_data` onto `df1` and return
  # the augmented data frame. An existing column of the same name in `df1`
  # is overwritten; `df1` itself is not modified (copy-on-modify).
  df1[variable2] <- df_data[variable2]
  df1
}
| /02_Fall_2016/05_Twitter_Sentiment/find_append.R | no_license | ranjankislay/Final_Projects | R | false | false | 3,093 | r |
appendStateToDf <- function(df){
  # Tally geolocated tweets by US state.
  #
  # df: data frame with numeric `lat` and `lng` columns; rows with NA lat
  #     are dropped before reverse-geocoding.
  # Returns: data frame with columns `state` (lower-case state name, 51 rows
  #     including the District of Columbia) and `Freq` (tweet count for that
  #     state, 0 when no tweet fell inside it).
  #
  # NOTE(review): the original body ran source("find_append.R") -- the file
  # that defines this very function -- on every call; that self-source was
  # removed since latlong2state() is defined alongside this function.
  newFrame <- df[!is.na(df$lat), ]
  # Map each (lng, lat) point onto a state polygon; NA means outside all states.
  StatesList <- latlong2state(data.frame(x = c(newFrame$lng), y = c(newFrame$lat)))
  StatesList <- StatesList[!is.na(StatesList)]
  # Named counts of tweets per observed state.
  tweetFreq <- table(StatesList)
  listedStates <- c("alabama", "alaska", "arizona",
                    "arkansas", "california", "colorado",
                    "connecticut", "delaware", "district of columbia",
                    "florida", "georgia", "hawaii",
                    "idaho", "illinois", "indiana",
                    "iowa", "kansas", "kentucky",
                    "louisiana", "maine", "maryland",
                    "massachusetts", "michigan", "minnesota",
                    "mississippi", "missouri", "montana",
                    "nebraska", "nevada", "new hampshire",
                    "new jersey", "new mexico", "new york",
                    "north carolina", "north dakota", "ohio",
                    "oklahoma", "oregon", "pennsylvania",
                    "rhode island", "south carolina", "south dakota",
                    "tennessee", "texas", "utah",
                    "vermont", "virginia", "washington",
                    "west virginia", "wisconsin", "wyoming")
  Statesdf <- data.frame(state = listedStates, Freq = rep(0, length(listedStates)))
  # Assign counts by state NAME: the original positional assignment relied on
  # both vectors sharing the same alphabetical order and would silently
  # misalign counts if the orders ever differed.
  hit <- match(names(tweetFreq), Statesdf$state)
  Statesdf$Freq[hit[!is.na(hit)]] <- as.vector(tweetFreq)[!is.na(hit)]
  return(Statesdf)
}
latlong2state <- function(dataframe) {
  # Reverse-geocode points to US state names.
  #
  # dataframe: two columns interpreted as (x = longitude, y = latitude).
  # Returns a character vector of lower-case state names, NA for points
  # that fall outside every state polygon.
  # Requires the maps / sp / maptools functions map(), map2SpatialPolygons(),
  # SpatialPoints(), CRS() and over() to be available.
  state_map <- map('state', fill = TRUE, col = "transparent", plot = FALSE)
  # Polygon names look like "new york:manhattan"; keep only the state part.
  ids <- sapply(strsplit(state_map$names, ":"), function(parts) parts[1])
  state_polys <- map2SpatialPolygons(state_map, IDs = ids,
                                     proj4string = CRS("+proj=longlat +datum=WGS84"))
  # Points must carry the same CRS as the polygons for the overlay to work.
  pts <- SpatialPoints(dataframe, proj4string = CRS("+proj=longlat +datum=WGS84"))
  # For each point, the index of the containing state polygon (NA if none).
  poly_index <- over(pts, state_polys)
  all_ids <- sapply(state_polys@polygons, function(p) p@ID)
  all_ids[poly_index]
}
addColToData <- function(df1, df_data, variable2){
  # Copy the column named `variable2` from `df_data` onto `df1` and return
  # the augmented data frame. An existing column of the same name in `df1`
  # is overwritten; `df1` itself is not modified (copy-on-modify).
  df1[variable2] <- df_data[variable2]
  df1
}
|
# Stacked 100% bar chart of agreement with two Open Access (OA) survey items.
# Reads the coded survey export from GitHub, tallies the Likert responses for
# each item, and plots the two items side by side as proportion bars.
library(tidyverse)
library(dplyr)      # attached by tidyverse already; kept for compatibility
library(ggplot2)    # attached by tidyverse already; kept for compatibility
library(readr)      # attached by tidyverse already; kept for compatibility
library(gtable)     # NOTE(review): not used below -- confirm before removing
library(gridExtra)  # NOTE(review): not used below -- confirm before removing
coded_data <- read_csv("https://raw.githubusercontent.com/xchen101/HEPSurvey/master/Data/Base/Usable_QC_FA_coded.csv")
# Response counts for item OA1 ("find and read papers"), one row per Likert code.
OA1 <- coded_data %>%
group_by(`p2q2 [OA1]`) %>% # swap this column to reuse the tally for another item
summarize(count = n())
# Response counts for item OA2 ("submit papers").
OA2 <- coded_data %>%
group_by(`p2q2 [OA2]`) %>% # swap this column to reuse the tally for another item
summarize(count = n())
OA1$QN <- "Find and read papers"
OA2$QN <- "Submit papers"
OA <- bind_rows(OA1, OA2)
# After bind_rows each row carries exactly one non-NA code across the two item
# columns, so a row sum with na.rm collapses them into one Likert code (1-5).
# (Was `na.rm = T`: use TRUE, the shorthand T is reassignable.)
OA$bin <- rowSums(OA[, c("p2q2 [OA1]", "p2q2 [OA2]")], na.rm = TRUE)
OA$bin <- recode(OA$bin, "1" = "Strongly disagree", "2" = "Somewhat disagree", "3" = "Neutral", "4" = "Somewhat agree", "5" = "Strongly agree")
# 100% stacked bars, one bar per item, red-to-blue diverging palette.
ggplot(OA[order(OA$bin, decreasing = TRUE),], aes(fill = factor(bin, levels = c("Strongly disagree", "Somewhat disagree", "Neutral", "Somewhat agree", "Strongly agree")), y = count, x = QN)) +
geom_bar(stat = "identity", position = "fill", width = 0.5) +
# geom_text(aes(label = count, size = 2, position = stack(vjust = 0.5))) +
scale_fill_manual(values = c("#ca0020", "#f4a582", "#f7f7f7", "#92c5de", "#0571b0") )+
theme(legend.title=element_blank())+
labs(x = "OA Changed how I...", y = "Frequency")
| /R/OA.R | permissive | xchen101/HEPSurvey | R | false | false | 1,213 | r | library(tidyverse)
# Stacked 100% bar chart of agreement with two Open Access (OA) survey items.
# Reads the coded survey export from GitHub, tallies the Likert responses for
# each item, and plots the two items side by side as proportion bars.
library(dplyr)
library(ggplot2)
library(readr)
library(gtable)     # NOTE(review): not used below -- confirm before removing
library(gridExtra)  # NOTE(review): not used below -- confirm before removing
coded_data <- read_csv("https://raw.githubusercontent.com/xchen101/HEPSurvey/master/Data/Base/Usable_QC_FA_coded.csv")
# Response counts for item OA1 ("find and read papers"), one row per Likert code.
OA1 <- coded_data %>%
group_by(`p2q2 [OA1]`) %>% # swap this column to reuse the tally for another item
summarize(count = n())
# Response counts for item OA2 ("submit papers").
OA2 <- coded_data %>%
group_by(`p2q2 [OA2]`) %>% # swap this column to reuse the tally for another item
summarize(count = n())
OA1$QN <- "Find and read papers"
OA2$QN <- "Submit papers"
OA <- bind_rows(OA1, OA2)
# After bind_rows each row carries exactly one non-NA code across the two item
# columns, so a row sum with na.rm collapses them into one Likert code (1-5).
# (Was `na.rm = T`: use TRUE, the shorthand T is reassignable.)
OA$bin <- rowSums(OA[, c("p2q2 [OA1]", "p2q2 [OA2]")], na.rm = TRUE)
OA$bin <- recode(OA$bin, "1" = "Strongly disagree", "2" = "Somewhat disagree", "3" = "Neutral", "4" = "Somewhat agree", "5" = "Strongly agree")
# 100% stacked bars, one bar per item, red-to-blue diverging palette.
ggplot(OA[order(OA$bin, decreasing = TRUE),], aes(fill = factor(bin, levels = c("Strongly disagree", "Somewhat disagree", "Neutral", "Somewhat agree", "Strongly agree")), y = count, x = QN)) +
geom_bar(stat = "identity", position = "fill", width = 0.5) +
# geom_text(aes(label = count, size = 2, position = stack(vjust = 0.5))) +
scale_fill_manual(values = c("#ca0020", "#f4a582", "#f7f7f7", "#92c5de", "#0571b0") )+
theme(legend.title=element_blank())+
labs(x = "OA Changed how I...", y = "Frequency")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_cap_cost_tech}
\alias{data_cap_cost_tech}
\title{data_cap_cost_tech}
\format{
.csv
}
\source{
paste(rawDataFolder,"L2233.GlobalTechCapital_elecPassthru.csv", sep="")
}
\usage{
data_cap_cost_tech
}
\description{
data_cap_cost_tech
}
\examples{
\dontrun{
library(plutus);
plutus::data_cap_cost_tech
}
}
\keyword{datasets}
| /man/data_cap_cost_tech.Rd | permissive | JGCRI/plutus | R | false | true | 433 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_cap_cost_tech}
\alias{data_cap_cost_tech}
\title{data_cap_cost_tech}
\format{
.csv
}
\source{
paste(rawDataFolder,"L2233.GlobalTechCapital_elecPassthru.csv", sep="")
}
\usage{
data_cap_cost_tech
}
\description{
data_cap_cost_tech
}
\examples{
\dontrun{
library(plutus);
plutus::data_cap_cost_tech
}
}
\keyword{datasets}
|
# High Note freemium-to-premium analysis: descriptive statistics, propensity
# score matching on having >= 1 subscriber friend ("treatment"), and logistic
# regressions for adoption. Runs top to bottom; later steps depend on objects
# created earlier (hn, hn_2, hn_2_nomiss, dta_m).
# NOTE(review): install.packages() and setwd() inside an analysis script are
# anti-patterns (one-time setup / machine-specific path) -- consider removing.
install.packages("MatchIt")
library(MatchIt)
library(dplyr)
library(ggplot2)
setwd("/Users/Tatiksha/Documents/Customer Social Analytics/Midterm")
hn <- read.csv("HighNoteDataMidterm.csv")
#1. Summary Statistics
#Calculating difference-in-means for adopter and non-adopter samples
# Covariates compared between adopters (premium, adopter=1) and free users.
hn_cov <- c('age', 'male', 'friend_cnt', 'avg_friend_age', 'avg_friend_male', 'friend_country_cnt',
'subscriber_friend_cnt','songsListened', 'lovedTracks', 'posts','playlists',
'shouts', 'tenure','good_country')
# Group means per adopter status.
# NOTE(review): funs() is soft-deprecated (dplyr >= 0.8); list(~ ...) is the
# replacement. Also prefer TRUE over the reassignable shorthand T.
hn %>%
group_by(adopter) %>%
select(one_of(hn_cov)) %>%
summarise_all(funs(mean(., na.rm = T)))
#Conduct t-tests to see if the means are statistically distinguishable
lapply(hn_cov, function(v) {
t.test(hn[, v] ~ hn[, 'adopter'])
})
#looking at the t-test results we can see that age, male, avg_friend_age, avg_friend_age_male,
#and tenure had similar/close means while others had either relatively large or vast differences in the mean.
#Even though subscriber friend count had a mean difference of ~4
#(which was higher than some other variables, we'll still use it for our further analysis
#since it did show some promising insights from the EDA shown in python based on correlation
#and it's relationship with free users (as well as premium users)
#3.Propensity score matching (PSM)
#First we'll run a logit model. Outcome variable is a binary variable that indicates if users became a premium user (adopter =1) or stayed a free user (adopter=0)
#using only some variables [I ran logistic regression in #4 to determine significant variables before doing #3]
# NOTE(review): this some_log fit is overwritten by the later some_log in
# section 4 and is otherwise never used.
some_log <- glm(adopter ~ age + male + friend_cnt +avg_friend_age +friend_country_cnt +
subscriber_friend_cnt + songsListened + lovedTracks + playlists
+ tenure + good_country,
family = binomial(), data = hn)
#creating treatment group where subscriber_friend_cnt is >=1 or 0
hn_2 <- mutate(hn, treatment=ifelse(hn$subscriber_friend_cnt >=1,1,0))
hn_2 %>%
group_by(adopter)%>% summarise(mean_treatment = mean(treatment),users=n())
with(hn_2, t.test(treatment ~adopter))
# Covariates for the propensity model (note subscriber_friend_cnt itself is
# included here even though it defines the treatment).
hn_cov1 <- c('age', 'male', 'friend_cnt', 'avg_friend_age', 'friend_country_cnt',
'subscriber_friend_cnt','songsListened', 'lovedTracks', 'playlists',
'tenure','good_country')
hn_2 %>%
group_by(treatment) %>%
select(one_of(hn_cov1)) %>%
summarise_all(funs(mean(., na.rm = T)))
lapply(hn_cov1, function(v) {
t.test(hn_2[, v] ~ hn_2$treatment)
})
# Propensity model: P(treatment | covariates).
# NOTE(review): no family is given, so glm() fits a gaussian (linear) model,
# not the logit usually used for propensity scores -- confirm whether
# family = binomial() was intended.
M_PS <- glm(treatment ~age + male + friend_cnt +avg_friend_age +friend_country_cnt +
songsListened + lovedTracks + playlists
+ tenure + good_country, data= hn_2)
prs_df <- data.frame(pr_score = predict(M_PS, type = "response"),
treatment = M_PS$model$treatment)
head(prs_df)
#plotting a histogram
# NOTE(review): `labs` shadows ggplot2::labs() for the rest of the session.
labs <- paste("Number of Friends:", c("Zero", "More than 1"))
prs_df %>%
mutate(treatment = ifelse(treatment == 0, labs[1], labs[2])) %>%
ggplot(aes(x = pr_score)) +
geom_histogram(color = "white") +
facet_wrap(~treatment) +
xlab("Subscriber friends affecting probabilty of becoming adopter") +
theme_bw()
#executing a matching algorithm
hn_2_nomiss <- hn_2 %>% # MatchIt does not allow missing values
select(adopter, treatment, one_of(hn_cov1)) %>%
na.omit()
#logging variables since they weren't normally distributed (after looking at histograms in Python) + logging all to ensure consistency
# NOTE(review): hn_2_nomiss was snapshotted BEFORE these log transforms, so
# matchit() below runs on the raw scale while the glm() fits below use the
# logged hn_2 -- confirm this mix is intended.
hn_2$age <- log(hn$age+1)
hn_2$male <- log(hn$male+1)
hn_2$friend_cnt <- log(hn$friend_cnt+1)
hn_2$avg_friend_age <- log(hn$avg_friend_age+1)
hn_2$friend_country_cnt <- log(hn$friend_country_cnt+1)
hn_2$songsListened <- log(hn$songsListened+1)
hn_2$lovedTracks <- log(hn$lovedTracks+1)
hn_2$playlists <- log(hn$playlists+1)
hn_2$tenure <- log(hn$tenure+1)
hn_2$good_country <- log(hn$good_country+1)
# Nearest-neighbor matching on the propensity of having subscriber friends.
# NOTE(review): "avg_friend_age+ +" contains a stray unary plus -- harmless
# syntactically, but likely a typo.
MPS_Match <- matchit(treatment ~ age + male + friend_cnt + avg_friend_age+ + friend_country_cnt +
songsListened + lovedTracks + playlists + tenure + good_country, data= hn_2_nomiss, method='nearest')
summary(MPS_Match)
plot(MPS_Match)
#creating a dataframe containing only the matched observations
dta_m <- match.data(MPS_Match)
dim(dta_m)
#The final dataset is smaller than the original: it contains 19,646 observations, meaning that 9823 pairs of treated and control observations were matched
#Also note that the final dataset contains a variable called distance, which is the propensity score (mean diff = 0.35)
# 4. Regression Analysis
#logit model for all variables
all_log <- glm(adopter ~ age + male + friend_cnt +avg_friend_age+avg_friend_male+friend_country_cnt +
treatment + songsListened + lovedTracks + posts + playlists +
shouts + tenure + good_country,
family = binomial(), data = hn_2)
summary(all_log)
#Out of all the variables, friend_cnt, avg_friend_male, posts, shouts were not statistically significant
#building a logistic regression model using only statistically significant results
some_log <- glm(adopter ~ age + male +avg_friend_age +friend_country_cnt +
treatment + songsListened + lovedTracks + playlists
+ tenure + good_country,
family = binomial(), data = hn_2)
summary(some_log)
#Interpreting the results (for some_log):
#For every one unit change in age, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.93
#For every one unit change in male, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.54
#For every one unit change in avg_friend_age, the log odds of adopter=1 (versus adopter=0 'free user') increases 0.83
#For every one unit change in friend_country_cnt, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.03
#For every one unit change in treatment (subscriber_friend_cnt >=1), the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.63
#For every one unit change in songsListened, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.21
#For every one unit change in lovedtracks, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.29
#For every one unit change in playlists, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.16
#For every one unit change in tenure, the log odds of adopter=1 (versus adopter=0 'free user') decreases by -0.32
#For every one unit change in good_country, the log odds of adopter=1 (versus adopter=0 'free user') decreases by -0.64
#Calculating odds-ratio for select variables:
exp(coef(some_log))
#Interpreting the results (for exp some_log):
#For a one unit increase in age, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 2.53e+00
#For a one unit increase in male, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.73.e+00
#For a one unit increase in avg_friend_age, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 2.30e+00
#For a one unit increase in friend_country_cnt, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.03e+00
#For a one unit increase in treatment, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.89e+00
#For a one unit increase in songsListened, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.24e+00
#For a one unit increase in lovedTracks, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.345e+00
#For a one unit increase in playlists, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.18e+00
#For a one unit increase in tenure, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 7.26e-01
#For a one unit increase in good_country, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 5.26e-01
# Difference of means
dta_m %>%
group_by(adopter) %>%
select(one_of(hn_cov1)) %>%
summarise_all(funs(mean))
lapply(hn_cov1, function(v) {
t.test(dta_m[, v] ~ dta_m$adopter)
})
#after reviewing the difference in means for the covariates in the model,
#we can see that age, friend_country_cnt, treatment, songsListened, tenure had means that were similar in group 0 and 1 | /Midterm.R | no_license | ttsingh95/SocialAnalytics | R | false | false | 8,688 | r | install.packages("MatchIt")
library(MatchIt)
library(dplyr)
library(ggplot2)
setwd("/Users/Tatiksha/Documents/Customer Social Analytics/Midterm")
hn <- read.csv("HighNoteDataMidterm.csv")
#1. Summary Statistics
#Calculating difference-in-means for adopter and non-adopter samples
hn_cov <- c('age', 'male', 'friend_cnt', 'avg_friend_age', 'avg_friend_male', 'friend_country_cnt',
'subscriber_friend_cnt','songsListened', 'lovedTracks', 'posts','playlists',
'shouts', 'tenure','good_country')
hn %>%
group_by(adopter) %>%
select(one_of(hn_cov)) %>%
summarise_all(funs(mean(., na.rm = T)))
#Conduct t-tests to see if the means are statistically distinguishable
lapply(hn_cov, function(v) {
t.test(hn[, v] ~ hn[, 'adopter'])
})
#looking at the t-test results we can see that age, male, avg_friend_age, avg_friend_age_male,
#and tenure had similar/close means while others had either relatively large or vast differences in the mean.
#Even though subscriber friend count had a mean difference of ~4
#(which was higher than some other variables, we'll still use it for our further analysis
#since it did show some promising insights from the EDA shown in python based on correlation
#and it's relationship with free users (as well as premium users)
#3.Propensity score matching (PSM)
#First we'll run a logit model. Outcome variable is a binary variable that indicates if users became a premium user (adopter =1) or stayed a free user (adopter=0)
#using only some variables [I ran logistic regression in #4 to determine significant variables before doing #3]
some_log <- glm(adopter ~ age + male + friend_cnt +avg_friend_age +friend_country_cnt +
subscriber_friend_cnt + songsListened + lovedTracks + playlists
+ tenure + good_country,
family = binomial(), data = hn)
#creating treatment group where subscriber_friend_cnt is >=1 or 0
hn_2 <- mutate(hn, treatment=ifelse(hn$subscriber_friend_cnt >=1,1,0))
hn_2 %>%
group_by(adopter)%>% summarise(mean_treatment = mean(treatment),users=n())
with(hn_2, t.test(treatment ~adopter))
hn_cov1 <- c('age', 'male', 'friend_cnt', 'avg_friend_age', 'friend_country_cnt',
'subscriber_friend_cnt','songsListened', 'lovedTracks', 'playlists',
'tenure','good_country')
hn_2 %>%
group_by(treatment) %>%
select(one_of(hn_cov1)) %>%
summarise_all(funs(mean(., na.rm = T)))
lapply(hn_cov1, function(v) {
t.test(hn_2[, v] ~ hn_2$treatment)
})
M_PS <- glm(treatment ~age + male + friend_cnt +avg_friend_age +friend_country_cnt +
songsListened + lovedTracks + playlists
+ tenure + good_country, data= hn_2)
prs_df <- data.frame(pr_score = predict(M_PS, type = "response"),
treatment = M_PS$model$treatment)
head(prs_df)
#plotting a histogram
labs <- paste("Number of Friends:", c("Zero", "More than 1"))
prs_df %>%
mutate(treatment = ifelse(treatment == 0, labs[1], labs[2])) %>%
ggplot(aes(x = pr_score)) +
geom_histogram(color = "white") +
facet_wrap(~treatment) +
xlab("Subscriber friends affecting probabilty of becoming adopter") +
theme_bw()
#executing a matching algorithm
hn_2_nomiss <- hn_2 %>% # MatchIt does not allow missing values
select(adopter, treatment, one_of(hn_cov1)) %>%
na.omit()
#logging variables since they weren't normally distributed (after looking at histograms in Python) + logging all to ensure consistency
hn_2$age <- log(hn$age+1)
hn_2$male <- log(hn$male+1)
hn_2$friend_cnt <- log(hn$friend_cnt+1)
hn_2$avg_friend_age <- log(hn$avg_friend_age+1)
hn_2$friend_country_cnt <- log(hn$friend_country_cnt+1)
hn_2$songsListened <- log(hn$songsListened+1)
hn_2$lovedTracks <- log(hn$lovedTracks+1)
hn_2$playlists <- log(hn$playlists+1)
hn_2$tenure <- log(hn$tenure+1)
hn_2$good_country <- log(hn$good_country+1)
MPS_Match <- matchit(treatment ~ age + male + friend_cnt + avg_friend_age+ + friend_country_cnt +
songsListened + lovedTracks + playlists + tenure + good_country, data= hn_2_nomiss, method='nearest')
summary(MPS_Match)
plot(MPS_Match)
#creating a dataframe containing only the matched observations
dta_m <- match.data(MPS_Match)
dim(dta_m)
#The final dataset is smaller than the original: it contains 19,646 observations, meaning that 9823 pairs of treated and control observations were matched
#Also note that the final dataset contains a variable called distance, which is the propensity score (mean diff = 0.35)
# 4. Regression Analysis
#logit model for all variables
all_log <- glm(adopter ~ age + male + friend_cnt +avg_friend_age+avg_friend_male+friend_country_cnt +
treatment + songsListened + lovedTracks + posts + playlists +
shouts + tenure + good_country,
family = binomial(), data = hn_2)
summary(all_log)
#Out of all the variables, friend_cnt, avg_friend_male, posts, shouts were not statistically significant
#building a logistic regression model using only statistically significant results
some_log <- glm(adopter ~ age + male +avg_friend_age +friend_country_cnt +
treatment + songsListened + lovedTracks + playlists
+ tenure + good_country,
family = binomial(), data = hn_2)
summary(some_log)
#Interpreting the results (for some_log):
#For every one unit change in age, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.93
#For every one unit change in male, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.54
#For every one unit change in avg_friend_age, the log odds of adopter=1 (versus adopter=0 'free user') increases 0.83
#For every one unit change in friend_country_cnt, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.03
#For every one unit change in treatment (subscriber_friend_cnt >=1), the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.63
#For every one unit change in songsListened, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.21
#For every one unit change in lovedtracks, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.29
#For every one unit change in playlists, the log odds of adopter=1 (versus adopter=0 'free user') increases by 0.16
#For every one unit change in tenure, the log odds of adopter=1 (versus adopter=0 'free user') decreases by -0.32
#For every one unit change in good_country, the log odds of adopter=1 (versus adopter=0 'free user') decreases by -0.64
#Calculating odds-ratio for select variables:
exp(coef(some_log))
#Interpreting the results (for exp some_log):
#For a one unit increase in age, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 2.53e+00
#For a one unit increase in male, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.73.e+00
#For a one unit increase in avg_friend_age, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 2.30e+00
#For a one unit increase in friend_country_cnt, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.03e+00
#For a one unit increase in treatment, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.89e+00
#For a one unit increase in songsListened, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.24e+00
#For a one unit increase in lovedTracks, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.345e+00
#For a one unit increase in playlists, the odds of paying to become a premium user (versus not paying adopter=0) increase by a factor of 1.18e+00
#For a one unit increase in tenure, the odds of paying to become a premium user (versus not paying adopter=0) decrease by a factor of 7.26e-01
#For a one unit increase in good_country, the odds of paying to become a premium user (versus not paying adopter=0) decrease by a factor of 5.26e-01
# Difference of means
# Compare covariate means between adopters (1) and free users (0).
# funs() is deprecated in dplyr, so the function is passed directly to
# summarise_all(); one_of() is superseded by all_of().
dta_m %>%
  group_by(adopter) %>%
  select(all_of(hn_cov1)) %>%
  summarise_all(mean)
# Per-covariate two-sample t-tests of adopters vs free users.
# [[ ]] extraction works for both data.frames and tibbles
# (dta_m[, v] would return a one-column tibble and break t.test()).
lapply(hn_cov1, function(v) {
  t.test(dta_m[[v]] ~ dta_m$adopter)
})
#after reviewing the difference in means for the covariates in the model,
#we can see that age, friend_country_cnt, treatment, songsListened, tenure had means that were similar in group 0 and 1 |
# Text mining of Korean lyrics ("hiphop.txt") and tweets ("twitter.csv"):
# extract nouns with KoNLP, build word-frequency tables, and draw
# bar charts / word clouds of the most frequent words.
library(KoNLP)
library(dplyr)
library(stringr)
# read the lyrics, strip non-word characters, extract nouns
txt <- readLines("hiphop.txt")
head(txt)
txt <- str_replace_all(txt, "\\W", " ")
nouns <- extractNoun(txt)
cnt <- table(unlist(nouns))
# nouns is a list, so unlist it and tabulate the noun frequencies
df_cnt <- as.data.frame(cnt, stringsAsFactors=F)
# keep the words as character vectors (no factors)
colnames(df_cnt) <- c("word", "freq")
# rename the columns
head(df_cnt)
str(df_cnt)
# keep only words of two or more characters
df_word <- filter(df_cnt, nchar(word) >= 2)
df_word
# top 20 words by frequency
top_20 <- df_word %>%
  arrange(desc(freq)) %>%
  head(20)
top_20
library(wordcloud)
library(RColorBrewer)
pal <- brewer.pal(8, "Dark2")
# fix the RNG seed so the word-cloud layout is reproducible
set.seed(1234)
png("wordcloud_hiphop.png", width=600, height=500)
wordcloud(words=df_word$word,
          freq=df_word$freq,
          min.freq=2,
          max.words=200,
          random.order=F,
          rot.per=.1,
          scale=c(4,0.3),
          colors=pal)
dev.off()
## 10-2. Text mining of NIS (National Intelligence Service) tweets
twitter <- read.csv("twitter.csv",
                    header=T,
                    stringsAsFactors = F,
                    fileEncoding="UTF-8")
View(twitter)
# rename the Korean column names to English variable names
twitter <- rename(twitter,
                  no=번호,
                  id=계정이름,
                  date=작성일,
                  tw=내용)
head(twitter$tw)
# remove special characters
twitter$tw <- str_replace_all(twitter$tw, "\\W", " ")
head(twitter$tw)
# extract nouns from the tweets
nouns <- extractNoun(twitter$tw)
# flatten the extracted noun list into a character vector and tabulate
nouns
wordcount <- table(unlist(nouns))
head(wordcount)
# convert the frequency table to a data frame
df_word <- as.data.frame(wordcount, stringsAsFactors = F)
str(df_word)
# rename the columns
df_word <- rename(df_word,
                  word=Var1,
                  freq=Freq)
head(df_word)
# keep only words of two or more characters
df_word <- filter(df_word, nchar(word)>=2)
# take the top 20 by frequency
top20<- df_word %>%
  arrange(desc(freq)) %>%
  head(20)
top20
# bar chart of word frequencies
library(ggplot2)
# NOTE(review): this shadows base::order() for the rest of the session
order <- arrange(top20, freq)$word
png("plot1.png", width=600, height=500)
ggplot(data=top20, aes(x=word, y=freq))+
  ylim(0, 2500)+
  geom_col()+
  coord_flip()+
  scale_x_discrete(limit = order)+
  geom_text(aes(label=freq), hjust=-0.3)
dev.off()
# word cloud of the tweet nouns
pal <- brewer.pal(8,"Dark2")
set.seed(1234)
png("plot2.png", width=600, height=500)
wordcloud(words=df_word$word,
          freq=df_word$freq,
          min.freq=10,
          max.words=200,
          random.order=F,
          rot.per=.1,
          scale=c(6,0.2),
          colors=pal)
dev.off()
| /R/r4/r01.R | no_license | saltandlight/TIL | R | false | false | 2,591 | r | library(KoNLP)
library(dplyr)
library(stringr)
txt <- readLines("hiphop.txt")
head(txt)
txt <- str_replace_all(txt, "\\W", " ")
nouns <- extractNoun(txt)
cnt <- table(unlist(nouns))
# nouns는 list이므로 unlist해서 table로 바꿔줌
df_cnt <- as.data.frame(cnt, stringsAsFactors=F)
# vector로 바꿔서 받겠다.
colnames(df_cnt) <- c("word", "freq")
# column 이름을 바꿔주기
head(df_cnt)
str(df_cnt)
df_word <- filter(df_cnt, nchar(word) >= 2)
df_word
top_20 <- df_word %>%
arrange(desc(freq)) %>%
head(20)
top_20
library(wordcloud)
library(RColorBrewer)
pal <- brewer.pal(8, "Dark2")
set.seed(1234)
png("wordcloud_hiphop.png", width=600, height=500)
wordcloud(words=df_word$word,
freq=df_word$freq,
min.freq=2,
max.words=200,
random.order=F,
rot.per=.1,
scale=c(4,0.3),
colors=pal)
dev.off()
## 10-2. 국정원 트윗 텍스트 마이닝
twitter <- read.csv("twitter.csv",
header=T,
stringsAsFactors = F,
fileEncoding="UTF-8")
View(twitter)
# 변수명 수정
twitter <- rename(twitter,
no=번호,
id=계정이름,
date=작성일,
tw=내용)
head(twitter$tw)
# 특수문자 제거
twitter$tw <- str_replace_all(twitter$tw, "\\W", " ")
head(twitter$tw)
#트윗에서 명사 추출
nouns <- extractNoun(twitter$tw)
#추출한 명사 list를 문자열 벡터로 변환, 빈도표
nouns
wordcount <- table(unlist(nouns))
head(wordcount)
#데이터 프레임으로 변환
df_word <- as.data.frame(wordcount, stringsAsFactors = F)
str(df_word)
# 변수명 수정
df_word <- rename(df_word,
word=Var1,
freq=Freq)
head(df_word)
# 두 글자 이상 단어만 추출
df_word <- filter(df_word, nchar(word)>=2)
# 상위 20개 추출
top20<- df_word %>%
arrange(desc(freq)) %>%
head(20)
top20
# 단어 빈도 막대 그래프 만들기
library(ggplot2)
order <- arrange(top20, freq)$word
png("plot1.png", width=600, height=500)
ggplot(data=top20, aes(x=word, y=freq))+
ylim(0, 2500)+
geom_col()+
coord_flip()+
scale_x_discrete(limit = order)+
geom_text(aes(label=freq), hjust=-0.3)
dev.off()
pal <- brewer.pal(8,"Dark2")
set.seed(1234)
png("plot2.png", width=600, height=500)
wordcloud(words=df_word$word,
freq=df_word$freq,
min.freq=10,
max.words=200,
random.order=F,
rot.per=.1,
scale=c(6,0.2),
colors=pal)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/teamBowlingPerfDetails.R
\name{teamBowlingPerfDetails}
\alias{teamBowlingPerfDetails}
\title{get team bowling performance details}
\usage{
teamBowlingPerfDetails(match,theTeam,includeInfo=FALSE)
}
\arguments{
\item{match}{The data frame of all match}
\item{theTeam}{The team for which the performance is required}
\item{includeInfo}{If TRUE, details like venue, winner and result are included}
}
\value{
dataframe
The dataframe of bowling performance
}
\description{
This function computes the bowling performance details of a team in a match
}
\note{
Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
}
\examples{
\dontrun{
# Get the match details for England vs Pakistan
match <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
teamBowlingPerfDetails(match,"England",includeInfo=TRUE)
}
}
\author{
Tinniam V Ganesh
}
\references{
\url{http://cricsheet.org/}\cr
\url{https://gigadom.wordpress.com/}
}
\seealso{
\code{\link{teamBatsmenPartnershipAllOppnAllMatches}}\cr
\code{\link{teamBatsmenPartnershipAllOppnAllMatchesPlot}}\cr
\code{\link{teamBatsmenPartnershipOppnAllMatchesChart}}\cr
\code{\link{teamBowlersVsBatsmenAllOppnAllMatchesRept}}\cr
\code{\link{teamBowlersWicketRunsOppnAllMatches}}\cr
}
| /man/teamBowlingPerfDetails.Rd | no_license | bcdunbar/yorkr | R | false | true | 1,281 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/teamBowlingPerfDetails.R
\name{teamBowlingPerfDetails}
\alias{teamBowlingPerfDetails}
\title{get team bowling performance details}
\usage{
teamBowlingPerfDetails(match,theTeam,includeInfo=FALSE)
}
\arguments{
\item{match}{The data frame of all match}
\item{theTeam}{The team for which the performance is required}
\item{includeInfo}{If true details like venie,winner, result etc are included}
}
\value{
dataframe
The dataframe of bowling performance
}
\description{
This function computes performance of bowlers of a team a
}
\note{
Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
}
\examples{
\dontrun{
# Get all matches between India and Australia
match <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
teamBowlingPerf(match,"India",includeInfo=TRUE)
}
}
\author{
Tinniam V Ganesh
}
\references{
\url{http://cricsheet.org/}\cr
\url{https://gigadom.wordpress.com/}
}
\seealso{
\code{\link{teamBatsmenPartnershipAllOppnAllMatches}}\cr
\code{\link{teamBatsmenPartnershipAllOppnAllMatchesPlot}}\cr
\code{\link{teamBatsmenPartnershipOppnAllMatchesChart}}\cr
\code{\link{teamBowlersVsBatsmenAllOppnAllMatchesRept}}\cr
\code{\link{teamBowlersWicketRunsOppnAllMatches}}\cr
}
|
# Read the per-gene coverage table produced by the mapping step
# (no header; column V5 holds the coverage value for each BUSCO gene).
dat <- read.table("./mapped/BUSCO.cov", header=F);
#hist(dat$V5)
# Report the mean coverage across all entries (printed without a newline).
cat(mean(dat$V5))
| /genome_assembly/finalpolishing/avgcov.R | no_license | wangchengww/killigenomics | R | false | false | 83 | r | dat <- read.table("./mapped/BUSCO.cov", header=F);
#hist(dat$V5)
cat(mean(dat$V5))
|
## makeCacheMatrix / cacheSolve together implement a matrix object whose
## inverse is computed once and then served from a cache.
##
## makeCacheMatrix: wrap a matrix in a list of accessor closures that can
## also store (cache) its inverse. Storing a new matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                     # cached inverse (NULL = not computed yet)
  list(
    set = function(y) {           # replace the matrix, invalidate the cache
      x <<- y
      inv <<- NULL
    },
    get = function() x,           # return the stored matrix
    setInverse = function(value) inv <<- value,  # store a computed inverse
    getInverse = function() inv   # retrieve the cached inverse (or NULL)
  )
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it with solve() on the first call only and
## reusing the cached value on subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
| /cachematrix.R | no_license | JenHorng/ProgrammingAssignment2 | R | false | false | 920 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## The makeCacheMatrix function creates a matrix that can store the date of its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(inverse) m <<- inverse
getInverse <- function() m
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Write a short comment describing this function
## The cacheSolve function computes the inverse of the matrix of the makeCacheMatrix function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if (!is.null(m)) {
message("getting cached data")
return(m)
}
mat <- x$get()
m <- solve(mat, ...)
x$setInverse(m)
m
}
|
library("rstan")
stacking_opt_model <- stan_model("../stan/crps_test.stan")
stacking_weight = function(predict_sample, y, K, R, T, S,
                           lambda=NULL, gamma=NULL, dirichlet_alpha=1.001){
  ## Estimate stacking weights for K candidate models by optimizing the
  ## pre-compiled Stan program 'stacking_opt_model'.
  ##
  ## predict_sample: T x R x S x K array of posterior predictive draws
  ## y:              R x T matrix of observations
  ## lambda:         length-T time weights (default weights recent t more)
  ## gamma:          length-R series weights (default uniform)
  ## dirichlet_alpha: concentration of the Dirichlet prior on the weights
  ##
  ## Fix: dim() returns a vector, so the comparison must be collapsed with
  ## any() -- a length > 1 condition in if() is an error in recent R.
  if (any(dim(predict_sample) != c(T, R, S, K)) || any(dim(y) != c(R, T)))
    # TODO: fix the dimension of predict_sample and y on R-T.
    stop("Input dimensions do not match.")
  if (K < 2)
    stop("At least two models are required model averaging.")
  if (is.null(lambda))
    lambda = 2 - (1 - c(1:T) /T)^2
  if (is.null(gamma))
    gamma = rep(1/R, R)
  standata <- list(K = K,
                   R = R,
                   T = T,
                   S = S,
                   predict_sample_mat = predict_sample,
                   y = y,
                   lambda = lambda,
                   gamma = gamma,
                   dirichlet_alpha = dirichlet_alpha)
  opt <- rstan::optimizing(stacking_opt_model, data = standata)
  return(opt)
}
##compute pointwise CRPS when the prediction comes from a mixture of K models
##
## predict_sample_pointwise: S x K matrix of predictive draws (S per model)
## y: a single observed value
## w: length-K simplex of model weights
##
## CRPS(mixture) = sum_k w_k E|X_k - y|
##                 - 1/2 sum_{k1,k2} w_k1 w_k2 E|X_k1 - X_k2|
## Fixes vs the original:
##  * the Monte-Carlo cross term referenced an undefined index 's2';
##    E|X_k1 - X_k2| is now estimated over all S^2 draw pairs via outer()
##  * the simplex check uses all.equal() instead of exact floating-point
##    equality (sum(w) == 1 rarely holds exactly for floats)
CRPS_pointwise=function(predict_sample_pointwise, y, w){
  S = nrow(predict_sample_pointwise)
  K = ncol(predict_sample_pointwise)
  if ( length(y)!=1 )
    stop("Input dimensions do not match.")
  if ( length(w)!=K || min(w)<0 || !isTRUE(all.equal(sum(w), 1)) )
    stop("The weight has to be a simplex.")
  # model-wise mean absolute error against the observation
  mean_bias = apply(abs(predict_sample_pointwise-y), 2, mean)
  # entropy[k1, k2] = E|X_k1 - X_k2| estimated from all S^2 pairs of draws
  entropy = matrix(0, K, K)
  for( k1 in 1:K)
    for( k2 in 1:K)
      entropy[k1, k2] = mean(abs(outer(predict_sample_pointwise[, k1],
                                       predict_sample_pointwise[, k2], "-")))
  # quadratic form w' entropy w
  entropy_aggregrate = as.numeric(t(w) %*% entropy %*% w)
  return( mean_bias %*% w - 1/2* entropy_aggregrate )
}
##compute CRPS over panel data when the prediction comes from a mixture
##
## predict_sample: T x R x S x K array of draws; y: R x T observations;
## w: length-K simplex of model weights; lambda/gamma: optional time /
## series weights (normalized to sum to 1 before use).
## Fixes vs the original:
##  * gamma(r) called the gamma *function*; the series weight is gamma[r]
##  * vector conditions in if() are collapsed with any() (error in recent R)
##  * simplex check via all.equal() instead of exact float equality
CRPS_mixture=function(predict_sample, y,
                      K, R, T, S,w, lambda=NULL, gamma=NULL){
  if ( any(dim(predict_sample)!=c(T, R, S, K)) || any(dim(y)!=c(R, T)) )
    stop("Input dimensions do not match.")
  if ( length(w)!=K || min(w)<0 || !isTRUE(all.equal(sum(w), 1)) )
    stop("The weight has to be a simplex.")
  if( is.null(lambda) )
    lambda = 2 - (1 - c(1:T) /T)^2
  if( is.null(gamma) )
    gamma=rep(1/R, R)
  lambda=lambda/sum(lambda)
  gamma=gamma/sum(gamma)
  # weighted sum of pointwise CRPS over the T x R panel
  CRPS_sum=0
  for(t in 1:T)
    for(r in 1:R)
      CRPS_sum=CRPS_sum+lambda[t]*gamma[r]*CRPS_pointwise(predict_sample[t,r,,], y[r, t], w)
  return(CRPS_sum)
}
| /R/stacking_function.R | no_license | nikosbosse/model_stacking | R | false | false | 2,392 | r | library("rstan")
stacking_opt_model <- stan_model("../stan/crps_test.stan")
stacking_weight = function(predict_sample, y, K, R, T, S,
                           lambda=NULL, gamma=NULL, dirichlet_alpha=1.001){
  ## Estimate stacking weights for K candidate models by optimizing the
  ## pre-compiled Stan program 'stacking_opt_model'.
  ##
  ## predict_sample: T x R x S x K array of posterior predictive draws
  ## y:              R x T matrix of observations
  ## lambda:         length-T time weights (default weights recent t more)
  ## gamma:          length-R series weights (default uniform)
  ## dirichlet_alpha: concentration of the Dirichlet prior on the weights
  ##
  ## Fix: dim() returns a vector, so the comparison must be collapsed with
  ## any() -- a length > 1 condition in if() is an error in recent R.
  if (any(dim(predict_sample) != c(T, R, S, K)) || any(dim(y) != c(R, T)))
    # TODO: fix the dimension of predict_sample and y on R-T.
    stop("Input dimensions do not match.")
  if (K < 2)
    stop("At least two models are required model averaging.")
  if (is.null(lambda))
    lambda = 2 - (1 - c(1:T) /T)^2
  if (is.null(gamma))
    gamma = rep(1/R, R)
  standata <- list(K = K,
                   R = R,
                   T = T,
                   S = S,
                   predict_sample_mat = predict_sample,
                   y = y,
                   lambda = lambda,
                   gamma = gamma,
                   dirichlet_alpha = dirichlet_alpha)
  opt <- rstan::optimizing(stacking_opt_model, data = standata)
  return(opt)
}
##compute pointwise CRPS when the prediction comes from a mixture of K models
##
## predict_sample_pointwise: S x K matrix of predictive draws (S per model)
## y: a single observed value
## w: length-K simplex of model weights
##
## CRPS(mixture) = sum_k w_k E|X_k - y|
##                 - 1/2 sum_{k1,k2} w_k1 w_k2 E|X_k1 - X_k2|
## Fixes vs the original:
##  * the Monte-Carlo cross term referenced an undefined index 's2';
##    E|X_k1 - X_k2| is now estimated over all S^2 draw pairs via outer()
##  * the simplex check uses all.equal() instead of exact floating-point
##    equality (sum(w) == 1 rarely holds exactly for floats)
CRPS_pointwise=function(predict_sample_pointwise, y, w){
  S = nrow(predict_sample_pointwise)
  K = ncol(predict_sample_pointwise)
  if ( length(y)!=1 )
    stop("Input dimensions do not match.")
  if ( length(w)!=K || min(w)<0 || !isTRUE(all.equal(sum(w), 1)) )
    stop("The weight has to be a simplex.")
  # model-wise mean absolute error against the observation
  mean_bias = apply(abs(predict_sample_pointwise-y), 2, mean)
  # entropy[k1, k2] = E|X_k1 - X_k2| estimated from all S^2 pairs of draws
  entropy = matrix(0, K, K)
  for( k1 in 1:K)
    for( k2 in 1:K)
      entropy[k1, k2] = mean(abs(outer(predict_sample_pointwise[, k1],
                                       predict_sample_pointwise[, k2], "-")))
  # quadratic form w' entropy w
  entropy_aggregrate = as.numeric(t(w) %*% entropy %*% w)
  return( mean_bias %*% w - 1/2* entropy_aggregrate )
}
##compute CRPS over panel data when the prediction comes from a mixture
##
## predict_sample: T x R x S x K array of draws; y: R x T observations;
## w: length-K simplex of model weights; lambda/gamma: optional time /
## series weights (normalized to sum to 1 before use).
## Fixes vs the original:
##  * gamma(r) called the gamma *function*; the series weight is gamma[r]
##  * vector conditions in if() are collapsed with any() (error in recent R)
##  * simplex check via all.equal() instead of exact float equality
CRPS_mixture=function(predict_sample, y,
                      K, R, T, S,w, lambda=NULL, gamma=NULL){
  if ( any(dim(predict_sample)!=c(T, R, S, K)) || any(dim(y)!=c(R, T)) )
    stop("Input dimensions do not match.")
  if ( length(w)!=K || min(w)<0 || !isTRUE(all.equal(sum(w), 1)) )
    stop("The weight has to be a simplex.")
  if( is.null(lambda) )
    lambda = 2 - (1 - c(1:T) /T)^2
  if( is.null(gamma) )
    gamma=rep(1/R, R)
  lambda=lambda/sum(lambda)
  gamma=gamma/sum(gamma)
  # weighted sum of pointwise CRPS over the T x R panel
  CRPS_sum=0
  for(t in 1:T)
    for(r in 1:R)
      CRPS_sum=CRPS_sum+lambda[t]*gamma[r]*CRPS_pointwise(predict_sample[t,r,,], y[r, t], w)
  return(CRPS_sum)
}
|
library(randomForest)
require(caTools)
options(digits = 3)
library(matrixStats)
library(tidyverse)
library(caret)
library(descr)
library(ggplot2)
library(ISLR)
library(pROC)
library(ROCR)
library(Metrics)
library(ROSE)
# Load the credit-risk data and prepare the target variable.
Credit_Risk_Tree <- read.csv("creditrisk.csv")
#Cleaning NA
# NOTE(review): this line only prints a logical matrix and has no effect;
# the actual cleaning is done by na.omit() below.
is.na(Credit_Risk_Tree)
credit_risk_new <- na.omit(Credit_Risk_Tree)
# loan_status is the classification target, so it must be a factor
credit_risk_new$loan_status <- as.factor(credit_risk_new$loan_status)
str(credit_risk_new)
# Show the class distribution of loan_status (reveals the class imbalance)
CrossTable(credit_risk_new$loan_status)
#Split dataset into train and test
# creates a value for dividing the data into train and test. In this case the value is defined as 75%
#of the number of rows in the dataset
smp_siz = floor(0.75*nrow(credit_risk_new))
# draw a random sample of row indices of size smp_siz for the training set
CR_Train <- sample(seq_len(nrow(credit_risk_new)),size = smp_siz)
#creates the training dataset with row numbers stored in CR_Train
train =credit_risk_new[CR_Train,]
#creates the test dataset from the remaining rows
test=credit_risk_new[-CR_Train,]
# Solve the imbalance problem in the dataset using over-sampling of the
# minority class (ROSE::ovun.sample, target proportion p = 0.5)
Credit_Risk_Tree_balanced <- ovun.sample(loan_status~ ., data = train, p=0.5, seed=1,method="over")$data
table(Credit_Risk_Tree_balanced$loan_status)
# Fit a random forest on the over-sampled training data
rf_1 <- randomForest(loan_status ~ .,data=Credit_Risk_Tree_balanced)
rf_1
Predict_Rf <- predict(rf_1, newdata = test,type = "response")
# Solve the imbalance problem using combined over- and under-sampling
Credit_Risk_Tree_balanced_both <- ovun.sample(loan_status~ ., data = train, p=0.5, seed=1,method="both")$data
table(Credit_Risk_Tree_balanced_both$loan_status)
# Fit a random forest on the "both"-balanced training data
rf_3 <- randomForest(loan_status ~ .,data=Credit_Risk_Tree_balanced_both)
rf_3
Predict_Rf_1 <- predict(rf_3, newdata = test,type = "response")
# Solve the imbalance problem in the dataset using under-sampling of the
# majority class (removed a stray empty argument from the original
# ovun.sample() call, which produced a missing positional argument).
Credit_Risk_Tree_balanced_under <- ovun.sample(loan_status~ ., data = train, method="under", p=0.5, seed=1)$data
table(Credit_Risk_Tree_balanced_under$loan_status)
# Fit a random forest on the under-sampled training data
rf_4 <- randomForest(loan_status ~ .,data=Credit_Risk_Tree_balanced_under)
rf_4
Predict_Rf_2 <- predict(rf_4, newdata = test,type = "response")
#Evaluating accuracy: roc.curve() draws the ROC curve and reports the AUC
#for each balancing strategy, all scored on the same held-out test set.
#ROC curve over sampling
roc.curve(test$loan_status, Predict_Rf)
#ROC curve both
roc.curve(test$loan_status, Predict_Rf_1)
#ROC curve under sampling
roc.curve(test$loan_status, Predict_Rf_2)
| /Credit_Risk_Model/Rforest1.R | no_license | SeanStanislaw/HarvardX_DataScience_CapstoneProject_CreditRisk | R | false | false | 2,514 | r | library(randomForest)
require(caTools)
options(digits = 3)
library(matrixStats)
library(tidyverse)
library(caret)
library(descr)
library(ggplot2)
library(ISLR)
library(pROC)
library(ROCR)
library(Metrics)
library(ROSE)
Credit_Risk_Tree <- read.csv("creditrisk.csv")
#Cleaning NA
is.na(Credit_Risk_Tree)
credit_risk_new <- na.omit(Credit_Risk_Tree)
credit_risk_new$loan_status <- as.factor(credit_risk_new$loan_status)
str(credit_risk_new)
# Call CrossTable() on Credit_Risk
CrossTable(credit_risk_new$loan_status)
#Split dataset into train and test
# creates a value for dividing the data into train and test. In this case the value is defined as 75%
#of the number of rows in the dataset
smp_siz = floor(0.75*nrow(credit_risk_new))
# shows the value of the sample size
CR_Train <- sample(seq_len(nrow(credit_risk_new)),size = smp_siz)
#creates the training dataset with row numbers stored in CR_Train
train =credit_risk_new[CR_Train,]
#creates the training dataset with row numbers stored in CR_Train
test=credit_risk_new[-CR_Train,]
# Solve imbalnce problem in dataset using over sampling
Credit_Risk_Tree_balanced <- ovun.sample(loan_status~ ., data = train, p=0.5, seed=1,method="over")$data
table(Credit_Risk_Tree_balanced$loan_status)
# Execute random forest package using over sampling
rf_1 <- randomForest(loan_status ~ .,data=Credit_Risk_Tree_balanced)
rf_1
Predict_Rf <- predict(rf_1, newdata = test,type = "response")
# Solve imbalnce problem in dataset using both Over and under sampling
Credit_Risk_Tree_balanced_both <- ovun.sample(loan_status~ ., data = train, p=0.5, seed=1,method="both")$data
table(Credit_Risk_Tree_balanced_both$loan_status)
# Execute random forest package using both method
rf_3 <- randomForest(loan_status ~ .,data=Credit_Risk_Tree_balanced_both)
rf_3
Predict_Rf_1 <- predict(rf_3, newdata = test,type = "response")
# Solve the imbalance problem in the dataset using under-sampling of the
# majority class (removed a stray empty argument from the original
# ovun.sample() call, which produced a missing positional argument).
Credit_Risk_Tree_balanced_under <- ovun.sample(loan_status~ ., data = train, method="under", p=0.5, seed=1)$data
table(Credit_Risk_Tree_balanced_under$loan_status)
# Fit a random forest on the under-sampled training data
rf_4 <- randomForest(loan_status ~ .,data=Credit_Risk_Tree_balanced_under)
rf_4
Predict_Rf_2 <- predict(rf_4, newdata = test,type = "response")
#Evaluating accuracy
#ROC curve over sampling
roc.curve(test$loan_status, Predict_Rf)
#ROC curve both
roc.curve(test$loan_status, Predict_Rf_1)
#ROC curve under sampling
roc.curve(test$loan_status, Predict_Rf_2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/removeArea.R
\name{checkRemovedArea}
\alias{checkRemovedArea}
\title{Seek for a removed area}
\usage{
checkRemovedArea(area, all_files = TRUE, opts = antaresRead::simOptions())
}
\arguments{
\item{area}{An area}
\item{all_files}{Check files in study directory.}
\item{opts}{List of simulation parameters returned by the function
\code{antaresRead::setSimulationPath}}
}
\value{
a named list with two elements
}
\description{
Check if it remains trace of a deleted area in the input folder
}
\examples{
\dontrun{
checkRemovedArea("myarea")
}
}
| /man/checkRemovedArea.Rd | no_license | cran/antaresEditObject | R | false | true | 650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/removeArea.R
\name{checkRemovedArea}
\alias{checkRemovedArea}
\title{Seek for a removed area}
\usage{
checkRemovedArea(area, all_files = TRUE, opts = antaresRead::simOptions())
}
\arguments{
\item{area}{An area}
\item{all_files}{Check files in study directory.}
\item{opts}{List of simulation parameters returned by the function
\code{antaresRead::setSimulationPath}}
}
\value{
a named list with two elements
}
\description{
Check if it remains trace of a deleted area in the input folder
}
\examples{
\dontrun{
checkRemovedArea("myarea")
}
}
|
# nathan dot lazar at gmail dot com
# Combines results from all permutation analyses into one
# data frame
#
# combine_per: build an 18 x 9 summary matrix with one row per genomic
# feature class (all bp; gene features; repeat classes; SINE subfamilies;
# CpG islands/shores) and one column per statistic extracted from the
# corresponding permutation-result object.
combine_per <- function(bp.permute, gene.permute.list,
  rep.permute, rep.permute.list, SINE.permute.list, cpg.permute.list) {
  df <- matrix(0, nrow=18 , ncol=9,
    dimnames=list(c('all', 'gene', 'exon', 'intron',
                    'promoter', 'reps', 'LINE',
                    'SINE', 'DNA', 'LTR', 'Satellite',
                    'Alu', 'MIR', 'AluJ', 'AluS',
                    'AluY', 'cpg_isl', 'cpg_shore'),
                  c('count', 'meth.p', 'cov.p', 'cpg.p', 'per.p',
                    'meth.n', 'cov.n', 'cpg.n', 'per.n')))
  # Row 1 ('all'): bp.permute carries no permutation p-value, so per.p is
  # NA and the four *.n sample sizes are hard-coded.
  # NOTE(review): 1008 is presumably the number of permutations used
  # upstream -- confirm against the permutation scripts.
  df[1,] <- c(unlist(bp.permute[c('bp.count', 'meth.p', 'cov.p', 'cpg.p')]),NA,rep(1008,4))
  # Rows 2-5: gene feature classes, assumed ordered gene/exon/intron/promoter
  for(i in 1:length(gene.permute.list))
    df[i+1,] <- unlist(gene.permute.list[[i]][c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
      'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
  # Row 6: all repeats combined
  df[6,] <- unlist(rep.permute[c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
    'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
  # Rows 7-11: repeat classes (LINE, SINE, DNA, LTR, Satellite)
  for(i in 1:length(rep.permute.list))
    df[i+6,] <- unlist(rep.permute.list[[i]][c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
      'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
  # Rows 12-16: SINE subfamilies (Alu, MIR, AluJ, AluS, AluY)
  for(i in 1:length(SINE.permute.list))
    df[i+11,] <- unlist(SINE.permute.list[[i]][c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
      'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
  # Rows 17-18: CpG islands and shores
  for(i in 1:length(cpg.permute.list))
    df[i+16,] <- unlist(cpg.permute.list[[i]][c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
      'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
  df
}
# Combines results from all permutation analyses into one
# data frame
combine_per <- function(bp.permute, gene.permute.list,
rep.permute, rep.permute.list, SINE.permute.list, cpg.permute.list) {
df <- matrix(0, nrow=18 , ncol=9,
dimnames=list(c('all', 'gene', 'exon', 'intron',
'promoter', 'reps', 'LINE',
'SINE', 'DNA', 'LTR', 'Satellite',
'Alu', 'MIR', 'AluJ', 'AluS',
'AluY', 'cpg_isl', 'cpg_shore'),
c('count', 'meth.p', 'cov.p', 'cpg.p', 'per.p',
'meth.n', 'cov.n', 'cpg.n', 'per.n')))
df[1,] <- c(unlist(bp.permute[c('bp.count', 'meth.p', 'cov.p', 'cpg.p')]),NA,rep(1008,4))
for(i in 1:length(gene.permute.list))
df[i+1,] <- unlist(gene.permute.list[[i]][c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
df[6,] <- unlist(rep.permute[c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
for(i in 1:length(rep.permute.list))
df[i+6,] <- unlist(rep.permute.list[[i]][c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
for(i in 1:length(SINE.permute.list))
df[i+11,] <- unlist(SINE.permute.list[[i]][c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
for(i in 1:length(cpg.permute.list))
df[i+16,] <- unlist(cpg.permute.list[[i]][c('bp.count', 'meth.p', 'cov.p', 'cpg.p',
'per.p', 'meth.n', 'cov.n', 'cpg.n', 'per.n')])
df
} |
# NOTE(review): install.packages() should be run once interactively, not on
# every execution of the script; left in place to preserve the original flow.
install.packages("reshape")
library(reshape)
# melt() from reshape is used to reshape the data for quick exploratory plots
# working directory containing the raw data file
setwd('C:\\coursera\\exdata-data-household_power_consumption')
# load the raw data (semicolon-separated; '?' marks missing observations)
raw_data <- read.table("household_power_consumption.txt", header = T, sep=';')
# Build a combined datetime column by concatenating the Date and Time columns.
raw_data$date_char <- as.character(raw_data$Date)
raw_data$times <- as.character(raw_data$Time)
raw_data$dates <- as.Date(raw_data$date_char,"%d/%m/%Y")
raw_data$datetime <- strptime(paste(raw_data$dates, raw_data$times), "%Y-%m-%d %H:%M:%S")
# Melt the global active power data to make it easier to work with
power_melt <- melt(raw_data, id=c("datetime"),measure.vars=c("Global_active_power"))
# Check how much memory the raw data uses, then free it
print(object.size(raw_data),units="Mb")
rm(raw_data)
# Keep only the two days of interest (2007-02-01 and 2007-02-02);
# the date bounds are made explicit Date objects.
power_melt_filtered <- subset(power_melt, as.Date(datetime) >= as.Date('2007-02-01') & as.Date(datetime) <= as.Date('2007-02-02'), select=c(datetime, value))
# Convert '?' placeholders to NA and drop incomplete rows
power_melt_filtered[power_melt_filtered=="?"]<-NA
power_melt_filtered_no_outlier <- (na.omit(power_melt_filtered))
# Plot the cleaned values. Global_active_power is already recorded in
# kilowatts in this dataset, so no unit conversion is needed (the original
# divided by 1000, which plotted megawatts under a "kilowatts" label).
png(filename = "C:\\coursera\\images\\plot2.png", width = 480, height = 480,bg = "white")
plot(power_melt_filtered_no_outlier$datetime, as.double(power_melt_filtered_no_outlier$value), type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
#close our png image
library(reshape)
#we will need to do some melting of data in order to make quick #exploratory images
#find my file directory
setwd('C:\\coursera\\exdata-data-household_power_consumption')
#upload files
raw_data <- read.table("household_power_consumption.txt", header = T, sep=';')
#I will need to create a custom value called datetime
#it will include the concatenation of Date and Time
#first I need to convert these from the raw data into character #vectors
raw_data$date_char <- as.character(raw_data$Date)
raw_data$times <- as.character(raw_data$Time)
raw_data$dates <- as.Date(raw_data$date_char,"%d/%m/%Y")
#here I concatenate them with the paste and strptime commands
raw_data$datetime <- strptime(paste(raw_data$dates, raw_data$times), "%Y-%m-%d %H:%M:%S")
#Lets melt our global active power data to make it easier to work #with
power_melt <- melt(raw_data, id=c("datetime"),measure.vars=c("Global_active_power"))
#now that we have the data set we need lets check how much memory
#the raw data is using
print(object.size(raw_data),units="Mb")
#lets remove the raw data object to free up space
rm(raw_data)
#Now lets subset by the suggested dates
power_melt_filtered <- subset(power_melt, as.Date(datetime) >= '2007-02-01 00:00:00' & as.Date(datetime) <= '2007-02-02 23:59:59', select=c(datetime, value))
# remove ? values
power_melt_filtered[power_melt_filtered=="?"]<-NA
power_melt_filtered_no_outlier <- (na.omit(power_melt_filtered))
#Lets plot our cleaned up values and convert to kilowatts
png(filename = "C:\\coursera\\images\\plot2.png", width = 480, height = 480,bg = "white")
plot(power_melt_filtered_no_outlier$datetime, as.double(power_melt_filtered_no_outlier$value)/1000, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
#close our png image
|
\name{peregrine}
\alias{peregrine}
\encoding{UTF-8}
\docType{data}
\title{
Data for peregrine falcons from the Jura Mountains, 1965-2007
}
\description{
Data for peregrine falcons (\emph{Falco peregrinus}) from the Jura Mountains straddling the Franco-Swiss border for 1965 to 2007. We combined data collected by Gaby Banderet and colleagues in Switzerland and René-Jean Monneret, René Ruffinoni and their colleagues in France.
Data comprise the annual number of breeding pairs, information on productivity and dead-recoveries of marked individuals.
}
\usage{data(peregrine)}
\format{
\code{peregrine} is a list with 3 components:
\describe{
\item{count }{a 2-column matrix with the number of breeding pairs recorded in each year.}
\item{productivity }{a 3-column matrix with the number of broods surveyed and the total number of fledglings for each year.}
\item{recoveries }{an individuals x years matrix, with 1 when an individual was ringed as a nestling and when recovered dead; otherwise 0.}
}
}
\source{Swiss data from Gabriel Banderet and the Swiss Ornithological Institute. French data from Fonds Sauvegarde Faune Flore Jurassienne - Groupe Pèlerin Jura.}
\references{
Kéry, M., Banderet, G., Neuhaus, M., Weggler, M., Schmid, H., Sattler, T., Parish, D. (2018) Population trends of the Peregrine Falcon in Switzerland with special reference to the period 2005-2016. \emph{Ornis Hungarica} 26, 91-103.
Monneret, R.-J., Rufinioni, R., Parish, D., Pinaud, D., Kéry, M. (2018) The Peregrine population study in the French Jura mountains 1964-2016: use of occupancy modeling to estimate population size and analyze site persistence and colonization rates. \emph{Ornis Hungarica} 26, 69-90.
Schaub, M., Kéry, M. (2022) \emph{Integrated Population Models}, Academic Press, chapter 12.
}
\examples{
data(peregrine)
str(peregrine)
}
\keyword{datasets}
| /man/data_peregrine.Rd | no_license | mikemeredith/IPMbook | R | false | false | 1,876 | rd | \name{peregrine}
\alias{peregrine}
\encoding{UTF-8}
\docType{data}
\title{
Data for peregrine falcons from the Jura Mountains, 1965-2007
}
\description{
Data for peregrine falcons (\emph{Falco peregrinus}) from the Jura Mountains straddling the Franco-Swiss border for 1965 to 2007. We combined data collected by Gaby Banderet and colleagues in Switzerland and René-Jean Monneret, René Ruffinoni and their colleagues in France.
Data comprise the annual number of breeding pairs, information on productivity and dead-recoveries of marked individuals.
}
\usage{data(peregrine)}
\format{
\code{peregrine} is a list with 3 components:
\describe{
\item{count }{a 2-column matrix with the number of breeding pairs recorded in each year.}
\item{productivity }{a 3-column matrix with the number of broods surveyed and the total number of fledglings for each year.}
\item{recoveries }{an individuals x years matrix, with 1 when an individual was ringed as a nestling and when recovered dead; otherwise 0.}
}
}
\source{Swiss data from Gabriel Banderet and the Swiss Ornithological Institute. French data from Fonds Sauvegarde Faune Flore Jurassienne - Groupe Pèlerin Jura.}
\references{
Kéry, M., Banderet, G., Neuhaus, M., Weggler, M., Schmid, H., Sattler, T., Parish, D. (2018) Population trends of the Peregrine Falcon in Switzerland with special reference to the period 2005-2016. \emph{Ornis Hungarica} 26, 91-103.
Monneret, R.-J., Rufinioni, R., Parish, D., Pinaud, D., Kéry, M. (2018) The Peregrine population study in the French Jura mountains 1964-2016: use of occupancy modeling to estimate population size and analyze site persistence and colonization rates. \emph{Ornis Hungarica} 26, 69-90.
Schaub, M., Kéry, M. (2022) \emph{Integrated Population Models}, Academic Press, chapter 12.
}
\examples{
data(peregrine)
str(peregrine)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/document.R
\name{explain.condition}
\alias{explain.condition}
\title{Explain the condition field}
\usage{
\method{explain}{condition}(field)
}
\arguments{
\item{field}{Field to explain}
}
\description{
Explain the condition field
}
| /man/explain.condition.Rd | no_license | andykrause/kingCoData | R | false | true | 314 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/document.R
\name{explain.condition}
\alias{explain.condition}
\title{Explain the fbsmt_grade field}
\usage{
\method{explain}{condition}(field)
}
\arguments{
\item{field}{Field to explain}
}
\description{
Explain the fbsmt_grade field
}
|
# closed_cmr_estimation_Bayes.R
# runs the augmented CMR analysis to estimate abundance
# from simulated data - see closed_cmr_simulation_estimation.R
# Tomo Eguchi
# 30 January 2017
# NOTE(review): rm(list=ls()) wipes the user's workspace as a side effect;
# running the script in a fresh session is the safer convention.
rm(list=ls())
#data.dir<-"C:/Users/mike/Dropbox/teaching/workshop/13"
#require(RMark)
library(rjags)
# estim_Bayes() and other helpers are defined in this sourced file
source('Cm_SDB_functions.R')
# MCMC settings
n.chains <- 5
n.adapt <- 1000
n.update <- 10000
n.iter <- 5000
nz <- 50 # the augmented additional rows of individuals
# simulation result files to process (one per scenario)
RData.files <- list.files(path = 'RData/',
                          pattern = '_pt1_')
# the k0/k1/k2 assignments below look like leftovers from interactive
# debugging; the for() loops immediately overwrite them
k0 <- 1
for (k0 in 1:length(RData.files)){
  # loads 'sim.results.all' (and related objects) into the workspace
  load(paste0('RData/', RData.files[k0]))
  #bayes.out.all <- vector(mode = 'list',
  #                        length = length(sim.results.all))
  k1 <- 1
  for (k1 in 1:length(sim.results.all)){ # different sample occassions
    k2 <- 1
    bayes.out <- vector(mode = 'list',
                        length = length(sim.results.all[[k1]]))
    for (k2 in 1:length(sim.results.all[[k1]])){ # # simulations
      print(paste0('k0 = ', k0, '; k1 = ', k1, '; k2 = ', k2))
      y.full <- sim.results.all[[k1]][[k2]]$y.full
      # fit the closed-population 'Mt' model to this simulated capture history
      bayes.out[[k2]] <- estim_Bayes(y.full,
                                     'Mt',
                                     params = c("N", "p", "Omega", "deviance"),
                                     nz = nz,
                                     n.chains = n.chains,
                                     n.adapt = n.adapt,
                                     n.update = n.update,
                                     n.iter = n.iter)
    }
    #bayes.out.all[[k1]] <- bayes.out
    #save(list = c('summary.data', 'sample.size', 'N', 'p', 'k',
    #              'sim.results.all', 'bayes.out'),
    #     file = paste0('RData/Bayes/', unlist(strsplit(RData.files[k0],
    #                                                   split = '2017-02-02'))[1],
    #                   'k_', k[k1], 'withBayes_', Sys.Date(), '.RData'))
    # save results for this sampling-occasion setting; the output file name
    # is derived from the input file name by splitting on its date stamp
    save(list = c('N', 'p', 'k',
                  'sim.results.all', 'bayes.out'),
         file = paste0('RData/Bayes/', unlist(strsplit(RData.files[k0],
                                                       split = '2017-02-02'))[1],
                       'k_', k[k1], '_withBayes_2017-01-30.RData'))
  }
}
| /closed_cmr_estimation_Bayes_Mt.R | no_license | mteguchi/Cm_SDB_CMR | R | false | false | 2,216 | r | # closed_cmr_estimation_Bayes.R
# runs the augmented CMR analysis to estimate abundance
# from simualted data - see closed_cmr_simulation_estimation.R
# Tomo Eguchi
# 30 January 2017
rm(list=ls())
#data.dir<-"C:/Users/mike/Dropbox/teaching/workshop/13"
#require(RMark)
library(rjags)
source('Cm_SDB_functions.R')
n.chains <- 5
n.adapt <- 1000
n.update <- 10000
n.iter <- 5000
nz <- 50 # the augmented additional rows of individuals
RData.files <- list.files(path = 'RData/',
pattern = '_pt1_')
k0 <- 1
for (k0 in 1:length(RData.files)){
load(paste0('RData/', RData.files[k0]))
#bayes.out.all <- vector(mode = 'list',
# length = length(sim.results.all))
k1 <- 1
for (k1 in 1:length(sim.results.all)){ # different sample occassions
k2 <- 1
bayes.out <- vector(mode = 'list',
length = length(sim.results.all[[k1]]))
for (k2 in 1:length(sim.results.all[[k1]])){ # # simulations
print(paste0('k0 = ', k0, '; k1 = ', k1, '; k2 = ', k2))
y.full <- sim.results.all[[k1]][[k2]]$y.full
bayes.out[[k2]] <- estim_Bayes(y.full,
'Mt',
params = c("N", "p", "Omega", "deviance"),
nz = nz,
n.chains = n.chains,
n.adapt = n.adapt,
n.update = n.update,
n.iter = n.iter)
}
#bayes.out.all[[k1]] <- bayes.out
#save(list = c('summary.data', 'sample.size', 'N', 'p', 'k',
# 'sim.results.all', 'bayes.out'),
# file = paste0('RData/Bayes/', unlist(strsplit(RData.files[k0],
# split = '2017-02-02'))[1],
# 'k_', k[k1], 'withBayes_', Sys.Date(), '.RData'))
save(list = c('N', 'p', 'k',
'sim.results.all', 'bayes.out'),
file = paste0('RData/Bayes/', unlist(strsplit(RData.files[k0],
split = '2017-02-02'))[1],
'k_', k[k1], '_withBayes_2017-01-30.RData'))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retrospective_functions.r
\name{PlotRetroWrapper}
\alias{PlotRetroWrapper}
\title{Plot retrospectives}
\usage{
PlotRetroWrapper(wd, asap.name, asap, save.plots, od, plotf)
}
\arguments{
\item{wd}{directory where ASAP run is located}
\item{asap.name}{Base name of original dat file (without the .dat extension)}
\item{asap}{name of the variable that read in the asap.rdat file}
\item{save.plots}{save individual plots}
\item{od}{output directory for plots and csv files}
\item{plotf}{type of plot to save}
}
\description{
Plots both standard and relative retrospectives and computes Mohn's rho values. Uses functions get.retro and plot.retro.
}
| /man/PlotRetroWrapper.Rd | permissive | cmlegault/ASAPplots | R | false | true | 751 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retrospective_functions.r
\name{PlotRetroWrapper}
\alias{PlotRetroWrapper}
\title{Plot retrospectives}
\usage{
PlotRetroWrapper(wd, asap.name, asap, save.plots, od, plotf)
}
\arguments{
\item{wd}{directory where ASAP run is located}
\item{asap.name}{Base name of original dat file (without the .dat extension)}
\item{asap}{name of the variable that read in the asap.rdat file}
\item{save.plots}{save individual plots}
\item{od}{output directory for plots and csv files}
\item{plotf}{type of plot to save}
}
\description{
Plots both standard and relative retrospectives and computes Mohn's rho values. Uses functions get.retro and plot.retro.
}
|
library(tidyverse)
library(GenomicRanges)
library(BSgenome)
library(plyranges)
library(reshape2)
library(RColorBrewer)

# lookup table: one row per bird genome assembly
species_list <- read_tsv("~/Genomes/Birds/bird_genomes.tsv", col_names = c("species_name", "genome_name"))

# working/input locations (some are only used by other scripts in this project
# -- TODO confirm which of these are needed here)
work_dir <- "~/temp"
query_dir <- "~/Birds/OrthologueRegions/cluster"
genome_dir <- "~/Genomes/Birds"
repbase_rep <- "RepBase_for_class.fasta"

# fixed plotting colour for each CR1 subfamily
subfamily_colours <- tibble(subfamily = c("CR1-C", "CR1-Croc", "CR1-E", "CR1-J", "CR1-W", "CR1-X", "CR1-Y", "CR1-Z"),
                            class_colour = c("#FEFA70", "#9D9D9D", "#FED16B", "#98FF6F", "#D363FE", "#6F87FF", "#B56FFF", "#FE6363"))
subfamilies <- read_tsv("extended_fastas/recip_blast/classes.tsv") %>%
  inner_join(subfamily_colours)

# set query species
species_name <- species_list$species_name[12]

# parse the extended-insertion FASTA headers ("seqname:start-end(strand)")
# into coordinate columns; olength = length of the original insertion
query_info <- tibble(qseqid = names(readDNAStringSet(paste0("extended_fastas/", species_name, "_extended_small.fasta")))) %>%
  mutate(qseqid_2 = qseqid) %>%
  tidyr::separate(qseqid_2, into = c("qseqnames", "ranges"), ":") %>%
  tidyr::separate(ranges, into = c("ranges", "ostrand"), "\\(") %>%
  tidyr::separate(ranges, into = c("ostart", "oend"), "-") %>%
  mutate(ostrand = sub(")", "", ostrand),
         ostart = as.integer(ostart),
         oend = as.integer(oend),
         olength = oend - ostart + 1)

# accumulators: collated_info gains one state column per subject species below
collated_info <- query_info %>% dplyr::select(qseqid)
queries <- query_info %>% dplyr::select(qseqid, olength)
# For each subject species (hard-coded row numbers in species_list), classify
# every query insertion site as: 0 = insertion also present at the orthologous
# site in the subject ("ancestral"), 1 = orthologous site empty in the subject
# (insertion is new in the query lineage), -1 = unresolved/missing
# (key restated after the loop).
for(i in c(9:13, 58)){
  # set subject species
  s_species <- species_list$species_name[i]
  # skip if subject species is query species (trivially present everywhere)
  if(species_name == s_species){
    collated_info <- collated_info %>% mutate(!! s_species := 0)
    next()
  }
  # read in blast, remove small hits, label flank, and determine start, end and strand
  other_blast <- read_tsv(paste0("extended_fastas/blast_out/", species_name, "_in_", s_species, "_small.tsv"), col_names = c("qseqid", "sseqid", "qstart", "qend", "sstart", "send", "bitscore", "length", "qlen", "slen", "pident")) %>%
    mutate(qseqid = sub("#.*", "", qseqid))
  # hits covering both 450 bp flanks within a single alignment: orthologous
  # region intact; keep one hit per query (relies on the blast output row
  # order -- presumably best hit first; TODO confirm input is score-sorted)
  old <- other_blast %>%
    filter(qstart <= 450, qend >= qlen - 450) %>%
    dplyr::group_by(qseqid) %>%
    dplyr::slice(1) %>%
    dplyr::ungroup()
  # remaining hits: keep those anchored in at least one flank, normalise
  # subject coordinates/strand, and label which flank (5' or 3') each hit is
  other_filtered <- other_blast %>%
    filter(!qseqid %in% old$qseqid) %>%
    mutate(olen = qlen - 1200) %>%
    filter(qstart <= 450 | qend >= qlen - 450) %>%
    mutate(sstrand = ifelse(sstart < send, "+", "-"),
           start = ifelse(sstart < send, sstart, send),
           end = ifelse(sstart < send, send, sstart),
           strand = case_when(qstart <= 450 ~ "5",
                              qend >= qlen - 450 ~ "3"),
           strand_filter = paste0(qseqid, "#", sstrand)) %>%
    arrange(qseqid, start) %>%
    mutate(sstart = start, send = end) %>%
    dplyr::select(-start, -end)
  # select single hits: queries with exactly one two-hit flank pair on exactly
  # one subject strand
  hit_no <- tibble(strand_filter = names(table(other_filtered$strand_filter)), n = as.integer(table(other_filtered$strand_filter))) %>%
    filter(n == 2)
  hit_no_s <- tibble(qseqid = names(table(sub("#.", "", hit_no$strand_filter))), n = as.integer(table(sub("#.", "", hit_no$strand_filter)))) %>%
    filter(n == 1)
  hit_no <- hit_no %>%
    filter(sub("#.", "", strand_filter) %in% hit_no_s$qseqid)
  # work out distances between query stop start and subject stop start:
  # plus-strand flank pairs (5' hit precedes 3' hit in subject coordinates);
  # lead()/lag() pair up adjacent rows of the same query
  other_further_filtered_5_3 <- other_filtered %>%
    filter(strand_filter %in% hit_no$strand_filter, sstrand == "+") %>%
    dplyr::select(-strand_filter) %>%
    filter((qseqid == lead(qseqid) & strand == "5" & lead(strand == "3")) |
             (qseqid == lag(qseqid) & strand == "3" & lag(strand == "5"))) %>%
    mutate(sdist = case_when(qseqid == lead(qseqid) ~ lead(sstart) - send + 1,
                             qseqid == lag(qseqid) ~ sstart - lag(send) + 1),
           qdist = case_when(qseqid == lead(qseqid) ~ lead(qstart) - qend + 1,
                             qseqid == lag(qseqid) ~ qstart - lag(qend) + 1),
           diff = qdist - sdist)
  # same, for minus-strand pairs (3' hit precedes 5' hit in subject coords)
  other_further_filtered_3_5 <- other_filtered %>%
    filter(strand_filter %in% hit_no$strand_filter, sstrand == "-") %>%
    dplyr::select(-strand_filter) %>%
    filter((qseqid == lead(qseqid) & strand == "3" & lead(strand == "5")) |
             (qseqid == lag(qseqid) & strand == "5" & lag(strand == "3"))) %>%
    mutate(sdist = case_when(qseqid == lead(qseqid) ~ lead(sstart) - send + 1,
                             qseqid == lag(qseqid) ~ sstart - lag(send) + 1),
           qdist = case_when(qseqid == lead(qseqid) ~ qstart - lead(qend) + 1,
                             qseqid == lag(qseqid) ~ lag(qstart) - qend + 1),
           diff = qdist - sdist)
  # find old by weeding out overlaps (potential deletions), plus strand:
  # pairs whose hits reach well into the insertion itself (state 0)
  other_further_filtered_old_5_3 <- other_further_filtered_5_3 %>%
    filter((strand == 5 & qend > 616 & lead(qseqid) == qseqid) |
             (lag(qseqid) == qseqid & lag(strand) == 5 & lag(qend) > 616) |
             (strand == 3 & qstart < qlen - 616 & lag(qseqid) == qseqid) |
             (lead(qseqid) == qseqid & lead(strand) == 3 & lead(qstart) < qlen - 616)) %>%
    filter(abs(diff) < 10000) %>%
    mutate(start = ifelse(qseqid == lead(qseqid), sstart, lag(sstart)),
           end = ifelse(qseqid == lead(qseqid), lead(send), send)) %>%
    group_by(qseqid) %>%
    dplyr::slice(1) %>%
    ungroup() %>%
    mutate(state = 0) %>%
    dplyr::select(sseqid, start, end, qseqid, sstrand, state)
  # find old by weeding out overlaps (potential deletions), minus strand
  other_further_filtered_old_3_5 <- other_further_filtered_3_5 %>%
    filter((strand == 5 & qend > 616 & lag(qseqid) == qseqid) |
             (strand == 3 & lead(qend) > 616 & lead(qseqid) == qseqid) |
             (strand == 3 & qstart < qlen - 616 & lead(qseqid) == qseqid) |
             (strand == 5 & lag(qstart) < qlen - 616 & lag(qseqid) == qseqid)) %>%
    filter(abs(diff) < 10000) %>%
    mutate(start = ifelse(qseqid == lead(qseqid), sstart, lag(sstart)),
           end = ifelse(qseqid == lead(qseqid), lead(send), send)) %>%
    group_by(qseqid) %>%
    dplyr::slice(1) %>%
    ungroup() %>%
    mutate(state = 0) %>%
    dplyr::select(sseqid, start, end, qseqid, sstrand, state)
  # determine old vs new for the remaining pairs: a tight flank junction in
  # the subject (sdist <= 16) while the query gap spans the insertion length
  # -> empty orthologous site (state 1); a tight query junction with a sizable
  # subject gap -> insertion present (state 0); otherwise unresolved (-1)
  other_further_filtered <- rbind(other_further_filtered_5_3, other_further_filtered_3_5) %>%
    filter(sdist > -1000, sdist < 1000, !qseqid %in% other_further_filtered_old_3_5$qseqid, !qseqid %in% other_further_filtered_old_5_3$qseqid) %>%
    mutate(state = case_when((sdist <= 16 & qdist >= olen - 24) ~ 1,
                             (qdist >= -16 & qdist <= 16 & sdist >= 90) ~ 0,
                             TRUE ~ -1)) %>%
    mutate(start = ifelse(qseqid == lead(qseqid), sstart, lag(sstart)),
           end = ifelse(qseqid == lead(qseqid), lead(send), send)) %>%
    group_by(qseqid) %>%
    dplyr::slice(1) %>%
    ungroup() %>%
    dplyr::select(sseqid, start, end, qseqid, sstrand, state)
  # full-span single hits are present-in-subject by definition (state 0)
  old <- old %>%
    mutate(sstrand = ifelse(sstart < send, "+", "-"),
           start = ifelse(sstart < send, sstart, send),
           end = ifelse(sstart < send, send, sstart),
           state = 0) %>%
    dplyr::select(sseqid, start, end, qseqid, sstrand, state)
  present_bound_resolved <- rbind(old, other_further_filtered_old_3_5, other_further_filtered_old_5_3, other_further_filtered) %>%
    arrange(qseqid)
  # write resolved locations to file
  write_tsv(x = present_bound_resolved, file = paste0("extended_fastas/resolved_locations/small_", species_name, "_in_", s_species, ".tsv"), col_names = F)
  # select relevant info for further resolution; queries with no resolved
  # location default to -1 (missing)
  pre_info <- present_bound_resolved %>%
    select(qseqid, state) %>%
    full_join(queries) %>%
    mutate(state = ifelse(is.na(state), -1, state)) %>%
    dplyr::rename(!! s_species := state) %>%
    dplyr::select(-olength)
  # collate with previous info
  collated_info <- full_join(pre_info, collated_info)
}

# state key: missing = -1, new = 1, ancestral = 0
# join states back onto the parsed coordinates
collated_info <- inner_join(collated_info, query_info)
# write the per-insertion presence/absence matrix to file
write_tsv(x = collated_info, file = paste0("extended_fastas/collated_results/", species_name, "_small_collated_results.tsv"), col_names = T)
# plotting ------------------------------------------------------------------
species_name <- species_list$species_name[12]
collated_info <- read_tsv(paste0("extended_fastas/collated_results/", species_name, "_small_collated_results.tsv")) %>%
  filter(olength >= 100)
# re-read without the colour join (this overwrites the earlier 'subfamilies')
subfamilies <- read_tsv("extended_fastas/recip_blast/classes.tsv")
# best reciprocal-blast hit per insertion; d = mismatches per aligned base,
# converted to a Jukes-Cantor distance
reciprocal_blast_out <- read_tsv(paste0("extended_fastas/recip_blast/", species_name, ".out"),
                                 col_names = c("qseqid", "sseqid", "pident", "length", "mismatch", "gapopen", "qstart", "qend", "sstart", "send", "evalue", "bitscore")) %>%
  mutate(d = mismatch/length) %>%
  mutate(jc_dist = (-3 / 4) * log(1 - (4 * d / 3))) %>%
  dplyr::group_by(qseqid) %>% # group using query sequence
  dplyr::slice(1) %>% # select top hits (highest bitscore)
  dplyr::ungroup() %>%
  mutate(sseqid = sub(";.*", "", sub(".*#", "", as.character(sseqid)))) %>%
  inner_join(subfamilies) %>%
  dplyr::select(qseqid, jc_dist, subfamily, sseqid)
# determine whether ancestral or recent (needs to be adjusted for each species);
# 1 in a species column = insertion absent there (new in the query lineage)
collated_info_2 <- collated_info %>%
  mutate(state = case_when(
    (Anas_zonorhyncha == 0 & Anser_indicus == 0 & Anser_brachyrhynchus == 0 & Gallus_gallus == 0) ~ 0, # very ancestral
    (Anas_zonorhyncha == 0 & Anser_indicus == 0 & Anser_brachyrhynchus == 0 & Gallus_gallus == 1) ~ 1, # ancestral
    (Anas_zonorhyncha == 1 & Anser_indicus == 0 & Anser_brachyrhynchus == 0 & Gallus_gallus == 1) ~ 2, # since Anas
    (Anas_zonorhyncha == 1 & Anser_indicus == 0 & Anser_brachyrhynchus == 1) ~ 3, # indicus x brachyrhynchus hybrid
    (Anas_zonorhyncha == 1 & Anser_indicus == 1 & Anser_brachyrhynchus == 0) ~ 4, # cygnoides x brachyrhynchus hybrid
    (Anas_zonorhyncha == 1 & Anser_indicus == 1 & Anser_brachyrhynchus == 1) ~ 5, # since cygnoides+indicus
    TRUE ~ -1 # unclear
  )
  ) %>%
  inner_join(reciprocal_blast_out) %>%
  # filter(state != -1) %>%
  base::unique()
# cross-tabulate insertion age class vs CR1 subfamily (console check)
table(collated_info_2$state, collated_info_2$subfamily)
# stacked histogram of divergence, filled by age class
divergence_plot <- ggplot(data = collated_info_2, aes(jc_dist, fill = as.factor(state))) + geom_histogram(binwidth = 0.005) +
  scale_x_continuous(name = "Jukes-Cantor distance", expand = c(0,0), limits = c(-0.005, 0.405)) + theme_bw() +
  scale_fill_manual(labels = c("Ancestral", "Since Anas", "Anser i. + Anser c.", "Since Anser indicus", "Since Anser brachyrhynchus"),
                    values = brewer.pal(n = 6, name = 'BrBG')[c(1:2, 4:6)]) +
  scale_y_continuous(limits = c(0, 1200), expand = c(0,0), name = "Insertions") +
  ggtitle(label = gsub("_", " ", species_name)) +
  theme(plot.title = element_text(family = "Arial", face = "italic", hjust = 0.5, size = 14),
        axis.title.x = element_text(family = "Arial", size = 12),
        axis.title.y = element_text(family = "Arial", size = 12),
        axis.text.x = element_text(family = "Arial", size = 11),
        axis.text.y = element_text(family = "Arial", size = 11),
        legend.text = element_text(family = "Arial", size = 11),
        legend.title = element_blank())
divergence_plot
ggsave(divergence_plot, filename = paste0("extended_fastas/plots/", species_name, "_small_insertion_timeline.svg"), device = "svg", height = 10, width = 18, units = "cm")
# keep only the lineage-specific age classes for the subfamily breakdown
collated_info_3 <- collated_info_2 %>%
  filter(state %in% c(1, 2, 3))
table(collated_info_3$state)
# drop subfamilies with trivially few insertions (<1% of total, or singletons)
collated_info_3_tbl <- as_tibble(as.data.frame(table(collated_info_3$subfamily))) %>%
  mutate(Var1 = as.character(Var1)) %>%
  filter(Freq/sum(Freq) > 0.01, Freq > 1)
collated_info_3 <- collated_info_3 %>%
  filter(subfamily %in% collated_info_3_tbl$Var1)
# per-subfamily violin plot of divergence (area scaled by count)
subfamily_plot <- ggplot(data = collated_info_3, aes(jc_dist, subfamily, fill = as.factor(subfamily))) + geom_violin(scale = "count") + theme_bw() +
  scale_fill_manual(values = subfamily_colours$class_colour[subfamily_colours$subfamily %in% collated_info_3$subfamily]) +
  scale_y_discrete(expand = c(0,0), limits = rev(levels(as.factor(collated_info_3$subfamily)))) +
  scale_x_continuous(expand = c(0,0), name = "Jukes-Cantor distance", limits = c(0, 0.26)) +
  ggtitle(label = sub("_", " ", species_name), subtitle = paste0(nrow(collated_info_3), " CR1s")) +
  theme(legend.position = "none",
        plot.title = element_text(family = "Arial", face = "italic", hjust = 0.5, size = 14),
        plot.subtitle = element_text(family = "Arial", hjust = 0.5, size = 12),
        axis.title.x = element_text(family = "Arial", size = 12),
        axis.title.y = element_text(family = "Arial", size = 12),
        axis.text.x = element_text(family = "Arial", size = 11),
        axis.text.y = element_text(family = "Arial", size = 11))
subfamily_plot
ggsave(subfamily_plot, filename = paste0("extended_fastas/plots/", species_name, "_subfamily_timeline.svg"), device = "svg", height = 10, width = 18, units = "cm")
# NOTE(review): 'collated_info_3_ranges' and 'genome_seq' are never defined in
# this script -- this loop presumably relies on objects created elsewhere
# (another script or an interactive session); confirm before running
# non-interactively. Each iteration extracts one subfamily's sequences and
# shells out to mafft for a per-subfamily alignment.
for(i in 1:nrow(collated_info_3_tbl)){
  to_align_ranges <- collated_info_3_ranges %>%
    filter(sseqid == collated_info_3_tbl$Var1[i])
  to_align_seq <- getSeq(genome_seq, to_align_ranges)
  names(to_align_seq) <- paste0(seqnames(to_align_ranges), ":", ranges(to_align_ranges), "(", strand(to_align_ranges), ")")
  writeXStringSet(to_align_seq, "temp/temp.fa")
  system(paste0("mafft --localpair --thread 12 temp/temp.fa > extended_fastas/new_class/alignments/", species_name, "#", collated_info_3_tbl$subfamily[i],
                "#", collated_info_3_tbl$Var1[i],".fasta"))
}
| /small_species_specific_ortho/Anser_c.R | no_license | jamesdgalbraith/Avian_CR1_Activity | R | false | false | 13,508 | r | library(tidyverse)
library(GenomicRanges)
library(BSgenome)
library(plyranges)
library(reshape2)
library(RColorBrewer)
species_list <- read_tsv("~/Genomes/Birds/bird_genomes.tsv", col_names = c("species_name", "genome_name"))
work_dir <- "~/temp"
query_dir <- "~/Birds/OrthologueRegions/cluster"
genome_dir <- "~/Genomes/Birds"
repbase_rep <- "RepBase_for_class.fasta"
subfamily_colours <- tibble(subfamily = c("CR1-C", "CR1-Croc", "CR1-E", "CR1-J", "CR1-W", "CR1-X", "CR1-Y", "CR1-Z"),
class_colour = c("#FEFA70", "#9D9D9D", "#FED16B", "#98FF6F", "#D363FE", "#6F87FF", "#B56FFF", "#FE6363"))
subfamilies <- read_tsv("extended_fastas/recip_blast/classes.tsv") %>%
inner_join(subfamily_colours)
# set query species
species_name <- species_list$species_name[12]
query_info <- tibble(qseqid = names(readDNAStringSet(paste0("extended_fastas/", species_name, "_extended_small.fasta")))) %>%
mutate(qseqid_2 = qseqid) %>%
tidyr::separate(qseqid_2, into = c("qseqnames", "ranges"), ":") %>%
tidyr::separate(ranges, into = c("ranges", "ostrand"), "\\(") %>%
tidyr::separate(ranges, into = c("ostart", "oend"), "-") %>%
mutate(ostrand = sub(")", "", ostrand),
ostart = as.integer(ostart),
oend = as.integer(oend),
olength = oend - ostart + 1)
collated_info <- query_info %>% dplyr::select(qseqid)
queries <- query_info %>% dplyr::select(qseqid, olength)
for(i in c(9:13, 58)){
# set subject species
s_species <- species_list$species_name[i]
# skip if subject species is query species
if(species_name == s_species){
collated_info <- collated_info %>% mutate(!! s_species := 0)
next()
}
# read in blast, remove small hits, label flank, and determine start, end and strand
other_blast <- read_tsv(paste0("extended_fastas/blast_out/", species_name, "_in_", s_species, "_small.tsv") ,col_names = c("qseqid", "sseqid", "qstart", "qend", "sstart", "send", "bitscore", "length", "qlen", "slen", "pident")) %>%
mutate(qseqid = sub("#.*", "", qseqid))
old <- other_blast %>%
filter(qstart <= 450, qend >= qlen - 450) %>%
dplyr::group_by(qseqid) %>%
dplyr::slice(1) %>%
dplyr::ungroup()
other_filtered <- other_blast %>%
filter(!qseqid %in% old$qseqid) %>%
mutate(olen = qlen - 1200) %>%
filter(qstart <= 450 | qend >= qlen - 450) %>%
mutate(sstrand = ifelse(sstart < send, "+", "-"),
start = ifelse(sstart < send, sstart, send),
end = ifelse(sstart < send, send, sstart),
strand = case_when(qstart <= 450 ~ "5",
qend >= qlen - 450 ~ "3"),
strand_filter = paste0(qseqid, "#", sstrand)) %>%
arrange(qseqid, start) %>%
mutate(sstart = start, send = end) %>%
dplyr::select(-start, -end)
# select single hits
hit_no <- tibble(strand_filter = names(table(other_filtered$strand_filter)), n = as.integer(table(other_filtered$strand_filter))) %>%
filter(n == 2)
hit_no_s <- tibble(qseqid = names(table(sub("#.", "", hit_no$strand_filter))), n = as.integer(table(sub("#.", "", hit_no$strand_filter)))) %>%
filter(n == 1)
hit_no <- hit_no %>%
filter(sub("#.", "", strand_filter) %in% hit_no_s$qseqid)
# work out distances between query stop start and subject stop start
other_further_filtered_5_3 <- other_filtered %>%
filter(strand_filter %in% hit_no$strand_filter, sstrand == "+") %>%
dplyr::select(-strand_filter) %>%
filter((qseqid == lead(qseqid) & strand == "5" & lead(strand == "3")) |
(qseqid == lag(qseqid) & strand == "3" & lag(strand == "5"))) %>%
mutate(sdist = case_when(qseqid == lead(qseqid) ~ lead(sstart) - send + 1,
qseqid == lag(qseqid) ~ sstart - lag(send) + 1),
qdist = case_when(qseqid == lead(qseqid) ~ lead(qstart) - qend + 1,
qseqid == lag(qseqid) ~ qstart - lag(qend) + 1),
diff = qdist - sdist)
other_further_filtered_3_5 <- other_filtered %>%
filter(strand_filter %in% hit_no$strand_filter, sstrand == "-") %>%
dplyr::select(-strand_filter) %>%
filter((qseqid == lead(qseqid) & strand == "3" & lead(strand == "5")) |
(qseqid == lag(qseqid) & strand == "5" & lag(strand == "3"))) %>%
mutate(sdist = case_when(qseqid == lead(qseqid) ~ lead(sstart) - send + 1,
qseqid == lag(qseqid) ~ sstart - lag(send) + 1),
qdist = case_when(qseqid == lead(qseqid) ~ qstart - lead(qend) + 1,
qseqid == lag(qseqid) ~ lag(qstart) - qend + 1),
diff = qdist - sdist)
# find old by weeding out overlaps (potential deletions)
other_further_filtered_old_5_3 <- other_further_filtered_5_3 %>%
filter((strand == 5 & qend > 616 & lead(qseqid) == qseqid) |
(lag(qseqid) == qseqid & lag(strand) == 5 & lag(qend) > 616) |
(strand == 3 & qstart < qlen - 616 & lag(qseqid) == qseqid) |
(lead(qseqid) == qseqid & lead(strand) == 3 & lead(qstart) < qlen - 616)) %>%
filter(abs(diff) < 10000) %>%
mutate(start = ifelse(qseqid == lead(qseqid), sstart, lag(sstart)),
end = ifelse(qseqid == lead(qseqid), lead(send), send)) %>%
group_by(qseqid) %>%
dplyr::slice(1) %>%
ungroup() %>%
mutate(state = 0) %>%
dplyr::select(sseqid, start, end, qseqid, sstrand, state)
# find old by weeding out overlaps (potential deletions)
other_further_filtered_old_3_5 <- other_further_filtered_3_5 %>%
filter((strand == 5 & qend > 616 & lag(qseqid) == qseqid) |
(strand == 3 & lead(qend) > 616 & lead(qseqid) == qseqid) |
(strand == 3 & qstart < qlen - 616 & lead(qseqid) == qseqid) |
(strand == 5 & lag(qstart) < qlen - 616 & lag(qseqid) == qseqid)) %>%
filter(abs(diff) < 10000) %>%
mutate(start = ifelse(qseqid == lead(qseqid), sstart, lag(sstart)),
end = ifelse(qseqid == lead(qseqid), lead(send), send)) %>%
group_by(qseqid) %>%
dplyr::slice(1) %>%
ungroup() %>%
mutate(state = 0) %>%
dplyr::select(sseqid, start, end, qseqid, sstrand, state)
# determine old vs new
other_further_filtered <- rbind(other_further_filtered_5_3, other_further_filtered_3_5) %>%
filter(sdist > -1000, sdist < 1000, !qseqid %in% other_further_filtered_old_3_5$qseqid, !qseqid %in% other_further_filtered_old_5_3$qseqid) %>%
mutate(state = case_when((sdist <= 16 & qdist >= olen - 24) ~ 1,
(qdist >= -16 & qdist <= 16 & sdist >= 90) ~ 0,
TRUE ~ -1)) %>%
mutate(start = ifelse(qseqid == lead(qseqid), sstart, lag(sstart)),
end = ifelse(qseqid == lead(qseqid), lead(send), send)) %>%
group_by(qseqid) %>%
dplyr::slice(1) %>%
ungroup() %>%
dplyr::select(sseqid, start, end, qseqid, sstrand, state)
old <- old %>%
mutate(sstrand = ifelse(sstart < send, "+", "-"),
start = ifelse(sstart < send, sstart, send),
end = ifelse(sstart < send, send, sstart),
state = 0) %>%
dplyr::select(sseqid, start, end, qseqid, sstrand, state)
present_bound_resolved <- rbind(old, other_further_filtered_old_3_5, other_further_filtered_old_5_3, other_further_filtered) %>%
arrange(qseqid)
# write resolved locations to file
write_tsv(x = present_bound_resolved, file = paste0("extended_fastas/resolved_locations/small_", species_name, "_in_", s_species, ".tsv"), col_names = F)
# select relevant info for further resolution
pre_info <- present_bound_resolved %>%
select(qseqid, state) %>%
full_join(queries) %>%
mutate(state = ifelse(is.na(state), -1, state)) %>%
dplyr::rename(!! s_species := state) %>%
dplyr::select(-olength)
# collate with previous info
collated_info <- full_join(pre_info, collated_info)
}
# missing = -1, new = 1, ancestral = 0
# join all data
collated_info <- inner_join(collated_info, query_info)
# write to file
write_tsv(x = collated_info, file = paste0("extended_fastas/collated_results/", species_name, "_small_collated_results.tsv"), col_names = T)
# plotting
species_name <- species_list$species_name[12]
collated_info <- read_tsv(paste0("extended_fastas/collated_results/", species_name, "_small_collated_results.tsv")) %>%
filter(olength >= 100)
subfamilies <- read_tsv("extended_fastas/recip_blast/classes.tsv")
reciprocal_blast_out <- read_tsv(paste0("extended_fastas/recip_blast/", species_name, ".out"),
col_names = c("qseqid", "sseqid", "pident", "length", "mismatch", "gapopen", "qstart", "qend", "sstart", "send", "evalue", "bitscore")) %>%
mutate(d = mismatch/length) %>%
mutate(jc_dist = (-3 / 4) * log(1 - (4 * d / 3))) %>%
dplyr::group_by(qseqid) %>% # group using query sequence
dplyr::slice(1) %>% # select top hits (highest bitscore)
dplyr::ungroup() %>%
mutate(sseqid = sub(";.*", "", sub(".*#", "", as.character(sseqid)))) %>%
inner_join(subfamilies) %>%
dplyr::select(qseqid, jc_dist, subfamily, sseqid)
# determine whether ancestral or recent (needs to be adjusted for each species)
collated_info_2 <- collated_info %>%
mutate(state = case_when(
(Anas_zonorhyncha == 0 & Anser_indicus == 0 & Anser_brachyrhynchus == 0 & Gallus_gallus == 0) ~ 0, # very ancestral
(Anas_zonorhyncha == 0 & Anser_indicus == 0 & Anser_brachyrhynchus == 0 & Gallus_gallus == 1) ~ 1, # ancestral
(Anas_zonorhyncha == 1 & Anser_indicus == 0 & Anser_brachyrhynchus == 0 & Gallus_gallus == 1) ~ 2, # since Anas
(Anas_zonorhyncha == 1 & Anser_indicus == 0 & Anser_brachyrhynchus == 1) ~ 3, # indicus x brachyrhynchus hybrid
(Anas_zonorhyncha == 1 & Anser_indicus == 1 & Anser_brachyrhynchus == 0) ~ 4, # cygnoides x brachyrhynchus hybrid
(Anas_zonorhyncha == 1 & Anser_indicus == 1 & Anser_brachyrhynchus == 1) ~ 5, # since cygnoides+indicus
TRUE ~ -1 # unclear
)
) %>%
inner_join(reciprocal_blast_out) %>%
# filter(state != -1) %>%
base::unique()
table(collated_info_2$state, collated_info_2$subfamily)
divergence_plot <- ggplot(data = collated_info_2, aes(jc_dist, fill = as.factor(state))) + geom_histogram(binwidth = 0.005) +
scale_x_continuous(name = "Jukes-Cantor distance", expand = c(0,0), limits = c(-0.005, 0.405)) + theme_bw() +
scale_fill_manual(labels = c("Ancestral", "Since Anas", "Anser i. + Anser c.", "Since Anser indicus", "Since Anser brachyrhynchus"),
values = brewer.pal(n = 6, name = 'BrBG')[c(1:2, 4:6)]) +
scale_y_continuous(limits = c(0, 1200), expand = c(0,0), name = "Insertions") +
ggtitle(label = gsub("_", " ", species_name)) +
theme(plot.title = element_text(family = "Arial", face = "italic", hjust = 0.5, size = 14),
axis.title.x = element_text(family = "Arial", size = 12),
axis.title.y = element_text(family = "Arial", size = 12),
axis.text.x = element_text(family = "Arial", size = 11),
axis.text.y = element_text(family = "Arial", size = 11),
legend.text = element_text(family = "Arial", size = 11),
legend.title = element_blank())
divergence_plot
ggsave(divergence_plot, filename = paste0("extended_fastas/plots/", species_name, "_small_insertion_timeline.svg"), device = "svg", height = 10, width = 18, units = "cm")
collated_info_3 <- collated_info_2 %>%
filter(state %in% c(1, 2, 3))
table(collated_info_3$state)
collated_info_3_tbl <- as_tibble(as.data.frame(table(collated_info_3$subfamily))) %>%
mutate(Var1 = as.character(Var1)) %>%
filter(Freq/sum(Freq) > 0.01, Freq > 1)
collated_info_3 <- collated_info_3 %>%
filter(subfamily %in% collated_info_3_tbl$Var1)
subfamily_plot <- ggplot(data = collated_info_3, aes(jc_dist, subfamily, fill = as.factor(subfamily))) + geom_violin(scale = "count") + theme_bw() +
scale_fill_manual(values = subfamily_colours$class_colour[subfamily_colours$subfamily %in% collated_info_3$subfamily]) +
scale_y_discrete(expand = c(0,0), limits = rev(levels(as.factor(collated_info_3$subfamily)))) +
scale_x_continuous(expand = c(0,0), name = "Jukes-Cantor distance", limits = c(0, 0.26)) +
ggtitle(label = sub("_", " ", species_name), subtitle = paste0(nrow(collated_info_3), " CR1s")) +
theme(legend.position = "none",
plot.title = element_text(family = "Arial", face = "italic", hjust = 0.5, size = 14),
plot.subtitle = element_text(family = "Arial", hjust = 0.5, size = 12),
axis.title.x = element_text(family = "Arial", size = 12),
axis.title.y = element_text(family = "Arial", size = 12),
axis.text.x = element_text(family = "Arial", size = 11),
axis.text.y = element_text(family = "Arial", size = 11))
subfamily_plot
ggsave(subfamily_plot, filename = paste0("extended_fastas/plots/", species_name, "_subfamily_timeline.svg"), device = "svg", height = 10, width = 18, units = "cm")
for(i in 1:nrow(collated_info_3_tbl)){
to_align_ranges <- collated_info_3_ranges %>%
filter(sseqid == collated_info_3_tbl$Var1[i])
to_align_seq <- getSeq(genome_seq, to_align_ranges)
names(to_align_seq) <- paste0(seqnames(to_align_ranges), ":", ranges(to_align_ranges), "(", strand(to_align_ranges), ")")
writeXStringSet(to_align_seq, "temp/temp.fa")
system(paste0("mafft --localpair --thread 12 temp/temp.fa > extended_fastas/new_class/alignments/", species_name, "#", collated_info_3_tbl$subfamily[i],
"#", collated_info_3_tbl$Var1[i],".fasta"))
}
|
#### Principal Component Analysis ----
# Remove predictors that are (nearly) redundant with retained columns, per the
# correlation checks noted in the original analysis.
drop_cols <- c(
  "Manager_Current_Designation",  # high correlation with Manager_Grade
  "Manager_Business2",            # very high correlation with Manager_Business
  "Manager_Num_Products2",        # correlated with Manager_Num_Products
  "Manager_Gender",               # highly correlated with Gender_Dynamics
  "Applicant_City_PIN"            # highly correlated with Applicant_City_Pin
)
newdata2[drop_cols] <- NULL

# Standardize the predictors, then run PCA on the scaled matrix.
scaledata <- scale(newdata2)
pca_data <- prcomp(scaledata)

# Proportion of total variance explained by each principal component.
std_dev <- pca_data$sdev
pr_var <- std_dev ^ 2
prop_varex <- pr_var / sum(pr_var)

# Scree plot of the variance proportions.
plot(
  prop_varex,
  xlab = "Principal Component",
  ylab = "Proportion of Variance Explained",
  type = "b"
)

# Component scores for every observation, split back into train/test rows
# (train rows come first in the stacked data).
p_data <- data.frame(pca_data$x)
train_idx <- 1:nrow(train)
train_new <- p_data[train_idx, ]
test_new <- p_data[-train_idx, ]
| /principal_comp_analysis.R | no_license | keshavkl/Fintro_Recruitment | R | false | false | 801 | r | ####Principal Component Analysis
newdata2$Manager_Current_Designation <- NULL # Has high correlationwith Manager_Grade
newdata2$Manager_Business2 <- NULL #Has very high correlation with Manager_Business
newdata2$Manager_Num_Products2 <- NULL #Correlated to Manager_Num_Products
newdata2$Manager_Gender <- NULL ## Highly correlated with Gender_Dynamics
newdata2$Applicant_City_PIN <- NULL ## Highly corelated with Applicant_City_Pin
scaledata <- scale(newdata2)
pca_data <- prcomp(scaledata)
std_dev <- pca_data$sdev
pr_var <- std_dev^2
prop_varex <- pr_var/sum(pr_var)
plot(prop_varex, xlab = "Principal Component",
ylab = "Proportion of Variance Explained",
type = "b")
p_data <- data.frame(pca_data$x)
train_new <- p_data[1:nrow(train), ]
test_new <- p_data[-(1:nrow(train)), ]
|
# Seedling dry-weight example: fit a random-intercept model (random tray
# effect, fixed genotype effect) by both ML and REML, using nlme::lme and
# lme4::lmer side by side.
d=read.delim("http://dnett.github.io/S510/SeedlingDryWeight2.txt")
d
# Strip plot of weight by tray; small vertical jitter (assumes 56 rows in the
# data -- TODO confirm) so overlapping points are visible. Colour 2*Genotype
# gives red (2) for genotype 1 and blue (4) for genotype 2.
plot(d[,2],d[,4]+rnorm(56,0,.2),
     xlab="Tray",ylab="Seedling Dry Weight",
     col=2*d[,1],pch="-",cex=2)
legend("topright",c("Genotype 1","Genotype 2"),fill=c(2,4),border=c(2,4))
d$Genotype=factor(d$Genotype)
library(nlme)
# Maximum-likelihood fits (nlme then lme4; REML=F requests ML in lmer)
lme(SeedlingWeight~Genotype,random=~1|Tray,method="ML",data=d)
library(lme4)
lmer(SeedlingWeight~Genotype+(1|Tray),REML=F,data=d)
# REML fits (the default estimation method for both packages)
lme(SeedlingWeight~Genotype,random=~1|Tray,data=d)
lmer(SeedlingWeight~Genotype+(1|Tray),data=d)
| /S510/20SeedlingDryWeightMLREML.R | no_license | cassiewinn/dnett.github.io | R | false | false | 553 | r | d=read.delim("http://dnett.github.io/S510/SeedlingDryWeight2.txt")
d
plot(d[,2],d[,4]+rnorm(56,0,.2),
xlab="Tray",ylab="Seedling Dry Weight",
col=2*d[,1],pch="-",cex=2)
legend("topright",c("Genotype 1","Genotype 2"),fill=c(2,4),border=c(2,4))
d$Genotype=factor(d$Genotype)
library(nlme)
lme(SeedlingWeight~Genotype,random=~1|Tray,method="ML",data=d)
library(lme4)
lmer(SeedlingWeight~Genotype+(1|Tray),REML=F,data=d)
lme(SeedlingWeight~Genotype,random=~1|Tray,data=d)
lmer(SeedlingWeight~Genotype+(1|Tray),data=d)
|
# Auto-generated fuzz/regression input (note the extreme and denormal doubles):
# calls the internal meteor:::ET0_Makkink (Makkink reference
# evapotranspiration -- presumably; verify against the meteor package) with a
# stored argument list and prints the structure of the result.
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = 9.16848866468164e-311, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
\name{rgr.ols}
\alias{rgr.ols}
\title{Random Group Resampling OLS Regression}
\description{Uses Random Group Resampling (RGR) within an Ordinary Least Square (OLS)
framework to contrast actual group results with pseudo group results. This specific
function performs an RGR on a hierarchical OLS model with two predictors as
in Bliese & Halverson (2002). To run this analysis on data with more predictors,
the function would have to be modified.}
\usage{
rgr.ols(xdat1,xdat2,ydata,grpid,nreps)
}
\arguments{
\item{xdat1}{The first predictor.}
\item{xdat2}{The second predictor.}
\item{ydata}{The outcome.}
\item{grpid}{The group identifier.}
\item{nreps}{The number of pseudo groups to create.}
}
\value{A matrix containing mean squares. Each row provides mean square values for a single pseudo
group iteration}
\references{Bliese, P. D., & Halverson, R. R. (2002). Using random group resampling in multilevel research. Leadership Quarterly, 13, 53-68.}
\author{Paul Bliese
\email{pdbliese@gmail.com}}
\seealso{\code{\link{mix.data}}}
\examples{
data(lq2002)
RGROUT<-rgr.ols(lq2002$LEAD,lq2002$TSIG,lq2002$HOSTILE,lq2002$COMPID,100)
#Compare values to those reported on p.62 in Bliese & Halverson (2002)
summary(RGROUT)
}
\keyword{attribute}
| /man/rgr.ols.Rd | no_license | cran/multilevel | R | false | false | 1,314 | rd | \name{rgr.ols}
\alias{rgr.ols}
\title{Random Group Resampling OLS Regression}
\description{Uses Random Group Resampling (RGR) within an Ordinary Least Square (OLS)
framework to contrast actual group results with pseudo group results. This specific
function performs an RGR on an OLS hierarchical OLS model with two predictors as
in Bliese & Halverson (2002). To run this analysis on data with more predictors,
the function would have to be modified.}
\usage{
rgr.ols(xdat1,xdat2,ydata,grpid,nreps)
}
\arguments{
\item{xdat1}{The first predictor.}
\item{xdat2}{The second predictor.}
\item{ydata}{The outcome.}
\item{grpid}{The group identifier.}
\item{nreps}{The number of pseudo groups to create.}
}
\value{A matrix containing mean squares. Each row provides mean square values for a single pseudo
group iteration}
\references{Bliese, P. D., & Halverson, R. R. (2002). Using random group resampling in multilevel research. Leadership Quarterly, 13, 53-68.}
\author{Paul Bliese
\email{pdbliese@gmail.com}}
\seealso{\code{\link{mix.data}}}
\examples{
data(lq2002)
RGROUT<-rgr.ols(lq2002$LEAD,lq2002$TSIG,lq2002$HOSTILE,lq2002$COMPID,100)
#Compare values to those reported on p.62 in Bliese & Halverson (2002)
summary(RGROUT)
}
\keyword{attribute}
|
library(igraph)
library(Matrix)
## save plots
## NOTE(review): machine-specific absolute path; setwd() mutates session-global
## state -- parameterize the output directory before reusing this script.
dirname <- "C:\\Users\\T430\\Google Drive\\PhD\\Dissertation\\competition networks\\acquisitions"
setwd(dirname)
## cache default graphics params so plot settings can later be restored via par(.par)
.par = par()
##
#
##
##
# Plot a (possibly bipartite) igraph object with project-specific styling.
# - bipartite graphs: circles for TRUE-type vertices, squares for FALSE-type
# - focal.firm (matched by vertex name): dark blue fill, white label
# - unipartite graphs only: vertices with edge weight > 1 to the focal firm
#   (multimarket contact) get dark red frames/labels and bold-italic font
# seed fixes the layout so repeated calls render identically.
##
plot2 <- function(gx, layout=layout.fruchterman.reingold, vertex.size=15, focal.firm=NA, fam='sans', edge.curved=F, seed=11111, ...)
{
  vAttrs <- igraph::list.vertex.attributes(gx)
  if ('type' %in% vAttrs) {
    ## color/shape by bipartite mode
    vcolors <- sapply(V(gx)$type, function(x)ifelse(x, "SkyBlue2", "gray"))
    lcolors <- sapply(V(gx)$type, function(x)ifelse(x, "darkblue", "black"))
    vshapes <- sapply(1:vcount(gx),function(x)ifelse(V(gx)$type[x], "circle", "square"))
    isBipartite <- length(unique(V(gx)$type)) > 1
  } else {
    ## no type attribute: uniform styling
    vcolors <- rep("SkyBlue2", vcount(gx))
    lcolors <- rep("darkblue", vcount(gx))
    vshapes <- rep("circle", vcount(gx))
    isBipartite <- FALSE
  }
  fonts <- rep(1, vcount(gx))
  framecols <- rep('black', vcount(gx))
  framewidths <- rep(1, vcount(gx))
  if(!is.na(focal.firm)) {
    ## highlight the focal firm
    vcolors[V(gx)$name==focal.firm] <- 'darkblue'
    lcolors[V(gx)$name==focal.firm] <- 'white'
  }
  if(!isBipartite) {
    ## unipartite only: mark multimarket-contact partners of the focal firm
    ## NOTE(review): assumes E(gx)$weight exists -- as_adjacency_matrix errors otherwise
    adjmat <- as_adjacency_matrix(gx, attr = 'weight', sparse = F)
    ffidx <- which(V(gx)$name==focal.firm)
    mmcidx <- unname(which(adjmat[ , ffidx] > 1))
    framecols[mmcidx] <- 'darkred'
    lcolors[mmcidx] <- 'darkred'
    framewidths[mmcidx] <- 5
    fonts[mmcidx] <- 4
  }
  set.seed(seed)  # deterministic layout
  plot(gx,
       layout = layout,
       layout.par = list(),
       labels = NULL,
       label.color = lcolors,
       label.font = NULL,
       label.degree = -pi/4,
       label.dist = 0,
       vertex.label=sapply(1:vcount(gx), function(x) ifelse("name" %in% vAttrs, V(gx)$name[x], x)),
       vertex.color = vcolors,
       vertex.shape = vshapes,
       vertex.size = vertex.size,
       vertex.frame.color=framecols,
       vertex.frame.width=framewidths,
       vertex.label.family=fam, # Font family of the label (e.g."Times", "Helvetica")
       vertex.label.font=fonts, # Font: 1 plain, 2 bold, 3, italic, 4 bold italic, 5 symbol
       vertex.label.color=lcolors,
       edge.color = "darkgrey",
       edge.width = 1 + 2 * (E(gx)$weight-1),  # thicker edges for MMC (weight > 1)
       edge.labels = NA,
       edge.lty=1,
       margin=0,
       loop.angle=0,
       axes = FALSE,
       xlab = "",
       ylab = "",
       xlim=c(-1,1),
       ylim=c(-1,1),
       edge.curved=edge.curved,
       ...)
}
# ##
# # Bipartite Graph Acquisition -- PREVIOUS VERSION
# ##
# biAcq.prev <- function(gi, acquirer, target, decay=-0.2, project=T, verbose=T)
# {
# if (project) {
# gi.l <- bipartite.projection(gi, multiplicity = T, remove.type = F)
# vcs <- sapply(gi.l,vcount)
# gi <- gi.l[[ which.max(vcs) ]]
# V(gi)$type <- unlist(V(gi)$type)
# V(gi)$name <- unlist(V(gi)$name)
# }
#
# tdf <- as_data_frame(gi, what='vertices')
# tdf$before <- power_centrality(gi, exponent = decay)
# tdf$after <- NA
#
# vnamemap <- names(V(gi))
# vmap <- as.integer(V(gi))
#
# revord <- which(vnamemap==target) < which(vnamemap==acquirer)
# comb.func <- ifelse(revord, 'last', 'first')
#
# vmap[vnamemap==target] <- vmap[vnamemap==acquirer]
#
# vertex.attr.comb <- list(type=ifelse(revord, 'last', 'first'),
# name=ifelse(revord, 'last', 'first'))
#
# gi.2 <- igraph::contract.vertices(gi, vmap, vertex.attr.comb = vertex.attr.comb)
# gi.2 <- igraph::simplify(gi.2, remove.multiple = T, remove.loops = T, edge.attr.comb = list(weight='sum'))
# gi.2 <- igraph::induced.subgraph(gi.2, V(gi.2)[igraph::degree(gi.2)>0])
#
# tdf$after[tdf$name!=target] <- power_centrality(gi.2, exponent = decay)
# tdf$delta <- tdf$after - tdf$before
#
# if (verbose)
# print(tdf)
#
# return(list(df=tdf, g=gi.2))
# }
##
# Bipartite Graph Acquisition
##
biAcq <- function(gi, acquirer.name, target.name, project=F, verbose=T)
{
  ## Simulate an acquisition: collapse the target vertex into the acquirer
  ## vertex via igraph::contract.vertices, then simplify (merge multi-edges,
  ## drop loops) and remove isolates. Returns the post-acquisition graph.
  ## - gi: igraph with a vertex `name` attribute (bipartite or unipartite)
  ## - project: if TRUE and gi is bipartite, first project onto the larger mode
  ## NOTE(review): `verbose` is accepted but never used in this version.
  if ( ! 'name' %in% igraph::list.vertex.attributes(gi))
    stop('gi must have name attribute.')
  is.bi <- is.bipartite.safe(gi)
  if (project & is.bi) {
    ## one-mode projection onto the mode with more vertices (firms)
    gi.l <- bipartite.projection(gi, multiplicity = T, remove.type = F)
    vcs <- sapply(gi.l,vcount)
    gi <- gi.l[[ which.max(vcs) ]]
    is.bi <- is.bipartite.safe(gi)
  }
  acquirer <- which(V(gi)$name==acquirer.name)
  target <- which(V(gi)$name==target.name)
  if (length(acquirer)==0 | length(target)==0) {
    stop(sprintf('has acquirer=%s; has target=%s', length(acquirer)>0, length(target)>0))
  }
  vnamemap <- names(V(gi))
  vmap <- as.integer(V(gi))
  ## contract.vertices keeps attributes in vertex order; `revord` tracks whether
  ## the target appears before the acquirer so the acquirer's attributes survive
  revord <- which(vnamemap==target.name) < which(vnamemap==acquirer.name)
  comb.func <- ifelse(revord, 'last', 'first')  # NOTE(review): computed but unused
  ## map the target's vertex id onto the acquirer's so contraction merges them
  vmap[vnamemap==target.name] <- vmap[vnamemap==acquirer.name]
  vertex.attr.comb <- list(type=function(x)ifelse(revord, x[length(x)], x[1]),
                           name=function(x)ifelse(revord, x[length(x)], x[1]))
  if (is.bi) {
    ## bipartite firm-market edges: keep one weight, do not sum market ties
    edge.attr.comb <- list(weight=function(x)ifelse(revord, x[length(x)], x[1]))
  } else {
    ## unipartite firm-firm edges: merged multi-edges accumulate MMC weight
    edge.attr.comb <- list(weight='sum')
  }
  gi.2 <- igraph::contract.vertices(gi, vmap, vertex.attr.comb = vertex.attr.comb)
  gi.2 <- igraph::simplify(gi.2, remove.multiple = T, remove.loops = T, edge.attr.comb = edge.attr.comb)
  gi.2 <- igraph::induced.subgraph(gi.2, V(gi.2)[igraph::degree(gi.2)>0])  # drop isolates
  return(gi.2)
}
##
#
##
## Rescale a numeric vector onto the closed interval [min(minmax), max(minmax)].
## With log=TRUE the mapping is proportional to log(x): values already >= 1 (or
## exactly 0) are log-transformed directly (zeros land on the lower bound);
## otherwise proportions are shifted above 1 before the log transform.
mapTo <- function(x, minmax=c(9,20), log=F) {
  if (any(minmax < 0)) stop ("Negative output range is not allowed.\nPlease assign minmax argument as vector of 2 non-negative values (x>=0) and rerun function.")
  lo <- min(minmax)
  span <- max(minmax) - lo                       # width of the output interval
  prop <- (x - min(x)) / (max(x) - min(x))       # position of each value in [0,1]
  if (!log) {
    return(lo + span * prop)
  }
  if (all(x >= 1 | x == 0)) {
    ## values are log-safe: normalize log(x) by its largest finite value
    lx <- log(x)
    prop <- lx / max(lx[lx < Inf & lx > -Inf])
    out <- lo + span * prop
    out[is.na(out) | is.nan(out) | out == -Inf | out == Inf] <- lo  # zeros -> lower bound
    return(out)
  }
  ## shift proportions above 1 so log() is defined, then renormalize so the
  ## result is still proportional to the original ordering of x
  shifted <- (prop + 1) / min(prop + 1)
  lo + span * (log(shifted) / log(max(shifted)))
}
## Bonacich power centrality of every vertex, with attenuation exponent `decay`.
centPow <- function(gx, decay=-0.1)
{
  power_centrality(gx, exponent = decay)
}
## Data frame of power centrality scores: one row per vertex (keyed by name),
## one column per attenuation exponent in `betas`.
df.pow <- function(gx, betas=c(-.3,-.2,-.1,-.01,0))
{
  out <- data.frame(name=V(gx)$name)
  for (b in betas) {
    out[[as.character(b)]] <- centPow(gx, b)
  }
  return(out)
}
##
# checks if graph is actually bipartite by `type` attribute
# - if only one `type` then not functionally bipartite
# - if more than one `type` then is functionally biparite
##
##
# Functionally-bipartite check: TRUE only when g carries a `type` vertex
# attribute AND that attribute takes at least two distinct values.
# (A graph whose vertices all share one type is treated as unipartite.)
##
is.bipartite.safe <- function(g)
{
  has.type <- igraph::is.bipartite(g) && ('type' %in% igraph::list.vertex.attributes(g))
  if (!has.type)
    return(FALSE)
  length(unique(V(g)$type)) > 1
}
##
# Combine two bipartite networks
# - keeps original names of vertex and edge properties (unlike igraph "+" operator: g3 <- g1 + g2)
##
##
# Combine two bipartite networks into one graph, keeping the original vertex
# and edge attribute names (unlike igraph's "+" operator).
# Both inputs must carry `name` and `type` vertex attributes.
##
bipartiteCombine <- function(gx1, gx2) {
  .vt <- unique(rbind(as_data_frame(gx1,'vertices'),as_data_frame(gx2,'vertices')))
  nz <- names(.vt)
  ## BUG FIX: the original used `&`, which only stopped when BOTH attributes
  ## were missing; a graph missing just one of them must also be rejected.
  if ((! 'name' %in% nz) || (! 'type' %in% nz))
    stop('graphs must have name and type attributes.')
  idx.name <- which(nz=='name')
  idx.type <- which(nz=='type')
  idx.rest <- which( ! nz %in% c('name','type'))
  .vt <- .vt[ ,c(idx.name,idx.type,idx.rest)] ## "name" column must come first for graph.data.frame
  .el <- rbind(as_data_frame(gx1,'edges'),as_data_frame(gx2,'edges'))
  gx <- graph.data.frame(d = .el, directed = F, vertices = .vt)
  return(gx)
}
##
# Gets vertex indices of firms in dyads that have multi-market contact
##
##
# Gets vertex indices of firms in dyads that have multi-market contact
# (edge weight > 1 to the focal vertex). Bipartite graphs are delegated to
# which.mmc.bipartite().
##
which.mmc <- function(g, focal, keep.focal=F, proj.max=T) {
  ## BUG FIX: class(g) != 'igraph' yields a vector for multi-class objects;
  ## inherits() is the robust test.
  if (!inherits(g, 'igraph')) stop('g must be an igraph object')
  if (!igraph::is.weighted(g)) E(g)$weight <- 1  # default all ties to weight 1
  if (is.bipartite.safe(g))
    return(which.mmc.bipartite(g, focal, keep.focal, proj.max))
  ## NOT BIPARTITE
  ## seq_len() is safe for empty graphs, unlike 1:vcount(g)
  if (! focal %in% seq_len(vcount(g)))
    stop('focal firm index must be in vertices of g')
  adjm <- igraph::as_adjacency_matrix(g, attr = 'weight', sparse = F)
  vids <- unname(which(adjm[focal,] > 1))  # partners with weight > 1 = MMC
  if (keep.focal) {
    return(sort(unique(c(vids, focal))))
  } else {
    return(sort(unique(vids)))
  }
}
##
# Gets BIPARTITE graph vertex indices of firms in dyads that have multi-market contact
##
##
# Gets BIPARTITE graph vertex indices of firms in dyads that have multi-market
# contact: projects onto one mode, finds projected firms with weight > 1 ties,
# then maps back to the bipartite graph, including those firms' market vertices.
##
which.mmc.bipartite <- function(g, focal, keep.focal=F, proj.max=T) {
  ## project onto the larger (proj.max=T) or smaller mode
  g2.l <- bipartite.projection(g, multiplicity = T, remove.type = F)
  vcs <- sapply(g2.l,vcount)
  idx.proj <- ifelse(proj.max, which.max(vcs), which.min(vcs))
  g2 <- g2.l[[idx.proj]]
  if (! focal %in% 1:vcount(g2))
    stop('focal firm index must be in vertices of g')
  adjm2 <- igraph::as_adjacency_matrix(g2, attr = 'weight', sparse = F)
  proj.vids <- unname(which(adjm2[focal,] > 1))  # MMC partners in the projection
  if (keep.focal) {
    proj.vids <- sort(c(proj.vids, focal))
  }
  proj.names <- V(g2)$name[proj.vids]
  ## map projected firm names back to bipartite vertex ids
  vids.f <- which(V(g)$name %in% proj.names)
  vids.m <- c()
  ## collect the market vertices adjacent to each MMC firm
  for (v in vids.f) {
    vids.m <- unique(c(vids.m, as.integer(igraph::neighbors(g, v))))
  }
  ## NOTE(review): `focal` is an index into the projection, but here it is
  ## matched against vertex NAMES via as.character(focal) -- this only works
  ## when names happen to equal indices; confirm intent.
  vids.focal <- which(V(g)$name==as.character(focal))
  if (keep.focal) {
    return(sort(c(vids.f, vids.m, vids.focal)))
  } else {
    return(sort(c(vids.f, vids.m)))
  }
}
##
#
##
##
# Returns the bipartite (firm-market) edge ids belonging ONLY to non-MMC
# firm-firm dyads, i.e. edges safe to delete when reducing a bipartite graph
# to its MMC core. Uses bibcoupling (count of shared market neighbors):
# exactly 1 shared market = non-MMC dyad, > 1 = MMC dyad. An edge is kept out
# of the result if it also participates in any MMC dyad.
##
getNotMmcBipartiteEdgeIds <- function(g.sub)
{
  # adjm <- getAdjacencyMatrix(g.sub, proj.max = T)
  firmnames <- getMaxProjNames(g.sub)          # firms = the larger mode
  vids <- which(V(g.sub)$name %in% firmnames)
  # mids <- which( ! V(g.sub)$name %in% firmnames)
  bic <- igraph::bibcoupling(g.sub) ## shared neighbors (## possibly large matrix, need to subject to only firm vids)
  ##------------------------------
  ## NOT MMC eids
  ##------------------------------
  ## 1. F-F non-MMC dyads (exactly one shared market)
  ffno <- which(bic == 1, arr.ind=T) ## 2-col matrix of (row,col) id tuples for non-mmc elements
  ffno <- ffno[which(ffno[,1] %in% vids & ffno[,2] %in% vids), ]
  ## 2. F-M-F No-MMC 3-paths: cache as (F,M),(M,F) tuples
  fm.no <- c() ## Firm-Market-Firm No-MMC paths: saved as (F1-M, M-F2, ...)
  urow1 <- unique(ffno[,1])
  cat(' fetching Non-MMC bipartite dyads...\n')
  for (i in 1:length(urow1)) {
    if (i %% 50 == 0) cat(sprintf(' %s (%.2f%s)\n',i,100*i/length(urow1),'%'))
    r1i.r2js <- ffno[which(ffno[,1] == urow1[i]),2]
    xi.paths <- igraph::all_shortest_paths(g.sub, urow1[i], r1i.r2js)$res
    ls <- sapply(xi.paths, length)
    idx <- which(ls==3)   # length-3 path = Firm -> Market -> Firm
    for (j in idx) {
      x <- as.integer(xi.paths[[j]])
      fm.no <- c(fm.no, c(x[1],x[2], x[2],x[3]))
    }
  }
  cat(' done.\n')
  ## 3. save bipartite F-M edge IDs for non-MMC dyads
  eid.no <- unique(igraph::get.edge.ids(g.sub, vp = fm.no, directed = F, error = F, multi = F))
  ##------------------------------
  ## MMC eids (filter out)
  ##-------------------------------
  ## 4. MMC firm-firm dyads (more than one shared market)
  ff.mmc <- which(bic > 1, arr.ind=T) ##
  ff.mmc <- ff.mmc[which(ff.mmc[,1] %in% vids & ff.mmc[,2] %in% vids), ]
  ## 5. F-M-F MMC 3-paths
  fm.mmc <- integer() ## cache MMC tuples (F,M),(M,F),(...)
  urow1 <- unique(ff.mmc[,1])
  cat(' fetching MMC bipartite dyads...\n')
  for (i in 1:length(urow1)) {
    if (i %% 50 == 0) cat(sprintf(' %s (%.2f%s)\n',i,100*i/length(urow1),'%'))
    r1i.r2js <- ff.mmc[which(ff.mmc[,1] == urow1[i]),2]
    xi.paths <- igraph::all_shortest_paths(g.sub, urow1[i], r1i.r2js)$res
    ls <- sapply(xi.paths, length)
    idx <- which(ls==3)
    for (j in idx) {
      x <- as.integer(xi.paths[[j]])
      fm.mmc <- c(fm.mmc, c(x[1],x[2], x[2],x[3]))
    }
  }
  cat(' done.\n')
  ## 6. save bipartite F-M edge IDs for MMC dyads
  eid.mmc <- unique(igraph::get.edge.ids(g.sub, vp = fm.mmc, directed = F, error = F, multi = F))
  ##--------------------------------
  ## Check is Non-MMC and is NOT MMC
  ##--------------------------------
  ## 7. filter only the Non-MMC F-M dyads that are not included in any MMC F-M dyads (which comprise MMC F-F dyads)
  eids <- eid.no[ which( ! eid.no %in% eid.mmc) ]
  return(eids)
}
##
# Creates MMC subgraph
# - subsets to firms with MMC relations to another firm
# - removes non-MMC edges (weight <= 1)
##
# ## filter to ego-mmc network (?)
# focal.firm <- which(V(g)$name == focal.name)
# if (length(focal.firm)>0) {
# ## MMC VERTEX SUBGRAPH if `focal` is set
# vids <- which.mmc(g, focal.firm, keep.focal=T)
# g.sub <- igraph::induced.subgraph(g, vids = vids)
# } else {
# g.sub <- g
# }
##
mmcSubgraph <- function(g, remove.isolates=F)
{
  ## Reduce g to its multimarket-contact (MMC) core by deleting non-MMC edges:
  ## - bipartite: firm-market edges belonging only to non-MMC dyads
  ## - unipartite: edges with weight <= 1
  ## Optionally drops vertices left with no edges.
  is.bi <- is.bipartite.safe(g)
  g.sub <- g
  ## DROP NON-MMC EDGES
  if (is.bi) {
    eids <- getNotMmcBipartiteEdgeIds(g.sub)
  } else {
    ## BUG FIX: the original computed this which() but never assigned it,
    ## leaving `eids` undefined and erroring for unipartite input.
    eids <- which(E(g.sub)$weight <= 1)
  }
  if (length(eids) > 0) {
    g.sub <- igraph::delete.edges(g.sub, eids)
  }
  if (remove.isolates) {
    g.sub <- igraph::induced.subgraph(g.sub, vids = which(igraph::degree(g.sub)>0) )
  }
  return(g.sub)
}
# mmcSubgraphDEBUG <- function(g, focal.name=NA, remove.isolates=F)
# {
# is.bi <- is.bipartite.safe(g)
# focal.firm <- which(V(g)$name == focal.name)
#
# # if (length(focal.firm)>0) {
# # ## MMC VERTEX SUBGRAPH if `focal` is set
# # vids <- which.mmc(g, focal.firm, keep.focal=T)
# # g.sub <- igraph::induced.subgraph(g, vids = vids)
# # } else {
# # g.sub <- g
# # }
# g.sub <- g
#
# ## DROP NON-MMC EDGES
# if (is.bi) {
# # adjm <- getAdjacencyMatrix(g.sub, proj.max = T)
# firmnames <- getMaxProjNames(g.sub)
# vids <- which(V(g.sub)$name %in% firmnames)
# # mids <- which( ! V(g.sub)$name %in% firmnames)
# bic <- igraph::bibcoupling(g.sub) ## shared neighbors (## possibly large matrix, need to subject to only firm vids)
# ## MMC firm-firm dyads
# mmc <- which(bic > 1, arr.ind=T) ##
# mmc <- mmc[which(mmc[,1] %in% vids & mmc[,2] %in% vids), ]
# ## non-MMC firm-firm dyads
# nmmc <- which(bic == 1, arr.ind=T) ## 2-col matrix of (row,col) id tuples for non-mmc elements
# nmmc <- nmmc[which(nmmc[,1] %in% vids & nmmc[,2] %in% vids), ]
# # ###
# # bnmids <- sort(unique(c(nmmc[,1],nmmc[,2])))
# # sapply(bnmids,function(i){
# # ls <- sapply(igraph::all_shortest_paths(g.sub, i, vids[ ! vids %in% i])$res, length)
# # return(length(ls[ls==3]))
# # })
# # ###
# edge.l <- list()
# for (i in 1:nrow(nmmc)) {
# ps <- igraph::all_shortest_paths(g.sub, nmmc[i,1], nmmc[i,2])$res
# if (length(ps)==1) {
# pv <- as.integer(ps[[1]])
# chk1 <- length(which( (mmc[,1]==pv[1] & mmc[,2]==pv[2]) | (mmc[,1]==pv[2] & mmc[,2]==pv[1]) )) == 0
# chk2 <- length(which( (mmc[,1]==pv[2] & mmc[,2]==pv[3]) | (mmc[,1]==pv[3] & mmc[,2]==pv[2]) )) == 0
# if (chk1) edge.l <- c(edge.l, list(c(idx[1],idx[2])))
# if (chk2) edge.l <- c(edge.l, list(c(idx[2],idx[3])))
# }
# }
# eids <- sort(unique(igraph::get.edge.ids(g.sub, vp = edges, directed = F))) ## edge ids of non-mmc firm
# } else {
# eids <- which(E(g.sub)$weight <= 1)
# }
#
# if (length(eids) > 0) {
# g.sub <- igraph::delete.edges(g.sub, eids)
# }
#
# if (remove.isolates) {
# g.sub <- igraph::induced.subgraph(g.sub, vids = which(igraph::degree(g.sub)==0))
# }
#
# return(g.sub)
# }
##
# Gets adjacency matrix -- for either Bipartite or unipartite graphs
# - unipartite, just return adjmat
# - bipartite, return adjmat for the mode with more (proj.max=T) or fewer (proj.max=F) nodes
##
##
# Weighted adjacency matrix for either graph kind:
# - unipartite: the graph's own adjacency matrix
# - bipartite: the adjacency of the one-mode projection with more
#   (proj.max=T) or fewer (proj.max=F) vertices
##
getAdjacencyMatrix <- function(g, proj.max=T) {
  if (!is.bipartite.safe(g)) {
    return(igraph::as_adjacency_matrix(g, attr = 'weight', sparse = F))
  }
  projs <- bipartite.projection(g, multiplicity = T, remove.type = F)
  sizes <- sapply(projs, vcount)
  pick <- if (proj.max) which.max(sizes) else which.min(sizes)
  igraph::as_adjacency_matrix(projs[[pick]], attr = 'weight', sparse = F)
}
##
#
##
##
# One-mode projection helper: returns g unchanged when it is not functionally
# bipartite; otherwise the projection with the most (max.proj=T) or fewest
# (max.proj=F) vertices.
##
getGraphProjection <- function(g, max.proj=T, remove.type=T)
{
  if (!is.bipartite.safe(g))
    return(g)
  projs <- bipartite.projection(g, multiplicity = T, remove.type = remove.type)
  sizes <- sapply(projs, vcount)
  pick <- if (max.proj) which.max(sizes) else which.min(sizes)
  projs[[pick]]
}
##
# Get vertex IDs
# - if bipartite, return vids of maximal projection (largest size mode)
##
##
# Names of the vertices in the LARGER mode of a bipartite graph
# (here: the firms), determined via bipartite.projection.size.
##
getMaxProjNames <- function(gx)
{
  if (! 'name' %in% igraph::list.vertex.attributes(gx))
    stop('gx must have vertex name attribute')
  bps <- igraph::bipartite.projection.size(gx)
  types <- unique(V(gx)$type)
  big.type <- if (bps$vcount1 > bps$vcount2) types[1] else types[2]
  V(gx)$name[which(V(gx)$type == big.type)]
}
# mmcSum <- function(g, focal, proj.max=T) {
# adjm <- getAdjacencyMatrix(g, focal, proj.max)
# vids.mmc <- which(adjm[focal,] > 1)
# return(sum(adjm[focal,vids.mmc]))
# }
#
#
# mmcCount <- function(g, focal, proj.max=T) {
# adjm <- getAdjacencyMatrix(g, focal, proj.max)
# return(length(which(adjm[focal,] > 1)))
# }
## Per-vertex sum of MMC tie weights (entries > 1) in the firm-firm adjacency
## matrix; bipartite input is projected onto its larger mode first.
getMmcEdgeSum <- function(g, name.remove=T) {
  if ( ! 'weight' %in% igraph::list.edge.attributes(g))
    stop('g must have edge weights')
  adj <- getAdjacencyMatrix(g, T)
  totals <- apply(adj, 1, function(row) sum(row[row > 1]))
  if (name.remove) unname(totals) else totals
}
## Per-vertex count of MMC partners (adjacency entries > 1); bipartite input
## is projected onto its larger mode first.
getMmcEdgeCount <- function(g, name.remove=T) {
  if ( ! 'weight' %in% igraph::list.edge.attributes(g))
    stop('g must have edge weights')
  adj <- getAdjacencyMatrix(g, T)
  tallies <- apply(adj, 1, function(row) length(row[row > 1]))
  if (name.remove) unname(tallies) else tallies
}
## Size of the largest clique (of at least `min` vertices) that contains
## vertex `vid`; 0 when the graph has no such clique.
getMaxMmcCliqueSize <- function(g, vid, min=3)
{
  cls <- igraph::cliques(g, min = min)
  if (length(cls)==0)
    return(0)
  idx <- which(sapply(cls,function(x) vid %in% x))
  ## BUG FIX: max() over an empty selection returned -Inf (with a warning)
  ## when no qualifying clique contains vid; return 0 instead.
  if (length(idx)==0)
    return(0)
  return(max(sapply(cls[idx], length)))
}
##
#
##
# getMmcTargetDataframe <- function(gx.m, vid.a, is.ego=FALSE)
# {
# if (is.ego) {
# gx.m <- igraph::make_ego_graph(gx.m, 1, vid.a)[[1]]
# vid.a <- which(as.character(V(gx.m)$name) == as.character(vid.a))
# }
# return(data.frame(
# name=unlist(V(gx.m)$name),
# ##
# sum=mmcEdgeSum(gx.m)[vid.a], ## sum of mmc
# degree=mmcEdgeCount(gx.m)[vid.a], ## number of mmc competitiors
# ##
# clust=igraph::transitivity(gx.m, type = 'global'),
# closeness=unname(igraph::closeness(gx.m, vid.a)),
# eigen=unname(igraph::eigen_centrality(gx.m)$vector[vid.a]),
# pow.n1=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.1)),
# pow.n3=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.3)),
# eccen=unname(igraph::eccentricity(gx.m, vid.a)),
# ##
# central.clos=igraph::centr_clo(gx.m)$centralization / igraph::centr_clo_tmax(gx.m),
# central.eign=igraph::centr_eigen(gx.m)$centralization / igraph::centr_eigen_tmax(gx.m),
# central.betw=igraph::centr_betw(gx.m)$centralization / igraph::centr_betw_tmax(gx.m),
# central.degr=igraph::centr_degree(gx.m)$centralization / igraph::centr_degree_tmax(gx.m),
# ##
# subgraph=unname(igraph::subgraph.centrality(gx.m)[vid.a]),
# density=igraph::graph.density(gx.m),
# constraint=unname(igraph::constraint(gx.m, vid.a)),
# max.clique=maxCliqueSize(gx.m, vid.a),
# ##
# stringsAsFactors = F
# ))
# }
##
#
##
##
# One-row data frame of network/MMC metrics for vertex `vert.name` in gx.m.
# - ego.order: if set, restrict to the vertex's ego network of that order
#   (doubled for bipartite graphs since firm-firm distance is 2 hops)
# - proj.uni: if TRUE, compute everything on the one-mode projection
# For bipartite input, MMC metrics (sum/degree/max.clique) are computed on a
# cached projection while the remaining metrics use the bipartite graph.
##
getMmcDf <- function(gx.m, vert.name, ego.order=NA, proj.uni=FALSE)
{
  vid.a <- which(V(gx.m)$name==vert.name)
  if (length(vid.a)==0)
    stop(sprintf('vert.name `%s` not in graph gx.m',vert.name))
  if (proj.uni) {
    gx.m <- getGraphProjection(gx.m)
    vid.a <- which(V(gx.m)$name==vert.name)
  } else {
    ## keep a one-mode projection alongside the bipartite graph for MMC metrics
    .proj.gx.m <- getGraphProjection(gx.m)
    .proj.vid.a <- which(V(.proj.gx.m)$name==vert.name)
  }
  if (!is.na(ego.order) & ego.order >= 1) {
    ord <- ifelse(is.bipartite.safe(gx.m), 2*ego.order, 1*ego.order) ## bipartite twice distance
    gx.m <- igraph::make_ego_graph(gx.m, ord, vid.a)[[1]]
    vid.a <- which(V(gx.m)$name == vert.name)
    ##
    ord <- ifelse(is.bipartite.safe(.proj.gx.m), 2*ego.order, 1*ego.order) ## bipartite twice distance
    .proj.gx.m <- igraph::make_ego_graph(.proj.gx.m, ord, .proj.vid.a)[[1]]
    .proj.vid.a <- which(V(.proj.gx.m)$name == vert.name)
  }
  is.bi <- is.bipartite.safe(gx.m)
  ## NOTE(review): ifelse() on a scalar test returns only the FIRST element of
  ## the chosen branch, so `sum`/`degree` below record the first vertex's value,
  ## not necessarily vid.a's -- confirm whether indexing by vid.a was intended.
  df <- data.frame(
    name=unlist(V(gx.m)$name[vid.a]),
    is.bi=is.bi,
    v=vcount(gx.m),
    e=ecount(gx.m),
    ##
    sum=ifelse(is.bi, getMmcEdgeSum(.proj.gx.m), getMmcEdgeSum(gx.m)),
    degree=ifelse(is.bi, getMmcEdgeCount(.proj.gx.m), getMmcEdgeCount(gx.m)),
    max.clique=ifelse(is.bi, getMaxMmcCliqueSize(.proj.gx.m, .proj.vid.a), getMaxMmcCliqueSize(gx.m, vid.a)),
    ## vertex-level position metrics
    clust=igraph::transitivity(gx.m, type = 'global'),
    closeness=unname(igraph::closeness(gx.m, vid.a)),
    eigen=unname(igraph::eigen_centrality(gx.m)$vector[vid.a]),
    pow.n1=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.1)),
    pow.n3=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.3)),
    eccen=unname(igraph::eccentricity(gx.m, vid.a)),
    ## graph-level centralization, normalized by theoretical maxima
    central.clos=igraph::centr_clo(gx.m)$centralization / igraph::centr_clo_tmax(gx.m),
    central.eign=igraph::centr_eigen(gx.m)$centralization / igraph::centr_eigen_tmax(gx.m),
    central.betw=igraph::centr_betw(gx.m)$centralization / igraph::centr_betw_tmax(gx.m),
    central.degr=igraph::centr_degree(gx.m)$centralization / igraph::centr_degree_tmax(gx.m),
    ##
    subgraph=unname(igraph::subgraph.centrality(gx.m)[vid.a]),
    density=igraph::graph.density(gx.m),
    constraint=unname(igraph::constraint(gx.m, vid.a)),
    ##
    stringsAsFactors = F
  )
  return(df)
}
# ##
# #
# ##
# getMmcAcquirerDf <- function(gx.m, vert.name, is.ego=FALSE)
# {
# vid.a <- which(V(gx.m)$name==vert.name)
# if (is.ego) {
# ord <- ifelse(is.bipartite.safe(gx.m), 2, 1) ##
# gx.m <- igraph::make_ego_graph(gx.m, ord, vid.a)[[1]]
# vid.a <- which(as.character(V(gx.m)$name) == as.character(vid.a))
# }
# return(data.frame(
# name=unlist(V(gx.m)$name[vid.a]),
# ##
# sum=mmcEdgeSum(gx.m)[vid.a], ## sum of mmc
# degree=mmcEdgeCount(gx.m)[vid.a], ## number of mmc competitiors
# max.clique=maxCliqueSize(gx.m, vid.a),
# ##
# clust=igraph::transitivity(gx.m, type = 'global'),
# closeness=unname(igraph::closeness(gx.m, vid.a)),
# eigen=unname(igraph::eigen_centrality(gx.m)$vector[vid.a]),
# pow.n1=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.1)),
# pow.n3=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.3)),
# eccen=unname(igraph::eccentricity(gx.m, vid.a)),
# ##
# central.clos=igraph::centr_clo(gx.m)$centralization / igraph::centr_clo_tmax(gx.m),
# central.eign=igraph::centr_eigen(gx.m)$centralization / igraph::centr_eigen_tmax(gx.m),
# central.betw=igraph::centr_betw(gx.m)$centralization / igraph::centr_betw_tmax(gx.m),
# central.degr=igraph::centr_degree(gx.m)$centralization / igraph::centr_degree_tmax(gx.m),
# ##
# subgraph=unname(igraph::subgraph.centrality(gx.m)[vid.a]),
# density=igraph::graph.density(gx.m),
# constraint=unname(igraph::constraint(gx.m, vid.a)),
# ##
# stringsAsFactors = F
# ))
# }
##-----------------------------------------------------------------------------------
##==================================
## FIRM-MARKET GRAPH
##----------------------------------
# ## EXAMPLE OF ALL 4 QUADRANTS
# n1 <- 4
# n2 <- 12
# focal.firm <- as.character(4)
# set.seed(1133241) #1133241
# gx=sample_bipartite(n1,n2,'gnp',.62)
# ## SPARSE 1
# n1 <- 5
# n2 <- 12
# focal.firm <- as.character(4)
# set.seed(11111)
# gx=sample_bipartite(n1,n2,'gnp',.6)
##
# ## DENSE 2
# n1 <- 4
# n2 <- 12
# focal.firm <- as.character(4)
# set.seed(1133241)
# gx=sample_bipartite(n1,n2,'gnp',.70)
##
##----------------------------------
# ## Main Cluster
# # n1 <- 4 ## markets
# # n2 <- 12 ## firms
## Main cluster sizes: m markets, f firms
c1 <- list(m = 4, f = 12)
## Cluster 2
c2 <- list(m = 2, f = 6)
# c1 <- list(m = 10, f = 1200)
# ## Cluster 2
# c2 <- list(m = 5, f = 600)
focal.firm <- '4'
## CREATE RANDOM BIPARTITE FIRM_MARKET
## (markets named by letters, firms by numbers; all ties weight 1)
set.seed(1133241) #1133241
gx_1 <- sample_bipartite(c1$m,c1$f,'gnp',.62)
V(gx_1)$name <- c(LETTERS[1:c1$m], 1:c1$f)
E(gx_1)$weight <- 1
set.seed(11341) #1133241
gx_2 <- sample_bipartite(c2$m,c2$f,'gnp',.72)
V(gx_2)$name <- c(LETTERS[(c1$m+1):(c1$m+c2$m)], (c1$f+1):(c1$f+c2$f))
E(gx_2)$weight <- 1
## COMBINE the two clusters into one bipartite firm-market graph
gx <- bipartiteCombine(gx_1, gx_2)
# .vt <- unique(rbind(as_data_frame(gx1,'vertices'),as_data_frame(gx2,'vertices')))
# nz <- names(.vt)
# idx.name <- which(nz=='name')
# idx.type <- which(nz=='type')
# .vt <- .vt[ ,c(idx.name,idx.type)] ## rearrange "name" column first
# .el <- rbind(as_data_frame(gx1,'edges'),as_data_frame(gx2,'edges'))
# gx <- graph.data.frame(d = .el, directed = F, vertices = .vt)
## BIMODAL FIRM_MARKET PLOT (bipartite layout vs force-directed, side by side)
vshapes <- sapply(V(gx)$type,function(x)ifelse(x,'circle','square'))
par(mar=c(.1,.1,.1,.1), mfrow=c(1,2))
plot2(gx,
      layout=layout.bipartite,
      vertex.shape=vshapes,
      vertex.size=18,
      focal.firm=focal.firm)
plot2(gx,
      layout=layout.kamada.kawai,
      vertex.shape=vshapes,
      vertex.size=18,
      focal.firm=focal.firm, edge.curved = F)
par(mfrow=c(1,1))
## UNIMODAL FIRM_FIRM projection (proj2 = firm mode)
gx.ff <- bipartite.projection(gx, remove.type = F)$proj2
V(gx.ff)$type <- unlist(V(gx.ff)$type)
## UNIMODAL FIRM_FIRM ADJACENCY MATRIX
adjmat <- as_adjacency_matrix(gx.ff, attr = 'weight', sparse = F)
print(adjmat)
## SUM MMC for the focal firm (weights > 1 = shared markets beyond one)
ffidx <- which(V(gx.ff)$name==focal.firm)
mmcidx <- which(adjmat[, ffidx] > 1)
print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
## PLOT FIRM_FIRM NETWORk
vshapes <- sapply(V(gx.ff)$type,function(x)ifelse(x,'circle','square'))
## Save plot of bipartite --> firm-firm competition networ
# png(sprintf("firm_market_firm_firm_2side_N%s_M%s.png",n2,n1), height = 4.5, width = 8.5, units = 'in', res = 250)
par(mar=c(.1,.1,.1,.1), mfrow=c(1,2))
plot2(gx,
      layout=layout.bipartite,
      vertex.shape=vshapes,
      vertex.size=18,
      focal.firm=focal.firm)
plot2(gx.ff,
      layout=layout.fruchterman.reingold,
      vertex.shape=vshapes,
      vertex.size= 18, ##1.1*mapTo(centPow(gx.ff, beta = -.01)),
      focal.firm=focal.firm
)
# dev.off()
## Demo: acquisitions of firms 15 and 16 by firm 4, before/after/MMC-subgraph
vid.a <- 4
vid.ts <- c(15,16)
par(mfrow=c(2,3), mar=c(.1,.1,1.5,.1))
for (vid.t in vid.ts)
{
  plot2(gx, main="Pre-Acquisition")
  plot2(biAcq(gx, vid.a, vid.t), main=sprintf("%s==>%s",vid.a,vid.t))
  ## NOTE(review): mmcSubgraph's 2nd parameter is remove.isolates, but vid.a
  ## (a numeric id) is passed here -- confirm whether TRUE was intended.
  plot2(mmcSubgraph(biAcq(gx, vid.a, vid.t), vid.a),
        main=sprintf("%s==>%s MMC Subgraph",vid.a,vid.t))
}
##==================================
##
## ACQUISITION LOOP -- ALL OTHER FIRMS
##
##
##
##
##
##
##----------------------------------
## focal firm
focal.firm <- 4
focal.name <- as.character(focal.firm)
## vert names for either uni or bipartite
gnames <- getMaxProjNames(gx)
## accumulators: acquirer metrics (full + ego) and target-synergy diffs
df.a <- data.frame()
df.a.e <- data.frame()
## baseline MMC metrics for the focal firm, pre-acquisition
df0.t <- getMmcDf(gx, focal.name, ego.order=NA)
df0.t.e <- getMmcDf(gx, focal.name, ego.order=1)
df.t.diff <- data.frame()
df.t.e.diff <- data.frame()
meta.attrs <- c('name','targ','is.bi','v','e')
mmc.attrs <- names(df0.t)[which( ! names(df0.t) %in% meta.attrs)]
## NOTE(review): rbind-in-loop grows the data frames quadratically; fine at
## this toy scale, but preallocate or bind once for the large c1/c2 settings.
for (i in gnames) {
  ## Acquirer MMC Metrics
  df.a <- rbind(df.a, getMmcDf(gx, as.character(i)) )
  df.a.e <- rbind(df.a.e, getMmcDf(gx, as.character(i), ego.order=1) )
  ## Target Synergy MMC Metrics
  target.firm <- as.numeric(i)
  target.name <- as.character(i)
  if (target.firm != focal.firm) {
    ## NODE COLLAPSE BIPARTITE GRAPH
    gx2 <- biAcq(gx, focal.name, target.name, project = F)
    cat(sprintf('%s(%s)-->%s(%s)\n',focal.name, focal.firm, target.name, target.firm))
    ## MMC Subgraph
    gx2.sub <- mmcSubgraph(gx2, remove.isolates=T)
    plot2(gx2.sub)
    ## Get MMC metrics
    dfi.t <- getMmcDf(gx2.sub, focal.name)
    dfi.t.e <- getMmcDf(gx2.sub, focal.name, ego.order=1)
    ## make diff df
    dfi.t.diff <- dfi.t
    dfi.t.e.diff <- dfi.t.e
    ## set diff values (post-acquisition minus pre-acquisition baseline)
    dfi.t.diff[,mmc.attrs] <- dfi.t[,mmc.attrs] - df0.t[,mmc.attrs]
    dfi.t.e.diff[,mmc.attrs] <- dfi.t.e[,mmc.attrs] - df0.t.e[,mmc.attrs]
    ## add target
    dfi.t.diff$targ <- target.name
    dfi.t.e.diff$targ <- target.name
    ## append
    df.t.diff <- rbind(df.t.diff, dfi.t.diff)
    df.t.e.diff <- rbind(df.t.e.diff, dfi.t.e.diff)
    ## dataframe
    # idx <- which(as.character(acq.df$name)==target.firm)
    # ## PLOT
    # vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
    # pngfile <- sprintf("%s\\firm_firm_mmc_acquisition%s_1.png",dirname, target.firm)
    # png(pngfile, width = 5, height = 5, units = 'in', res = 250)
    # par(mar=c(.1,.1,.1,.1))
    # plot2(gx2.ff,
    #       layout=layout.fruchterman.reingold,
    #       vertex.shape=vshapes,
    #       vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
    #       focal.firm=focal.firm
    # )
    # dev.off()
  }
}
## SAVE DATAFRAMES (one CSV per metric table, named by cluster dimensions)
print(df.a)
csvfilename <- sprintf("%s\\acquisition_acquirer_mmc_compare_c1M%s_c1N%s_c2M%s_c2N%s.csv", dirname, c1$m, c1$f, c2$m, c2$f)
write.csv(df.a, file = csvfilename)
print(df.a.e)
csvfilename <- sprintf("%s\\acquisition_acquirer_mmc_compare_EGO_c1M%s_c1N%s_c2M%s_c2N%s.csv", dirname, c1$m, c1$f, c2$m, c2$f)
write.csv(df.a.e, file = csvfilename)
print(df.t.diff)
csvfilename <- sprintf("%s\\acquisition_mmc_synergies_structure_position_compare_c1M%s_c1N%s_c2M%s_c2N%s.csv", dirname, c1$m, c1$f, c2$m, c2$f)
write.csv(df.t.diff, file = csvfilename)
print(df.t.e.diff)
csvfilename <- sprintf("%s\\acquisition_mmc_synergies_structure_position_compare_EGO_c1M%s_c1N%s_c2M%s_c2N%s.csv", dirname, c1$m, c1$f, c2$m, c2$f)
write.csv(df.t.e.diff, file = csvfilename)
## ACQUIRER: histogram of each numeric metric across acquirers
par(mfrow=c(3,3), mar=c(1,1,2.5,1))
for (attr in mmc.attrs) {
  if (is.numeric(df.a[,attr]))
    hist(df.a[,attr], col='gray', main=attr)
}
## TARGET SYNERGY: histogram of each metric's post-minus-pre difference
par(mfrow=c(3,3), mar=c(1,1,2.5,1))
for (attr in mmc.attrs) {
  x <- df.t.diff[,attr]
  if (is.numeric(x) & length(unique(x)) > 1)
    hist(df.t.diff[,attr], col='gray', main=attr)
}
plot2(gx)
## "separating" attrs = metrics whose diff takes both signs across targets
sepattrs <- c()
for (attr in mmc.attrs) {
  if (length(unique(df.t.diff[,attr] < 0)) > 1)
    sepattrs <- c(sepattrs, attr)
}
cat(sprintf('all separating attrs +/-:\n  %s\n\n', paste(sepattrs, collapse = ", ")))
View(df.t.diff[,c(meta.attrs,sepattrs)])
sepattrs <- c()
for (attr in mmc.attrs) {
  if (length(unique(df.t.e.diff[,attr] < 0)) > 1)
    sepattrs <- c(sepattrs, attr)
}
cat(sprintf('EGO separating attrs +/-:\n  %s\n\n', paste(sepattrs, collapse = ", ")))
View(df.t.e.diff[,c(meta.attrs,sepattrs)])
##=========================
## EXAMPLE HIGH MARKETS
##------------------------
## Small example: 2 markets, 12 firms (high firm/market ratio)
n1 <- 2
n2 <- 12
focal.firm <- as.character(4)
## CREATE RANDOM BIPARTITE FIRM_MARKET
set.seed(1133241) #1133241
gx=sample_bipartite(n1,n2,'gnp',.72)
V(gx)$name <- c(LETTERS[1:n1], 1:n2)
E(gx)$weight <- 1
## BIMODAL FIRM_MARKET PLOT
vshapes <- sapply(V(gx)$type,function(x)ifelse(x,'circle','square'))
par(mar=c(.1,.1,.1,.1), mfrow=c(1,2))
plot2(gx,
      layout=layout.bipartite,
      vertex.shape=vshapes,
      vertex.size=18,
      focal.firm=focal.firm)
plot2(gx,
      layout=layout.kamada.kawai,
      vertex.shape=vshapes,
      vertex.size=18,
      focal.firm=focal.firm, edge.curved = F)
par(mfrow=c(1,1))
## UNIMODAL FIRM_FIRM (proj2 = firm mode)
gx.ff <- bipartite.projection(gx, remove.type = F)$proj2
V(gx.ff)$type <- unlist(V(gx.ff)$type)
## UNIMODAL FIRM_FIRM ADJACENCY MATRIX
adjmat <- as_adjacency_matrix(gx.ff, attr = 'weight', sparse = F)
print(adjmat)
## SUM MMC for the focal firm
ffidx <- which(V(gx.ff)$name==focal.firm)
mmcidx <- which(adjmat[, ffidx] > 1)
print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
## PLOT FIRM_FIRM NETWORk
vshapes <- sapply(V(gx.ff)$type,function(x)ifelse(x,'circle','square'))
## Save plot of bipartite --> firm-firm competition networ (PNG in dirname)
png(sprintf("firm_market_firm_firm_2side_N%s_M%s.png",n2,n1), height = 4.5, width = 8.5, units = 'in', res = 250)
par(mar=c(.1,.1,.1,.1), mfrow=c(1,2))
plot2(gx,
      layout=layout.bipartite,
      vertex.shape=vshapes,
      vertex.size=18,
      focal.firm=focal.firm)
plot2(gx.ff,
      layout=layout.fruchterman.reingold,
      vertex.shape=vshapes,
      vertex.size= 18, ##1.1*mapTo(centPow(gx.ff, beta = -.01)),
      focal.firm=focal.firm
)
dev.off()
#
# ##==================================
# ## ACQUISITION 3
# ##----------------------------------
# target.firm <- as.character(3)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
#
# ##==================================
# ## ACQUISITION 6
# ##----------------------------------
# target.firm <- as.character(6)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
#
# ##==================================
# ## ACQUISITION 2
# ##----------------------------------
# target.firm <- as.character(2)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
#
# ##==================================
# ## ACQUISITION 5
# ##----------------------------------
# target.firm <- as.character(5)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
#
# ##==================================
# ## ACQUISITION 10
# ##----------------------------------
# target.firm <- as.character(10)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
##----------- END ------------------
## Interactive scratch code below: evaluates expressions at top level
## (results printed at the console), mixing exploration and small demos.
## NOTE(review): `gx2` is not defined in this visible scope -- presumably
## left over from an earlier interactive session; verify before sourcing.
as_adjacency_matrix(bipartite.projection(gx2)$proj2, attr = 'weight', sparse = F)
power_centrality(gx, exponent = -0.2, nodes = V(gx)$type)
## NOTE(review): `$delta` below matches the older biAcq.prev API (returned a
## list with df/g); the current biAcq returns a graph -- confirm intent.
biAcq(gi, '1', '2', T)
plot2(gx,vertex.shape=vshapes, layout=layout.kamada.kawai)
# plot(gx,vertex.shape=vshapes, layout=layout.fruchterman.reingold)
E(gx)$weight <- 1
gx.bp <- bipartite.projection(gx)
## NOTE(review): edge.width multiplies an edge *sequence* by a scalar --
## likely intended E(...)$weight * .3; confirm.
plot(gx.bp$proj1, edge.width=E(gx.bp$proj1)*.3, vertex.shape='square')
plot(gx.bp$proj2, edge.width=E(gx.bp$proj2)*.025, vertex.shape='circle',
layout=layout.fruchterman.reingold)
as_data_frame(gx, what='vertices')
biAcq(gi, '1', '2', T)
##--------------------------------------------------------------------------------------
## Small 5x5 random bipartite demo
gx=sample_bipartite(5,5,'gnp',.5)
vshapes <- sapply(V(gx)$type,function(x)ifelse(x,'circle','square'))
plot(gx,
layout=layout.bipartite,
vertex.shape=vshapes)
as_data_frame(gx, what='vertices')
## BIPARTITE EDGE DATAFRAME (3 markets A-C, 6 firms)
df <- data.frame(
market = c('A','A','A','A', 'B','B','B', 'C','C','C'),
firm = c(1, 2, 3, 4, 3, 4, 5, 4, 5, 6)
)
## SPARSE INCIDENCE MATRIX (rows = firms, cols = markets; Matrix::spMatrix)
R <- spMatrix(nrow=length(unique(df$firm)),
ncol=length(unique(df$market)),
i = as.numeric(factor(df$firm)),
j = as.numeric(factor(df$market)),
x = rep(1, length(as.numeric(df$firm))) )
row.names(R) <- levels(factor(df$firm))
colnames(R) <- levels(factor(df$market))
R
## FIRM_FIRM MATRIX (R %*% t(R): shared-market counts)
Rrow <- tcrossprod(R)
## t(R)
## MODE1::MARKETS
## MODE2: FIRMS
gi <- graph.incidence(t(R))
vshapes <- sapply(V(gi)$type, function(x)ifelse(x,'circle','square'))
plot(gi, vertex.shape=vshapes)
plot(gi,
layout=layout.bipartite,
vertex.shape=vshapes,
vertex.size=power_centrality(gi, exponent = -0.2)*30
)
## All ordered firm-pair acquisitions; NOTE(review): `$delta` again assumes
## the older biAcq.prev return shape -- verify against the current biAcq.
df <- as_data_frame(gi, what='vertices')
for (i in 1:6) {
for (j in 1:6) {
if (i != j) {
df[ ,paste0(i,j)] <- biAcq(gi, as.character(i), as.character(j))$delta
}
}
}
## WHICH MIN
apply(df[df$type==T,3:ncol(df)], 2, which.min)
biAcq(gi, '1', '2', T)
## FIRM-FIRM ADJACENCY
ga <- graph.adjacency(tcrossprod(R), diag = F, mode = 'undirected')
E(ga)$weight <- 1
ga <- simplify(ga, remove.multiple = T, remove.loops = T, edge.attr.comb = list(weight='sum'))
set.seed(2)
plot(ga, edge.width=E(ga)$weight^2)
| /R/acqmmc_bipartite_acq_centrality_example_5.R | no_license | sdownin/compnet | R | false | false | 39,340 | r |
## Dependencies: igraph (graph construction/metrics), Matrix (sparse incidence)
library(igraph)
library(Matrix)
## save plots
## NOTE(review): machine-specific absolute path + setwd() makes the script
## non-portable; consider a relative path.
dirname <- "C:\\Users\\T430\\Google Drive\\PhD\\Dissertation\\competition networks\\acquisitions"
setwd(dirname)
## cache default params
.par = par()
##
#
##
##
# Plot a (possibly bipartite) igraph with project styling.
# - With a `type` vertex attribute: circles for type==TRUE, squares otherwise;
#   without it, all vertices are circles.
# - `focal.firm` (matched by vertex *name*) is filled dark blue.
# - For unipartite graphs, vertices whose edge weight to the focal firm
#   exceeds 1 (multi-market contact) get a dark red frame/label/bold font.
# - RNG seed is fixed before plot() so layouts are reproducible.
#
# @param gx          igraph object (vertex attrs `type`/`name` optional)
# @param layout      igraph layout function
# @param vertex.size vertex size
# @param focal.firm  vertex name to highlight (NA = none)
# @param fam         label font family
# @param edge.curved logical; curved edges
# @param seed        RNG seed applied before plotting
# @param ...         forwarded to plot()
##
plot2 <- function(gx, layout=layout.fruchterman.reingold, vertex.size=15, focal.firm=NA, fam='sans', edge.curved=F, seed=11111, ...)
{
vAttrs <- igraph::list.vertex.attributes(gx)
if ('type' %in% vAttrs) {
vcolors <- sapply(V(gx)$type, function(x)ifelse(x, "SkyBlue2", "gray"))
lcolors <- sapply(V(gx)$type, function(x)ifelse(x, "darkblue", "black"))
vshapes <- sapply(1:vcount(gx),function(x)ifelse(V(gx)$type[x], "circle", "square"))
isBipartite <- length(unique(V(gx)$type)) > 1
} else {
vcolors <- rep("SkyBlue2", vcount(gx))
lcolors <- rep("darkblue", vcount(gx))
vshapes <- rep("circle", vcount(gx))
isBipartite <- FALSE
}
fonts <- rep(1, vcount(gx))
framecols <- rep('black', vcount(gx))
framewidths <- rep(1, vcount(gx))
if(!is.na(focal.firm)) {
vcolors[V(gx)$name==focal.firm] <- 'darkblue'
lcolors[V(gx)$name==focal.firm] <- 'white'
}
if(!isBipartite) {
## NOTE(review): requires an edge `weight` attribute on unipartite input;
## errors otherwise -- confirm all callers set E(gx)$weight.
adjmat <- as_adjacency_matrix(gx, attr = 'weight', sparse = F)
ffidx <- which(V(gx)$name==focal.firm)
mmcidx <- unname(which(adjmat[ , ffidx] > 1))
framecols[mmcidx] <- 'darkred'
lcolors[mmcidx] <- 'darkred'
framewidths[mmcidx] <- 5
fonts[mmcidx] <- 4
}
set.seed(seed)
## NOTE(review): several args below (labels, label.color, label.font, ...)
## are not documented plot.igraph parameters -- presumably ignored; the
## vertex.*/edge.* parameters carry the styling.
plot(gx,
layout = layout,
layout.par = list(),
labels = NULL,
label.color = lcolors,
label.font = NULL,
label.degree = -pi/4,
label.dist = 0,
vertex.label=sapply(1:vcount(gx), function(x) ifelse("name" %in% vAttrs, V(gx)$name[x], x)),
vertex.color = vcolors,
vertex.shape = vshapes,
vertex.size = vertex.size,
vertex.frame.color=framecols,
vertex.frame.width=framewidths,
vertex.label.family=fam, # Font family of the label (e.g."Times", "Helvetica")
vertex.label.font=fonts, # Font: 1 plain, 2 bold, 3, italic, 4 bold italic, 5 symbol
vertex.label.color=lcolors,
edge.color = "darkgrey",
edge.width = 1 + 2 * (E(gx)$weight-1),
edge.labels = NA,
edge.lty=1,
margin=0,
loop.angle=0,
axes = FALSE,
xlab = "",
ylab = "",
xlim=c(-1,1),
ylim=c(-1,1),
edge.curved=edge.curved,
...)
}
# ##
# # Bipartite Graph Acquisition -- PREVIOUS VERSION
# ##
# biAcq.prev <- function(gi, acquirer, target, decay=-0.2, project=T, verbose=T)
# {
# if (project) {
# gi.l <- bipartite.projection(gi, multiplicity = T, remove.type = F)
# vcs <- sapply(gi.l,vcount)
# gi <- gi.l[[ which.max(vcs) ]]
# V(gi)$type <- unlist(V(gi)$type)
# V(gi)$name <- unlist(V(gi)$name)
# }
#
# tdf <- as_data_frame(gi, what='vertices')
# tdf$before <- power_centrality(gi, exponent = decay)
# tdf$after <- NA
#
# vnamemap <- names(V(gi))
# vmap <- as.integer(V(gi))
#
# revord <- which(vnamemap==target) < which(vnamemap==acquirer)
# comb.func <- ifelse(revord, 'last', 'first')
#
# vmap[vnamemap==target] <- vmap[vnamemap==acquirer]
#
# vertex.attr.comb <- list(type=ifelse(revord, 'last', 'first'),
# name=ifelse(revord, 'last', 'first'))
#
# gi.2 <- igraph::contract.vertices(gi, vmap, vertex.attr.comb = vertex.attr.comb)
# gi.2 <- igraph::simplify(gi.2, remove.multiple = T, remove.loops = T, edge.attr.comb = list(weight='sum'))
# gi.2 <- igraph::induced.subgraph(gi.2, V(gi.2)[igraph::degree(gi.2)>0])
#
# tdf$after[tdf$name!=target] <- power_centrality(gi.2, exponent = decay)
# tdf$delta <- tdf$after - tdf$before
#
# if (verbose)
# print(tdf)
#
# return(list(df=tdf, g=gi.2))
# }
##
# Bipartite Graph Acquisition
##
##
# Simulate an acquisition by collapsing the target vertex into the acquirer.
#
# @param gi            igraph with a `name` vertex attribute (uni- or bipartite)
# @param acquirer.name vertex name of the acquiring firm
# @param target.name   vertex name of the acquired firm
# @param project       if TRUE and gi is bipartite, first project onto the
#                      larger mode (firm-firm) before collapsing
# @param verbose       currently unused (kept for interface compatibility)
# @return the post-acquisition graph (contracted, simplified, isolates removed)
##
biAcq <- function(gi, acquirer.name, target.name, project=F, verbose=T)
{
if ( ! 'name' %in% igraph::list.vertex.attributes(gi))
stop('gi must have name attribute.')
is.bi <- is.bipartite.safe(gi)
if (project & is.bi) {
gi.l <- bipartite.projection(gi, multiplicity = T, remove.type = F)
vcs <- sapply(gi.l,vcount)
gi <- gi.l[[ which.max(vcs) ]]
is.bi <- is.bipartite.safe(gi)
}
acquirer <- which(V(gi)$name==acquirer.name)
target <- which(V(gi)$name==target.name)
if (length(acquirer)==0 | length(target)==0) {
stop(sprintf('has acquirer=%s; has target=%s', length(acquirer)>0, length(target)>0))
}
vnamemap <- names(V(gi))
vmap <- as.integer(V(gi))
## If the target precedes the acquirer in vertex order, the surviving
## attribute must be taken from the *last* merged vertex instead of the first.
revord <- which(vnamemap==target.name) < which(vnamemap==acquirer.name)
comb.func <- ifelse(revord, 'last', 'first')
## Map the target onto the acquirer's vertex id for contract.vertices()
vmap[vnamemap==target.name] <- vmap[vnamemap==acquirer.name]
vertex.attr.comb <- list(type=function(x)ifelse(revord, x[length(x)], x[1]),
name=function(x)ifelse(revord, x[length(x)], x[1]))
## Bipartite: keep one firm-market weight; unipartite: sum overlap weights
if (is.bi) {
edge.attr.comb <- list(weight=function(x)ifelse(revord, x[length(x)], x[1]))
} else {
edge.attr.comb <- list(weight='sum')
}
gi.2 <- igraph::contract.vertices(gi, vmap, vertex.attr.comb = vertex.attr.comb)
gi.2 <- igraph::simplify(gi.2, remove.multiple = T, remove.loops = T, edge.attr.comb = edge.attr.comb)
## Drop isolates created by the contraction
gi.2 <- igraph::induced.subgraph(gi.2, V(gi.2)[igraph::degree(gi.2)>0])
return(gi.2)
}
##
#
##
##
# Linearly (or log-) rescale a numeric vector onto the interval
# [min(minmax), max(minmax)].
#
# @param x      numeric vector of raw values (e.g. degrees)
# @param minmax length-2 non-negative vector giving the target range
# @param log    if TRUE, rescale on a log scale: when all values are 0 or >=1,
#               use log(x)/max(finite log(x)) (non-finite outputs fall back to
#               the range minimum); otherwise shift proportions above 1 first
#               so the log transform is well defined
# @return numeric vector of the same length as x, mapped into the target range
##
mapTo <- function(x, minmax=c(9,20), log=F) {
  if (any(minmax < 0)) stop ("Negative output range is not allowed.\nPlease assign minmax argument as vector of 2 non-negative values (x>=0) and rerun function.")
  lower <- min(minmax)
  span <- max(minmax) - lower
  ## proportion of each value along [min(x), max(x)]
  prop <- (x - min(x)) / (max(x) - min(x))
  if (!log) {
    return(lower + span * prop)
  }
  if (all(x >= 1 | x == 0)) {
    lx <- log(x)
    ## normalize by the largest finite log value
    prop <- lx / max(lx[lx < Inf & lx > -Inf])
    out <- lower + span * prop
    ## non-finite results (e.g. from log(0)) collapse to the range minimum
    bad <- is.na(out) | is.nan(out) | out == -Inf | out == Inf
    out[bad] <- lower
    return(out)
  }
  ## augment proportions above 1 so they are log-transformable, then
  ## renormalize by the log of the maximum to stay proportional to x
  shifted <- (prop + 1) / min(prop + 1)
  lower + span * log(shifted) / log(max(shifted))
}
##
# Bonacich power centrality of gx with decay exponent `decay`.
##
centPow <- function(gx, decay=-0.1)
{
  power_centrality(gx, exponent = decay)
}
##
# Power-centrality table: one row per vertex (`name`), one column per decay
# value in `betas` (column named after the beta).
##
df.pow <- function(gx, betas=c(-.3,-.2,-.1,-.01,0))
{
  out <- data.frame(name=V(gx)$name)
  for (b in betas) {
    out[[as.character(b)]] <- centPow(gx, b)
  }
  out
}
##
# checks if graph is actually bipartite by `type` attribute
# - if only one `type` then not functionally bipartite
# - if more than one `type` then is functionally biparite
##
##
# Functionally-bipartite check: TRUE only when the graph is bipartite, has a
# `type` vertex attribute, AND both vertex types are actually present (a
# single-type graph is not functionally bipartite).
##
is.bipartite.safe <- function(g)
{
  ## FIX: scalar `||` (short-circuit) inside `if`, not vectorized `|`
  if (!igraph::is.bipartite(g) || ! 'type' %in% igraph::list.vertex.attributes(g))
    return(FALSE)
  length(unique(V(g)$type)) > 1
}
##
# Combine two bipartite networks
# - keeps original names of vertex and edge properties (unlike igraph "+" operator: g3 <- g1 + g2)
##
##
# Combine two bipartite networks into one graph, keeping original vertex and
# edge attributes (unlike igraph's "+" operator, which renames them).
# Both graphs must have `name` and `type` vertex attributes.
##
bipartiteCombine <- function(gx1, gx2) {
  .vt <- unique(rbind(as_data_frame(gx1,'vertices'),as_data_frame(gx2,'vertices')))
  nz <- names(.vt)
  ## FIX: require BOTH attributes (original `&` only errored when both were
  ## missing, contradicting the message and breaking the reorder below)
  if ((! 'name' %in% nz) || (! 'type' %in% nz))
    stop('graphs must have name and type attributes.')
  idx.name <- which(nz=='name')
  idx.type <- which(nz=='type')
  idx.rest <- which( ! nz %in% c('name','type'))
  ## graph.data.frame() treats the first vertex column as the name
  .vt <- .vt[ ,c(idx.name,idx.type,idx.rest)]
  .el <- rbind(as_data_frame(gx1,'edges'),as_data_frame(gx2,'edges'))
  graph.data.frame(d = .el, directed = F, vertices = .vt)
}
##
# Gets vertex indices of firms in dyads that have multi-market contact
##
##
# Vertex indices of firms in dyads with multi-market contact (edge weight > 1
# with the focal firm). Bipartite input is delegated to which.mmc.bipartite().
#
# @param g          igraph (weighted; weight=1 assigned if missing)
# @param focal      focal firm vertex index
# @param keep.focal include the focal vertex itself in the result
# @param proj.max   (bipartite only) project onto the larger mode
# @return sorted, unique vertex indices
##
which.mmc <- function(g, focal, keep.focal=F, proj.max=T) {
  ## FIX: inherits() instead of class(g) != 'igraph' (robust to multi-class)
  if (!inherits(g, 'igraph')) stop('g must be an igraph object')
  if (!igraph::is.weighted(g)) E(g)$weight <- 1
  if (is.bipartite.safe(g))
    return(which.mmc.bipartite(g, focal, keep.focal, proj.max))
  ## NOT BIPARTITE
  if (! focal %in% seq_len(vcount(g)))
    stop('focal firm index must be in vertices of g')
  adjm <- igraph::as_adjacency_matrix(g, attr = 'weight', sparse = F)
  vids <- unname(which(adjm[focal,] > 1))
  if (keep.focal) vids <- c(vids, focal)
  sort(unique(vids))
}
##
# Gets BIPARTITE graph vertex indices of firms in dyads that have multi-market contact
##
##
# Bipartite version of which.mmc(): finds MMC firms on the chosen projection,
# then maps them back to the original bipartite graph, returning the firm
# vertex ids plus all of their market neighbors.
#
# @param g          bipartite igraph with edge weights and vertex names
# @param focal      focal firm index *in the projected graph*
# @param keep.focal include the focal firm and its id in the result
# @param proj.max   project onto the larger mode (TRUE) or smaller (FALSE)
# @return sorted vertex ids in the ORIGINAL bipartite graph
##
which.mmc.bipartite <- function(g, focal, keep.focal=F, proj.max=T) {
g2.l <- bipartite.projection(g, multiplicity = T, remove.type = F)
vcs <- sapply(g2.l,vcount)
idx.proj <- ifelse(proj.max, which.max(vcs), which.min(vcs))
g2 <- g2.l[[idx.proj]]
if (! focal %in% 1:vcount(g2))
stop('focal firm index must be in vertices of g')
## MMC partners in the projection: edge weight > 1 means >1 shared market
adjm2 <- igraph::as_adjacency_matrix(g2, attr = 'weight', sparse = F)
proj.vids <- unname(which(adjm2[focal,] > 1))
if (keep.focal) {
proj.vids <- sort(c(proj.vids, focal))
}
proj.names <- V(g2)$name[proj.vids]
## Map projection names back to the bipartite graph and add each firm's
## market neighbors
vids.f <- which(V(g)$name %in% proj.names)
vids.m <- c()
for (v in vids.f) {
vids.m <- unique(c(vids.m, as.integer(igraph::neighbors(g, v))))
}
## NOTE(review): `focal` is an index; matching it against names assumes firm
## names are their stringified indices -- confirm this holds for all callers.
vids.focal <- which(V(g)$name==as.character(focal))
if (keep.focal) {
return(sort(c(vids.f, vids.m, vids.focal)))
} else {
return(sort(c(vids.f, vids.m)))
}
}
##
#
##
##
# For a bipartite firm-market graph, return the edge ids of firm-market edges
# that belong ONLY to non-MMC firm dyads (firms sharing exactly one market)
# and to no MMC dyad (firms sharing >1 market). Used by mmcSubgraph() to
# delete edges while preserving all MMC structure.
# Approach: bibcoupling() counts shared neighbors per firm pair; length-3
# shortest paths (Firm-Market-Firm) identify the specific bipartite edges.
##
getNotMmcBipartiteEdgeIds <- function(g.sub)
{
# adjm <- getAdjacencyMatrix(g.sub, proj.max = T)
firmnames <- getMaxProjNames(g.sub)
vids <- which(V(g.sub)$name %in% firmnames)
# mids <- which( ! V(g.sub)$name %in% firmnames)
bic <- igraph::bibcoupling(g.sub) ## shared neighbors (## possibly large matrix, need to subject to only firm vids)
##------------------------------
## NOT MMC eids
##------------------------------
## 1. F-F non-MMC dyads (exactly one shared market)
ffno <- which(bic == 1, arr.ind=T) ## 2-col matrix of (row,col) id tuples for non-mmc elements
ffno <- ffno[which(ffno[,1] %in% vids & ffno[,2] %in% vids), ]
## 2. F-M-F No-MMC 3-paths: cache as (F,M),(M,F) tuples
fm.no <- c() ## Firm-Market-Firm No-MMC paths: saved as (F1-M, M-F2, ...)
urow1 <- unique(ffno[,1])
cat(' fetching Non-MMC bipartite dyads...\n')
## NOTE(review): 1:length(urow1) iterates c(1,0) when urow1 is empty --
## presumably inputs always have at least one non-MMC dyad; confirm.
for (i in 1:length(urow1)) {
if (i %% 50 == 0) cat(sprintf(' %s (%.2f%s)\n',i,100*i/length(urow1),'%'))
r1i.r2js <- ffno[which(ffno[,1] == urow1[i]),2]
xi.paths <- igraph::all_shortest_paths(g.sub, urow1[i], r1i.r2js)$res
ls <- sapply(xi.paths, length)
idx <- which(ls==3)
for (j in idx) {
x <- as.integer(xi.paths[[j]])
fm.no <- c(fm.no, c(x[1],x[2], x[2],x[3]))
}
}
cat(' done.\n')
## 3. save bipartite F-M edge IDs for non-MMC dyads
eid.no <- unique(igraph::get.edge.ids(g.sub, vp = fm.no, directed = F, error = F, multi = F))
##------------------------------
## MMC eids (filter out)
##-------------------------------
## 4. MMC firm-firm dyads (more than one shared market)
ff.mmc <- which(bic > 1, arr.ind=T) ##
ff.mmc <- ff.mmc[which(ff.mmc[,1] %in% vids & ff.mmc[,2] %in% vids), ]
## 5. F-M-F MMC 3-paths
fm.mmc <- integer() ## cache MMC tuples (F,M),(M,F),(...)
urow1 <- unique(ff.mmc[,1])
cat(' fetching MMC bipartite dyads...\n')
for (i in 1:length(urow1)) {
if (i %% 50 == 0) cat(sprintf(' %s (%.2f%s)\n',i,100*i/length(urow1),'%'))
r1i.r2js <- ff.mmc[which(ff.mmc[,1] == urow1[i]),2]
xi.paths <- igraph::all_shortest_paths(g.sub, urow1[i], r1i.r2js)$res
ls <- sapply(xi.paths, length)
idx <- which(ls==3)
for (j in idx) {
x <- as.integer(xi.paths[[j]])
fm.mmc <- c(fm.mmc, c(x[1],x[2], x[2],x[3]))
}
}
cat(' done.\n')
## 6. save bipartite F-M edge IDs for MMC dyads
eid.mmc <- unique(igraph::get.edge.ids(g.sub, vp = fm.mmc, directed = F, error = F, multi = F))
##--------------------------------
## Check is Non-MMC and is NOT MMC
##--------------------------------
## 7. filter only the Non-MMC F-M dyads that are not included in any MMC F-M dyads (which comprise MMC F-F dyads)
eids <- eid.no[ which( ! eid.no %in% eid.mmc) ]
return(eids)
}
##
# Creates MMC subgraph
# - subsets to firms with MMC relations to another firm
# - removes non-MMC edges (weight <= 1)
##
# ## filter to ego-mmc network (?)
# focal.firm <- which(V(g)$name == focal.name)
# if (length(focal.firm)>0) {
# ## MMC VERTEX SUBGRAPH if `focal` is set
# vids <- which.mmc(g, focal.firm, keep.focal=T)
# g.sub <- igraph::induced.subgraph(g, vids = vids)
# } else {
# g.sub <- g
# }
##
##
# MMC subgraph: removes all edges that do not participate in multi-market
# contact. Bipartite input: drop firm-market edges belonging only to non-MMC
# dyads; unipartite input: drop edges with weight <= 1. Optionally removes
# the resulting isolates.
##
mmcSubgraph <- function(g, remove.isolates=F)
{
  is.bi <- is.bipartite.safe(g)
  g.sub <- g
  ## DROP NON-MMC EDGES
  if (is.bi) {
    eids <- getNotMmcBipartiteEdgeIds(g.sub)
  } else {
    ## BUG FIX: result was previously computed but never assigned, leaving
    ## `eids` undefined and crashing the length() check below
    eids <- which(E(g.sub)$weight <= 1)
  }
  if (length(eids) > 0) {
    g.sub <- igraph::delete.edges(g.sub, eids)
  }
  if (remove.isolates) {
    g.sub <- igraph::induced.subgraph(g.sub, vids = which(igraph::degree(g.sub)>0) )
  }
  g.sub
}
# mmcSubgraphDEBUG <- function(g, focal.name=NA, remove.isolates=F)
# {
# is.bi <- is.bipartite.safe(g)
# focal.firm <- which(V(g)$name == focal.name)
#
# # if (length(focal.firm)>0) {
# # ## MMC VERTEX SUBGRAPH if `focal` is set
# # vids <- which.mmc(g, focal.firm, keep.focal=T)
# # g.sub <- igraph::induced.subgraph(g, vids = vids)
# # } else {
# # g.sub <- g
# # }
# g.sub <- g
#
# ## DROP NON-MMC EDGES
# if (is.bi) {
# # adjm <- getAdjacencyMatrix(g.sub, proj.max = T)
# firmnames <- getMaxProjNames(g.sub)
# vids <- which(V(g.sub)$name %in% firmnames)
# # mids <- which( ! V(g.sub)$name %in% firmnames)
# bic <- igraph::bibcoupling(g.sub) ## shared neighbors (## possibly large matrix, need to subject to only firm vids)
# ## MMC firm-firm dyads
# mmc <- which(bic > 1, arr.ind=T) ##
# mmc <- mmc[which(mmc[,1] %in% vids & mmc[,2] %in% vids), ]
# ## non-MMC firm-firm dyads
# nmmc <- which(bic == 1, arr.ind=T) ## 2-col matrix of (row,col) id tuples for non-mmc elements
# nmmc <- nmmc[which(nmmc[,1] %in% vids & nmmc[,2] %in% vids), ]
# # ###
# # bnmids <- sort(unique(c(nmmc[,1],nmmc[,2])))
# # sapply(bnmids,function(i){
# # ls <- sapply(igraph::all_shortest_paths(g.sub, i, vids[ ! vids %in% i])$res, length)
# # return(length(ls[ls==3]))
# # })
# # ###
# edge.l <- list()
# for (i in 1:nrow(nmmc)) {
# ps <- igraph::all_shortest_paths(g.sub, nmmc[i,1], nmmc[i,2])$res
# if (length(ps)==1) {
# pv <- as.integer(ps[[1]])
# chk1 <- length(which( (mmc[,1]==pv[1] & mmc[,2]==pv[2]) | (mmc[,1]==pv[2] & mmc[,2]==pv[1]) )) == 0
# chk2 <- length(which( (mmc[,1]==pv[2] & mmc[,2]==pv[3]) | (mmc[,1]==pv[3] & mmc[,2]==pv[2]) )) == 0
# if (chk1) edge.l <- c(edge.l, list(c(idx[1],idx[2])))
# if (chk2) edge.l <- c(edge.l, list(c(idx[2],idx[3])))
# }
# }
# eids <- sort(unique(igraph::get.edge.ids(g.sub, vp = edges, directed = F))) ## edge ids of non-mmc firm
# } else {
# eids <- which(E(g.sub)$weight <= 1)
# }
#
# if (length(eids) > 0) {
# g.sub <- igraph::delete.edges(g.sub, eids)
# }
#
# if (remove.isolates) {
# g.sub <- igraph::induced.subgraph(g.sub, vids = which(igraph::degree(g.sub)==0))
# }
#
# return(g.sub)
# }
##
# Gets adjacency matrix -- for either Bipartite or unipartite graphs
# - unipartite, just return adjmat
# - bipartite, return adjmat for the mode with more (proj.max=T) or fewer (proj.max=F) nodes
##
##
# Weighted dense adjacency matrix for either graph kind:
# - unipartite: adjacency of g itself
# - bipartite: adjacency of the larger-mode projection when proj.max=T,
#   else the smaller-mode projection
# Requires an edge `weight` attribute.
##
getAdjacencyMatrix <- function(g, proj.max=T) {
  if (!is.bipartite.safe(g)) {
    return(igraph::as_adjacency_matrix(g, attr = 'weight', sparse = F))
  }
  projections <- bipartite.projection(g, multiplicity = T, remove.type = F)
  sizes <- sapply(projections, vcount)
  pick <- if (proj.max) which.max(sizes) else which.min(sizes)
  igraph::as_adjacency_matrix(projections[[pick]], attr = 'weight', sparse = F)
}
##
#
##
##
# One-mode projection of a bipartite graph; non-bipartite input is returned
# unchanged. max.proj picks the larger (TRUE) or smaller (FALSE) mode.
##
getGraphProjection <- function(g, max.proj=T, remove.type=T)
{
  if (!is.bipartite.safe(g))
    return(g)
  g2.l <- bipartite.projection(g, multiplicity = T, remove.type = remove.type)
  vcs <- sapply(g2.l,vcount)
  ## BUG FIX: ifelse(max.proj, which.max, which.min) errors because ifelse()
  ## rep()-licates its branches and closures cannot be replicated; use a
  ## plain if/else to select the function.
  which.proj <- if (max.proj) which.max else which.min
  g2.l[[ which.proj(vcs) ]]
}
##
# Get vertex IDs
# - if bipartite, return vids of maximal projection (largest size mode)
##
##
# Names of the vertices belonging to the LARGER mode of a bipartite graph
# (i.e. the mode whose projection has more vertices). Requires a `name`
# vertex attribute.
##
getMaxProjNames <- function(gx)
{
  if (! 'name' %in% igraph::list.vertex.attributes(gx))
    stop('gx must have vertex name attribute')
  sizes <- igraph::bipartite.projection.size(gx)
  type.vals <- unique(V(gx)$type)
  big.type <- type.vals[ if (sizes$vcount1 > sizes$vcount2) 1 else 2 ]
  V(gx)$name[ V(gx)$type == big.type ]
}
# mmcSum <- function(g, focal, proj.max=T) {
# adjm <- getAdjacencyMatrix(g, focal, proj.max)
# vids.mmc <- which(adjm[focal,] > 1)
# return(sum(adjm[focal,vids.mmc]))
# }
#
#
# mmcCount <- function(g, focal, proj.max=T) {
# adjm <- getAdjacencyMatrix(g, focal, proj.max)
# return(length(which(adjm[focal,] > 1)))
# }
##
# Per-vertex sum of multi-market-contact weight: for each row of the
# (projected) adjacency matrix, sums the entries strictly greater than 1.
# Errors if g lacks an edge `weight` attribute.
##
getMmcEdgeSum <- function(g, name.remove=T) {
  if (! 'weight' %in% igraph::list.edge.attributes(g)) {
    stop('g must have edge weights')
  }
  adjm <- getAdjacencyMatrix(g, T)
  out <- apply(adjm, 1, function(row) sum(row[row > 1]))
  if (name.remove) {
    out <- unname(out)
  }
  out
}
##
# Per-vertex count of multi-market-contact partners: number of adjacency
# entries strictly greater than 1 in each row of the (projected) matrix.
# Errors if g lacks an edge `weight` attribute.
##
getMmcEdgeCount <- function(g, name.remove=T) {
  if (! 'weight' %in% igraph::list.edge.attributes(g)) {
    stop('g must have edge weights')
  }
  adjm <- getAdjacencyMatrix(g, T)
  out <- apply(adjm, 1, function(row) length(row[row > 1]))
  if (name.remove) {
    out <- unname(out)
  }
  out
}
##
# Size of the largest clique (of at least `min` vertices) containing vertex
# `vid`; returns 0 when the graph has no such cliques at all.
##
getMaxMmcCliqueSize <- function(g, vid, min=3)
{
  all.cliques <- igraph::cliques(g, min = min)
  if (length(all.cliques) == 0)
    return(0)
  has.vid <- sapply(all.cliques, function(cl) vid %in% cl)
  max(sapply(all.cliques[has.vid], length))
}
##
#
##
# getMmcTargetDataframe <- function(gx.m, vid.a, is.ego=FALSE)
# {
# if (is.ego) {
# gx.m <- igraph::make_ego_graph(gx.m, 1, vid.a)[[1]]
# vid.a <- which(as.character(V(gx.m)$name) == as.character(vid.a))
# }
# return(data.frame(
# name=unlist(V(gx.m)$name),
# ##
# sum=mmcEdgeSum(gx.m)[vid.a], ## sum of mmc
# degree=mmcEdgeCount(gx.m)[vid.a], ## number of mmc competitiors
# ##
# clust=igraph::transitivity(gx.m, type = 'global'),
# closeness=unname(igraph::closeness(gx.m, vid.a)),
# eigen=unname(igraph::eigen_centrality(gx.m)$vector[vid.a]),
# pow.n1=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.1)),
# pow.n3=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.3)),
# eccen=unname(igraph::eccentricity(gx.m, vid.a)),
# ##
# central.clos=igraph::centr_clo(gx.m)$centralization / igraph::centr_clo_tmax(gx.m),
# central.eign=igraph::centr_eigen(gx.m)$centralization / igraph::centr_eigen_tmax(gx.m),
# central.betw=igraph::centr_betw(gx.m)$centralization / igraph::centr_betw_tmax(gx.m),
# central.degr=igraph::centr_degree(gx.m)$centralization / igraph::centr_degree_tmax(gx.m),
# ##
# subgraph=unname(igraph::subgraph.centrality(gx.m)[vid.a]),
# density=igraph::graph.density(gx.m),
# constraint=unname(igraph::constraint(gx.m, vid.a)),
# max.clique=maxCliqueSize(gx.m, vid.a),
# ##
# stringsAsFactors = F
# ))
# }
##
#
##
##
# One-row data frame of MMC and network metrics for the named vertex.
#
# @param gx.m      igraph (bipartite or unipartite) with vertex names
# @param vert.name name of the focal vertex
# @param ego.order if >= 1, restrict to the ego network of that order
#                  (doubled for bipartite graphs, since firm-firm distance
#                  is two hops through a market)
# @param proj.uni  if TRUE, compute everything on the one-mode projection
##
getMmcDf <- function(gx.m, vert.name, ego.order=NA, proj.uni=FALSE)
{
vid.a <- which(V(gx.m)$name==vert.name)
if (length(vid.a)==0)
stop(sprintf('vert.name `%s` not in graph gx.m',vert.name))
if (proj.uni) {
gx.m <- getGraphProjection(gx.m)
vid.a <- which(V(gx.m)$name==vert.name)
} else {
## keep a one-mode projection alongside the bipartite graph for MMC metrics
.proj.gx.m <- getGraphProjection(gx.m)
.proj.vid.a <- which(V(.proj.gx.m)$name==vert.name)
}
## NOTE(review): when proj.uni=TRUE, `.proj.gx.m` is never defined, yet it is
## referenced below (ego branch and the ifelse() calls) -- ifelse() evaluates
## both branches, so this path errors; confirm proj.uni=TRUE is unused.
if (!is.na(ego.order) & ego.order >= 1) {
ord <- ifelse(is.bipartite.safe(gx.m), 2*ego.order, 1*ego.order) ## bipartite twice distance
gx.m <- igraph::make_ego_graph(gx.m, ord, vid.a)[[1]]
vid.a <- which(V(gx.m)$name == vert.name)
##
ord <- ifelse(is.bipartite.safe(.proj.gx.m), 2*ego.order, 1*ego.order) ## bipartite twice distance
.proj.gx.m <- igraph::make_ego_graph(.proj.gx.m, ord, .proj.vid.a)[[1]]
.proj.vid.a <- which(V(.proj.gx.m)$name == vert.name)
}
is.bi <- is.bipartite.safe(gx.m)
## NOTE(review): ifelse() on the scalar `is.bi` returns a length-1 result,
## so sum/degree take only the FIRST vertex's value, not vid.a's (the older
## commented-out versions indexed [vid.a]) -- verify intent before relying
## on these columns.
df <- data.frame(
name=unlist(V(gx.m)$name[vid.a]),
is.bi=is.bi,
v=vcount(gx.m),
e=ecount(gx.m),
##
sum=ifelse(is.bi, getMmcEdgeSum(.proj.gx.m), getMmcEdgeSum(gx.m)),
degree=ifelse(is.bi, getMmcEdgeCount(.proj.gx.m), getMmcEdgeCount(gx.m)),
max.clique=ifelse(is.bi, getMaxMmcCliqueSize(.proj.gx.m, .proj.vid.a), getMaxMmcCliqueSize(gx.m, vid.a)),
##
clust=igraph::transitivity(gx.m, type = 'global'),
closeness=unname(igraph::closeness(gx.m, vid.a)),
eigen=unname(igraph::eigen_centrality(gx.m)$vector[vid.a]),
pow.n1=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.1)),
pow.n3=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.3)),
eccen=unname(igraph::eccentricity(gx.m, vid.a)),
##
central.clos=igraph::centr_clo(gx.m)$centralization / igraph::centr_clo_tmax(gx.m),
central.eign=igraph::centr_eigen(gx.m)$centralization / igraph::centr_eigen_tmax(gx.m),
central.betw=igraph::centr_betw(gx.m)$centralization / igraph::centr_betw_tmax(gx.m),
central.degr=igraph::centr_degree(gx.m)$centralization / igraph::centr_degree_tmax(gx.m),
##
subgraph=unname(igraph::subgraph.centrality(gx.m)[vid.a]),
density=igraph::graph.density(gx.m),
constraint=unname(igraph::constraint(gx.m, vid.a)),
##
stringsAsFactors = F
)
return(df)
}
# ##
# #
# ##
# getMmcAcquirerDf <- function(gx.m, vert.name, is.ego=FALSE)
# {
# vid.a <- which(V(gx.m)$name==vert.name)
# if (is.ego) {
# ord <- ifelse(is.bipartite.safe(gx.m), 2, 1) ##
# gx.m <- igraph::make_ego_graph(gx.m, ord, vid.a)[[1]]
# vid.a <- which(as.character(V(gx.m)$name) == as.character(vid.a))
# }
# return(data.frame(
# name=unlist(V(gx.m)$name[vid.a]),
# ##
# sum=mmcEdgeSum(gx.m)[vid.a], ## sum of mmc
# degree=mmcEdgeCount(gx.m)[vid.a], ## number of mmc competitiors
# max.clique=maxCliqueSize(gx.m, vid.a),
# ##
# clust=igraph::transitivity(gx.m, type = 'global'),
# closeness=unname(igraph::closeness(gx.m, vid.a)),
# eigen=unname(igraph::eigen_centrality(gx.m)$vector[vid.a]),
# pow.n1=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.1)),
# pow.n3=unname(igraph::power_centrality(gx.m, vid.a, exponent = -0.3)),
# eccen=unname(igraph::eccentricity(gx.m, vid.a)),
# ##
# central.clos=igraph::centr_clo(gx.m)$centralization / igraph::centr_clo_tmax(gx.m),
# central.eign=igraph::centr_eigen(gx.m)$centralization / igraph::centr_eigen_tmax(gx.m),
# central.betw=igraph::centr_betw(gx.m)$centralization / igraph::centr_betw_tmax(gx.m),
# central.degr=igraph::centr_degree(gx.m)$centralization / igraph::centr_degree_tmax(gx.m),
# ##
# subgraph=unname(igraph::subgraph.centrality(gx.m)[vid.a]),
# density=igraph::graph.density(gx.m),
# constraint=unname(igraph::constraint(gx.m, vid.a)),
# ##
# stringsAsFactors = F
# ))
# }
##-----------------------------------------------------------------------------------
##==================================
## FIRM-MARKET GRAPH
##----------------------------------
# ## EXAMPLE OF ALL 4 QUADRANTS
# n1 <- 4
# n2 <- 12
# focal.firm <- as.character(4)
# set.seed(1133241) #1133241
# gx=sample_bipartite(n1,n2,'gnp',.62)
# ## SPARSE 1
# n1 <- 5
# n2 <- 12
# focal.firm <- as.character(4)
# set.seed(11111)
# gx=sample_bipartite(n1,n2,'gnp',.6)
##
# ## DENSE 2
# n1 <- 4
# n2 <- 12
# focal.firm <- as.character(4)
# set.seed(1133241)
# gx=sample_bipartite(n1,n2,'gnp',.70)
##
##----------------------------------
# ## Main Cluster
# # n1 <- 4 ## markets
# # n2 <- 12 ## firms
## Cluster sizes: m markets, f firms
c1 <- list(m = 4, f = 12)
## Cluster 2
c2 <- list(m = 2, f = 6)
# c1 <- list(m = 10, f = 1200)
# ## Cluster 2
# c2 <- list(m = 5, f = 600)
focal.firm <- '4'
## CREATE RANDOM BIPARTITE FIRM_MARKET
## (markets named with letters, firms with integers; unit edge weights)
set.seed(1133241) #1133241
gx_1 <- sample_bipartite(c1$m,c1$f,'gnp',.62)
V(gx_1)$name <- c(LETTERS[1:c1$m], 1:c1$f)
E(gx_1)$weight <- 1
set.seed(11341) #1133241
gx_2 <- sample_bipartite(c2$m,c2$f,'gnp',.72)
V(gx_2)$name <- c(LETTERS[(c1$m+1):(c1$m+c2$m)], (c1$f+1):(c1$f+c2$f))
E(gx_2)$weight <- 1
## COMBINE the two disjoint clusters into one bipartite graph
gx <- bipartiteCombine(gx_1, gx_2)
# .vt <- unique(rbind(as_data_frame(gx1,'vertices'),as_data_frame(gx2,'vertices')))
# nz <- names(.vt)
# idx.name <- which(nz=='name')
# idx.type <- which(nz=='type')
# .vt <- .vt[ ,c(idx.name,idx.type)] ## rearrange "name" column first
# .el <- rbind(as_data_frame(gx1,'edges'),as_data_frame(gx2,'edges'))
# gx <- graph.data.frame(d = .el, directed = F, vertices = .vt)
## BIMODAL FIRM_MARKET PLOT (bipartite layout vs. force-directed)
vshapes <- sapply(V(gx)$type,function(x)ifelse(x,'circle','square'))
par(mar=c(.1,.1,.1,.1), mfrow=c(1,2))
plot2(gx,
layout=layout.bipartite,
vertex.shape=vshapes,
vertex.size=18,
focal.firm=focal.firm)
plot2(gx,
layout=layout.kamada.kawai,
vertex.shape=vshapes,
vertex.size=18,
focal.firm=focal.firm, edge.curved = F)
par(mfrow=c(1,1))
## UNIMODAL FIRM_FIRM projection (proj2 = firm mode)
gx.ff <- bipartite.projection(gx, remove.type = F)$proj2
V(gx.ff)$type <- unlist(V(gx.ff)$type)
## UNIMODAL FIRM_FIRM ADJACENCY MATRIX (weights = shared-market counts)
adjmat <- as_adjacency_matrix(gx.ff, attr = 'weight', sparse = F)
print(adjmat)
## SUM MMC over dyads with weight > 1
ffidx <- which(V(gx.ff)$name==focal.firm)
mmcidx <- which(adjmat[, ffidx] > 1)
print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
## PLOT FIRM_FIRM NETWORk
vshapes <- sapply(V(gx.ff)$type,function(x)ifelse(x,'circle','square'))
## Save plot of bipartite --> firm-firm competition networ
# png(sprintf("firm_market_firm_firm_2side_N%s_M%s.png",n2,n1), height = 4.5, width = 8.5, units = 'in', res = 250)
par(mar=c(.1,.1,.1,.1), mfrow=c(1,2))
plot2(gx,
layout=layout.bipartite,
vertex.shape=vshapes,
vertex.size=18,
focal.firm=focal.firm)
plot2(gx.ff,
layout=layout.fruchterman.reingold,
vertex.shape=vshapes,
vertex.size= 18, ##1.1*mapTo(centPow(gx.ff, beta = -.01)),
focal.firm=focal.firm
)
# dev.off()
## Acquisition demo: before / after / MMC-subgraph for two example targets
vid.a <- 4
vid.ts <- c(15,16)
par(mfrow=c(2,3), mar=c(.1,.1,1.5,.1))
for (vid.t in vid.ts)
{
plot2(gx, main="Pre-Acquisition")
plot2(biAcq(gx, vid.a, vid.t), main=sprintf("%s==>%s",vid.a,vid.t))
## NOTE(review): the second positional arg `vid.a` lands in mmcSubgraph's
## `remove.isolates` parameter (the focal-firm parameter was removed) --
## presumably unintended; confirm the desired call signature.
plot2(mmcSubgraph(biAcq(gx, vid.a, vid.t), vid.a),
main=sprintf("%s==>%s MMC Subgraph",vid.a,vid.t))
}
##==================================
##
## ACQUISITION LOOP -- ALL OTHER FIRMS
##
##
##
##
##
##
##----------------------------------
## focal firm
## Baseline MMC metrics for the focal firm are computed once (df0.t /
## df0.t.e), then each simulated acquisition is compared against them.
## NOTE(review): getMaxProjNames(), getMmcDf(), biAcq(), mmcSubgraph() and
## plot2() are project helpers -- their contracts are not visible here.
focal.firm <- 4
focal.name <- as.character(focal.firm)
## vert names for either uni or bipartite
gnames <- getMaxProjNames(gx)
df.a <- data.frame()
df.a.e <- data.frame()
df0.t <- getMmcDf(gx, focal.name, ego.order=NA)
df0.t.e <- getMmcDf(gx, focal.name, ego.order=1)
df.t.diff <- data.frame()
df.t.e.diff <- data.frame()
## Columns that are identifiers rather than MMC metrics.
meta.attrs <- c('name','targ','is.bi','v','e')
mmc.attrs <- names(df0.t)[which( ! names(df0.t) %in% meta.attrs)]
## NOTE(review): data frames are grown with rbind() inside the loop -- O(n^2)
## copying; fine for small n but collect to a list + bind_rows for larger runs.
for (i in gnames) {
## Acquirer MMC Metrics
df.a <- rbind(df.a, getMmcDf(gx, as.character(i)) )
df.a.e <- rbind(df.a.e, getMmcDf(gx, as.character(i), ego.order=1) )
## Target Synergy MMC Metrics
target.firm <- as.numeric(i)
target.name <- as.character(i)
if (target.firm != focal.firm) {
## NODE COLLAPSE BIPARTITE GRAPH
gx2 <- biAcq(gx, focal.name, target.name, project = F)
cat(sprintf('%s(%s)-->%s(%s)\n',focal.name, focal.firm, target.name, target.firm))
## MMC Subgraph
gx2.sub <- mmcSubgraph(gx2, remove.isolates=T)
plot2(gx2.sub)
## Get MMC metrics
dfi.t <- getMmcDf(gx2.sub, focal.name)
dfi.t.e <- getMmcDf(gx2.sub, focal.name, ego.order=1)
## make diff df
dfi.t.diff <- dfi.t
dfi.t.e.diff <- dfi.t.e
## set diff values
## Post-acquisition metrics minus the pre-acquisition baseline.
dfi.t.diff[,mmc.attrs] <- dfi.t[,mmc.attrs] - df0.t[,mmc.attrs]
dfi.t.e.diff[,mmc.attrs] <- dfi.t.e[,mmc.attrs] - df0.t.e[,mmc.attrs]
## add target
dfi.t.diff$targ <- target.name
dfi.t.e.diff$targ <- target.name
## append
df.t.diff <- rbind(df.t.diff, dfi.t.diff)
df.t.e.diff <- rbind(df.t.e.diff, dfi.t.e.diff)
## dataframe
# idx <- which(as.character(acq.df$name)==target.firm)
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# pngfile <- sprintf("%s\\firm_firm_mmc_acquisition%s_1.png",dirname, target.firm)
# png(pngfile, width = 5, height = 5, units = 'in', res = 250)
# par(mar=c(.1,.1,.1,.1))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
# dev.off()
}
}
## SAVE DATAFRAMES
## Write acquirer-level and target-synergy MMC comparison tables to CSV.
## NOTE(review): `dirname`, `c1` and `c2` are defined outside this chunk --
## confirm they exist before running. The "\\" separators assume Windows paths.
print(df.a)
csvfilename <- sprintf("%s\\acquisition_acquirer_mmc_compare_c1M%s_c1N%s_c2M%s_c2N%s.csv", dirname, c1$m, c1$f, c2$m, c2$f)
write.csv(df.a, file = csvfilename)
print(df.a.e)
csvfilename <- sprintf("%s\\acquisition_acquirer_mmc_compare_EGO_c1M%s_c1N%s_c2M%s_c2N%s.csv", dirname, c1$m, c1$f, c2$m, c2$f)
write.csv(df.a.e, file = csvfilename)
print(df.t.diff)
csvfilename <- sprintf("%s\\acquisition_mmc_synergies_structure_position_compare_c1M%s_c1N%s_c2M%s_c2N%s.csv", dirname, c1$m, c1$f, c2$m, c2$f)
write.csv(df.t.diff, file = csvfilename)
print(df.t.e.diff)
csvfilename <- sprintf("%s\\acquisition_mmc_synergies_structure_position_compare_EGO_c1M%s_c1N%s_c2M%s_c2N%s.csv", dirname, c1$m, c1$f, c2$m, c2$f)
write.csv(df.t.e.diff, file = csvfilename)
## ACQUIRER
## Histogram each numeric MMC attribute for the acquirer table.
par(mfrow=c(3,3), mar=c(1,1,2.5,1))
for (attr in mmc.attrs) {
if (is.numeric(df.a[,attr]))
hist(df.a[,attr], col='gray', main=attr)
}
## TARGET SYNERGY
## Same for the target-synergy diffs; skip constant columns (hist would fail
## to show anything useful).
par(mfrow=c(3,3), mar=c(1,1,2.5,1))
for (attr in mmc.attrs) {
x <- df.t.diff[,attr]
if (is.numeric(x) & length(unique(x)) > 1)
hist(df.t.diff[,attr], col='gray', main=attr)
}
plot2(gx)
## Find attributes that "separate" targets: columns whose diff takes both
## negative and non-negative values across targets.
sepattrs <- c()
for (attr in mmc.attrs) {
if (length(unique(df.t.diff[,attr] < 0)) > 1)
sepattrs <- c(sepattrs, attr)
}
cat(sprintf('all separating attrs +/-:\n %s\n\n', paste(sepattrs, collapse = ", ")))
View(df.t.diff[,c(meta.attrs,sepattrs)])
sepattrs <- c()
for (attr in mmc.attrs) {
if (length(unique(df.t.e.diff[,attr] < 0)) > 1)
sepattrs <- c(sepattrs, attr)
}
cat(sprintf('EGO separating attrs +/-:\n %s\n\n', paste(sepattrs, collapse = ", ")))
View(df.t.e.diff[,c(meta.attrs,sepattrs)])
##=========================
## EXAMPLE HIGH MARKETS
##------------------------
## Second worked example: 2 firms x 12 markets, denser (p = .72) random
## bipartite graph; same pipeline as above but with the PNG device enabled.
n1 <- 2
n2 <- 12
focal.firm <- as.character(4)
## CREATE RANDOM BIPARTITE FIRM_MARKET
set.seed(1133241) #1133241
gx=sample_bipartite(n1,n2,'gnp',.72)
## Firms get letter names, markets get numbers.
V(gx)$name <- c(LETTERS[1:n1], 1:n2)
E(gx)$weight <- 1
## BIMODAL FIRM_MARKET PLOT
vshapes <- sapply(V(gx)$type,function(x)ifelse(x,'circle','square'))
par(mar=c(.1,.1,.1,.1), mfrow=c(1,2))
plot2(gx,
layout=layout.bipartite,
vertex.shape=vshapes,
vertex.size=18,
focal.firm=focal.firm)
plot2(gx,
layout=layout.kamada.kawai,
vertex.shape=vshapes,
vertex.size=18,
focal.firm=focal.firm, edge.curved = F)
par(mfrow=c(1,1))
## UNIMODAL FIRM_FIRM
gx.ff <- bipartite.projection(gx, remove.type = F)$proj2
V(gx.ff)$type <- unlist(V(gx.ff)$type)
## UNIMODAL FIRM_FIRM ADJACENCY MATRIX
adjmat <- as_adjacency_matrix(gx.ff, attr = 'weight', sparse = F)
print(adjmat)
## SUM MMC
ffidx <- which(V(gx.ff)$name==focal.firm)
mmcidx <- which(adjmat[, ffidx] > 1)
print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
## PLOT FIRM_FIRM NETWORk
vshapes <- sapply(V(gx.ff)$type,function(x)ifelse(x,'circle','square'))
## Save plot of bipartite --> firm-firm competition networ
## Writes to the current working directory.
png(sprintf("firm_market_firm_firm_2side_N%s_M%s.png",n2,n1), height = 4.5, width = 8.5, units = 'in', res = 250)
par(mar=c(.1,.1,.1,.1), mfrow=c(1,2))
plot2(gx,
layout=layout.bipartite,
vertex.shape=vshapes,
vertex.size=18,
focal.firm=focal.firm)
plot2(gx.ff,
layout=layout.fruchterman.reingold,
vertex.shape=vshapes,
vertex.size= 18, ##1.1*mapTo(centPow(gx.ff, beta = -.01)),
focal.firm=focal.firm
)
dev.off()
#
# ##==================================
# ## ACQUISITION 3
# ##----------------------------------
# target.firm <- as.character(3)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
#
# ##==================================
# ## ACQUISITION 6
# ##----------------------------------
# target.firm <- as.character(6)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
#
# ##==================================
# ## ACQUISITION 2
# ##----------------------------------
# target.firm <- as.character(2)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
#
# ##==================================
# ## ACQUISITION 5
# ##----------------------------------
# target.firm <- as.character(5)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
#
# ##==================================
# ## ACQUISITION 10
# ##----------------------------------
# target.firm <- as.character(10)
# ## ACQUISITION UNIMODAL FIRM_FIRM
# gx2.ff <- biAcq(gx, focal.firm, target.firm, project = T)$g
# V(gx2.ff)$type <- unlist(V(gx2.ff)$type)
# V(gx2.ff)$name <- unlist(V(gx2.ff)$name)
# ## ADJACENCY
# adjmat <- as_adjacency_matrix(gx2.ff, attr = 'weight', sparse = F)
# print(adjmat)
# ## SUM MMC
# ffidx <- which(V(gx2.ff)$name==focal.firm)
# mmcidx <- which(adjmat[, ffidx] > 1)
# print(sprintf("FOCAL FIRM %s SUM OF MMC: %s", focal.firm, sum(adjmat[mmcidx, ffidx])))
# ## PLOT
# vshapes <- sapply(V(gx2.ff)$type,function(x)ifelse(x,'circle','square'))
# plot2(gx2.ff,
# layout=layout.fruchterman.reingold,
# vertex.shape=vshapes,
# vertex.size= 18, ##1.1*mapTo(centPow(gx2.ff, beta = -.1)),
# focal.firm=focal.firm
# )
##----------- END ------------------
## Scratch / REPL leftovers below: one-off calls exercising the helpers.
## NOTE(review): `gx2` and `gi` may not exist at this point depending on what
## ran above -- these lines look interactive, not part of the pipeline.
as_adjacency_matrix(bipartite.projection(gx2)$proj2, attr = 'weight', sparse = F)
power_centrality(gx, exponent = -0.2, nodes = V(gx)$type)
biAcq(gi, '1', '2', T)
plot2(gx,vertex.shape=vshapes, layout=layout.kamada.kawai)
# plot(gx,vertex.shape=vshapes, layout=layout.fruchterman.reingold)
E(gx)$weight <- 1
gx.bp <- bipartite.projection(gx)
## NOTE(review): E(gx.bp$proj1)*.3 multiplies the edge *sequence* (edge ids),
## not the weights -- likely meant E(gx.bp$proj1)$weight*.3. Same on the
## next call. Confirm intent before fixing.
plot(gx.bp$proj1, edge.width=E(gx.bp$proj1)*.3, vertex.shape='square')
plot(gx.bp$proj2, edge.width=E(gx.bp$proj2)*.025, vertex.shape='circle',
layout=layout.fruchterman.reingold)
as_data_frame(gx, what='vertices')
biAcq(gi, '1', '2', T)
##--------------------------------------------------------------------------------------
## Small hand-built example: 3 markets (A-C) x 6 firms, via a sparse
## incidence matrix (spMatrix is from the Matrix package).
gx=sample_bipartite(5,5,'gnp',.5)
vshapes <- sapply(V(gx)$type,function(x)ifelse(x,'circle','square'))
plot(gx,
layout=layout.bipartite,
vertex.shape=vshapes)
as_data_frame(gx, what='vertices')
## BIPARTITE EDGE DATAFRAME
df <- data.frame(
market = c('A','A','A','A', 'B','B','B', 'C','C','C'),
firm = c(1, 2, 3, 4, 3, 4, 5, 4, 5, 6)
)
## SPARSE INCIDENCE MATRIX
R <- spMatrix(nrow=length(unique(df$firm)),
ncol=length(unique(df$market)),
i = as.numeric(factor(df$firm)),
j = as.numeric(factor(df$market)),
x = rep(1, length(as.numeric(df$firm))) )
row.names(R) <- levels(factor(df$firm))
colnames(R) <- levels(factor(df$market))
R
## FIRM_FIRM MATRIX
## tcrossprod(R) = R %*% t(R): firm-by-firm count of shared markets.
Rrow <- tcrossprod(R)
## t(R)
## MODE1::MARKETS
## MODE2: FIRMS
gi <- graph.incidence(t(R))
vshapes <- sapply(V(gi)$type, function(x)ifelse(x,'circle','square'))
plot(gi, vertex.shape=vshapes)
plot(gi,
layout=layout.bipartite,
vertex.shape=vshapes,
vertex.size=power_centrality(gi, exponent = -0.2)*30
)
## For every ordered firm pair (i, j), record the `delta` returned by biAcq
## when i acquires j. NOTE(review): biAcq()$delta semantics not visible here.
df <- as_data_frame(gi, what='vertices')
for (i in 1:6) {
for (j in 1:6) {
if (i != j) {
df[ ,paste0(i,j)] <- biAcq(gi, as.character(i), as.character(j))$delta
}
}
}
## WHICH MIN
## NOTE(review): `T` used instead of `TRUE` (reassignable); style issue only.
apply(df[df$type==T,3:ncol(df)], 2, which.min)
biAcq(gi, '1', '2', T)
## FIRM-FIRM ADJACENCY
ga <- graph.adjacency(tcrossprod(R), diag = F, mode = 'undirected')
E(ga)$weight <- 1
## Collapse multi-edges, summing weights so edge weight = shared-market count.
ga <- simplify(ga, remove.multiple = T, remove.loops = T, edge.attr.comb = list(weight='sum'))
set.seed(2)
plot(ga, edge.width=E(ga)$weight^2)
|
## Loading Packages
library(rvest)
library(ggmap)
library(tidyverse)
## Functions to Grab the Name and the Address and Number of Pages
## Scrape the business names from one Yellow Pages results page.
## Returns a character vector with one element per listing.
get_name <- function(url) {
  page <- read_html(url)
  name_nodes <- html_nodes(page, "a.business-name")
  html_text(name_nodes)
}
## Scrape the street addresses from one Yellow Pages results page.
## Returns a character vector with one element per listing that has an
## address node.
get_address <- function(url) {
  page <- read_html(url)
  addr_nodes <- html_nodes(page, "p.adr")
  html_text(addr_nodes)
}
## Scrape the pagination footer text (e.g. "We found N results12345Next")
## from one Yellow Pages results page; used downstream to compute the
## number of result pages.
get_page <- function(url) {
  page <- read_html(url)
  pagination <- html_nodes(page, "div.pagination")
  html_text(pagination)
}
city <- "Seattle" ## Name of the City. If there is a space, add a + between the words. Los+Angeles
state <- "WA" ## State. Two letter abbrevation
## First request: fetch the pagination footer to learn how many result pages
## exist (30 listings per page).
yp <- paste0("https://www.yellowpages.com/search?search_terms=churches&geo_location_terms=",city,"%2C%20",state)
results <- sapply(yp, get_page)
dfs <- lapply(results, data.frame, stringsAsFactors = FALSE)
count <- bind_rows(dfs) %>%
rename(text = X..i..)
## Strip the boilerplate around the result count, leaving just the number.
remove <- c("We found", "results12345Next")
count$text <- gsub(paste0(remove, collapse = "|"), "", count$text)
count <- count %>%
mutate(text = as.numeric(text)) %>%
mutate(pg = text/30) %>%
mutate(pg = ceiling(pg))
## NOTE(review): seq(0, count$pg) assumes `count` has exactly one row --
## confirm; a multi-row result would error here.
pages <- seq(0, count$pg, by = 1)
yp <- paste0("https://www.yellowpages.com/search?search_terms=churches&geo_location_terms=",city,"%2C%20",state,"&page=", pages)
## Get the church name
results <- sapply(yp, get_name)
## Flatten into dataframe and clean up. Get to a tibble
## NOTE(review): as.tibble() is deprecated in current tibble -- as_tibble()
## is the drop-in replacement.
dfs <- lapply(results, data.frame, stringsAsFactors = FALSE)
col_name <- bind_rows(dfs)
col_name <- col_name %>%
rename(name = X..i..) %>% as.tibble()
## Flatten into dataframe and clean up. Get to a tibble
results <- sapply(yp, get_address)
dfs <- lapply(results, data.frame, stringsAsFactors = FALSE)
col_add <- bind_rows(dfs)
col_add <- col_add %>%
rename(address = X..i..) %>% as.tibble()
## Bind the names df and the address df
## NOTE(review): names and addresses are scraped in separate passes;
## bind_cols assumes equal counts in identical order -- a listing with a
## missing address node would misalign every later row. Verify.
name_add <- bind_cols(col_name, col_add)
write_csv(name_add, "D://yp_scrapes/need_geocodes/seattle_name_add.csv")
## For some reason it scrape the city mashed together with the address, this just takes the city name and adds a space before so it can geocode easier
# name_add$address <- gsub("Columbus,", " Columbus,", name_add$address)
## There are dragons here. DO NOT RUN UNTIL YOU ARE READY
## This is your google API key
register_google(key = "XXXXXXXX", account_type = "premium", day_limit = 100000)
## This will geocode the entire address column from the vector we just grabbed. It will take a while
# chi_geo1 <- name_add %>% head(1500)
# chi_geo2 <- name_add %>% tail(1451)
#
# chi_geo_done1 <- geocode(chi_geo1$address)
# chi_geo_done2 <- geocode(chi_geo2$address)
#
# all_chi_geo <- bind_rows(chi_geo_done1, chi_geo_done2)
#
# chicago_done <- bind_cols(name_add, all_chi_geo)
#
# write_csv(chicago_done, "chicago_complete.csv")
## Combine the geocoded lon/lat columns (`col_geo`, produced by the geocoding
## step) with the scraped name/address data.
## NOTE(review): `col_geo` is never created in this script -- the geocoding
## calls above are commented out; confirm it is built before running this.
all_col <- bind_cols(name_add, col_geo) %>% as_tibble()
## Write the combined data out to a csv.
## Fix: the original passed the undefined object `col_total` to write_csv;
## the combined tibble built on the previous line is `all_col`.
## (Also replaced deprecated as.tibble() with as_tibble().)
write_csv(all_col, "columbus_add.csv")
| /full_scrape.R | no_license | ryanburge/yp_scrapes | R | false | false | 3,017 | r |
## Loading Packages
library(rvest)
library(ggmap)
library(tidyverse)
## Functions to Grab the Name and the Address and Number of Pages
## Scrape the business names from one Yellow Pages results page.
## Returns a character vector, one element per listing.
## (Duplicate copy of the same function defined earlier in this dump.)
get_name <- function(url) {
url %>%
read_html() %>%
html_nodes("a.business-name") %>%
html_text()
}
## Scrape the street addresses from one Yellow Pages results page.
## Returns a character vector, one element per listing with an address node.
## (Duplicate copy of the same function defined earlier in this dump.)
get_address <- function(url) {
url %>%
read_html() %>%
html_nodes("p.adr") %>%
html_text()
}
## Scrape the pagination footer text ("We found N results12345Next") used to
## compute how many result pages to fetch.
## (Duplicate copy of the same function defined earlier in this dump.)
get_page <- function(url) {
url %>%
read_html() %>%
html_nodes("div.pagination") %>%
html_text()
}
city <- "Seattle" ## Name of the City. If there is a space, add a + between the words. Los+Angeles
state <- "WA" ## State. Two letter abbrevation
## First request: fetch the pagination footer to learn how many result pages
## exist (30 listings per page).
yp <- paste0("https://www.yellowpages.com/search?search_terms=churches&geo_location_terms=",city,"%2C%20",state)
results <- sapply(yp, get_page)
dfs <- lapply(results, data.frame, stringsAsFactors = FALSE)
count <- bind_rows(dfs) %>%
rename(text = X..i..)
## Strip the boilerplate around the result count, leaving just the number.
remove <- c("We found", "results12345Next")
count$text <- gsub(paste0(remove, collapse = "|"), "", count$text)
count <- count %>%
mutate(text = as.numeric(text)) %>%
mutate(pg = text/30) %>%
mutate(pg = ceiling(pg))
## NOTE(review): seq(0, count$pg) assumes `count` has exactly one row.
pages <- seq(0, count$pg, by = 1)
yp <- paste0("https://www.yellowpages.com/search?search_terms=churches&geo_location_terms=",city,"%2C%20",state,"&page=", pages)
## Get the church name
results <- sapply(yp, get_name)
## Flatten into dataframe and clean up. Get to a tibble
## NOTE(review): as.tibble() is deprecated; as_tibble() is the replacement.
dfs <- lapply(results, data.frame, stringsAsFactors = FALSE)
col_name <- bind_rows(dfs)
col_name <- col_name %>%
rename(name = X..i..) %>% as.tibble()
## Flatten into dataframe and clean up. Get to a tibble
results <- sapply(yp, get_address)
dfs <- lapply(results, data.frame, stringsAsFactors = FALSE)
col_add <- bind_rows(dfs)
col_add <- col_add %>%
rename(address = X..i..) %>% as.tibble()
## Bind the names df and the address df
## NOTE(review): bind_cols assumes names and addresses were scraped in equal
## counts and identical order -- a listing missing an address misaligns rows.
name_add <- bind_cols(col_name, col_add)
write_csv(name_add, "D://yp_scrapes/need_geocodes/seattle_name_add.csv")
## For some reason it scrape the city mashed together with the address, this just takes the city name and adds a space before so it can geocode easier
# name_add$address <- gsub("Columbus,", " Columbus,", name_add$address)
## There are dragons here. DO NOT RUN UNTIL YOU ARE READY
## This is your google API key
register_google(key = "XXXXXXXX", account_type = "premium", day_limit = 100000)
## This will geocode the entire address column from the vector we just grabbed. It will take a while
# chi_geo1 <- name_add %>% head(1500)
# chi_geo2 <- name_add %>% tail(1451)
#
# chi_geo_done1 <- geocode(chi_geo1$address)
# chi_geo_done2 <- geocode(chi_geo2$address)
#
# all_chi_geo <- bind_rows(chi_geo_done1, chi_geo_done2)
#
# chicago_done <- bind_cols(name_add, all_chi_geo)
#
# write_csv(chicago_done, "chicago_complete.csv")
## Combine the geocoded lon/lat columns (`col_geo`, produced by the geocoding
## step) with the scraped name/address data.
## NOTE(review): `col_geo` is never created in this script -- the geocoding
## calls above are commented out; confirm it is built before running this.
all_col <- bind_cols(name_add, col_geo) %>% as_tibble()
## Write the combined data out to a csv.
## Fix: the original passed the undefined object `col_total` to write_csv;
## the combined tibble built on the previous line is `all_col`.
## (Also replaced deprecated as.tibble() with as_tibble().)
write_csv(all_col, "columbus_add.csv")
|
#### States of Fragility Report 2016 - OECD
# Written by David Hammond Institute for Economics and Peace
# 28 May 2016 This script:
# (1) processes all original data files into a standard tabular format and
# (2) calculates the SFR 2016 rankings
####
source("./lib/load-libraries.R")
# NOTE(review): rm(list = ls()) wipes the caller's global environment, and the
# lapply below DELETES every file in data_out/ and graphs/ -- both are
# destructive side effects; run only as a standalone script.
rm(list = ls())
output_folders <- c("./data_out/", "./graphs")
lapply(output_folders, function(x) file.remove(list.files(x, full.names = T)))
options(stringsAsFactors = FALSE)
options(useFancyQuotes = "UTF-8")
# NOTE(review): reload.project()/cache() presumably come from ProjectTemplate
# (loaded via load-libraries.R) -- confirm.
reload.project(override.config = list(munging = T))
cache("raw.data")
set.seed(12345)
source("./lib/funcs.R")
##### Step 1 #### Set parameters for calculations
# 1. The fragile clusters
# 2. the number of clusters in each dimension
# 3. whether to drop highly correlated indicators in each dimension
# 4. The cluster method, ward.d2 selected as the simplest to explain
# Per-dimension cluster labels (A = most fragile end after relabelling below)
# that count as "fragile" when tallying in Step 3.
fragile.clusters <- list(Environmental = c("A", "B", "C", "D"), Political = c("A", "B", "C", "D"), Economic = c("A",
"B", "C", "D", "E", "F"), Security = c("A", "B", "C", "D", "E", "F"), Societal = c("A", "B", "C",
"E", "D"))
fragile.levels <- read_excel("./data/additional data/dimensional fragility.xlsx")
# Composite key used to join cluster assignments to fragility descriptions.
fragile.levels$join.on <- paste(fragile.levels$dimension, fragile.levels$cluster)
num.clusters <- list(Environmental = 8, Political = 8, Economic = 8, Security = 8, Societal = 8)
drop.indicators.based.on.correlations <- T
cluster.method <- "ward.D2"
pca.axis.labels <- read_excel("./data/additional data/pca axis labels.xlsx")
pca.axis.labels <- split(pca.axis.labels, factor(pca.axis.labels$dimension))
# Accumulator for indicators dropped due to high correlation (filled in Step 2).
all.drops <- NULL
round.numbers <- T
################### Step 2 #### Calculate PCA for each dimension and plot
# For each fragility dimension: build a country x indicator panel, drop highly
# correlated indicators, run PCA, hierarchically cluster countries on the
# first two PCs, and save an annotated biplot. Results accumulate across
# iterations via <<- into all.distances / all.dimension.pca.metrics / all.drops.
all.distances <- NULL
all.dimension.pca.metrics <- list()
counter <- 3
all.results <- sapply(sort(unique(raw.data$dimension)), function(i) {
# take dimension subset of raw.data
temp <- raw.data %>% filter(dimension == i)
temp <- temp %>% select(iso3c, type, variablename, imputed)
# rename for the bi-plots
# (C) = Coping indicator, (R) = Risk indicator.
pos <- temp$type == "Coping"
temp$variablename[pos] <- paste(temp$variablename[pos], " (C)", sep = "")
temp$variablename[!pos] <- paste(temp$variablename[!pos], " (R)", sep = "")
# create panel data
temp <- temp %>% select(-type) %>% distinct() %>% spread(variablename, imputed)
# drop highly correllated indicators
# NOTE(review): findCorrelation() is from caret -- presumably loaded by
# load-libraries.R; confirm.
drops <- findCorrelation(cor(temp[, -1]))
if (length(drops) > 0 & drop.indicators.based.on.correlations) {
all.drops <<- rbind(all.drops, data.frame(dimension = i, indicators = names(temp)[drops + 1]))
data.frame(dimension = i, indicators = names(temp)[drops + 1])
temp <- temp[, -(drops + 1)]
}
# calculate a pca
# prcomp for scores; FactoMineR::PCA kept separately for contribution stats.
pca <- prcomp(temp[, -1], center = TRUE, scale. = TRUE)
all.dimension.pca.metrics <<- c(all.dimension.pca.metrics, list(PCA(temp[, -1], graph = F)))
names(all.dimension.pca.metrics)[length(all.dimension.pca.metrics)] <<- i
# switch diriction for ease of reading
# Orient PC1 so Somalia (high fragility) scores above Iceland (low).
somalia <- which(temp$iso3c == "SOM")
iceland <- which(temp$iso3c == "ISL")
if (pca$x[somalia, 1] > pca$x[iceland, 1]) {
pca <- prcomp(-temp[, -1], center = TRUE, scale. = TRUE)
pca$x <- -pca$x
}
if (round.numbers) {
pca$x <- apply(pca$x, 2, round, digits = 2)
}
# create a data frame of the first two principal components
tmp <- data.frame(iso3c = temp$iso3c, pca$x[, 1:2])
distance <- dist(tmp[, -1], method = "euclidean")
if (round.numbers) {
distance <- round(dist(tmp[, -1], method = "euclidean"), digits = 2)
}
d2 <- as.data.frame(as.matrix(distance))
d2$col <- rownames(d2)
d2 <- d2 %>% gather(row, dist, -col)
all.distances <<- rbind(all.distances, data.frame(dimension = i, d2))
# Hierarchical clustering on the 2D PCA scores.
clusters <- hclust(distance, method = cluster.method)
clusters <- cutree(clusters, num.clusters[[i]])
# calculate centroids for the visualisation
centroids <- as.data.frame(apply(tmp[, -1], 2, function(x) tapply(x, clusters, mean)))
centroids$dist <- with(centroids, sqrt(PC1^2 + PC2^2))
centroids$omega <- atan(centroids$PC2/centroids$PC1)
centroids$cut <- LETTERS[1:nrow(centroids)]
# Relabel clusters A..H by ascending PC1 centroid so letters are comparable
# across dimensions.
centroids$rank <- rank(centroids$PC1)
centroids$labels <- LETTERS[centroids$rank]
tmp$cut <- LETTERS[clusters]
tmp <- left_join(tmp, select(centroids, cut, labels))
# create bi-plot
# NOTE(review): ggbiplot.sfr() and oecd.biplot() are project helpers from
# ./lib/funcs.R -- not base/ggbiplot.
p <- ggbiplot.sfr(pca, obs.scale = 1, var.scale = 1, ellipse = TRUE, circle = F, labels = tmp$iso3c,
groups = tmp$labels, var.axes = T, coloured.clusters = fragile.clusters[[i]])
p <- p + theme(legend.position = "none")
p <- p + ggtitle(i)
p <- p + geom_text(data = centroids, aes(x = PC1, y = PC2, label = labels))
p <- oecd.biplot(p, coloured.clusters = fragile.clusters[[i]], n = num.clusters[[i]])
xlabel <- paste(pca.axis.labels[[i]]$x)
ylabel <- paste(pca.axis.labels[[i]]$y)
p <- p + xlab(xlabel) + ylab(ylabel)
ggsave(p, filename = paste("./graphs/Fig A", counter, " cluster ", i, ".pdf", sep = ""), height = 8,
width = 10)
counter <<- counter + 1
tmp$value <- tmp$labels
# NOTE(review): tmp2 is assigned but never used (dead code).
tmp2 <- tmp
return(tmp)
}, USE.NAMES = T, simplify = F)
# Extract per-indicator PCA diagnostics (contribution, squared cosine and
# correlation with the first two components) from the FactoMineR results,
# one row per indicator per dimension, and export to CSV.
contrib <- lapply(all.dimension.pca.metrics, function(x) {
return(cbind(variablename = rownames(x$var$contrib), as.data.frame(apply(x$var$contrib[, 1:2], 2,
round, digits = 2))))
})
contrib <- bind_rows(contrib, .id = "dimension")
contrib <- contrib %>% rename(contrib.to.dim1 = Dim.1, contrib.to.dim2 = Dim.2)
cos2 <- lapply(all.dimension.pca.metrics, function(x) {
return(cbind(variablename = rownames(x$var$cos2), as.data.frame(apply(x$var$cos2[, 1:2], 2, round,
digits = 2))))
})
cos2 <- bind_rows(cos2, .id = "dimension")
cos2 <- cos2 %>% rename(cos2.to.dim1 = Dim.1, cos2.to.dim2 = Dim.2)
cor <- lapply(all.dimension.pca.metrics, function(x) {
return(cbind(variablename = rownames(x$var$cor), as.data.frame(apply(x$var$cor[, 1:2], 2, round,
digits = 2))))
})
cor <- bind_rows(cor, .id = "dimension")
cor <- cor %>% rename(cor.to.dim1 = Dim.1, cor.to.dim2 = Dim.2)
# NOTE(review): `cor` shadows base::cor after this point in the session.
cor <- left_join(contrib, cor)
cor <- left_join(cor, cos2)
# Reorder columns: ids, then dim-1 metrics, then dim-2 metrics.
cor <- cor[, c(1:3, 5, 7, 4, 6, 8)]
write.csv(cor, "./data_out/dimensional pca contributions.csv", row.names = F)
################### Step 3 #### Create a data frame listing
# which clusters are fragile
# NOTE(review): this reassignment SHADOWS the Step 1 parameter list of the
# same name -- after this line `fragile.clusters` holds per-dimension data
# frames of fragile countries, not the label lists.
fragile.clusters <- sapply((names(all.results)), function(i) {
all.results[[i]] %>% filter(labels %in% fragile.clusters[[i]])
}, USE.NAMES = T, simplify = F)
# Tally: per country, in how many dimensions it falls in a fragile cluster.
results <- bind_rows(fragile.clusters, .id = ".id")
results <- as.data.frame.matrix(table(results$iso3c, results$.id))
results$total <- rowSums(results)
results$iso3c <- rownames(results)
results <- results %>% arrange(desc(total))
results <- results %>% MoveFront("iso3c")
# Countries never fragile get all-zero rows.
# NOTE(review): the six hard-coded 0s assume exactly 5 dimensions + total --
# would break silently if a dimension were added.
temp <- data.frame(setdiff(unique(raw.data$iso3c), results$iso3c), 0, 0, 0, 0, 0, 0)
names(temp) <- names(results)
results <- bind_rows(results, temp)
results <- results %>% rename(value = total)
results$value <- as.character(results$value)
all.clusters <- bind_rows(all.results, .id = ".id")
all.clusters$join.on <- paste(all.clusters$.id, all.clusters$labels)
all.clusters <- all.clusters %>% select(-c(value, cut))
# join clusters with fragility levels
cluster.descriptions <- left_join(select(all.clusters, iso3c, join.on), fragile.levels)
# generate output dimensions fragility levels
fragility <- cluster.descriptions %>% select(iso3c, dimension, cluster, fragility) %>% distinct()
# NOTE(review): oecd.country.name(), MoveFront() and rmExcept() are helpers
# from sourced libs (rmExcept presumably DataCombine) -- confirm loaded.
fragility$country <- oecd.country.name(fragility$iso3c, short = T)
# create a raw data output file
temp <- raw.data %>% select(country, dimension, type, variablename, imputed)
temp$variablename <- paste("[", temp$dimension, "] [", temp$type, "] ", temp$variablename, sep = "")
temp <- temp %>% distinct() %>% arrange(variablename) %>% select(-c(dimension, type)) %>% spread(variablename,
imputed)
temp[, -1] <- apply(temp[, -1], 2, scale)
write.csv(all.clusters, "./data_out/principal components.csv", row.names = F)
# Clean the workspace, keeping only the objects the downstream script needs.
rmExcept(keepers = c("raw.data", "all.clusters", "fragility", "cluster.method", "results", "pca.axis.labels",
"cluster.descriptions", "fragile.levels", "all.results"))
################### Step 4 ######################################### run further scripts
source("./src/02-Two-Tier PCA Typology.R")
| /src/01-SFR Calculation.R | no_license | robertoschiano/oecd-sfr-2016 | R | false | false | 8,366 | r | #### States of Fragility Report 2016 - OECD
# Written by David Hammond Institute for Economics and Peace
# 28 May 2016 This script:
# (1) processes all original data files into a standard tabular format and
# (2) calculates the SFR 2016 rankings
####
# (Duplicate copy of the SFR setup block that appears earlier in this dump.)
source("./lib/load-libraries.R")
# NOTE(review): rm(list = ls()) wipes the global environment and the lapply
# below deletes every file in data_out/ and graphs/ -- destructive.
rm(list = ls())
output_folders <- c("./data_out/", "./graphs")
lapply(output_folders, function(x) file.remove(list.files(x, full.names = T)))
options(stringsAsFactors = FALSE)
options(useFancyQuotes = "UTF-8")
reload.project(override.config = list(munging = T))
cache("raw.data")
set.seed(12345)
source("./lib/funcs.R")
##### Step 1 #### Set parameters for calculations
# 1. The fragile clusters
# 2. the number of clusters in each dimension
# 3. whether to drop highly correlated indicators in each dimension
# 4. The cluster method, ward.d2 selected as the simplest to explain
fragile.clusters <- list(Environmental = c("A", "B", "C", "D"), Political = c("A", "B", "C", "D"), Economic = c("A",
"B", "C", "D", "E", "F"), Security = c("A", "B", "C", "D", "E", "F"), Societal = c("A", "B", "C",
"E", "D"))
fragile.levels <- read_excel("./data/additional data/dimensional fragility.xlsx")
# Composite key used to join cluster assignments to fragility descriptions.
fragile.levels$join.on <- paste(fragile.levels$dimension, fragile.levels$cluster)
num.clusters <- list(Environmental = 8, Political = 8, Economic = 8, Security = 8, Societal = 8)
drop.indicators.based.on.correlations <- T
cluster.method <- "ward.D2"
pca.axis.labels <- read_excel("./data/additional data/pca axis labels.xlsx")
pca.axis.labels <- split(pca.axis.labels, factor(pca.axis.labels$dimension))
all.drops <- NULL
round.numbers <- T
################### Step 2 #### Calculate PCA for each dimension and plot
# (Duplicate copy of the Step 2 loop that appears earlier in this dump.)
# Per dimension: build country x indicator panel, drop correlated indicators,
# PCA, cluster countries on the first two PCs, save annotated biplot.
# Results accumulate across iterations via <<-.
all.distances <- NULL
all.dimension.pca.metrics <- list()
counter <- 3
all.results <- sapply(sort(unique(raw.data$dimension)), function(i) {
# take dimension subset of raw.data
temp <- raw.data %>% filter(dimension == i)
temp <- temp %>% select(iso3c, type, variablename, imputed)
# rename for the bi-plots
# (C) = Coping indicator, (R) = Risk indicator.
pos <- temp$type == "Coping"
temp$variablename[pos] <- paste(temp$variablename[pos], " (C)", sep = "")
temp$variablename[!pos] <- paste(temp$variablename[!pos], " (R)", sep = "")
# create panel data
temp <- temp %>% select(-type) %>% distinct() %>% spread(variablename, imputed)
# drop highly correllated indicators
drops <- findCorrelation(cor(temp[, -1]))
if (length(drops) > 0 & drop.indicators.based.on.correlations) {
all.drops <<- rbind(all.drops, data.frame(dimension = i, indicators = names(temp)[drops + 1]))
data.frame(dimension = i, indicators = names(temp)[drops + 1])
temp <- temp[, -(drops + 1)]
}
# calculate a pca
pca <- prcomp(temp[, -1], center = TRUE, scale. = TRUE)
all.dimension.pca.metrics <<- c(all.dimension.pca.metrics, list(PCA(temp[, -1], graph = F)))
names(all.dimension.pca.metrics)[length(all.dimension.pca.metrics)] <<- i
# switch diriction for ease of reading
# Orient PC1 so Somalia (high fragility) scores above Iceland (low).
somalia <- which(temp$iso3c == "SOM")
iceland <- which(temp$iso3c == "ISL")
if (pca$x[somalia, 1] > pca$x[iceland, 1]) {
pca <- prcomp(-temp[, -1], center = TRUE, scale. = TRUE)
pca$x <- -pca$x
}
if (round.numbers) {
pca$x <- apply(pca$x, 2, round, digits = 2)
}
# create a data frame of the first two principal components
tmp <- data.frame(iso3c = temp$iso3c, pca$x[, 1:2])
distance <- dist(tmp[, -1], method = "euclidean")
if (round.numbers) {
distance <- round(dist(tmp[, -1], method = "euclidean"), digits = 2)
}
d2 <- as.data.frame(as.matrix(distance))
d2$col <- rownames(d2)
d2 <- d2 %>% gather(row, dist, -col)
all.distances <<- rbind(all.distances, data.frame(dimension = i, d2))
clusters <- hclust(distance, method = cluster.method)
clusters <- cutree(clusters, num.clusters[[i]])
# calculate centroids for the visualisation
centroids <- as.data.frame(apply(tmp[, -1], 2, function(x) tapply(x, clusters, mean)))
centroids$dist <- with(centroids, sqrt(PC1^2 + PC2^2))
centroids$omega <- atan(centroids$PC2/centroids$PC1)
centroids$cut <- LETTERS[1:nrow(centroids)]
# Relabel clusters A..H by ascending PC1 centroid for cross-dimension
# comparability.
centroids$rank <- rank(centroids$PC1)
centroids$labels <- LETTERS[centroids$rank]
tmp$cut <- LETTERS[clusters]
tmp <- left_join(tmp, select(centroids, cut, labels))
# create bi-plot
p <- ggbiplot.sfr(pca, obs.scale = 1, var.scale = 1, ellipse = TRUE, circle = F, labels = tmp$iso3c,
groups = tmp$labels, var.axes = T, coloured.clusters = fragile.clusters[[i]])
p <- p + theme(legend.position = "none")
p <- p + ggtitle(i)
p <- p + geom_text(data = centroids, aes(x = PC1, y = PC2, label = labels))
p <- oecd.biplot(p, coloured.clusters = fragile.clusters[[i]], n = num.clusters[[i]])
xlabel <- paste(pca.axis.labels[[i]]$x)
ylabel <- paste(pca.axis.labels[[i]]$y)
p <- p + xlab(xlabel) + ylab(ylabel)
ggsave(p, filename = paste("./graphs/Fig A", counter, " cluster ", i, ".pdf", sep = ""), height = 8,
width = 10)
counter <<- counter + 1
tmp$value <- tmp$labels
# NOTE(review): tmp2 is assigned but never used (dead code).
tmp2 <- tmp
return(tmp)
}, USE.NAMES = T, simplify = F)
# Extract per-indicator PCA diagnostics (contribution, cos2, correlation with
# the first two components) and export to CSV.
# (Duplicate copy of the block that appears earlier in this dump.)
contrib <- lapply(all.dimension.pca.metrics, function(x) {
return(cbind(variablename = rownames(x$var$contrib), as.data.frame(apply(x$var$contrib[, 1:2], 2,
round, digits = 2))))
})
contrib <- bind_rows(contrib, .id = "dimension")
contrib <- contrib %>% rename(contrib.to.dim1 = Dim.1, contrib.to.dim2 = Dim.2)
cos2 <- lapply(all.dimension.pca.metrics, function(x) {
return(cbind(variablename = rownames(x$var$cos2), as.data.frame(apply(x$var$cos2[, 1:2], 2, round,
digits = 2))))
})
cos2 <- bind_rows(cos2, .id = "dimension")
cos2 <- cos2 %>% rename(cos2.to.dim1 = Dim.1, cos2.to.dim2 = Dim.2)
cor <- lapply(all.dimension.pca.metrics, function(x) {
return(cbind(variablename = rownames(x$var$cor), as.data.frame(apply(x$var$cor[, 1:2], 2, round,
digits = 2))))
})
cor <- bind_rows(cor, .id = "dimension")
cor <- cor %>% rename(cor.to.dim1 = Dim.1, cor.to.dim2 = Dim.2)
# NOTE(review): `cor` shadows base::cor after this point in the session.
cor <- left_join(contrib, cor)
cor <- left_join(cor, cos2)
cor <- cor[, c(1:3, 5, 7, 4, 6, 8)]
write.csv(cor, "./data_out/dimensional pca contributions.csv", row.names = F)
################### Step 3 #### Create a data frame listing
# which clusters are fragile
# (Duplicate copy of the Steps 3-4 block that appears earlier in this dump.)
# NOTE(review): this reassignment shadows the Step 1 parameter list of the
# same name.
fragile.clusters <- sapply((names(all.results)), function(i) {
all.results[[i]] %>% filter(labels %in% fragile.clusters[[i]])
}, USE.NAMES = T, simplify = F)
# Tally: per country, in how many dimensions it falls in a fragile cluster.
results <- bind_rows(fragile.clusters, .id = ".id")
results <- as.data.frame.matrix(table(results$iso3c, results$.id))
results$total <- rowSums(results)
results$iso3c <- rownames(results)
results <- results %>% arrange(desc(total))
results <- results %>% MoveFront("iso3c")
# NOTE(review): the six hard-coded 0s assume exactly 5 dimensions + total.
temp <- data.frame(setdiff(unique(raw.data$iso3c), results$iso3c), 0, 0, 0, 0, 0, 0)
names(temp) <- names(results)
results <- bind_rows(results, temp)
results <- results %>% rename(value = total)
results$value <- as.character(results$value)
all.clusters <- bind_rows(all.results, .id = ".id")
all.clusters$join.on <- paste(all.clusters$.id, all.clusters$labels)
all.clusters <- all.clusters %>% select(-c(value, cut))
# join clusters with fragility levels
cluster.descriptions <- left_join(select(all.clusters, iso3c, join.on), fragile.levels)
# generate output dimensions fragility levels
fragility <- cluster.descriptions %>% select(iso3c, dimension, cluster, fragility) %>% distinct()
fragility$country <- oecd.country.name(fragility$iso3c, short = T)
# create a raw data output file
temp <- raw.data %>% select(country, dimension, type, variablename, imputed)
temp$variablename <- paste("[", temp$dimension, "] [", temp$type, "] ", temp$variablename, sep = "")
temp <- temp %>% distinct() %>% arrange(variablename) %>% select(-c(dimension, type)) %>% spread(variablename,
imputed)
temp[, -1] <- apply(temp[, -1], 2, scale)
write.csv(all.clusters, "./data_out/principal components.csv", row.names = F)
# Clean the workspace, keeping only what the downstream script needs.
rmExcept(keepers = c("raw.data", "all.clusters", "fragility", "cluster.method", "results", "pca.axis.labels",
"cluster.descriptions", "fragile.levels", "all.results"))
################### Step 4 ######################################### run further scripts
source("./src/02-Two-Tier PCA Typology.R")
|
# Exploratory analysis: effect of transmission type (am: 0 = automatic,
# 1 = manual) on fuel economy (mpg) in the built-in mtcars data set.
library(dplyr)
library(ggplot2)

# Inspect structure and the response variable.
str(mtcars)
summary(mtcars$mpg)

# Boxplot of mpg by transmission type. Use bare column names inside aes();
# writing mtcars$am inside aes() bypasses the data argument and breaks
# scoping (e.g. under faceting).
ggplot(mtcars, aes(x = factor(am), y = mpg, fill = factor(am))) +
  geom_boxplot() +
  scale_fill_discrete(name = "Transmission", labels = c("Automatic", "Manual")) +
  xlab("Transmission") + ylab("MPG")

# Group means and standard deviations by transmission.
group_by(mtcars, am) %>% summarise(mean(mpg), sd(mpg)) %>% as.data.frame()

# Two-sample (unpaired) t-test: automatic vs manual mpg.
mtcarsAutomatic <- mtcars[mtcars$am == 0, ]
mtcarsManual <- mtcars[mtcars$am == 1, ]
t.test(mtcarsAutomatic$mpg, mtcarsManual$mpg, paired = FALSE)

# Scatterplot matrix of mpg against all other variables.
pairs(mtcars$mpg ~ ., data = mtcars)

# Model 1: transmission alone.
fit1 <- lm(mpg ~ factor(am), mtcars)
CoefFit1 <- summary(fit1)$coef

# Model 2: add cylinder count (intercept suppressed).
fit2 <- lm(mpg ~ factor(am) + cyl - 1, mtcars)
anova(fit1, fit2)
## Significant difference comparing transmissions
# BUG FIX: CoefFit2 was printed before it was assigned (object-not-found
# error on a clean session); assign first, then print.
CoefFit2 <- summary(fit2)$coef
CoefFit2

# Collinearity check: cyl is strongly correlated with the other candidates.
cor(mtcars$cyl, mtcars$disp)
cor(mtcars$cyl, mtcars$hp)
cor(mtcars$cyl, mtcars$wt)
## cor(mtcars$qsec, mtcars$drat)
## High

# Try each remaining candidate as a third regressor; compare nested models.
fit3 <- lm(mpg ~ factor(am) + cyl + drat - 1, mtcars)
anova(fit1, fit2, fit3)
## No significant result
fit4 <- lm(mpg ~ factor(am) + cyl + qsec - 1, mtcars)
anova(fit1, fit2, fit4)
## No significant result
fit5 <- lm(mpg ~ factor(am) + cyl + factor(vs) - 1, mtcars)
anova(fit1, fit2, fit5)
## No significant result
fit6 <- lm(mpg ~ factor(am) + cyl + factor(gear) - 1, mtcars)
anova(fit1, fit2, fit6)
## No significant result
fit7 <- lm(mpg ~ factor(am) + cyl + factor(carb) - 1, mtcars)
anova(fit1, fit2, fit7)
## No significant result

# Append integer-truncated fitted values and show diagnostic plots for fit2.
FittedMPGs <- as.integer(predict(fit2))
mtcars <- mutate(mtcars, FittedMPGs)
par(mfrow = c(2, 2))
plot(fit2)
pairs(mtcars$mpg ~ .,data=mtcars) | /07. Regression Models/Analysis.R | no_license | nima14/Coursera_DataScience_Specialization | R | false | false | 1,528 | r | library(dplyr)
library(ggplot2)
str(mtcars)
summary(mtcars$mpg)
ggplot(mtcars,aes(x=factor(mtcars$am),y=mtcars$mpg,fill=factor(mtcars$am))) +
geom_boxplot()+
scale_fill_discrete(name = "Transmission", labels = c("Automatic", "Manual"))+
xlab("Transmission") + ylab("MPG")
group_by(mtcars,am) %>% summarise(mean(mpg),sd(mpg)) %>% as.data.frame()
mtcarsAutomatic <- mtcars[mtcars$am==0,]
mtcarsManual <- mtcars[mtcars$am==1,]
t.test(mtcarsAutomatic$mpg,mtcarsManual$mpg,paired = FALSE)
pairs(mtcars$mpg ~ .,data=mtcars)
fit1 <- lm(mpg~factor(am),mtcars)
CoefFit1 <- summary(fit1)$coef
fit2 <- lm(mpg~factor(am)+cyl-1,mtcars)
anova(fit1,fit2)
##Significant difference comparing transmissions
CoefFit2
CoefFit2 <- summary(fit2)$coef
cor(mtcars$cyl,mtcars$disp)
cor(mtcars$cyl,mtcars$hp)
cor(mtcars$cyl,mtcars$wt)
##cor(mtcars$qsec,mtcars$drat)
##High
fit3 <- lm(mpg~factor(am)+cyl+drat-1,mtcars)
anova(fit1,fit2,fit3)
##No significant result
fit4 <- lm(mpg~factor(am)+cyl+qsec-1,mtcars)
anova(fit1,fit2,fit4)
##No significant result
fit5 <- lm(mpg~factor(am)+cyl+factor(vs)-1,mtcars)
anova(fit1,fit2,fit5)
##No significant result
fit6 <- lm(mpg~factor(am)+cyl+factor(gear)-1,mtcars)
anova(fit1,fit2,fit6)
##No significant result
fit7 <- lm(mpg~factor(am)+cyl+factor(carb)-1,mtcars)
anova(fit1,fit2,fit7)
##No significant result
FittedMPGs <- as.integer(predict(fit2))
mtcars <- mutate(mtcars,FittedMPGs)
par(mfrow=c(2,2))
plot(fit2)
pairs(mtcars$mpg ~ .,data=mtcars) |
# Read a LAWST game, both inputs and outputs into a set of data frames
## Completed
# * Read config into a list structure
# * Most output files
# * Read unit script with all fields rolling
## TODOs
# * Write a function to read everything into one structure.
# * Where one table's factor refers to another, ensure same levels
# * Finish reading in other object types - see list at bottom of file
# * Add game name, case name and timestamp to all data frames
# * Finish reading scripts: lognode, pipeline, arc
# * Read output data arc and pipe history
# * Investigate using unit icons in charts
readLawstConfig <- function(configFile = 'game_config.csv', caseName = 'case', timestamp = date()){
  # Read a LAWST game configuration file (3-column CSV: field, value1, value2)
  # and return a named list of settings plus a `files` sub-list of input paths.
  #
  # Args:
  #   configFile: path to the game configuration CSV.
  #   caseName:   label for this run, stored verbatim in the result.
  #   timestamp:  run timestamp, stored verbatim in the result.
  # Returns: a list with scalar settings, OUTPUT_DIR, and files = list(...).
  cfg <- read.table(configFile, header = FALSE, sep = ",", fill = TRUE,
                    colClasses = rep("character", 3),
                    col.names = c("field", "value1", "value2"))
  # Normalise field keys: upper-case, spaces -> underscores.
  cfg$field <- gsub(" ", "_", toupper(cfg$field), fixed = TRUE)
  # Local accessors replace the 30+ repeated subset/gsub expressions of the
  # original; behavior per key is unchanged.
  val1 <- function(key) cfg$value1[cfg$field == key]
  val2 <- function(key) cfg$value2[cfg$field == key]
  num1 <- function(key) as.numeric(val1(key))
  # File-path entries: convert Windows '\' separators to '/'.
  path1 <- function(key) gsub('\\', '/', val1(key), fixed = TRUE)
  # Output directory lives next to the config file, named <GAME_NAME>_OUTPUT.
  outDir <- paste(dirname(configFile), '/', val1("GAME_NAME"), "_OUTPUT/", sep = '')
  # This assumes structure of the game file is stable.
  list(GAME_NAME = val1("GAME_NAME"),
       CASE = caseName,
       TIMESTAMP = timestamp,
       OUTPUT_DIR = outDir,
       GAME_DURATION = num1("GAME_DURATION"),
       GAME_START_DATE = list(month = val1("GAME_START_DATE"),
                              day = as.numeric(val2("GAME_START_DATE"))),
       VOLUME_UOM = val1("VOLUME_UOM"),
       MASS_UOM = val1("MASS_UOM"),
       DISTANCE_UOM = val1("DISTANCE_UOM"),
       SUPPLY_ADJUDICATION_METHODOLOGY = val1("SUPPLY_ADJUDICATION_METHODOLOGY"),
       HIGHWAY_REACH = num1("HIGHWAY_REACH"),
       ZERO_RISK_UTILIZATION = num1("ZERO_RISK_UTILIZATION"),
       GROUND_ESCORT_FUEL_RATE = num1("GROUND_ESCORT_FUEL_RATE"),
       SEA_ESCORT_FUEL_RATE = num1("SEA_ESCORT_FUEL_RATE"),
       AIR_ESCORT_FUEL_RATE = num1("AIR_ESCORT_FUEL_RATE"),
       files = list(
         SUPPLY_TYPES = path1("SUPPLY_TYPES"),
         POSTURES = path1("POSTURES"),
         CONSUMPTION_CLASSES = path1("CONSUMPTION_CLASSES"),
         PRIORITIZATION_CLASSES = path1("PRIORITIZATION_CLASSES"),
         MAPS = path1("MAPS"),
         NODES = path1("NODES"),
         ARCS = path1("ARCS"),
         ARC_SCRIPTS = path1("ARC_SCRIPTS"),
         TRANSPORTATION_ASSETS = path1("TRANSPORTATION_ASSETS"),
         TRANSPORTATION_MODE_EXCLUSIONS = path1("TRANSPORTATION_MODE_EXCLUSIONS"),
         UNITS = path1("UNITS"),
         UNIT_SCRIPTS = path1("UNIT_SCRIPTS"),
         LOG_NODE_SCRIPTS = path1("LOG_NODE_SCRIPTS"),
         PIPELINES = path1("PIPELINES"),
         PIPELINE_SCRIPTS = path1("PIPELINE_SCRIPTS"),
         WEATHER = path1("WEATHER"),
         SCRIPTED_SORTIES = path1("SCRIPTED_SORTIES")
       ))
}
readUnit <- function(config){
  # Unit master table: one row per unit. Columns are read as name (factor),
  # description/type/icon (character) and a logical log-node flag, then given
  # canonical names for downstream use.
  setNames(read.csv(config$files$UNITS,
                    colClasses = c("factor", rep("character", 3), "logical")),
           c('UnitName', 'Description', 'UnitType', 'UnitIcon', 'IsLogNode'))
}
readSupplyTypes <- function(config){
  # Supply-type master table. Density is only meaningful for liquids, so
  # non-liquid rows are forced to a unit density of 1.
  supply <- read.csv(config$files$SUPPLY_TYPES,
                     colClasses = c('factor', 'character', 'factor', 'logical', 'numeric'),
                     col.names = c('SupplyType', 'Description', 'SupplyClass',
                                   'IsLiquid', 'Density'))
  supply$Density[supply$IsLiquid == FALSE] <- 1
  supply
}
# Read the sparse unit-script CSV and expand it into dense per-unit-per-day
# tables. The raw script only records a field on the day it changes; the
# data.table rolling joins (roll = TRUE) carry each value forward to later
# days until the next change. Returns list(Script = <unit x day fields>,
# SupplyScript = <unit x supply type x day fields>).
readUnitScript <- function(config){
require(data.table)
#assume reading units and supply types is cheap
u <- readUnit(config)
st <- readSupplyTypes(config)
# Days run 0 .. GAME_DURATION - 1.
nDay <- config$GAME_DURATION - 1
# This function will return a list of two data frames. One has supply-dependent
# fields. This is the other.
us <- expand.grid(UnitName = u$UnitName, Day = seq(from = 0, to = nDay, by = 1))
us.supply <- expand.grid(UnitName = u$UnitName, SupplyType = st$SupplyType,
Day = seq(from = 0, to = nDay, by = 1))
#read the unit script file
raw <- read.csv(config$files$UNIT_SCRIPTS, fill = TRUE,
colClasses = c('factor', 'numeric', rep('character', 3)),
col.names = c('UnitName', 'Day', 'Field', 'V1', 'V2'),
header = FALSE, skip = 1) #use this header & skip combination to avoid a warning; the header has 1 too few columns.
# Normalise field names: all uppercase, spaces replaced with underscores.
raw$Field <- toupper(raw$Field)
raw$Field <- gsub(" ", "_", raw$Field, fixed = TRUE)
#convert raw and expand.grids into data.tables to take advantage of rolling merges
us.dt <- data.table(us)
setkey(us.dt, UnitName, Day)
us.supply.dt <- data.table(us.supply)
setkey(us.supply.dt, UnitName, SupplyType, Day)
raw.dt <- data.table(raw)
setkey(raw.dt, UnitName, Day)
# For each scripted field: subset the raw events, roll-join onto the dense
# grid, coerce V1/V2 into a typed column, then drop the scratch columns and
# restore the key (the join output is unkeyed).
# LOCATION -> Latitude/Longitude
us.dt <- raw.dt[Field=="LOCATION"][us.dt, roll = TRUE]
us.dt[, Latitude := as.numeric(V1)]
us.dt[, Longitude := as.numeric(V2)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#Strength
us.dt <- raw.dt[Field=="STRENGTH"][us.dt, roll = TRUE]
us.dt[, Strength := as.numeric(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#'POSTURE'
us.dt <- raw.dt[Field=="POSTURE"][us.dt, roll = TRUE]
us.dt[, Posture := as.factor(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#Consumption Class
us.dt <- raw.dt[Field=="CONSUMPTION_CLASS"][us.dt, roll = TRUE]
us.dt[, ConsumptionClass := as.factor(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#Prioritization Class
us.dt <- raw.dt[Field=="PRIORITIZATION_CLASS"][us.dt, roll = TRUE]
us.dt[, PrioritizationClass := as.factor(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
# REQUIRED DAYS OF SUPPLY
us.dt <- raw.dt[Field=="REQUIRED_DAYS_OF_SUPPLY"][us.dt, roll = TRUE]
us.dt[, ReqDaysSupply := as.numeric(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#'DOMAIN'
us.dt <- raw.dt[Field=="DOMAIN"][us.dt, roll = TRUE]
us.dt[, Domain := as.factor(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
# Supply-type dependent fields: for these rows V1 actually holds the supply
# type, so rename the column and re-key on (UnitName, SupplyType, Day).
#SUPPLYING LOG NODE
setnames(raw.dt, c('UnitName', 'Day', 'Field', 'SupplyType', 'V2'))
setkey(raw.dt, UnitName, SupplyType, Day)
us.supply.dt <- raw.dt[Field=="SUPPLYING_LOG_NODE"][us.supply.dt, roll = TRUE]
us.supply.dt[, SupplyingLogNode := as.factor(V2)]
us.supply.dt[, SupplyType := as.factor(SupplyType)]
us.supply.dt[, V2 := NULL]
us.supply.dt[, Field := NULL]
setkey(us.supply.dt, UnitName, SupplyType, Day)
# Merge in the supply increments WITHOUT rolling (one-off events), as a left
# outer join; days without an increment keep NA.
us.supply.dt[raw.dt[Field == 'SUPPLY_INCREMENT'],
SupplyIncrement := as.numeric(i.V2), nomatch = NA ]
#Supply increment is now numeric; also would rather have supplyingLN be FACTOR.
return(list(Script = as.data.frame(us.dt),
SupplyScript = as.data.frame(us.supply.dt)
))
}
#TODO
# * read the max loads as a data frame
# * read exclusions as a data frame
readTransports <- function(config){
  # Read the transportation-asset table. The raw file's header row has a
  # non-standard shape, so it is skipped and canonical names are supplied.
  raw <- read.csv(config$files$TRANSPORTATION_ASSETS,
                  skip = 1, header = FALSE, fill = TRUE)
  # Keep only the first nine fixed columns.
  assets <- setNames(raw[, 1:9],
                     c('Name', 'Description', 'ConfigurationName', 'Category',
                       'Availability', 'Fuel Type', 'Fuel Efficiency',
                       'Average Speed', 'Max Range'))
  # Max-load capacities and mode exclusions are not parsed yet (TODO);
  # placeholders keep the return shape stable.
  list(transports = assets, capacities = data.frame(), exclusions = data.frame())
}
# TODO: read intermediate points.
# * read fields as character then convert to appropriate type.
readArcs <- function(config){
  # Read the arc (network edge) table, skipping the malformed header row.
  # TODO: intermediate shape points beyond column 7 are not parsed yet.
  arcs <- read.csv(config$files$ARCS, skip = 1, header = FALSE)[, 1:7]
  setNames(arcs, c('Name', 'Description', 'Node1', 'Node2',
                   'Mode', 'True Length', 'Max Speed'))
}
readNodes <- function(config){
  # Node table: name (factor), description, two numeric coordinate columns,
  # and a node-type factor.
  col_types <- c('factor', 'character', 'numeric', 'numeric', 'factor')
  read.csv(config$files$NODES, colClasses = col_types)
}
# consumption classes = done
# Input objects still to be parsed. Each stub below currently returns NULL.
readMaps <- function(config){} # not sure this is really needed
readPipelines <- function(config){}
readPostures <- function(config){}
readPriorityClass <- function(config){} # this structure is the same as consumption class
readScriptedSorties <- function(config){}
readWeather <- function(config){} # low priority
# Script files still to be parsed (stubs, return NULL):
readArcScript <- function(config){}
readLogNodeScript <- function(config){}
readPipelineScript <- function(config){}
# Read output data. All files live in config$OUTPUT_DIR; colClasses are
# pinned to each output file's known column layout.
# Per-day supply history at each log node (13 numeric measure columns).
readLogNodeSupplyHistory <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'LogNodeSupplyHistory.csv', sep = ''),
colClasses = c('integer', rep('factor', 2), rep('numeric', 13)))
}
# Per-day transport activity at each log node (6 numeric measure columns).
readLogNodeTransportHistory <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'LogNodeTransportHistory.csv', sep = ''),
colClasses = c('integer', rep('factor', 2), rep('numeric', 6)))
}
# Per-day unit supply history (note: the file is named UnitHistory.csv).
readUnitSupplyHistory <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'UnitHistory.csv', sep = ''),
colClasses = c('integer', rep('factor',2), 'character', rep('numeric', 16)))
}
# Supply requests raised during the game.
readSupplyRequests <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'SupplyRequests.csv', sep = ''),
colClasses = c(rep('integer', 2), rep('factor', 2), rep('numeric', 3),
'integer', rep('numeric', 2))
)
}
# Individual transport deliveries (last column is a logical flag).
readTransportDeliveries <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'TransportDeliveries.csv', sep = ''),
colClasses = c(rep('integer',3), rep('factor', 3), rep('numeric', 2),
'factor', rep('numeric', 6), 'logical'))
}
# Scripted supply-increment deliveries.
readIncrementDeliveries <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'ScriptedIncrementDeliveries.csv', sep = ''),
colClasses = c(rep('integer', 2), 'numeric')
)
}
# Pipeline deliveries.
readPipelineDeliveries <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'PipelineDeliveries.csv', sep = ''),
colClasses = c(rep('integer', 2), rep('factor', 2), 'numeric',
'factor', rep('numeric', 2))
)
}
# Delivery arcs used in the game, enriched with the origin ('.o'-suffixed)
# and destination ('.d'-suffixed) node attributes from the input tables.
readDeliveryArcs <- function(config){
da <- read.csv(paste(config$OUTPUT_DIR, 'DeliveryArcs.csv', sep = ''),
colClasses = c('integer', 'factor')
)
# Merge arc endpoints against the node table to get start/end locations.
n <- readNodes(config)
a <- readArcs(config)
a <- merge(a, n, by.x = 'Node1', by.y = 'Name', suffixes = c('', '.o'))
a <- merge(a, n, by.x = 'Node2', by.y = 'Name', suffixes = c('', '.d'))
# Drop the verbose description columns before the final join.
a$Description <- NULL
a$Description.o <- NULL
a$Description.d <- NULL
# Left outer join: keep every delivery arc even if it has no match.
merge(da, a, by.x = 'ArcID', by.y = 'Name', all.x = TRUE, all.y = FALSE)
}
#Arc history
#pipelinHistory
| /src/readLawstGame.R | no_license | vpipkt/lawstPost | R | false | false | 13,261 | r | # Read a LAWST game, both inputs and outputs into a set of data frames
## Completed
# * Read config into a list structure
# * Most output files
# * Read unit script with all fields rolling
## TODOs
# * Write a function to read everything into one structure.
# * Where one table's factor refers to another, ensure same levels
# * Finish reading in other object types - see list at bottom of file
# * Add game name, case name and timestamp to all data frames
# * Finish reading scripts: lognode, pipeline, arc
# * Read output data arc and pipe history
# * Investigate using unit icons in charts
readLawstConfig <- function(configFile = 'game_config.csv', caseName = 'case', timestamp = date()){
  # Read a LAWST game configuration file (3-column CSV: field, value1, value2)
  # and return a named list of settings plus a `files` sub-list of input paths.
  #
  # Args:
  #   configFile: path to the game configuration CSV.
  #   caseName:   label for this run, stored verbatim in the result.
  #   timestamp:  run timestamp, stored verbatim in the result.
  # Returns: a list with scalar settings, OUTPUT_DIR, and files = list(...).
  cfg <- read.table(configFile, header = FALSE, sep = ",", fill = TRUE,
                    colClasses = rep("character", 3),
                    col.names = c("field", "value1", "value2"))
  # Normalise field keys: upper-case, spaces -> underscores.
  cfg$field <- gsub(" ", "_", toupper(cfg$field), fixed = TRUE)
  # Local accessors replace the 30+ repeated subset/gsub expressions of the
  # original; behavior per key is unchanged.
  val1 <- function(key) cfg$value1[cfg$field == key]
  val2 <- function(key) cfg$value2[cfg$field == key]
  num1 <- function(key) as.numeric(val1(key))
  # File-path entries: convert Windows '\' separators to '/'.
  path1 <- function(key) gsub('\\', '/', val1(key), fixed = TRUE)
  # Output directory lives next to the config file, named <GAME_NAME>_OUTPUT.
  outDir <- paste(dirname(configFile), '/', val1("GAME_NAME"), "_OUTPUT/", sep = '')
  # This assumes structure of the game file is stable.
  list(GAME_NAME = val1("GAME_NAME"),
       CASE = caseName,
       TIMESTAMP = timestamp,
       OUTPUT_DIR = outDir,
       GAME_DURATION = num1("GAME_DURATION"),
       GAME_START_DATE = list(month = val1("GAME_START_DATE"),
                              day = as.numeric(val2("GAME_START_DATE"))),
       VOLUME_UOM = val1("VOLUME_UOM"),
       MASS_UOM = val1("MASS_UOM"),
       DISTANCE_UOM = val1("DISTANCE_UOM"),
       SUPPLY_ADJUDICATION_METHODOLOGY = val1("SUPPLY_ADJUDICATION_METHODOLOGY"),
       HIGHWAY_REACH = num1("HIGHWAY_REACH"),
       ZERO_RISK_UTILIZATION = num1("ZERO_RISK_UTILIZATION"),
       GROUND_ESCORT_FUEL_RATE = num1("GROUND_ESCORT_FUEL_RATE"),
       SEA_ESCORT_FUEL_RATE = num1("SEA_ESCORT_FUEL_RATE"),
       AIR_ESCORT_FUEL_RATE = num1("AIR_ESCORT_FUEL_RATE"),
       files = list(
         SUPPLY_TYPES = path1("SUPPLY_TYPES"),
         POSTURES = path1("POSTURES"),
         CONSUMPTION_CLASSES = path1("CONSUMPTION_CLASSES"),
         PRIORITIZATION_CLASSES = path1("PRIORITIZATION_CLASSES"),
         MAPS = path1("MAPS"),
         NODES = path1("NODES"),
         ARCS = path1("ARCS"),
         ARC_SCRIPTS = path1("ARC_SCRIPTS"),
         TRANSPORTATION_ASSETS = path1("TRANSPORTATION_ASSETS"),
         TRANSPORTATION_MODE_EXCLUSIONS = path1("TRANSPORTATION_MODE_EXCLUSIONS"),
         UNITS = path1("UNITS"),
         UNIT_SCRIPTS = path1("UNIT_SCRIPTS"),
         LOG_NODE_SCRIPTS = path1("LOG_NODE_SCRIPTS"),
         PIPELINES = path1("PIPELINES"),
         PIPELINE_SCRIPTS = path1("PIPELINE_SCRIPTS"),
         WEATHER = path1("WEATHER"),
         SCRIPTED_SORTIES = path1("SCRIPTED_SORTIES")
       ))
}
readUnit <- function(config){
  # Unit master table: one row per unit. Columns are read as name (factor),
  # description/type/icon (character) and a logical log-node flag, then given
  # canonical names for downstream use.
  setNames(read.csv(config$files$UNITS,
                    colClasses = c("factor", rep("character", 3), "logical")),
           c('UnitName', 'Description', 'UnitType', 'UnitIcon', 'IsLogNode'))
}
readSupplyTypes <- function(config){
  # Supply-type master table. Density is only meaningful for liquids, so
  # non-liquid rows are forced to a unit density of 1.
  supply <- read.csv(config$files$SUPPLY_TYPES,
                     colClasses = c('factor', 'character', 'factor', 'logical', 'numeric'),
                     col.names = c('SupplyType', 'Description', 'SupplyClass',
                                   'IsLiquid', 'Density'))
  supply$Density[supply$IsLiquid == FALSE] <- 1
  supply
}
# Read the sparse unit-script CSV and expand it into dense per-unit-per-day
# tables. The raw script only records a field on the day it changes; the
# data.table rolling joins (roll = TRUE) carry each value forward to later
# days until the next change. Returns list(Script = <unit x day fields>,
# SupplyScript = <unit x supply type x day fields>).
readUnitScript <- function(config){
require(data.table)
#assume reading units and supply types is cheap
u <- readUnit(config)
st <- readSupplyTypes(config)
# Days run 0 .. GAME_DURATION - 1.
nDay <- config$GAME_DURATION - 1
# This function will return a list of two data frames. One has supply-dependent
# fields. This is the other.
us <- expand.grid(UnitName = u$UnitName, Day = seq(from = 0, to = nDay, by = 1))
us.supply <- expand.grid(UnitName = u$UnitName, SupplyType = st$SupplyType,
Day = seq(from = 0, to = nDay, by = 1))
#read the unit script file
raw <- read.csv(config$files$UNIT_SCRIPTS, fill = TRUE,
colClasses = c('factor', 'numeric', rep('character', 3)),
col.names = c('UnitName', 'Day', 'Field', 'V1', 'V2'),
header = FALSE, skip = 1) #use this header & skip combination to avoid a warning; the header has 1 too few columns.
# Normalise field names: all uppercase, spaces replaced with underscores.
raw$Field <- toupper(raw$Field)
raw$Field <- gsub(" ", "_", raw$Field, fixed = TRUE)
#convert raw and expand.grids into data.tables to take advantage of rolling merges
us.dt <- data.table(us)
setkey(us.dt, UnitName, Day)
us.supply.dt <- data.table(us.supply)
setkey(us.supply.dt, UnitName, SupplyType, Day)
raw.dt <- data.table(raw)
setkey(raw.dt, UnitName, Day)
# For each scripted field: subset the raw events, roll-join onto the dense
# grid, coerce V1/V2 into a typed column, then drop the scratch columns and
# restore the key (the join output is unkeyed).
# LOCATION -> Latitude/Longitude
us.dt <- raw.dt[Field=="LOCATION"][us.dt, roll = TRUE]
us.dt[, Latitude := as.numeric(V1)]
us.dt[, Longitude := as.numeric(V2)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#Strength
us.dt <- raw.dt[Field=="STRENGTH"][us.dt, roll = TRUE]
us.dt[, Strength := as.numeric(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#'POSTURE'
us.dt <- raw.dt[Field=="POSTURE"][us.dt, roll = TRUE]
us.dt[, Posture := as.factor(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#Consumption Class
us.dt <- raw.dt[Field=="CONSUMPTION_CLASS"][us.dt, roll = TRUE]
us.dt[, ConsumptionClass := as.factor(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#Prioritization Class
us.dt <- raw.dt[Field=="PRIORITIZATION_CLASS"][us.dt, roll = TRUE]
us.dt[, PrioritizationClass := as.factor(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
# REQUIRED DAYS OF SUPPLY
us.dt <- raw.dt[Field=="REQUIRED_DAYS_OF_SUPPLY"][us.dt, roll = TRUE]
us.dt[, ReqDaysSupply := as.numeric(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
#'DOMAIN'
us.dt <- raw.dt[Field=="DOMAIN"][us.dt, roll = TRUE]
us.dt[, Domain := as.factor(V1)]
us.dt[, V1 := NULL]
us.dt[, V2 := NULL]
us.dt[, Field := NULL]
setkey(us.dt, UnitName, Day)
# Supply-type dependent fields: for these rows V1 actually holds the supply
# type, so rename the column and re-key on (UnitName, SupplyType, Day).
#SUPPLYING LOG NODE
setnames(raw.dt, c('UnitName', 'Day', 'Field', 'SupplyType', 'V2'))
setkey(raw.dt, UnitName, SupplyType, Day)
us.supply.dt <- raw.dt[Field=="SUPPLYING_LOG_NODE"][us.supply.dt, roll = TRUE]
us.supply.dt[, SupplyingLogNode := as.factor(V2)]
us.supply.dt[, SupplyType := as.factor(SupplyType)]
us.supply.dt[, V2 := NULL]
us.supply.dt[, Field := NULL]
setkey(us.supply.dt, UnitName, SupplyType, Day)
# Merge in the supply increments WITHOUT rolling (one-off events), as a left
# outer join; days without an increment keep NA.
us.supply.dt[raw.dt[Field == 'SUPPLY_INCREMENT'],
SupplyIncrement := as.numeric(i.V2), nomatch = NA ]
#Supply increment is now numeric; also would rather have supplyingLN be FACTOR.
return(list(Script = as.data.frame(us.dt),
SupplyScript = as.data.frame(us.supply.dt)
))
}
#TODO
# * read the max loads as a data frame
# * read exclusions as a data frame
readTransports <- function(config){
  # Read the transportation-asset table. The raw file's header row has a
  # non-standard shape, so it is skipped and canonical names are supplied.
  raw <- read.csv(config$files$TRANSPORTATION_ASSETS,
                  skip = 1, header = FALSE, fill = TRUE)
  # Keep only the first nine fixed columns.
  assets <- setNames(raw[, 1:9],
                     c('Name', 'Description', 'ConfigurationName', 'Category',
                       'Availability', 'Fuel Type', 'Fuel Efficiency',
                       'Average Speed', 'Max Range'))
  # Max-load capacities and mode exclusions are not parsed yet (TODO);
  # placeholders keep the return shape stable.
  list(transports = assets, capacities = data.frame(), exclusions = data.frame())
}
# TODO: read intermediate points.
# * read fields as character then convert to appropriate type.
readArcs <- function(config){
  # Read the arc (network edge) table, skipping the malformed header row.
  # TODO: intermediate shape points beyond column 7 are not parsed yet.
  arcs <- read.csv(config$files$ARCS, skip = 1, header = FALSE)[, 1:7]
  setNames(arcs, c('Name', 'Description', 'Node1', 'Node2',
                   'Mode', 'True Length', 'Max Speed'))
}
readNodes <- function(config){
  # Node table: name (factor), description, two numeric coordinate columns,
  # and a node-type factor.
  col_types <- c('factor', 'character', 'numeric', 'numeric', 'factor')
  read.csv(config$files$NODES, colClasses = col_types)
}
# consumption classes = done
# Input objects still to be parsed. Each stub below currently returns NULL.
readMaps <- function(config){} # not sure this is really needed
readPipelines <- function(config){}
readPostures <- function(config){}
readPriorityClass <- function(config){} # this structure is the same as consumption class
readScriptedSorties <- function(config){}
readWeather <- function(config){} # low priority
# Script files still to be parsed (stubs, return NULL):
readArcScript <- function(config){}
readLogNodeScript <- function(config){}
readPipelineScript <- function(config){}
# Read output data. All files live in config$OUTPUT_DIR; colClasses are
# pinned to each output file's known column layout.
# Per-day supply history at each log node (13 numeric measure columns).
readLogNodeSupplyHistory <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'LogNodeSupplyHistory.csv', sep = ''),
colClasses = c('integer', rep('factor', 2), rep('numeric', 13)))
}
# Per-day transport activity at each log node (6 numeric measure columns).
readLogNodeTransportHistory <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'LogNodeTransportHistory.csv', sep = ''),
colClasses = c('integer', rep('factor', 2), rep('numeric', 6)))
}
# Per-day unit supply history (note: the file is named UnitHistory.csv).
readUnitSupplyHistory <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'UnitHistory.csv', sep = ''),
colClasses = c('integer', rep('factor',2), 'character', rep('numeric', 16)))
}
# Supply requests raised during the game.
readSupplyRequests <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'SupplyRequests.csv', sep = ''),
colClasses = c(rep('integer', 2), rep('factor', 2), rep('numeric', 3),
'integer', rep('numeric', 2))
)
}
# Individual transport deliveries (last column is a logical flag).
readTransportDeliveries <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'TransportDeliveries.csv', sep = ''),
colClasses = c(rep('integer',3), rep('factor', 3), rep('numeric', 2),
'factor', rep('numeric', 6), 'logical'))
}
# Scripted supply-increment deliveries.
readIncrementDeliveries <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'ScriptedIncrementDeliveries.csv', sep = ''),
colClasses = c(rep('integer', 2), 'numeric')
)
}
# Pipeline deliveries.
readPipelineDeliveries <- function(config){
read.csv(paste(config$OUTPUT_DIR, 'PipelineDeliveries.csv', sep = ''),
colClasses = c(rep('integer', 2), rep('factor', 2), 'numeric',
'factor', rep('numeric', 2))
)
}
# Delivery arcs used in the game, enriched with the origin ('.o'-suffixed)
# and destination ('.d'-suffixed) node attributes from the input tables.
readDeliveryArcs <- function(config){
da <- read.csv(paste(config$OUTPUT_DIR, 'DeliveryArcs.csv', sep = ''),
colClasses = c('integer', 'factor')
)
# Merge arc endpoints against the node table to get start/end locations.
n <- readNodes(config)
a <- readArcs(config)
a <- merge(a, n, by.x = 'Node1', by.y = 'Name', suffixes = c('', '.o'))
a <- merge(a, n, by.x = 'Node2', by.y = 'Name', suffixes = c('', '.d'))
# Drop the verbose description columns before the final join.
a$Description <- NULL
a$Description.o <- NULL
a$Description.d <- NULL
# Left outer join: keep every delivery arc even if it has no match.
merge(da, a, by.x = 'ArcID', by.y = 'Name', all.x = TRUE, all.y = FALSE)
}
#Arc history
#pipelinHistory
|
# Post-process simulation output: for each of six runs (one per applied force
# value), scan every stat* file under case_<prefix>_<run>/out/, take the
# final value of column 10 from runs long enough (> 200 rows), and plot it
# against the force applied in that run.
require(lattice) # NOTE(review): lattice does not appear to be used below
output_file_prefix <- "basemodel2_a135"
# Accumulator of (force, final column-10 value) rows.
A = NULL
# NOTE(review): `rep` and `force` shadow base R functions of the same name.
rep <- 0
# Force value applied in each of the six cases (index = run number).
force <- c(1, 3, 7, 10, 14, 16)
for ( i in seq(6) )
{
rep = rep + 1
cat("\n ***********run********* ", rep, "\n")
FolderName <- paste("case_", output_file_prefix, "_", as.character(rep), "/out/", sep='')
# Shell out to list the stat* files; file list is written then read back.
cmd = paste("ls ", FolderName, "stat* > ", FolderName, "filelist.txt", sep='')
system(cmd)
fileList <- readLines(paste(FolderName, "filelist.txt", sep=''))
for ( fileid in fileList )
{
FileName <- fileid
stat <- read.csv(file=FileName, header=FALSE)
# Number of rows (time points) in this stat file.
L <- length(stat[,1])
# Only keep runs that lasted more than 200 recorded steps.
if ( L > 200 )
{
A <- rbind(A, c(force[rep], stat[L,10]))
}
cat(L, " ")
}
cat("\n")
}
# Final value of column 10 vs applied force.
plot(A[,1], A[,2])
## colList <- c("red", "blue", "black", "green", "cyan")
## colList <- rep(colList, 5)
## png(paste("output_", output_file_prefix, ".png", sep=''), height=600, width=1200)
## par(mar=c(5,6,4,4))
## plot(0, 0, type='n', xlim=c(0, 2000),
## ##ylim=c(-10, 10),
## ylim=c(-1, 0),
## xlab="number of divisions", ylab="deviation X(t)",
## cex.axis=1.5, cex.lab=1.5)
## abline(h=c(0, -5, 5),col='blue')
## dev.off()
## hist(atan(abs(sin(totalDividingAngleList)/cos(totalDividingAngleList))))
## png('AspectRatio.png', height=600, width=600)
## ##par(mfrow=c(1,2))
## ##angle <- angle + rnorm(length(angle), 0, pi*15.37/180)
## angle=atan(abs(sin(angle)/cos(angle)))*180/pi
## t = hist(angle, breaks=c(0, 30, 60, 90), plot=F)
## barplot(t$counts/sum(t$counts), ylim=c(0, 1))
## ##t = hist(angle[braf==1], breaks=c(0, 30, 60, 90))
## #tt = hist(angle[braf==0], breaks=c(0, 30, 60, 90))
## #d1 = t$density/sum(t$density)
## #d0 = tt$density/sum(tt$density)
## dev.off()
## write.table(t$counts/sum(t$counts), file="as.csv", sep=',')
## cat('Fcal: ', var(Fcal, na.rm=T), '\n')
## cat('Gcal: ', var(Gcal, na.rm=T), '\n')
## pdf("p1.pdf")
## ##png("p1.png")
## par(mar=c(5,6,4,4))
## plot(0, 0, type='n', xlim=c(0, 2000), ylim=c(0, 1),
## xlab="time", ylab="percentage of elongated cells",
## cex.axis=1.5, cex.lab=2)
## abline(h=c(0.37), col='black', lwd=4, lty=2)
## rep = rep0
## n0 = 600
## n <- 0
## for ( i in seq(5) )
## {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- paste("case",as.character(rep), "/out/", sep='')
## cmd = paste("ls ", FolderName, "stat* > ", FolderName, "filelist.txt", sep='')
## system(cmd)
## fileList <- readLines(paste(FolderName, "filelist.txt", sep=''))
## for ( fileid in fileList )
## {
## n <- n + 1
## FileName <- fileid
## A <- read.csv(file=FileName, header=FALSE)
## if ( length(A[,10])/10 >= Nsample )
## {
## ##lines(A[,10], col=colList[i])
## lines(A[,10], col='red')
## }
## }
## }
## rep = 20
## n0 = 600
## n <- 0
## for ( i in seq(10) )
## {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- paste("../360_InitialStates_CellModel_MitotiSpindle/case",as.character(rep), "/out/", sep='')
## cmd = paste("ls ", FolderName, "stat* > ", FolderName, "filelist.txt", sep='')
## system(cmd)
## fileList <- readLines(paste(FolderName, "filelist.txt", sep=''))
## for ( fileid in fileList )
## {
## n <- n + 1
## FileName <- fileid
## A <- read.csv(file=FileName, header=FALSE)
## if ( length(A[,10])/10 >= Nsample )
## {
## ##lines(A[,10], col=colList[i])
## lines(A[,10], col='blue')
## }
## }
## }
## dev.off()
## plot(0, 0, type='n', xlim=c(0, 1000), ylim=c(0, 1),
## xlab="number of divisions", ylab="p",
## cex.axis=1.5, cex.lab=1.5)
## abline(h=c(0, 0.37),col='blue')
## rep = rep0
## n0 = 1
## Fcal <- NULL
## n <- 0
## for ( i in seq(10) )
## ##for ( i in 10 )
## {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- paste("case",as.character(rep), "/out/", sep='')
## cmd = paste("ls ", FolderName, "stat* > ", FolderName, "filelist.txt", sep='')
## system(cmd)
## fileList <- readLines(paste(FolderName, "filelist.txt", sep=''))
## for ( fileid in fileList )
## {
## n <- n + 1
## FileName <- fileid
## stat <- read.csv(file=FileName, header=FALSE)
## z <- 100*(stat[n0:dim(stat)[1],9] - 0.37)
## ##z <- z - 0.1*seq(length(z))
## ##lines(stat[,8], type='o', col=colList[i])
## lines(z, col='green')
## }
## }
## rep = rep0
## plot(0, 0, type='n', xlim=c(0, 200), ylim=c(-15, 15))
## abline(h=c(0, -5, 5),col='blue')
## n0 = 1
## Fcal <- NULL
## for ( i in seq(40) ) {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- gsub("(\ )", "", paste("case",as.character(rep)))
## FileName <- paste(FolderName, "/out/statistics.txt", sep='')
## stat <- read.csv(file=FileName, header=FALSE)
## z <- stat[n0:dim(stat)[1],7] - stat[n0,7]
## z <- z - 0.0*seq(length(z))
## ##lines(stat[,8], type='o', col=colList[i])
## lines(z, col=colList[i])
## if ( length(z) >= Nsample )
## {
## Fcal[i] <- z[Nsample]
## }
## else
## {
## Fcal[i] <- NA
## }
## ##lines(15*(z[,8]-0.37), col=colList[i])
## }
##legend("bottomleft", c("random", "model"), lwd=1, col=c('green', 'blue'), cex=1.5)
##dev.off()
## plot(0, 0, type='n', xlim=c(0, 200), ylim=c(0, 1))
## abline(h=0.37,col='blue')
## rep = rep0
## n0 = 1
## for ( i in seq(10) ) {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- gsub("(\ )", "", paste("case",as.character(rep)))
## FileName <- paste(FolderName, "/out/statistics.txt", sep='')
## stat <- read.csv(file=FileName, header=FALSE)
## ## z <- stat[n0:dim(stat)[1],9] - stat[n0,7]
## ## z <- z + 0.02*seq(length(z))
## lines(stat[,9], col=colList[i])
## ##lines(15*(z[,8]-0.37), col=colList[i])
## }
| /postprocess.r | permissive | hydrays/CellModel | R | false | false | 5,995 | r | require(lattice)
## Collect the final-row statistic (column 10) of every "stat*" output file
## produced by the six simulation runs, pair it with the force applied in
## that run, and plot statistic against force.
##
## Fixes over the original version:
##  * consistent `<-` assignment (no mixed `=`)
##  * no shadowing of the base functions rep() and force()
##  * loop bound derived from the force vector instead of hard-coded 6
##  * files enumerated with list.files() instead of shelling out to `ls`
##    (portable, no leftover filelist.txt artifact, no error when a run
##    directory is missing)
##  * result rows accumulated in a list and bound once, instead of
##    rbind() on a growing matrix inside the loop
output_file_prefix <- "basemodel2_a135"
force_levels <- c(1, 3, 7, 10, 14, 16)  # applied force per run, indexed by run number
rows <- list()                          # accumulator for (force, statistic) pairs
for (run_id in seq_along(force_levels)) {
  cat("\n ***********run********* ", run_id, "\n")
  folder_name <- paste0("case_", output_file_prefix, "_", run_id, "/out/")
  file_list <- list.files(folder_name, pattern = "^stat", full.names = TRUE)
  for (file_name in file_list) {
    stat <- read.csv(file = file_name, header = FALSE)
    n_rows <- nrow(stat)
    ## Only keep runs that progressed beyond 200 recorded steps; the last
    ## recorded value of column 10 is the run's summary statistic.
    if (n_rows > 200) {
      rows[[length(rows) + 1]] <- c(force_levels[run_id], stat[n_rows, 10])
    }
    cat(n_rows, " ")
  }
  cat("\n")
}
A <- do.call(rbind, rows)  # same shape as the original rbind-accumulated matrix
plot(A[, 1], A[, 2])
## colList <- c("red", "blue", "black", "green", "cyan")
## colList <- rep(colList, 5)
## png(paste("output_", output_file_prefix, ".png", sep=''), height=600, width=1200)
## par(mar=c(5,6,4,4))
## plot(0, 0, type='n', xlim=c(0, 2000),
## ##ylim=c(-10, 10),
## ylim=c(-1, 0),
## xlab="number of divisions", ylab="deviation X(t)",
## cex.axis=1.5, cex.lab=1.5)
## abline(h=c(0, -5, 5),col='blue')
## dev.off()
## hist(atan(abs(sin(totalDividingAngleList)/cos(totalDividingAngleList))))
## png('AspectRatio.png', height=600, width=600)
## ##par(mfrow=c(1,2))
## ##angle <- angle + rnorm(length(angle), 0, pi*15.37/180)
## angle=atan(abs(sin(angle)/cos(angle)))*180/pi
## t = hist(angle, breaks=c(0, 30, 60, 90), plot=F)
## barplot(t$counts/sum(t$counts), ylim=c(0, 1))
## ##t = hist(angle[braf==1], breaks=c(0, 30, 60, 90))
## #tt = hist(angle[braf==0], breaks=c(0, 30, 60, 90))
## #d1 = t$density/sum(t$density)
## #d0 = tt$density/sum(tt$density)
## dev.off()
## write.table(t$counts/sum(t$counts), file="as.csv", sep=',')
## cat('Fcal: ', var(Fcal, na.rm=T), '\n')
## cat('Gcal: ', var(Gcal, na.rm=T), '\n')
## pdf("p1.pdf")
## ##png("p1.png")
## par(mar=c(5,6,4,4))
## plot(0, 0, type='n', xlim=c(0, 2000), ylim=c(0, 1),
## xlab="time", ylab="percentage of elongated cells",
## cex.axis=1.5, cex.lab=2)
## abline(h=c(0.37), col='black', lwd=4, lty=2)
## rep = rep0
## n0 = 600
## n <- 0
## for ( i in seq(5) )
## {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- paste("case",as.character(rep), "/out/", sep='')
## cmd = paste("ls ", FolderName, "stat* > ", FolderName, "filelist.txt", sep='')
## system(cmd)
## fileList <- readLines(paste(FolderName, "filelist.txt", sep=''))
## for ( fileid in fileList )
## {
## n <- n + 1
## FileName <- fileid
## A <- read.csv(file=FileName, header=FALSE)
## if ( length(A[,10])/10 >= Nsample )
## {
## ##lines(A[,10], col=colList[i])
## lines(A[,10], col='red')
## }
## }
## }
## rep = 20
## n0 = 600
## n <- 0
## for ( i in seq(10) )
## {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- paste("../360_InitialStates_CellModel_MitotiSpindle/case",as.character(rep), "/out/", sep='')
## cmd = paste("ls ", FolderName, "stat* > ", FolderName, "filelist.txt", sep='')
## system(cmd)
## fileList <- readLines(paste(FolderName, "filelist.txt", sep=''))
## for ( fileid in fileList )
## {
## n <- n + 1
## FileName <- fileid
## A <- read.csv(file=FileName, header=FALSE)
## if ( length(A[,10])/10 >= Nsample )
## {
## ##lines(A[,10], col=colList[i])
## lines(A[,10], col='blue')
## }
## }
## }
## dev.off()
## plot(0, 0, type='n', xlim=c(0, 1000), ylim=c(0, 1),
## xlab="number of divisions", ylab="p",
## cex.axis=1.5, cex.lab=1.5)
## abline(h=c(0, 0.37),col='blue')
## rep = rep0
## n0 = 1
## Fcal <- NULL
## n <- 0
## for ( i in seq(10) )
## ##for ( i in 10 )
## {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- paste("case",as.character(rep), "/out/", sep='')
## cmd = paste("ls ", FolderName, "stat* > ", FolderName, "filelist.txt", sep='')
## system(cmd)
## fileList <- readLines(paste(FolderName, "filelist.txt", sep=''))
## for ( fileid in fileList )
## {
## n <- n + 1
## FileName <- fileid
## stat <- read.csv(file=FileName, header=FALSE)
## z <- 100*(stat[n0:dim(stat)[1],9] - 0.37)
## ##z <- z - 0.1*seq(length(z))
## ##lines(stat[,8], type='o', col=colList[i])
## lines(z, col='green')
## }
## }
## rep = rep0
## plot(0, 0, type='n', xlim=c(0, 200), ylim=c(-15, 15))
## abline(h=c(0, -5, 5),col='blue')
## n0 = 1
## Fcal <- NULL
## for ( i in seq(40) ) {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- gsub("(\ )", "", paste("case",as.character(rep)))
## FileName <- paste(FolderName, "/out/statistics.txt", sep='')
## stat <- read.csv(file=FileName, header=FALSE)
## z <- stat[n0:dim(stat)[1],7] - stat[n0,7]
## z <- z - 0.0*seq(length(z))
## ##lines(stat[,8], type='o', col=colList[i])
## lines(z, col=colList[i])
## if ( length(z) >= Nsample )
## {
## Fcal[i] <- z[Nsample]
## }
## else
## {
## Fcal[i] <- NA
## }
## ##lines(15*(z[,8]-0.37), col=colList[i])
## }
##legend("bottomleft", c("random", "model"), lwd=1, col=c('green', 'blue'), cex=1.5)
##dev.off()
## plot(0, 0, type='n', xlim=c(0, 200), ylim=c(0, 1))
## abline(h=0.37,col='blue')
## rep = rep0
## n0 = 1
## for ( i in seq(10) ) {
## rep = rep + 1
## cat("\n ***********run********* ", rep, "\n")
## FolderName <- gsub("(\ )", "", paste("case",as.character(rep)))
## FileName <- paste(FolderName, "/out/statistics.txt", sep='')
## stat <- read.csv(file=FileName, header=FALSE)
## ## z <- stat[n0:dim(stat)[1],9] - stat[n0,7]
## ## z <- z + 0.02*seq(length(z))
## lines(stat[,9], col=colList[i])
## ##lines(15*(z[,8]-0.37), col=colList[i])
## }
|
#Contracts has to be a list(..) of twsContracts
#IBeWrapper.Mktdata.SHARED.MULTISYMBOL <- function(Contracts=list(), ShareFiles=list(), Aggregation=list(), ShareFilesWipe=FALSE, xupdate.mat=NULL) {
## ---------------------------------------------------------------------------
## Build an IBrokers eWrapper whose tick handlers aggregate live TWS market
## data into fixed-size time bars and publish them through memory-mapped
## files (package 'mmap'), so that reader processes can follow the feed.
##
## Arguments:
##   Contracts      list of contract definitions.  An element that is itself
##                  a list() is handled as a single-symbol contract; any
##                  other element is a multi-symbol stream definition
##                  (at most 100 symbols per stream, enforced below).
##   ShareFiles     one backing-file path per element of Contracts.
##   Aggregation    bar size in whole seconds (integer > 1), shared by all
##                  contracts (enforced by the stopifnot below).
##   ShareFilesWipe if TRUE, (re)create the backing files and reset their
##                  contents to NA.
##   xupdate.mat    routing matrix from symbol id to stream id; the columns
##                  are read as (source id, target stream id, slot offset,
##                  target size) -- TODO confirm against the caller that
##                  builds this matrix.
##
## Returns: the configured eWrapper closure (see return(eW) at the bottom).
##
## NOTE(review): relies on IBrokers (eWrapper, .twsTickType), xts, mmap and
## the project helper IBmakeVarlengthStruct() defined elsewhere in the repo.
## ---------------------------------------------------------------------------
IBeWrapper.Mktdata.SHARED.MULTISYMBOL <- function(Contracts=list(), ShareFiles=list(), Aggregation=5L, ShareFilesWipe=FALSE, xupdate.mat=NULL) {
verbose <- 0 # only for init
eW <- eWrapper(NULL)
if (!(length(Contracts)>0)) stop('Contract list cannot be empty')
n<-length(Contracts)
stopifnot( (is.integer(Aggregation) & (1<Aggregation)) ) #barsize (in seconds) is the same for all contracts!
#Initalize n different mmap xts structures type'double' on disk
# c("BidSize", "BidPrice", "AskPrice", "AskSize", "Last", "LastSize", "Volume","Open","High","Low")
#Set number of rows and calculate xts size in bytes (on disk)
numrows<-18000 # set to 18000 ~= 17280 * 5sec to cover 24 hours. The rest is safety padding
#Create disk files and mappings
numcontracts <- 0
numstreams <- 0
mmappings<-list()
## Pass over the contract list: size a backing file per entry, create it if
## missing (or when wiping), and mmap() it with a struct laid out as one
## timestamp slot plus 10 doubles per symbol.
for (id in 1:n) {
if (is.list(Contracts[[id]])) { # symbol contract == list()
numcontracts <- numcontracts + 1
#~ tmpx <- xts(matrix(data=NA_real_, nrow=numrows, ncol=10), Sys.time()+1:numrows)
#~ sizeinbytes<-length(coredata(tmpx)) * nbytes(struct(double())) + length(.index(tmpx)) * nbytes(struct(double()))
#~ rm(tmpx)
#~ #filename: SYMBOL-EXCHANGE-mktdata.bin
#~ #tmpfname <- paste(ShareDir,'/',Contracts[[id]]$symbol,'-', Contracts[[id]]$exch,'-mktdata.bin',sep='')
#~ tmpfname <- ShareFiles[[id]]
#~ if (!file.exists(tmpfname) | ShareFilesWipe) { writeBin(raw(sizeinbytes),tmpfname) }
#~ mmappings[[id]] <- mmap(tmpfname, struct(timestamp=double(), BidSize=double(), BidPrice=double(), AskPrice=double(), AskSize=double(), Last=double(), LastSize=double(), Volume=double(), Open=double(), High=double(), Low=double() ))
#~ if (ShareFilesWipe) {
#~ # Initialize values - brute
#~ mmappings[[id]][,1] <- NA
#~ mmappings[[id]][,2] <- NA
#~ mmappings[[id]][,3] <- NA
#~ mmappings[[id]][,4] <- NA
#~ mmappings[[id]][,5] <- NA
#~ mmappings[[id]][,6] <- NA
#~ mmappings[[id]][,7] <- NA
#~ mmappings[[id]][,8] <- NA
#~ mmappings[[id]][,9] <- NA
#~ mmappings[[id]][,10] <- NA
#~ mmappings[[id]][,11] <- NA
#~ ## Write full xts data by column .....
#~ #m[,1] <- .index(x)
#~ #m[,2] <- coredata(x)[,1]
#~ #m[,3] <- coredata(x)[,2]
#~ #m[,4] <- coredata(x)[,3]
#~ #m[,5] <- coredata(x)[,4]
#~ # maybe it would be good to have some kind of consistency check on the
#~ # client side, where we compare the write/read data to an xts stored in
#~ # a .rdata file ??
#~ }
} else { # multi symbol definition , including single symbols!
if (length(Contracts[[id]])>100) stop('more than 100 symbols in a multisymbol stream are not supported yet.')
numstreams <- numstreams + 1
numcolumns <- 10*length(Contracts[[id]])
## A throwaway xts of the target shape is built only to compute the byte
## size the backing file must have; it is removed immediately after.
tmpx <- xts(matrix(data=NA_real_, nrow=numrows, ncol=numcolumns), Sys.time()+1:numrows)
sizeinbytes<-length(coredata(tmpx)) * nbytes(struct(double())) + length(.index(tmpx)) * nbytes(struct(double()))
rm(tmpx)
#filename: 43274Fsdrc.bin
tmpfname <- ShareFiles[[id]]
if (!file.exists(tmpfname) | ShareFilesWipe) { writeBin(raw(sizeinbytes),tmpfname) }
#all.LABELS <- c('BidSize', 'BidPrice', 'AskPrice', 'AskSize', 'Last', 'LastSize', 'Volume', 'Open', 'High', 'Low' )
# Make a struct() of the required length
numsymbols <- length(Contracts[[id]])
atom.lst=c(
list(double()) #timestamp
,rep(
#BIG# 1.37 Mb per symbol 100% size. double() == real64()
list(double(), double() ,double() ,double() ,double() ,double() ,double() ,double() ,double() ,double() )
#OK# 1.0 Mb per symbol 80% sixe
#list(int32() ,real64() ,real64() ,int32() ,real64() ,int32() ,real64() ,real64() ,real64() , int32() )
#TIGHT?# 0.6 Mb per symbol 45% size
#list(uint24() ,real32() ,real32() ,uint24() ,real32() ,uint24() ,real32() ,real32() ,real32() , uint24() )
, numsymbols)
)
ss <- IBmakeVarlengthStruct(atom.lst, 1L)
stopifnot(is.struct(ss))
if (verbose>0) cat('length of struct:',length(ss),'\n')
# Create the mapping
mmappings[[id]] <- mmap(tmpfname, ss )
if (ShareFilesWipe) {
# Initialize values - brute
mmappings[[id]][,1] <- NA
for (icol in 2:(numcolumns+1)) mmappings[[id]][,icol] <- NA
## Write full xts data by column .....
#m[,1] <- .index(x)
#m[,2] <- coredata(x)[,1]
#m[,3] <- coredata(x)[,2]
#m[,4] <- coredata(x)[,3]
#m[,5] <- coredata(x)[,4]
# maybe it would be good to have some kind of consistency check on the
# client side, where we compare the write/read data to an xts stored in
# a .rdata file ??
}
}
}
#store mappings in closure
eW$assign.Data("mmappings",mmappings)
#store number of contracts, etc in closure
eW$assign.Data("numcontracts",numcontracts)
eW$assign.Data('xupdate.mat', xupdate.mat)
eW$assign.Data('numstreams', numstreams)
# Initialize in-memory data buffer
## One single-row xts per element of Contracts; the tick handlers below keep
## overwriting this row until makeDataRowIfTime() flushes it to shared memory.
eW$assign.Data("data", rep(list(structure(.xts(matrix(rep(NA_real_,
10), ncol = 10), 0), .Dimnames = list(NULL, c("BidSize",
"BidPrice", "AskPrice", "AskSize", "Last", "LastSize",
"Volume","Open","High","Low")))), n)) # instead of 'n' this should be 'numcontracts'
## Price-tick handler: overwrite the in-memory row of contract 'id'.
## Column positions follow the BidSize..Low dimnames declared above, and the
## row's xts index is bumped to "now" on every update.
eW$tickPrice <- function(curMsg, msg, timestamp, file, ...) {
tickType = msg[3]
msg <- as.numeric(msg)
id <- msg[2]
data <- eW$get.Data("data")
attr(data[[id]], "index") <- as.numeric(nowtime<-Sys.time())
nr.data <- NROW(data[[id]])
if (tickType == .twsTickType$BID) {
data[[id]][nr.data, 1:2] <- msg[5:4]
}
else if (tickType == .twsTickType$ASK) {
data[[id]][nr.data, 3:4] <- msg[4:5]
}
else if (tickType == .twsTickType$LAST) {
data[[id]][nr.data, 5] <- msg[4]
}
else if (tickType == .twsTickType$HIGH) {
data[[id]][nr.data, 9] <- msg[4]
}
else if (tickType == .twsTickType$LOW) {
data[[id]][nr.data, 10] <- msg[4]
}
else if (tickType == .twsTickType$OPEN) {
data[[id]][nr.data, 8] <- msg[4]
}
eW$assign.Data("data", data)
c(curMsg, msg)
#eW$makeDataRowIfTime()
}
## Size-tick handler: same single-row update scheme as tickPrice, but for
## the size/volume columns (1, 4, 6 and 7).
eW$tickSize <- function(curMsg, msg, timestamp, file, ...) {
data <- eW$get.Data("data")
tickType = msg[3]
msg <- as.numeric(msg)
id <- as.numeric(msg[2])
attr(data[[id]], "index") <- as.numeric(nowtime<-Sys.time())
nr.data <- NROW(data[[id]])
if (tickType == .twsTickType$BID_SIZE) {
data[[id]][nr.data, 1] <- msg[4]
}
else if (tickType == .twsTickType$ASK_SIZE) {
data[[id]][nr.data, 4] <- msg[4]
}
else if (tickType == .twsTickType$LAST_SIZE) {
data[[id]][nr.data, 6] <- msg[4]
}
else if (tickType == .twsTickType$VOLUME) {
data[[id]][nr.data, 7] <- msg[4]
}
eW$assign.Data("data", data)
c(curMsg, msg)
#?eW$makeDataRowIfTime()
}
eW$assign.Data("gridtimeoflastrow.int.sec", 0) #initialize
eW$assign.Data("barsize.int.sec", as.integer(Aggregation)) #initialize
eW$assign.Data("first.row.complete", FALSE) #initialize. Will be true after we have the first full row
# and only then we will start writing rows to shared mem.
## Called after every processed message (and on socket timeouts, see the
## companion twsCALLBACK): once per Aggregation-second grid slot, flush the
## in-memory per-contract rows into the memory-mapped multi-symbol streams.
## Nothing is written until every contract has produced one complete first
## row (columns 1..6 non-NA).
eW$makeDataRowIfTime <- function(nowtime) {
verbose <- 0 # 0: nothing, 1:basic, 2:all
gridtimeoflastrow.int.sec <- eW$get.Data("gridtimeoflastrow.int.sec")
barsize.int.sec <- eW$get.Data("barsize.int.sec")
first.row.complete <- eW$get.Data("first.row.complete")
if (!first.row.complete) { # Do not write rows until we have one complete first row for all id's (NAs in 7,8,9,10 allowed!)
data <- eW$get.Data("data")
numcontracts <- eW$get.Data("numcontracts")
ss <- sum(vv<-vapply(1:numcontracts, function(id) { as.numeric(any(is.na(data[[id]][1,1:6]))) },0)) #dont check 7,8,9,10
#cat(vv,'\n')
#if we still have NAs, return(), otherwise set to TRUE
if (ss) return() else {
eW$assign.Data("first.row.complete", TRUE)
cat('first.row.complete\n')
}
}
if ( (gridtime<-unclass(nowtime)%/%barsize.int.sec*barsize.int.sec) > gridtimeoflastrow.int.sec ) {
# Reset the timestamp
eW$assign.Data("gridtimeoflastrow.int.sec", gridtime)
#BEGIN - For all id's write the new row to shared memory as struct type 'double' with 11 columns.
data <- eW$get.Data("data")
mmappings <- eW$get.Data("mmappings")
#print(mmappings[[id]][,1])
# #get the ii index of the first free (NA) timestamp of m[], assuming that we do not have an extraction function??
# #Faster: we could store this index in the closure somewhere and just increment it,
# # instead of searching for it every time.
remembered.iinext <- NA
numcontracts <- eW$get.Data("numcontracts")
## For every single contract, copy its current row into each multi-symbol
## stream that subscribes to it according to xupdate.mat; the first free
## (NA-timestamp) row index of each target stream is cached per grid slot
## in remembered.iinext via the <<- assignment below.
vapply(1:numcontracts, function(id) {
#~ tstmps<-mmappings[[id]][,1]
#~ #print( is.na( tstmps[[1]][1] ) ) #first element
#~ iinext<-match(NA, tstmps[[1]] ) #Only works if we don't have leading NAs or interrupting NAs
#~ #print(iinext)
#~ if (is.na(iinext)) stop(paste('Fatal error. Shared memory/file buffer for id',id,'is full, or invalid NAs in data.'))
#~ mmappings[[id]][iinext,1] <- gridtime #.index(newbar)
#~ mmappings[[id]][iinext,2] <- data[[id]][1,1] #coredata(newbar)[1,1]
#~ mmappings[[id]][iinext,3] <- data[[id]][1,2] #coredata(newbar)[1,2]
#~ mmappings[[id]][iinext,4] <- data[[id]][1,3] #coredata(newbar)[1,3]
#~ mmappings[[id]][iinext,5] <- data[[id]][1,4] #coredata(newbar)[1,4]
#~ mmappings[[id]][iinext,6] <- data[[id]][1,5] #coredata(newbar)[1,5]
#~ mmappings[[id]][iinext,7] <- data[[id]][1,6] #coredata(newbar)[1,6] #NA
#~ mmappings[[id]][iinext,8] <- data[[id]][1,7] #coredata(newbar)[1,7] #NA
#~ mmappings[[id]][iinext,9] <- data[[id]][1,8] #coredata(newbar)[1,8] #NA
#~ mmappings[[id]][iinext,10] <- data[[id]][1,9] #coredata(newbar)[1,9] #NA
#~ mmappings[[id]][iinext,11] <- data[[id]][1,10] #coredata(newbar)[1,10] #NA
#~ #cat('data: ',unlist(data[[id]]),'\n')
#~ cat(id,' mmappings[[id]][iinext,] ',iinext,' :',unlist(mmappings[[id]][iinext,]),'\n')
# BEGIN - xupdate all multisymbol streams that contain this symbol
xupdate.mat <- eW$get.Data("xupdate.mat")
#for (xu.idx in which(xupdate.mat[,1]==id)) {
vapply( which(xupdate.mat[,1]==id) ,function(xu.idx) {
target.id <- xupdate.mat[xu.idx,2]
offset.num <- xupdate.mat[xu.idx,3]
target.size <- xupdate.mat[xu.idx,4]
#got a full multisymbol row!
#WRITE multi* to SHMEM FILE
if (is.na(iinext <- remembered.iinext[target.id])) {
tstmps<-mmappings[[target.id]][,1]
iinext<-match(NA, tstmps[[1]] ) #Only works if we don't have leading NAs or interrupting NAs
if (is.na(iinext)) stop(paste('Fatal error. Shared memory/file buffer for id',id,'is full, or invalid NAs in data.'))
remembered.iinext[target.id] <<- iinext # <<- PARENT, two levels up
}
mmappings[[target.id]][iinext,1] <- gridtime #either here, with 'remembered.' or do this last for all streams. see sapply below
ixstart <- 2+(offset.num-1)*ncol(data[[id]])
ixend <- ixstart + ncol(data[[id]])-1
if (verbose>1) {
cat('data: ',data[[id]],'\n')
cat('Writing data to target stream',target.id,' at ixstart:',ixstart,' ixend:', ixend, '\n')
}
#mmappings[[target.id]][iinext, ixstart:ixend ] <- data[[id]] # Doesnt work like this. need to split it up.
vapply( ixstart:ixend, function(ix) {
mmappings[[target.id]][iinext, ix ] <- data[[id]][1,ix-ixstart+1]
1
},0)
if (verbose>1) {
cat('Stream',target.id,':', unlist( mmappings[[target.id]][iinext,]) ,'\n')
}
1
},0)
# END - xupdate all multisymbol streams that contain this symbol
1
},0)
#
if (verbose>1) cat('remembered.iinext: ',remembered.iinext,'\n')
#
if (verbose>0) {
numstreams <- eW$get.Data("numstreams")
vapply((numcontracts+1):(numcontracts+numstreams), function(stream.id) {
iithis <- remembered.iinext[stream.id]
#mmappings[[stream.id]][iithis,1]
cat('Final Stream',stream.id,':', unlist( mmappings[[stream.id]][iithis,] ) ,'\n')
1
},0)
}
#END - Write the newbar to shared memory
}
}
#Define the error function
## Tracks TWS connectivity (1100 lost / 1101,1102 restored) and returns the
## sentinel 'BREAKBREAK' for errors 2105/420, which the companion callback
## loop uses as its exit signal.
eW$errorMessage <- function(curMsg, msg, timestamp, file, twsconn, ...)
{
if(msg[3] == "1100") {
twsconn$connected <- FALSE
## Do not disconnect, since the TWS may reconnect, with a 1101, 1102 and
## we still want to be around when that happens!!!
#twsDisconnect(twsconn) #Soren added
#stop(paste("Shutting down: TWS Message:",msg[4])) #Soren added
}
if(msg[3] %in% c("1101","1102")) twsconn$connected <- TRUE
cat(as.character(Sys.time()),"TWS Message:",msg,"\n")
# It looks like the 2105 (lost connection) Error and a subsequent 2106 (connection ok)
# requires us to re-issue the request for marketdata !!!
# 4-2--1-2105-HMDS data farm connection is broken:euhmds2-
# 4-2--1-2106-HMDS data farm connection is OK:euhmds2-
if(msg[3] == "2105") {return('BREAKBREAK')}
# An error that requires re-connection is Error 420. (Happens when we request data
# from two different ip addresses, i.e. at home and at work)
# 4-2-78046390-420-Invalid Real-time Query:Trading TWS session is connected from a different IP address-
if(msg[3] == "420") {return('BREAKBREAK')}
}
## Hand the fully configured wrapper back to the caller.
return(eW)
}
## our custimized twsCALLBACK. only change is that it allows us to exit from the otherwise
## infinite while(TRUE) loop.
## Customized twsCALLBACK message loop for the IB data feed.
##
## Differences from the stock IBrokers callback:
##  * polls the socket with a 1-second timeout so that
##    eWrapper$makeDataRowIfTime() runs regularly even when no market-data
##    messages arrive, and
##  * exits the otherwise infinite loop when a handler returns the sentinel
##    string 'BREAKBREAK' (emitted by the errorMessage handler on
##    connection-loss errors 2105/420).
##
## @param twsCon    twsConnection object; twsCon[[1]] is the raw socket.
## @param eWrapper  eWrapper closure providing the message handlers; a
##                  default one is created when missing.
## @param timestamp strftime format string for message timestamps, or NULL.
## @param file      passed through to processMsg() (log destination).
## @param playback  unused here; playback connections are rejected outright.
## @param ...       forwarded to processMsg().
twsCALLBACKdatafeed <- function(twsCon, eWrapper, timestamp, file, playback=1, ...)
{
  if (missing(eWrapper))
    eWrapper <- eWrapper()
  con <- twsCon[[1]]
  if (inherits(twsCon, 'twsPlayback')) {
    stop('Playback not supported')
  }
  while (TRUE) {
    ## BUGFIX: reset per iteration.  Previously `res` was only assigned
    ## inside the socket branch, so a timeout on the very first iteration
    ## raised "object 'res' not found", and a stale result could leak
    ## across later timeout iterations.
    res <- NULL
    if (socketSelect(list(con), FALSE, timeout = 1L)) {  # wait at most 1 sec
      ## Public readBin() instead of the non-API .Internal() call; the
      ## extra size/signed/swap arguments are irrelevant for "character".
      curMsg <- readBin(con, "character", 1L)
      nowtime <- Sys.time()
      if (!is.null(timestamp)) {
        res <- processMsg(curMsg, con, eWrapper, format(nowtime, timestamp), file, twsCon, ...)
      } else {
        res <- processMsg(curMsg, con, eWrapper, timestamp, file, twsCon, ...)
      }
    } else {
      nowtime <- Sys.time()
    }
    ## Always give the wrapper a chance to emit a new data row on the
    ## aggregation grid, even when no message was received.
    eWrapper$makeDataRowIfTime(nowtime)
    ## identical() keeps the condition length-1: processMsg() may return a
    ## multi-element vector (the tick handlers return c(curMsg, msg)),
    ## which would make a vectorized `==` invalid inside if().
    if (identical(as.character(res), 'BREAKBREAK')) break
  }
}
| /IBeWrapper.Mktdata.SHARED.MULTISYMBOL100.r | no_license | parthasen/datafeedMKT-pub | R | false | false | 15,065 | r | #Contracts has to be a list(..) of twsContracts
#IBeWrapper.Mktdata.SHARED.MULTISYMBOL <- function(Contracts=list(), ShareFiles=list(), Aggregation=list(), ShareFilesWipe=FALSE, xupdate.mat=NULL) {
## ---------------------------------------------------------------------------
## Build an IBrokers eWrapper whose tick handlers aggregate live TWS market
## data into fixed-size time bars and publish them through memory-mapped
## files (package 'mmap'), so that reader processes can follow the feed.
##
## Arguments:
##   Contracts      list of contract definitions.  An element that is itself
##                  a list() is handled as a single-symbol contract; any
##                  other element is a multi-symbol stream definition
##                  (at most 100 symbols per stream, enforced below).
##   ShareFiles     one backing-file path per element of Contracts.
##   Aggregation    bar size in whole seconds (integer > 1), shared by all
##                  contracts (enforced by the stopifnot below).
##   ShareFilesWipe if TRUE, (re)create the backing files and reset their
##                  contents to NA.
##   xupdate.mat    routing matrix from symbol id to stream id; the columns
##                  are read as (source id, target stream id, slot offset,
##                  target size) -- TODO confirm against the caller that
##                  builds this matrix.
##
## Returns: the configured eWrapper closure (see return(eW) at the bottom).
##
## NOTE(review): relies on IBrokers (eWrapper, .twsTickType), xts, mmap and
## the project helper IBmakeVarlengthStruct() defined elsewhere in the repo.
## ---------------------------------------------------------------------------
IBeWrapper.Mktdata.SHARED.MULTISYMBOL <- function(Contracts=list(), ShareFiles=list(), Aggregation=5L, ShareFilesWipe=FALSE, xupdate.mat=NULL) {
verbose <- 0 # only for init
eW <- eWrapper(NULL)
if (!(length(Contracts)>0)) stop('Contract list cannot be empty')
n<-length(Contracts)
stopifnot( (is.integer(Aggregation) & (1<Aggregation)) ) #barsize (in seconds) is the same for all contracts!
#Initalize n different mmap xts structures type'double' on disk
# c("BidSize", "BidPrice", "AskPrice", "AskSize", "Last", "LastSize", "Volume","Open","High","Low")
#Set number of rows and calculate xts size in bytes (on disk)
numrows<-18000 # set to 18000 ~= 17280 * 5sec to cover 24 hours. The rest is safety padding
#Create disk files and mappings
numcontracts <- 0
numstreams <- 0
mmappings<-list()
## Pass over the contract list: size a backing file per entry, create it if
## missing (or when wiping), and mmap() it with a struct laid out as one
## timestamp slot plus 10 doubles per symbol.
for (id in 1:n) {
if (is.list(Contracts[[id]])) { # symbol contract == list()
numcontracts <- numcontracts + 1
#~ tmpx <- xts(matrix(data=NA_real_, nrow=numrows, ncol=10), Sys.time()+1:numrows)
#~ sizeinbytes<-length(coredata(tmpx)) * nbytes(struct(double())) + length(.index(tmpx)) * nbytes(struct(double()))
#~ rm(tmpx)
#~ #filename: SYMBOL-EXCHANGE-mktdata.bin
#~ #tmpfname <- paste(ShareDir,'/',Contracts[[id]]$symbol,'-', Contracts[[id]]$exch,'-mktdata.bin',sep='')
#~ tmpfname <- ShareFiles[[id]]
#~ if (!file.exists(tmpfname) | ShareFilesWipe) { writeBin(raw(sizeinbytes),tmpfname) }
#~ mmappings[[id]] <- mmap(tmpfname, struct(timestamp=double(), BidSize=double(), BidPrice=double(), AskPrice=double(), AskSize=double(), Last=double(), LastSize=double(), Volume=double(), Open=double(), High=double(), Low=double() ))
#~ if (ShareFilesWipe) {
#~ # Initialize values - brute
#~ mmappings[[id]][,1] <- NA
#~ mmappings[[id]][,2] <- NA
#~ mmappings[[id]][,3] <- NA
#~ mmappings[[id]][,4] <- NA
#~ mmappings[[id]][,5] <- NA
#~ mmappings[[id]][,6] <- NA
#~ mmappings[[id]][,7] <- NA
#~ mmappings[[id]][,8] <- NA
#~ mmappings[[id]][,9] <- NA
#~ mmappings[[id]][,10] <- NA
#~ mmappings[[id]][,11] <- NA
#~ ## Write full xts data by column .....
#~ #m[,1] <- .index(x)
#~ #m[,2] <- coredata(x)[,1]
#~ #m[,3] <- coredata(x)[,2]
#~ #m[,4] <- coredata(x)[,3]
#~ #m[,5] <- coredata(x)[,4]
#~ # maybe it would be good to have some kind of consistency check on the
#~ # client side, where we compare the write/read data to an xts stored in
#~ # a .rdata file ??
#~ }
} else { # multi symbol definition , including single symbols!
if (length(Contracts[[id]])>100) stop('more than 100 symbols in a multisymbol stream are not supported yet.')
numstreams <- numstreams + 1
numcolumns <- 10*length(Contracts[[id]])
## A throwaway xts of the target shape is built only to compute the byte
## size the backing file must have; it is removed immediately after.
tmpx <- xts(matrix(data=NA_real_, nrow=numrows, ncol=numcolumns), Sys.time()+1:numrows)
sizeinbytes<-length(coredata(tmpx)) * nbytes(struct(double())) + length(.index(tmpx)) * nbytes(struct(double()))
rm(tmpx)
#filename: 43274Fsdrc.bin
tmpfname <- ShareFiles[[id]]
if (!file.exists(tmpfname) | ShareFilesWipe) { writeBin(raw(sizeinbytes),tmpfname) }
#all.LABELS <- c('BidSize', 'BidPrice', 'AskPrice', 'AskSize', 'Last', 'LastSize', 'Volume', 'Open', 'High', 'Low' )
# Make a struct() of the required length
numsymbols <- length(Contracts[[id]])
atom.lst=c(
list(double()) #timestamp
,rep(
#BIG# 1.37 Mb per symbol 100% size. double() == real64()
list(double(), double() ,double() ,double() ,double() ,double() ,double() ,double() ,double() ,double() )
#OK# 1.0 Mb per symbol 80% sixe
#list(int32() ,real64() ,real64() ,int32() ,real64() ,int32() ,real64() ,real64() ,real64() , int32() )
#TIGHT?# 0.6 Mb per symbol 45% size
#list(uint24() ,real32() ,real32() ,uint24() ,real32() ,uint24() ,real32() ,real32() ,real32() , uint24() )
, numsymbols)
)
ss <- IBmakeVarlengthStruct(atom.lst, 1L)
stopifnot(is.struct(ss))
if (verbose>0) cat('length of struct:',length(ss),'\n')
# Create the mapping
mmappings[[id]] <- mmap(tmpfname, ss )
if (ShareFilesWipe) {
# Initialize values - brute
mmappings[[id]][,1] <- NA
for (icol in 2:(numcolumns+1)) mmappings[[id]][,icol] <- NA
## Write full xts data by column .....
#m[,1] <- .index(x)
#m[,2] <- coredata(x)[,1]
#m[,3] <- coredata(x)[,2]
#m[,4] <- coredata(x)[,3]
#m[,5] <- coredata(x)[,4]
# maybe it would be good to have some kind of consistency check on the
# client side, where we compare the write/read data to an xts stored in
# a .rdata file ??
}
}
}
#store mappings in closure
eW$assign.Data("mmappings",mmappings)
#store number of contracts, etc in closure
eW$assign.Data("numcontracts",numcontracts)
eW$assign.Data('xupdate.mat', xupdate.mat)
eW$assign.Data('numstreams', numstreams)
# Initialize in-memory data buffer
## One single-row xts per element of Contracts; the tick handlers below keep
## overwriting this row until makeDataRowIfTime() flushes it to shared memory.
eW$assign.Data("data", rep(list(structure(.xts(matrix(rep(NA_real_,
10), ncol = 10), 0), .Dimnames = list(NULL, c("BidSize",
"BidPrice", "AskPrice", "AskSize", "Last", "LastSize",
"Volume","Open","High","Low")))), n)) # instead of 'n' this should be 'numcontracts'
## Price-tick handler: overwrite the in-memory row of contract 'id'.
## Column positions follow the BidSize..Low dimnames declared above, and the
## row's xts index is bumped to "now" on every update.
eW$tickPrice <- function(curMsg, msg, timestamp, file, ...) {
tickType = msg[3]
msg <- as.numeric(msg)
id <- msg[2]
data <- eW$get.Data("data")
attr(data[[id]], "index") <- as.numeric(nowtime<-Sys.time())
nr.data <- NROW(data[[id]])
if (tickType == .twsTickType$BID) {
data[[id]][nr.data, 1:2] <- msg[5:4]
}
else if (tickType == .twsTickType$ASK) {
data[[id]][nr.data, 3:4] <- msg[4:5]
}
else if (tickType == .twsTickType$LAST) {
data[[id]][nr.data, 5] <- msg[4]
}
else if (tickType == .twsTickType$HIGH) {
data[[id]][nr.data, 9] <- msg[4]
}
else if (tickType == .twsTickType$LOW) {
data[[id]][nr.data, 10] <- msg[4]
}
else if (tickType == .twsTickType$OPEN) {
data[[id]][nr.data, 8] <- msg[4]
}
eW$assign.Data("data", data)
c(curMsg, msg)
#eW$makeDataRowIfTime()
}
## Size-tick handler: same single-row update scheme as tickPrice, but for
## the size/volume columns (1, 4, 6 and 7).
eW$tickSize <- function(curMsg, msg, timestamp, file, ...) {
data <- eW$get.Data("data")
tickType = msg[3]
msg <- as.numeric(msg)
id <- as.numeric(msg[2])
attr(data[[id]], "index") <- as.numeric(nowtime<-Sys.time())
nr.data <- NROW(data[[id]])
if (tickType == .twsTickType$BID_SIZE) {
data[[id]][nr.data, 1] <- msg[4]
}
else if (tickType == .twsTickType$ASK_SIZE) {
data[[id]][nr.data, 4] <- msg[4]
}
else if (tickType == .twsTickType$LAST_SIZE) {
data[[id]][nr.data, 6] <- msg[4]
}
else if (tickType == .twsTickType$VOLUME) {
data[[id]][nr.data, 7] <- msg[4]
}
eW$assign.Data("data", data)
c(curMsg, msg)
#?eW$makeDataRowIfTime()
}
eW$assign.Data("gridtimeoflastrow.int.sec", 0) #initialize
eW$assign.Data("barsize.int.sec", as.integer(Aggregation)) #initialize
eW$assign.Data("first.row.complete", FALSE) #initialize. Will be true after we have the first full row
# and only then we will start writing rows to shared mem.
## Called after every processed message (and on socket timeouts, see the
## companion twsCALLBACK): once per Aggregation-second grid slot, flush the
## in-memory per-contract rows into the memory-mapped multi-symbol streams.
## Nothing is written until every contract has produced one complete first
## row (columns 1..6 non-NA).
eW$makeDataRowIfTime <- function(nowtime) {
verbose <- 0 # 0: nothing, 1:basic, 2:all
gridtimeoflastrow.int.sec <- eW$get.Data("gridtimeoflastrow.int.sec")
barsize.int.sec <- eW$get.Data("barsize.int.sec")
first.row.complete <- eW$get.Data("first.row.complete")
if (!first.row.complete) { # Do not write rows until we have one complete first row for all id's (NAs in 7,8,9,10 allowed!)
data <- eW$get.Data("data")
numcontracts <- eW$get.Data("numcontracts")
ss <- sum(vv<-vapply(1:numcontracts, function(id) { as.numeric(any(is.na(data[[id]][1,1:6]))) },0)) #dont check 7,8,9,10
#cat(vv,'\n')
#if we still have NAs, return(), otherwise set to TRUE
if (ss) return() else {
eW$assign.Data("first.row.complete", TRUE)
cat('first.row.complete\n')
}
}
if ( (gridtime<-unclass(nowtime)%/%barsize.int.sec*barsize.int.sec) > gridtimeoflastrow.int.sec ) {
# Reset the timestamp
eW$assign.Data("gridtimeoflastrow.int.sec", gridtime)
#BEGIN - For all id's write the new row to shared memory as struct type 'double' with 11 columns.
data <- eW$get.Data("data")
mmappings <- eW$get.Data("mmappings")
#print(mmappings[[id]][,1])
# #get the ii index of the first free (NA) timestamp of m[], assuming that we do not have an extraction function??
# #Faster: we could store this index in the closure somewhere and just increment it,
# # instead of searching for it every time.
remembered.iinext <- NA
numcontracts <- eW$get.Data("numcontracts")
## For every single contract, copy its current row into each multi-symbol
## stream that subscribes to it according to xupdate.mat; the first free
## (NA-timestamp) row index of each target stream is cached per grid slot
## in remembered.iinext via the <<- assignment below.
vapply(1:numcontracts, function(id) {
#~ tstmps<-mmappings[[id]][,1]
#~ #print( is.na( tstmps[[1]][1] ) ) #first element
#~ iinext<-match(NA, tstmps[[1]] ) #Only works if we don't have leading NAs or interrupting NAs
#~ #print(iinext)
#~ if (is.na(iinext)) stop(paste('Fatal error. Shared memory/file buffer for id',id,'is full, or invalid NAs in data.'))
#~ mmappings[[id]][iinext,1] <- gridtime #.index(newbar)
#~ mmappings[[id]][iinext,2] <- data[[id]][1,1] #coredata(newbar)[1,1]
#~ mmappings[[id]][iinext,3] <- data[[id]][1,2] #coredata(newbar)[1,2]
#~ mmappings[[id]][iinext,4] <- data[[id]][1,3] #coredata(newbar)[1,3]
#~ mmappings[[id]][iinext,5] <- data[[id]][1,4] #coredata(newbar)[1,4]
#~ mmappings[[id]][iinext,6] <- data[[id]][1,5] #coredata(newbar)[1,5]
#~ mmappings[[id]][iinext,7] <- data[[id]][1,6] #coredata(newbar)[1,6] #NA
#~ mmappings[[id]][iinext,8] <- data[[id]][1,7] #coredata(newbar)[1,7] #NA
#~ mmappings[[id]][iinext,9] <- data[[id]][1,8] #coredata(newbar)[1,8] #NA
#~ mmappings[[id]][iinext,10] <- data[[id]][1,9] #coredata(newbar)[1,9] #NA
#~ mmappings[[id]][iinext,11] <- data[[id]][1,10] #coredata(newbar)[1,10] #NA
#~ #cat('data: ',unlist(data[[id]]),'\n')
#~ cat(id,' mmappings[[id]][iinext,] ',iinext,' :',unlist(mmappings[[id]][iinext,]),'\n')
# BEGIN - xupdate all multisymbol streams that contain this symbol
xupdate.mat <- eW$get.Data("xupdate.mat")
#for (xu.idx in which(xupdate.mat[,1]==id)) {
vapply( which(xupdate.mat[,1]==id) ,function(xu.idx) {
target.id <- xupdate.mat[xu.idx,2]
offset.num <- xupdate.mat[xu.idx,3]
target.size <- xupdate.mat[xu.idx,4]
#got a full multisymbol row!
#WRITE multi* to SHMEM FILE
if (is.na(iinext <- remembered.iinext[target.id])) {
tstmps<-mmappings[[target.id]][,1]
iinext<-match(NA, tstmps[[1]] ) #Only works if we don't have leading NAs or interrupting NAs
if (is.na(iinext)) stop(paste('Fatal error. Shared memory/file buffer for id',id,'is full, or invalid NAs in data.'))
remembered.iinext[target.id] <<- iinext # <<- PARENT, two levels up
}
mmappings[[target.id]][iinext,1] <- gridtime #either here, with 'remembered.' or do this last for all streams. see sapply below
ixstart <- 2+(offset.num-1)*ncol(data[[id]])
ixend <- ixstart + ncol(data[[id]])-1
if (verbose>1) {
cat('data: ',data[[id]],'\n')
cat('Writing data to target stream',target.id,' at ixstart:',ixstart,' ixend:', ixend, '\n')
}
#mmappings[[target.id]][iinext, ixstart:ixend ] <- data[[id]] # Doesnt work like this. need to split it up.
vapply( ixstart:ixend, function(ix) {
mmappings[[target.id]][iinext, ix ] <- data[[id]][1,ix-ixstart+1]
1
},0)
if (verbose>1) {
cat('Stream',target.id,':', unlist( mmappings[[target.id]][iinext,]) ,'\n')
}
1
},0)
# END - xupdate all multisymbol streams that contain this symbol
1
},0)
#
if (verbose>1) cat('remembered.iinext: ',remembered.iinext,'\n')
#
if (verbose>0) {
numstreams <- eW$get.Data("numstreams")
vapply((numcontracts+1):(numcontracts+numstreams), function(stream.id) {
iithis <- remembered.iinext[stream.id]
#mmappings[[stream.id]][iithis,1]
cat('Final Stream',stream.id,':', unlist( mmappings[[stream.id]][iithis,] ) ,'\n')
1
},0)
}
#END - Write the newbar to shared memory
}
}
#Define the error function
## Tracks TWS connectivity (1100 lost / 1101,1102 restored) and returns the
## sentinel 'BREAKBREAK' for errors 2105/420, which the companion callback
## loop uses as its exit signal.
eW$errorMessage <- function(curMsg, msg, timestamp, file, twsconn, ...)
{
if(msg[3] == "1100") {
twsconn$connected <- FALSE
## Do not disconnect, since the TWS may reconnect, with a 1101, 1102 and
## we still want to be around when that happens!!!
#twsDisconnect(twsconn) #Soren added
#stop(paste("Shutting down: TWS Message:",msg[4])) #Soren added
}
if(msg[3] %in% c("1101","1102")) twsconn$connected <- TRUE
cat(as.character(Sys.time()),"TWS Message:",msg,"\n")
# It looks like the 2105 (lost connection) Error and a subsequent 2106 (connection ok)
# requires us to re-issue the request for marketdata !!!
# 4-2--1-2105-HMDS data farm connection is broken:euhmds2-
# 4-2--1-2106-HMDS data farm connection is OK:euhmds2-
if(msg[3] == "2105") {return('BREAKBREAK')}
# An error that requires re-connection is Error 420. (Happens when we request data
# from two different ip addresses, i.e. at home and at work)
# 4-2-78046390-420-Invalid Real-time Query:Trading TWS session is connected from a different IP address-
if(msg[3] == "420") {return('BREAKBREAK')}
}
## Hand the fully configured wrapper back to the caller.
return(eW)
}
## our custimized twsCALLBACK. only change is that it allows us to exit from the otherwise
## infinite while(TRUE) loop.
## Customized twsCALLBACK message loop for the IB data feed.
##
## Differences from the stock IBrokers callback:
##  * polls the socket with a 1-second timeout so that
##    eWrapper$makeDataRowIfTime() runs regularly even when no market-data
##    messages arrive, and
##  * exits the otherwise infinite loop when a handler returns the sentinel
##    string 'BREAKBREAK' (emitted by the errorMessage handler on
##    connection-loss errors 2105/420).
##
## @param twsCon    twsConnection object; twsCon[[1]] is the raw socket.
## @param eWrapper  eWrapper closure providing the message handlers; a
##                  default one is created when missing.
## @param timestamp strftime format string for message timestamps, or NULL.
## @param file      passed through to processMsg() (log destination).
## @param playback  unused here; playback connections are rejected outright.
## @param ...       forwarded to processMsg().
twsCALLBACKdatafeed <- function(twsCon, eWrapper, timestamp, file, playback=1, ...)
{
  if (missing(eWrapper))
    eWrapper <- eWrapper()
  con <- twsCon[[1]]
  if (inherits(twsCon, 'twsPlayback')) {
    stop('Playback not supported')
  }
  while (TRUE) {
    ## BUGFIX: reset per iteration.  Previously `res` was only assigned
    ## inside the socket branch, so a timeout on the very first iteration
    ## raised "object 'res' not found", and a stale result could leak
    ## across later timeout iterations.
    res <- NULL
    if (socketSelect(list(con), FALSE, timeout = 1L)) {  # wait at most 1 sec
      ## Public readBin() instead of the non-API .Internal() call; the
      ## extra size/signed/swap arguments are irrelevant for "character".
      curMsg <- readBin(con, "character", 1L)
      nowtime <- Sys.time()
      if (!is.null(timestamp)) {
        res <- processMsg(curMsg, con, eWrapper, format(nowtime, timestamp), file, twsCon, ...)
      } else {
        res <- processMsg(curMsg, con, eWrapper, timestamp, file, twsCon, ...)
      }
    } else {
      nowtime <- Sys.time()
    }
    ## Always give the wrapper a chance to emit a new data row on the
    ## aggregation grid, even when no message was received.
    eWrapper$makeDataRowIfTime(nowtime)
    ## identical() keeps the condition length-1: processMsg() may return a
    ## multi-element vector (the tick handlers return c(curMsg, msg)),
    ## which would make a vectorized `==` invalid inside if().
    if (identical(as.character(res), 'BREAKBREAK')) break
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keep_na.R
\name{keep_na}
\alias{keep_na}
\title{Keep rows containing missing values}
\usage{
keep_na(.data, ...)
}
\arguments{
\item{.data}{A data frame.}
\item{...}{A selection of columns. If empty, all columns are selected.}
}
\description{
Keep rows containing missing values
}
| /man/keep_na.Rd | permissive | han-tun/hacksaw | R | false | true | 360 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keep_na.R
\name{keep_na}
\alias{keep_na}
\title{Keep rows containing missing values}
\usage{
keep_na(.data, ...)
}
\arguments{
\item{.data}{A data frame.}
\item{...}{A selection of columns. If empty, all columns are selected.}
}
\description{
Keep rows containing missing values
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/issue.r
\name{publish_issue}
\alias{publish_issue}
\title{Publish an issue}
\usage{
publish_issue(
id,
web_path = file.path("..", "rjournal.github.io"),
post_file = c(foundation = 1, cran = 1, bioc = 1, ch = 1)
)
}
\arguments{
\item{id}{the id of the issue}
\item{web_path}{path to the rjournal.github.io checkout}
\item{post_file}{named numeric vector controlling the post files; defaults
to \code{c(foundation = 1, cran = 1, bioc = 1, ch = 1)}}
}
\description{
Generates per-article PDFs and copies them to the website, located
at \code{web_path}. Removes the published articles from the
accepted directory. Generates the necessary metadata and updates
the website configuration.
}
\details{
This depends on the pdftools CRAN package, which in turn depends on
the poppler system library. It also requires the command line
program pdftk (distributed as PDFtk Server).
}
| /man/publish_issue.Rd | no_license | AlgoSkyNet/rj | R | false | true | 824 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/issue.r
\name{publish_issue}
\alias{publish_issue}
\title{Publish an issue}
\usage{
publish_issue(
id,
web_path = file.path("..", "rjournal.github.io"),
post_file = c(foundation = 1, cran = 1, bioc = 1, ch = 1)
)
}
\arguments{
\item{id}{the id of the issue}
\item{web_path}{path to the rjournal.github.io checkout}
}
\description{
Generates per-article PDFs and copies them to the website, located
at \code{web_path}. Removes the published articles from the
accepted directory. Generates the necessary metadata and updates
the website configuration.
}
\details{
This depends on the pdftools CRAN package, which in turn depends on
the poppler system library. It also requires the command line
program pdftk (distributed as PDFtk Server).
}
|
# Ensembles
# Demonstrates a two-stage stacked ensemble with caret:
#   stage 1 trains three base learners (gbm, rpart, treebag),
#   stage 2 trains a gbm "blender" on the base learners' predictions.
library(caret)
names(getModelInfo())
# Read in the data (downloaded from GitHub; requires network access)
library(RCurl)
urlData = getURL('https://raw.githubusercontent.com/hadley/fueleconomy/master/data-raw/vehicles.csv')
vehicles = read.csv(text = urlData)
dim(vehicles)
View(vehicles)
str(vehicles)
# clean up the data and only use the first 24 columns
vehicles = vehicles[names(vehicles)[1:24]]
# Coerce every column to numeric (via character); non-numeric text becomes NA.
vehicles = data.frame(lapply(vehicles,as.character),stringsAsFactors = F)
vehicles = data.frame(lapply(vehicles,as.numeric))
vehicles[is.na(vehicles)] = 0
# Binary target: 1 when the vehicle has 6 cylinders, 0 otherwise.
vehicles$cylinders = ifelse(vehicles$cylinders == 6, 1, 0)
prop.table(table(vehicles$cylinders))
names(vehicles) # use all the variables to predict cylinders
# divide into ensemble/blender/testing
set.seed(1234)
vehicles = vehicles[sample(nrow(vehicles)),] # reorder the rows
split = floor(nrow(vehicles)/3)
# NOTE(review): 0:split relies on R silently dropping the 0 index, so this
# selects rows 1..split; seq_len(split) would be the explicit form.
ensembleData = vehicles[0:split,]
blenderData = vehicles[(split+1):(split*2),]
testingData = vehicles[(split*2+1):nrow(vehicles),]
labelName = "cylinders"
predictors = names(ensembleData)[names(ensembleData) != labelName]
predictors
# caret
# crossvalidate 3 times
# NOTE(review): `repeats` only applies to method = "repeatedcv"; with
# method = "cv" recent caret versions warn/ignore it.
myControl = trainControl(method = "cv", number = 3, repeats = 1, returnResamp = "none") # trainControl defines the best parameters for the model
# train the ensemble model
model_gbm = train(x = ensembleData[,predictors],y = ensembleData[,labelName], method = "gbm", trControl = myControl)
model_rpart = train(x = ensembleData[,predictors],y = ensembleData[,labelName], method = "rpart", trControl = myControl)
model_treebag = train(x = ensembleData[,predictors],y = ensembleData[,labelName], method = "treebag", trControl = myControl)
# Use these three models to predict the blender and testing datasets,
# adding the predictions as new columns.
# NOTE(review): the rf_PROB columns actually hold the rpart model's
# predictions; the name is misleading (kept as-is since the blender only
# depends on the names matching between blenderData and testingData).
blenderData$gbm_PROB = predict(object = model_gbm, blenderData[,predictors])
blenderData$rf_PROB = predict(object = model_rpart, blenderData[,predictors])
blenderData$treebag_PROB = predict(object = model_treebag, blenderData[,predictors])
testingData$gbm_PROB = predict(object = model_gbm, testingData[,predictors])
testingData$rf_PROB = predict(object = model_rpart, testingData[,predictors])
testingData$treebag_PROB = predict(object = model_treebag, testingData[,predictors])
# Stage 2: the blender trains on the original features plus the three
# base-learner prediction columns.
predictors = names(blenderData)[names(blenderData) != labelName]
final_blender_model = train(blenderData[,predictors],blenderData[,labelName],method = 'gbm',trControl = myControl)
preds = predict(object = final_blender_model, testingData[,predictors])
library(pROC)
# Evaluate the stacked model on the held-out testing split.
auc = roc(testingData[,labelName],preds)
auc # 0.9949
| /caret/Ensembles.R | no_license | ChanningC12/Machine-Learning-with-R | R | false | false | 2,576 | r | # Ensembles
library(caret)
names(getModelInfo())
# Read in the data
library(RCurl)
urlData = getURL('https://raw.githubusercontent.com/hadley/fueleconomy/master/data-raw/vehicles.csv')
vehicles = read.csv(text = urlData)
dim(vehicles)
View(vehicles)
str(vehicles)
# clean up the data and only use the first 24 columns
vehicles = vehicles[names(vehicles)[1:24]]
vehicles = data.frame(lapply(vehicles,as.character),stringsAsFactors = F)
vehicles = data.frame(lapply(vehicles,as.numeric))
vehicles[is.na(vehicles)] = 0
vehicles$cylinders = ifelse(vehicles$cylinders == 6, 1, 0)
prop.table(table(vehicles$cylinders))
names(vehicles) # use all the variables to predict cylinders
# divide into ensemble/blender/testing
set.seed(1234)
vehicles = vehicles[sample(nrow(vehicles)),] # reorder the rows
split = floor(nrow(vehicles)/3)
ensembleData = vehicles[0:split,]
blenderData = vehicles[(split+1):(split*2),]
testingData = vehicles[(split*2+1):nrow(vehicles),]
labelName = "cylinders"
predictors = names(ensembleData)[names(ensembleData) != labelName]
predictors
# caret
# crossvalidate 3 times
myControl = trainControl(method = "cv", number = 3, repeats = 1, returnResamp = "none") # trainControl defines the best parameters for the model
# train the ensemble model
model_gbm = train(x = ensembleData[,predictors],y = ensembleData[,labelName], method = "gbm", trControl = myControl)
model_rpart = train(x = ensembleData[,predictors],y = ensembleData[,labelName], method = "rpart", trControl = myControl)
model_treebag = train(x = ensembleData[,predictors],y = ensembleData[,labelName], method = "treebag", trControl = myControl)
# use these three models to predict belender dataset and testing dataset, added probablities predicted by the model as columns
blenderData$gbm_PROB = predict(object = model_gbm, blenderData[,predictors])
blenderData$rf_PROB = predict(object = model_rpart, blenderData[,predictors])
blenderData$treebag_PROB = predict(object = model_treebag, blenderData[,predictors])
testingData$gbm_PROB = predict(object = model_gbm, testingData[,predictors])
testingData$rf_PROB = predict(object = model_rpart, testingData[,predictors])
testingData$treebag_PROB = predict(object = model_treebag, testingData[,predictors])
predictors = names(blenderData)[names(blenderData) != labelName]
final_blender_model = train(blenderData[,predictors],blenderData[,labelName],method = 'gbm',trControl = myControl)
preds = predict(object = final_blender_model, testingData[,predictors])
library(pROC)
auc = roc(testingData[,labelName],preds)
auc # 0.9949
|
## Plot 2 :
# Combine the separate Date and Time columns into one POSIXlt timestamp.
Energy_dates$DateTime <- strptime(paste(as.character(Energy_dates$Date), Energy_dates$Time), format = "%Y-%m-%d %H:%M:%S")
# Line plot of global active power over time (base graphics on screen device).
plot(Energy_dates$DateTime, as.numeric(as.character(Energy_dates$Global_active_power)), type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
# Copy the current screen device to a 480x480 PNG file.
dev.copy(png, file ="plot2.png", width = 480, height = 480)
dev.off() | /Plot2.R | no_license | DoanTrangNguyen/ExData_Plotting1 | R | false | false | 360 | r | ## Plot 2 :
Energy_dates$DateTime <- strptime(paste(as.character(Energy_dates$Date), Energy_dates$Time), format = "%Y-%m-%d %H:%M:%S")
plot(Energy_dates$DateTime, as.numeric(as.character(Energy_dates$Global_active_power)), type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.copy(png, file ="plot2.png", width = 480, height = 480)
dev.off() |
/erster Versuch SA und LDA.R | no_license | FlorianW13/Justhjurathings | R | false | false | 5,195 | r | ||
## User-supplied distance function for secr.fit (plugged in via
## details$userdist by anisotropic.fit below). Transforms both coordinate
## sets with geoR's anisotropy correction and returns the matrix of
## Euclidean distances between the transformed points.
anisodistfn <- function (xy1, xy2, mask) {
    # Called with no arguments this returns character(0); presumably secr
    # probes userdist functions this way for required covariate names --
    # TODO confirm against the secr userdist documentation.
    if (missing(xy1)) return(character(0))
    if (!requireNamespace("geoR")) stop ("aniso requires geoR that is unavailable")
    xy1 <- as.matrix(xy1)
    xy2 <- as.matrix(xy2)
    # The mask's 'miscparm' attribute carries the two extra coefficients
    # on their link scales (set up in anisotropic.fit).
    miscparm <- attr(mask, 'miscparm')
    psiA <- miscparm[1] # anisotropy angle; identity link
    psiR <- 1 + exp(miscparm[2]) # anisotropy ratio; log link
    aniso.xy1 <- geoR::coords.aniso(xy1, aniso.pars = c(psiA, psiR))
    aniso.xy2 <- geoR::coords.aniso(xy2, aniso.pars = c(psiA, psiR))
    secr::edist(aniso.xy1, aniso.xy2) # nrow(xy1) x nrow(xy2) matrix
}
## Extract the anisotropy parameters (psiA, psiR) from a fitted model and
## back-transform them from their link scales: psiA uses an identity link
## (optionally converted to degrees), psiR uses psiR = 1 + exp(beta).
## Returns a 2-row matrix with columns 'estimate' and 'SE.estimate'.
predictAniso <- function (fit, angle = c("degrees", "radians")) {
    angle <- match.arg(angle)
    co <- coef(fit)
    # if (!all( c("psiA", "psiR") %in% rownames(co)))
    #     stop ("input is not anisotropic.fit")
    pred <- co[c("psiA", "psiR"), ]
    rownames(pred) <- c("psiA", "psiR")
    colnames(pred)[1:2] <- c("estimate", "SE.estimate")
    # If either beta was fixed rather than estimated, substitute the fixed
    # value (fixed betas are stored after the modelled parameters).
    if (!is.null(fit$details$fixedbeta)) {
        fb <- fit$details$fixedbeta
        fixed <- fb[max(unlist(fit$parindx)) + 1:2]
        pred[!is.na(fixed), 1] <- fixed[!is.na(fixed)]
    }
    # Angle conversion rescales the whole row (estimate and SE alike).
    if (angle == "degrees")
        pred["psiA", ] <- pred["psiA", ] * 360 / 2 / pi
    pred["psiR", ] <- 1 + exp(pred["psiR", ])
    beta <- co["psiR","beta"]
    sebeta <- co["psiR","SE.beta"]
    # SE of psiR on the natural scale; this is the SD of a lognormal with
    # log-mean beta and log-sd sebeta (~ exp(beta)*sebeta for small sebeta).
    # NOTE(review): confirm this matches the intended delta-method SE.
    pred["psiR", "SE.estimate"] <- exp(beta) * sqrt(exp(sebeta^2)-1)
    pred
}
## Wrapper around secr.fit() that plugs in the anisotropic user-distance
## function and the starting values for its two extra parameters, then
## strips the bulky evaluated call from the returned fit.
## psiA is on an identity link; psiR is stored on a log link so that
## psiR = 1 + exp(beta) (see anisodistfn).
anisotropic.fit <- function (..., psiA = pi/4, psiR = 2) {
    fit.args <- list(...)
    if (is.null(fit.args$details)) {
        fit.args$details <- list()
    }
    fit.args$details$userdist <- anisodistfn
    fit.args$details$miscparm <- c(psiA = psiA, psiR = log(psiR - 1))
    result <- do.call("secr.fit", fit.args)
    result$call <- NULL  ## drop bulky evaluated call
    result
}
if (missing(xy1)) return(character(0))
if (!requireNamespace("geoR")) stop ("aniso requires geoR that is unavailable")
xy1 <- as.matrix(xy1)
xy2 <- as.matrix(xy2)
miscparm <- attr(mask, 'miscparm')
psiA <- miscparm[1] # anisotropy angle; identity link
psiR <- 1 + exp(miscparm[2]) # anisotropy ratio; log link
aniso.xy1 <- geoR::coords.aniso(xy1, aniso.pars = c(psiA, psiR))
aniso.xy2 <- geoR::coords.aniso(xy2, aniso.pars = c(psiA, psiR))
secr::edist(aniso.xy1, aniso.xy2) # nrow(xy1) x nrow(xy2) matrix
}
predictAniso <- function (fit, angle = c("degrees", "radians")) {
angle <- match.arg(angle)
co <- coef(fit)
# if (!all( c("psiA", "psiR") %in% rownames(co)))
# stop ("input is not anisotropic.fit")
pred <- co[c("psiA", "psiR"), ]
rownames(pred) <- c("psiA", "psiR")
colnames(pred)[1:2] <- c("estimate", "SE.estimate")
if (!is.null(fit$details$fixedbeta)) {
fb <- fit$details$fixedbeta
fixed <- fb[max(unlist(fit$parindx)) + 1:2]
pred[!is.na(fixed), 1] <- fixed[!is.na(fixed)]
}
if (angle == "degrees")
pred["psiA", ] <- pred["psiA", ] * 360 / 2 / pi
pred["psiR", ] <- 1 + exp(pred["psiR", ])
beta <- co["psiR","beta"]
sebeta <- co["psiR","SE.beta"]
pred["psiR", "SE.estimate"] <- exp(beta) * sqrt(exp(sebeta^2)-1)
pred
}
anisotropic.fit <- function (..., psiA = pi/4, psiR = 2) {
args <- list(...)
if (is.null(args$details))
args$details <- vector('list')
args$details$userdist <- anisodistfn
args$details$miscparm <- c(psiA = psiA, psiR = log(psiR-1))
tmp <- do.call("secr.fit", args)
tmp$call <- NULL ## drop bulky evaluated call
tmp
} |
# Fuzzer-generated regression input for
# CNull:::communities_individual_based_sampling_alpha: replays one recorded
# argument list (m = NULL, repetitions = 0, and an 8x3 matrix whose first
# four entries are extreme doubles) and prints the structure of the result.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 8062694171891.13, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
# Read in the data (year-end S&P 500 closes; path is relative to the
# project root)
spx <- read.csv("data/sp_500_yearend.csv")
library(ggplot2)
# Scatter plot of closing value against year.
ggplot(spx,
       aes(x = Year, y = Close)) +
  geom_point()
# ggsave() writes the most recently displayed plot to disk.
ggsave("results/sp_500_year_close.png")
| /scripts/plot_yr_close.R | no_license | riktidokkas/TestRepo2 | R | false | false | 183 | r | # Read in the data
spx <- read.csv("data/sp_500_yearend.csv")
library(ggplot2)
ggplot(spx,
aes(x = Year, y = Close)) +
geom_point()
ggsave("results/sp_500_year_close.png")
|
library(tidyverse)
library(DBI)
#####################################################
#given a box score or season totals, compute advanced stats
## Fetch one game's metadata plus both teams' box scores, appending a
## 'Totals' row to each box. The game is identified either by game_id or
## by the (hometeam, awayteam, dt) triple. Relies on the globals `games`,
## `homes`, `aways` (dplyr tbl handles created further down in this file).
## Returns list(gamedata, homebox, awaybox).
collect_box <-
  function(hometeam = NULL,
           awayteam = NULL,
           dt = NULL,
           game_id = NULL) {
    ## Find the game in the game database.
    if (is.null(game_id)) {
      game <- games %>%
        filter(home == hometeam) %>%
        filter(away == awayteam) %>%
        filter(date == dt) %>%
        collect()
    } else {
      game <- games %>% filter(gameid == game_id) %>% collect()
    }
    ## Build the 'Totals' row for one team's box. Column positions 4:6,
    ## 8:9, 11:12, 14:15 and 17:25 hold the counting stats; the shooting
    ## percentages are recomputed from the column sums.
    totals_row <- function(box) {
      c(
        game$gameid[1],
        box$team[1],
        'Totals',
        colSums(box[, 4:6]),
        round(sum(box$FG) / sum(box$FGA), 4),
        colSums(box[, 8:9]),
        round(sum(box$`X2P`) / sum(box$`X2PA`), 4),
        colSums(box[, 11:12]),
        round(sum(box$`X3P`) / sum(box$`X3PA`), 4),
        colSums(box[, 14:15]),
        round(sum(box$FT) / sum(box$FTA), 4),
        colSums(box[, 17:25])
      )
    }
    ## Home team box plus totals.
    homebox <- homes %>%
      filter(gameid == (game$gameid)) %>%
      collect() %>%
      data.frame()
    homebox <- rbind(homebox, totals_row(homebox))
    ## Away team box plus totals.
    awaybox <- aways %>%
      filter(gameid == game$gameid) %>%
      collect() %>%
      data.frame()
    awaybox <- rbind(awaybox, totals_row(awaybox))
    list(
      gamedata = game,
      homebox = homebox,
      awaybox = awaybox
    )
  }
#####################################################
#####################################################
#collect all games for a team
## Collect every game a team played (home or away) and return
## list(boxes = player rows, Totals = team-total rows), each joined to the
## game metadata. Relies on the globals `homes`, `aways` and on collect_box().
collect_team <- function(tm = NULL) {
  if (!is.null(tm)) {
    home_ids <- homes %>% filter(team == tm) %>%
      select(gid = gameid) %>% collect() %>% unique()
    away_ids <- aways %>% filter(team == tm) %>%
      select(gid = gameid) %>% collect() %>% unique()
    ab <- union_all(home_ids, away_ids)
  }
  ## One collect_box() result per game id.
  gamelist <- map(ab$gid, function(gid) collect_box(game_id = gid))
  ## NOTE(review): pluck('homebox') looks up the element named 'homebox' of
  ## the *outer* (unnamed) list; confirm map(gamelist, 'homebox') was not
  ## the intended per-game extraction. Preserved as in the original.
  home_rows <- gamelist %>% pluck('homebox') %>% bind_rows
  away_rows <- gamelist %>% pluck('awaybox') %>% bind_rows
  meta_rows <- gamelist %>% pluck('gamedata') %>% bind_rows
  gamedf <- union_all(home_rows, away_rows) %>% left_join(meta_rows)
  list(boxes = gamedf %>% filter(Name != 'Totals'),
       Totals = gamedf %>% filter(Name == 'Totals'))
}
#####################################################
#####################################################
## Per-game rows, season totals, and per-game averages for one player on
## team `tm`. Each game row is joined to the opponent's name (taken from the
## other team's 'Totals' row for that game). Returns
## list(games, totals, averages).
collect_player <- function(plyr, tm) {
  # Original bound this to `abs`, shadowing base::abs; renamed here.
  team_games <- collect_team(tm)
  ## Opponent lookup table: the 'Totals' row of the other team per game.
  opponents <- team_games$Totals %>%
    filter(team != tm) %>%
    mutate(opponent = team) %>%
    select(gameid, opponent) %>%
    type_convert
  player_games <- team_games$boxes %>%
    filter(Name == plyr) %>%
    select(-Name) %>%
    type_convert %>%
    left_join(opponents, by = c('gameid'))
  ## Season totals: sum every numeric column, then recompute percentages.
  player_totals <- player_games %>%
    select(-gameid) %>%
    mutate(GP = 1) %>%
    group_by(team) %>%
    summarise_if(is.numeric, sum) %>%
    mutate(
      FG. = FG / FGA,
      X2P. = X2P / X2PA,
      X3P. = X3P / X3PA,
      FT. = FT / FTA
    )
  ## Per-game averages: divide the totals by the number of games, but keep
  ## the shooting percentages as ratios of the averaged counts.
  n_games <- nrow(player_games)
  player_averages <- player_totals %>%
    mutate_if(is.numeric, function(v) v / n_games) %>%
    mutate(
      FG. = round(FG / FGA, 2),
      X2P. = round(X2P / X2PA, 2),
      X3P. = round(X3P / X3PA, 2),
      FT. = round(FT / FTA, 2)
    )
  return(list(
    games = player_games,
    totals = player_totals,
    averages = player_averages
  ))
}
#####################################################
#test it out down here
#connect to the dbs first
# NOTE(review): `mydb` (a DBI connection) must already exist in the session;
# its creation is not shown in this file -- confirm where it is opened.
homes <- tbl(mydb, 'homebox')
aways <- tbl(mydb, 'awaybox')
games <- tbl(mydb, 'games')
# Ad-hoc smoke tests of the three collectors above.
collect_box('Kansas', 'Texas', dt = '2019-01-14')
collect_box(game_id = '201811063136')
collect_team('Kansas')$Totals
collect_player('Zion Williamson', 'Duke')
| /collect_box.R | no_license | stharms/college_basketball | R | false | false | 4,201 | r | library(tidyverse)
library(DBI)
#####################################################
#given a box score or season totals, compute advanced stats
collect_box <-
function(hometeam = NULL,
awayteam = NULL,
dt = NULL,
game_id = NULL) {
#find the game in the game database
if (is.null(game_id)) {
game <- games %>% filter(home == hometeam) %>%
filter(away == awayteam) %>%
filter(date == dt) %>% collect()
}
if (!is.null(game_id)) {
game <- games %>% filter(gameid == game_id) %>% collect()
}
#collect home team box
homebox <-
homes %>% filter(gameid == (game$gameid)) %>% collect() %>% data.frame()
hometotals <-
c(
game$gameid[1],
homebox$team[1],
'Totals',
colSums(homebox[, 4:6]),
round(sum(homebox$FG) / sum(homebox$FGA), 4),
colSums(homebox[, 8:9]),
round(sum(homebox$`X2P`) / sum(homebox$`X2PA`), 4),
colSums(homebox[, 11:12]),
round(sum(homebox$`X3P`) / sum(homebox$`X3PA`), 4),
colSums(homebox[, 14:15]),
round(sum(homebox$FT) / sum(homebox$FTA), 4),
colSums(homebox[, 17:25])
)
homebox <- rbind(homebox, hometotals)
#collect away team box
awaybox <-
aways %>% filter(gameid == game$gameid) %>% collect() %>% data.frame()
awaytotals <-
c(
game$gameid[1],
awaybox$team[1],
'Totals',
colSums(awaybox[, 4:6]),
round(sum(awaybox$FG) / sum(awaybox$FGA), 4),
colSums(awaybox[, 8:9]),
round(sum(awaybox$`X2P`) / sum(awaybox$`X2PA`), 4),
colSums(awaybox[, 11:12]),
round(sum(awaybox$`X3P`) / sum(awaybox$`X3PA`), 4),
colSums(awaybox[, 14:15]),
round(sum(awaybox$FT) / sum(awaybox$FTA), 4),
colSums(awaybox[, 17:25])
)
awaybox <- rbind(awaybox, awaytotals)
#list and output
return(list(
gamedata = game,
homebox = homebox,
awaybox = awaybox
))
}
#####################################################
#####################################################
#collect all games for a team
collect_team <- function(tm = NULL) {
if (!is.null(tm)) {
ab <- homes %>% filter(team == tm) %>%
select(gid = gameid) %>% collect() %>% unique() %>%
union_all((
aways %>% filter(team == tm) %>%
select(gid = gameid) %>% collect() %>% unique()
))
}
gamelist <- ab$gid %>% map(function(x)
collect_box(game_id = x))
gamedf <- gamelist %>% pluck('homebox') %>% bind_rows %>%
union_all((gamelist %>% pluck('awaybox') %>% bind_rows)) %>% left_join((gamelist %>% pluck('gamedata') %>% bind_rows))
gamelist <-
list(boxes = (gamedf %>% filter(Name != 'Totals')),
Totals = (gamedf %>% filter(Name == 'Totals')))
return(gamelist)
}
#####################################################
#####################################################
collect_player <- function(plyr, tm) {
abs <- collect_team(tm)
filt <-
abs$boxes %>% filter(Name == plyr) %>% select(-Name) %>% type_convert %>%
left_join((
abs$Totals %>% filter(team != tm) %>%
mutate(opponent = team) %>% select(gameid, opponent) %>% type_convert
),
by = c('gameid'))
totals <-
filt %>% select(-gameid) %>% mutate(GP = 1) %>% group_by(team) %>%
summarise_if(is.numeric, sum) %>% mutate(
FG. = FG / FGA,
X2P. = X2P / X2PA,
X3P. = X3P / X3PA,
FT. = FT / FTA
)
averages <-
totals %>% mutate_if(is.numeric, function(x)
x / nrow(filt)) %>%
mutate(
FG. = round(FG / FGA, 2),
X2P. = round(X2P / X2PA, 2),
X3P. = round(X3P / X3PA, 2),
FT. = round(FT / FTA, 2)
)
return(list(
games = filt,
totals = totals,
averages = averages
))
}
#####################################################
#test it out down here
#connect to the dbs first
homes <- tbl(mydb, 'homebox')
aways <- tbl(mydb, 'awaybox')
games <- tbl(mydb, 'games')
collect_box('Kansas', 'Texas', dt = '2019-01-14')
collect_box(game_id = '201811063136')
collect_team('Kansas')$Totals
collect_player('Zion Williamson', 'Duke')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.