content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Generate community matrices (site-by-group abundance tables) for the
# BRUVs (baited) vs RUVs (unbaited) comparison, at four aggregation levels:
# species, taxonomic-class x size-class, trophic guild, and the combined
# class x guild x size-class grouping.  Each matrix is written out as RDS.
dat_df_2_or <- read_rds("./data/dat.df.2.or.RDS")
dat_df_2_or$tax.class_size.class <- as.factor(paste(dat_df_2_or$Class, dat_df_2_or$size.class.2, sep = "."))
# Major habitat type replacement: replace "Deep water" with actual benthic cover
source("./scripts/community_analysis/01b_replace_majhab_values_comm_matrix.R")

# Subset observation data per treatment, with harmonised column names
bruvs.df <- dat_df_2_or %>%
  filter(comment == "baited") %>%
  dplyr::select(opcode,
                species = vsppname,
                family,
                taxa.class_size.class = tax.class_size.class,
                taxa.class_guild_size.class = troph.class.size.2,
                trophic.guild = trophwip4,
                maxn = maxn.sr) %>%
  dplyr::arrange(opcode)
ruvs.df <- dat_df_2_or %>%
  filter(comment == "unbaited") %>%
  dplyr::select(opcode,
                species = vsppname,
                family,
                taxa.class_size.class = tax.class_size.class,
                taxa.class_guild_size.class = troph.class.size.2,
                trophic.guild = trophwip4,
                maxn = maxn.sr) %>%
  dplyr::arrange(opcode)

# Cast a long observation table into an opcode-by-group abundance matrix.
# acast() with fill = 0 generates zero values for groups not counted at a
# site; fun.aggregate = sum keeps aggregation consistent across the species,
# trophic-level and combined trophic-level matrices.
build_comm_matrix <- function(obs_df, group_col) {
  reshape2::acast(
    data = obs_df,
    formula = stats::as.formula(paste0("opcode ~ ", group_col)),
    fill = 0,
    value.var = "maxn",
    fun.aggregate = sum
  )
}

# Species community matrices
spe.comm.b <- build_comm_matrix(bruvs.df, "species")
write_rds(spe.comm.b, "./data/community_data/spe.comm.b.RDS")
spe.comm.r <- build_comm_matrix(ruvs.df, "species")
write_rds(spe.comm.r, "./data/community_data/spe.comm.r.RDS")
# Size-class community matrices
size.class.comm.b <- build_comm_matrix(bruvs.df, "taxa.class_size.class")
write_rds(size.class.comm.b, "./data/community_data/size.class.comm.b.RDS")
size.class.comm.r <- build_comm_matrix(ruvs.df, "taxa.class_size.class")
write_rds(size.class.comm.r, "./data/community_data/size.class.comm.r.RDS")
# Fish guild community matrices
guild.comm.b <- build_comm_matrix(bruvs.df, "trophic.guild")
write_rds(guild.comm.b, "./data/community_data/guild.comm.b.RDS")
guild.comm.r <- build_comm_matrix(ruvs.df, "trophic.guild")
write_rds(guild.comm.r, "./data/community_data/guild.comm.r.RDS")
# Combined taxonomic class, guild and size class community matrices
class.guild.size.comm.b <- build_comm_matrix(bruvs.df, "taxa.class_guild_size.class")
write_rds(class.guild.size.comm.b, "./data/community_data/class.guild.size.comm.b.RDS")
class.guild.size.comm.r <- build_comm_matrix(ruvs.df, "taxa.class_guild_size.class")
write_rds(class.guild.size.comm.r, "./data/community_data/class.guild.size.comm.r.RDS")

# Read environmental data
env.df.2.or <- read_rds("./data/community_data/raw/env.df.2.or.RDS")
# Major habitat type replacement: replace "Deep water" with actual benthic cover
source("./scripts/community_analysis/01c_replace_majhab_values_env_20220802.R")
env_df_2_or %>% group_by(majhab) %>% distinct(majhab) # check there are no deep water values
# NOTE(review): column positions 1:6, 14 and 40 are assumed to be the factor
# variables (incl. reef.zone, treatment, majhab) -- confirm against the data.
env.factors <- env_df_2_or[, c(1:6, 14, 40)]
env.factors$reef.zone <- as.factor(env.factors$reef.zone)
env.c.vars.std.or <- read_rds("./data/community_data/env.c.vars.std.or.RDS")
# Promote the row names (opcodes) to a join key column
env.tmp <- env.c.vars.std.or
env.tmp$opcode <- row.names(env.c.vars.std.or)
env.perm <- left_join(env.factors, env.tmp, by = "opcode")
env.perm <- env.perm %>% rename(medium.benthic.cover = majhab,
                                fishing.effort = f_effort,
                                percent.water.column = perc.water.column,
                                fine.benthic.cover = p.tree.cl12)
env.perm.b <- env.perm %>% filter(treatment == "baited")
env.perm.r <- env.perm %>% filter(treatment == "unbaited")
rm(env.tmp)
| /scripts/phd_chpt_6/01_read_data_generate_community_matrices.R | no_license | philiphaupt/bruvs_vs_ruvs | R | false | false | 5,216 | r | #Generate community matrices
dat_df_2_or <- read_rds("./data/dat.df.2.or.RDS")
dat_df_2_or$tax.class_size.class <- as.factor(paste(dat_df_2_or$Class,dat_df_2_or$size.class.2, sep = "."))
# major habitat type replacement: Replace Deep water with actual benthic cover
source("./scripts/community_analysis/01b_replace_majhab_values_comm_matrix.R")
#names(dat_df_2_or)
#subset species data
bruvs.df <- dat_df_2_or %>% filter(comment == "baited") %>% dplyr::select(opcode, species = vsppname, family, taxa.class_size.class = tax.class_size.class, taxa.class_guild_size.class = troph.class.size.2, trophic.guild = trophwip4, maxn = maxn.sr, ) %>% dplyr::arrange(opcode)
ruvs.df <- dat_df_2_or %>% filter(comment == "unbaited") %>% dplyr::select(opcode, species = vsppname, family, taxa.class_size.class = tax.class_size.class, taxa.class_guild_size.class = troph.class.size.2,trophic.guild = trophwip4, maxn = maxn.sr) %>% dplyr::arrange(opcode)
#species community matrix
spe.comm.b <- reshape2::acast(data = bruvs.df, opcode ~ species, fill = 0, value.var = 'maxn', fun.aggregate = sum) # critical function to genrate zero values for species not counted, and use sum, for conssitency with methods to aggeregate according to, used in trophic level and combined trophic level.
write_rds(spe.comm.b, "./data/community_data/spe.comm.b.RDS")
spe.comm.r <- reshape2::acast(data = ruvs.df, opcode ~ species, fill = 0, value.var = 'maxn', fun.aggregate = sum) # critical function to genrate zero values for species not counted, and use sum, for conssitency with methods to aggeregate according to, used in trophic level and combined trophic level.
write_rds(spe.comm.r, "./data/community_data/spe.comm.r.RDS")
#size-class community matrix
size.class.comm.b <- reshape2::acast(data = bruvs.df, opcode ~ taxa.class_size.class, fill = 0, value.var = 'maxn', fun.aggregate = sum) # critical function to genrate zero values for species not counted, and use sum, for conssitency with methods to aggeregate according to, used in trophic level and combined trophic level.
write_rds(size.class.comm.b, "./data/community_data/size.class.comm.b.RDS")
size.class.comm.r <- reshape2::acast(data = ruvs.df, opcode ~ taxa.class_size.class, fill = 0, value.var = 'maxn', fun.aggregate = sum) # critical function to genrate zero values for species not counted, and use sum, for conssitency with methods to aggeregate according to, used in trophic level and combined trophic level.
write_rds(size.class.comm.r, "./data/community_data/size.class.comm.r.RDS")
#fish guild community
guild.comm.b <- reshape2::acast(data = bruvs.df, opcode ~ trophic.guild, fill = 0, value.var = 'maxn', fun.aggregate = sum) # critical function to genrate zero values for species not counted, and use sum, for conssitency with methods to aggeregate according to, used in trophic level and combined trophic level.
write_rds(guild.comm.b, "./data/community_data/guild.comm.b.RDS")
guild.comm.r <- reshape2::acast(data = ruvs.df, opcode ~ trophic.guild, fill = 0, value.var = 'maxn', fun.aggregate = sum) # critical function to genrate zero values for species not counted, and use sum, for conssitency with methods to aggeregate according to, used in trophic level and combined trophic level.
write_rds(guild.comm.r, "./data/community_data/guild.comm.r.RDS")
# Combined taxonomic class, guild and size class community
class.guild.size.comm.b <- reshape2::acast(data = bruvs.df, opcode ~ taxa.class_guild_size.class, fill = 0, value.var = 'maxn', fun.aggregate = sum) # critical function to genrate zero values for species not counted, and use sum, for conssitency with methods to aggeregate according to, used in trophic level and combined trophic level.
write_rds(class.guild.size.comm.b, "./data/community_data/class.guild.size.comm.b.RDS")
class.guild.size.comm.r <- reshape2::acast(data = ruvs.df, opcode ~ taxa.class_guild_size.class, fill = 0, value.var = 'maxn', fun.aggregate = sum) # critical function to genrate zero values for species not counted, and use sum, for conssitency with methods to aggeregate according to, used in trophic level and combined trophic level.
write_rds(class.guild.size.comm.r, "./data/community_data/class.guild.size.comm.r.RDS")
#read environmental data
env.df.2.or <- read_rds("./data/community_data/raw/env.df.2.or.RDS") #%>% arrangge(opcode)
# major habitat type replacement: Replace Deep water with actual benthic cover
source("./scripts/community_analysis/01c_replace_majhab_values_env_20220802.R")
env_df_2_or %>% group_by(majhab)%>% distinct(majhab) # check there are no deep water values
env.factors <- env_df_2_or[,c(1:6,14,40)]
env.factors$reef.zone <- as.factor(env.factors$reef.zone)
env.c.vars.std.or <- read_rds("./data/community_data/env.c.vars.std.or.RDS") #%>% arrange()
env.tmp <- env.c.vars.std.or
env.tmp$opcode <- row.names(env.c.vars.std.or)
env.perm <- left_join(env.factors, env.tmp, by = "opcode")
env.perm <- env.perm %>% rename(medium.benthic.cover = majhab, fishing.effort = f_effort, percent.water.column = perc.water.column, fine.benthic.cover = p.tree.cl12)
env.perm.b <- env.perm %>% filter(treatment == "baited")
env.perm.r <- env.perm %>% filter(treatment == "unbaited")
rm(env.tmp)
|
# Market-basket analysis of restaurant orders with the arules package:
# mine association rules, extract meat -> wine pairings, and find the
# most popular three-item meal.
# Guard the install so sourcing the script does not reinstall on every run.
if (!requireNamespace("arules", quietly = TRUE)) {
  install.packages("arules")
}
library(arules)

# NOTE(review): absolute, user-specific path -- parameterise before sharing.
res_path <- "C:\\Users\\escha\\OneDrive\\Documents\\MSA\\Fall Classes\\Data Mining\\Data\\res_complete.csv"
restaurant <- read.transactions(res_path, sep = ",")
summary(restaurant)
inspect(restaurant[17])   # spot-check a single transaction
itemFrequency(restaurant) # support of each individual item

# Rules at apriori() defaults (support = 0.1, confidence = 0.8)
rules <- apriori(restaurant)
summary(rules)
inspect(sort(rules, by = "lift"))

# Pairwise rules only (minlen = maxlen = 2) at looser thresholds
newrules <- apriori(restaurant,
                    parameter = list(support = 0.05, confidence = 0.5,
                                     minlen = 2, maxlen = 2))
summary(newrules)
inspect(sort(newrules, by = "lift"))

# Rules involving the meat entrees, ranked by lift
meats <- subset(newrules,
                items %in% c("Duck", "Filet Mignon", "Pork Tenderloin", "Roast Chicken"))
inspect(sort(meats, by = "lift"))
### Meat -> Wine pairings ###
#Filet Mignon -> Blackstone Merlot
#Roast Chicken -> Duckhorn Chardonnay
#Duck -> Duckhorn Chardonnay
#Pork Tenderloin -> Cantina Pinot Bianco

# Three-item rules, ranked by support, to find the most popular meal
popmeal <- apriori(restaurant,
                   parameter = list(support = 0.05, confidence = 0.5,
                                    minlen = 3, maxlen = 3))
summary(popmeal)
inspect(sort(popmeal, by = "support"))
### Most Popular Meal ###
# Cantina Pinot Bianco, Roasted Root Veg, Pork Tenderloin | /Data Mining/DataMining_Evan.R | no_license | ashwinb6/fall2_hw_team6 | R | false | false | 1,032 | r | install.packages('arules')
library(arules)
restaurant <- read.transactions('C:\\Users\\escha\\OneDrive\\Documents\\MSA\\Fall Classes\\Data Mining\\Data\\res_complete.csv',sep=',')
summary(restaurant)
inspect(restaurant[17])
itemFrequency(restaurant)
rules <- apriori(restaurant)
summary(rules)
inspect(sort(rules, by = "lift"))
newrules <- apriori(restaurant, parameter = list(support=0.05, confidence=0.5, minlen=2, maxlen=2))
summary(newrules)
inspect(sort(newrules, by = "lift"))
meats <- subset(newrules,items %in% c("Duck","Filet Mignon","Pork Tenderloin","Roast Chicken"))
inspect(sort(meats, by = "lift"))
### Meat -> Wine pairings ###
#Filet Mignon -> Blackstone Merlot
#Roast Chicken -> Duckhorn Chardonnay
#Duck -> Duckhorn Chardonnay
#Pork Tenderloin -> Cantina Pinot Bianco
popmeal <- apriori(restaurant, parameter = list(support=0.05, confidence=0.5, minlen=3, maxlen=3))
summary(popmeal)
inspect(sort(popmeal, by = "support"))
### Most Popular Meal ###
# Cantina Pinot Bianco, Roasted Root Veg, Pork Tenderloin |
# Determine the matrix bin indexes overlapping the interval chr:start-end and
# run directionality_of_indexes() on them.  Bins within n_insulation_bins of
# either chromosome end are set to NA, because the directionality window
# would run off the chromosome there.
# NOTE(review): start/end appear to be 0-based (both are shifted by +1 before
# building GRanges) -- confirm against how hic_1d coordinates are stored.
directionality_of_chrRange <- function(hic_mat, chr, start, end) {
  # Overlap query between the requested interval and the 1D bin table
  m_gr <- GRanges(hic_mat@hic_1d)
  start(m_gr) <- start(m_gr) + 1
  q_gr <- GRanges(seqnames = chr, IRanges(start + 1, end))
  pos_indexes <- subjectHits(findOverlaps(query = q_gr, subject = m_gr, ignore.strand = TRUE))
  # pos_indexes must keep an n_bins buffer from the start/end of the chromosome
  chr_indexes <- m_gr[seqnames(m_gr) == chr]$index
  chr_len <- length(chr_indexes)
  n_bins <- hic_mat@parameters@n_insulation_bins
  # First and last n_bins bins of the chromosome are unusable
  # NOTE(review): assumes chr_len >= 2 * n_bins -- TODO confirm for small contigs
  to_remove <- chr_indexes[c(1:n_bins, (chr_len - n_bins + 1):chr_len)]
  if (length(intersect(to_remove, pos_indexes)) > 0) {
    pos_indexes[pos_indexes %in% to_remove] <- NA
    # warning("some bins in range were too close to chromosome ends and were set to NA")
  }
  directionality_of_indexes(hic_mat, pos_indexes, n_bins)
}
# The directionality range is a square region adjacent to the diagonal of the
# matrix: the n_bins indexes upstream and n_bins indexes downstream of pos_index.
#   pos_index : position along the diagonal, 1-indexed
#   n_bins    : half-width of the window; smaller values are more sensitive to
#               local features but depend more on read depth
directionality_range <- function(pos_index, n_bins) {
  # seq_len() (rather than 1:n_bins) returns integer(0) when n_bins == 0,
  # so the window degenerates cleanly instead of producing c(1, 0).
  list(pos_index - seq_len(n_bins), pos_index + seq_len(n_bins))
}
#
#' DI calculation for a single index
#'
#' Calculates the directionality index (DI) at diagonal position `pos_index`,
#' comparing upstream (A) vs downstream (B) contact sums over `n_bins` bins in
#' each direction.  Returns `NA` when either side of the window has too many
#' missing bins (coverage below `parameters@min_insulation_coverage`).
#'
#' @param hic_matrix object with slot `hic_2d` (keyed data.table of contacts
#'   with value column `val`) and slot `parameters@min_insulation_coverage`
#' @param pos_index position along the matrix diagonal, 1-indexed
#' @param n_bins number of bins used on each side of `pos_index`
#'
#' @return a single numeric DI value, or `NA` when coverage is insufficient
#' @export
#'
#' @examples
directionality_of_index = function(hic_matrix, pos_index, n_bins){
  rng = directionality_range(pos_index, n_bins)
  # max_miss: fraction of a window that may be NA before the position is skipped
  #this is a bit fragile but should hold
  max_miss = 1 - hic_matrix@parameters@min_insulation_coverage
  # .(i, j) is a keyed data.table join; count NA contacts on each side
  missed_A = hic_matrix@hic_2d[.(rng[[1]], pos_index)][is.na(val), .N] / n_bins > max_miss
  missed_B = hic_matrix@hic_2d[.(pos_index, rng[[2]])][is.na(val), .N] / n_bins > max_miss
  if(missed_A | missed_B){
    # NOTE(review): this is a logical NA, not NA_real_; callers only get a
    # numeric NA after sapply()-style simplification -- consider NA_real_
    DI = NA
  }else{
    # A = upstream contact sum, B = downstream contact sum, E = their mean
    A = hic_matrix@hic_2d[.(rng[[1]], pos_index), sum(val, na.rm = TRUE)]
    B = hic_matrix@hic_2d[.(pos_index, rng[[2]]), sum(val, na.rm = TRUE)]
    E = (A + B) / 2
    # Signed chi-square-like statistic; the first factor contributes the sign.
    # NOTE(review): (B - A) / abs(B - A) yields NaN when A == B exactly.
    DI = ( (B - A) / abs(B - A) )*( (A - E)^2 / E + (B - E)^2 / E )
  }
  return(DI)
}
# Runs directionality_of_index() on every index in pos_indexes.
# Returns a named numeric vector (names are the indexes), with NA where the
# window was skipped for insufficient coverage.
directionality_of_indexes = function(hic_matrix, pos_indexes, n_bins){
  # vapply (not sapply) guarantees a numeric vector even for empty input;
  # as.numeric() coerces the bare logical NA that directionality_of_index()
  # returns for skipped positions, which would otherwise fail the type check.
  DIs = vapply(pos_indexes, function(pos_index){
    as.numeric(directionality_of_index(hic_matrix, pos_index, n_bins))
  }, numeric(1))
  names(DIs) = pos_indexes
  return(DIs)
}
| /R/functions_directionalityIndex.R | no_license | jrboyd/dthic | R | false | false | 2,708 | r | directionality_of_chrRange = function(hic_mat, chr, start, end){
#determines index positions of specified interval and runs directionality_of_indexes
#determine overlapping indexes
m_gr = GRanges(hic_mat@hic_1d)
start(m_gr) = start(m_gr) + 1
q_gr = GRanges(seqnames = chr, IRanges(start + 1, end))
pos_indexes = subjectHits(findOverlaps(query = q_gr, subject = m_gr, ignore.strand = TRUE))
#pos_indexes have n_bins buffer from start or end of chromosome
chr_indexes = m_gr[seqnames(m_gr) == chr]$index
chr_len = length(chr_indexes)
chr_range = range(chr_indexes)
# n_bins = round(2*10^6 / hic_mat@parameters@bin_size)
n_bins = hic_mat@parameters@n_insulation_bins
to_remove = chr_indexes[c(1:n_bins, (chr_len-n_bins+1):chr_len)]
if(length(intersect(to_remove, pos_indexes)) > 0){
pos_indexes[pos_indexes %in% to_remove] = NA
# warning("some bins in range were too close to chromosome ends and were set to NA")
}
directionality_of_indexes(hic_mat, pos_indexes, n_bins)
}
#the directionality range is a square region adjacent to diagonal of the matrix.
#pos_index : the position along diagonal, 1 indexed
#n_bins : the size of the directionality square. smaller n_bins may be sensitive to more local features are dependent on read depth
directionality_range = function(pos_index, n_bins){
list(pos_index - 1:n_bins, pos_index + 1:n_bins)
}
#
#' DI calculation for single index
#'
#' calculates the directionality index of
#' pos_index position using n_bins in both directions
#'
#' @param hic_matrix
#' @param pos_index
#' @param n_bins
#'
#' @return
#' @export
#'
#' @examples
directionality_of_index = function(hic_matrix, pos_index, n_bins){
rng = directionality_range(pos_index, n_bins)
#this is a bit fragile but should hold
max_miss = 1 - hic_matrix@parameters@min_insulation_coverage
missed_A = hic_matrix@hic_2d[.(rng[[1]], pos_index)][is.na(val), .N] / n_bins > max_miss
missed_B = hic_matrix@hic_2d[.(pos_index, rng[[2]])][is.na(val), .N] / n_bins > max_miss
if(missed_A | missed_B){
DI = NA
}else{
A = hic_matrix@hic_2d[.(rng[[1]], pos_index), sum(val, na.rm = TRUE)]
B = hic_matrix@hic_2d[.(pos_index, rng[[2]]), sum(val, na.rm = TRUE)]
E = (A + B) / 2
DI = ( (B - A) / abs(B - A) )*( (A - E)^2 / E + (B - E)^2 / E )
}
return(DI)
}
#runs directionality_of_index on all indexes in pos_indexes
directionality_of_indexes = function(hic_matrix, pos_indexes, n_bins){
DIs = sapply(pos_indexes, function(pos_index){
directionality_of_index(hic_matrix, pos_index, n_bins)
})
names(DIs) = pos_indexes
return(DIs)
}
|
## makeCacheMatrix: build a cache-aware wrapper around a matrix.
## The returned list exposes four closures sharing one environment:
##   set/get operate on the matrix itself,
##   setinverse/getinverse operate on the cached inverse.
## Re-setting the matrix invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(new_matrix) {
      x <<- new_matrix
      cached_inverse <<- NULL  # new matrix: old inverse is no longer valid
    },
    get = function() x,
    setinverse = function(value) cached_inverse <<- value,
    getinverse = function() cached_inverse
  )
}
# cacheSolve: return the inverse of a makeCacheMatrix() object.
# A previously cached inverse is returned directly (with a message);
# otherwise the inverse is computed with solve(), stored via setinverse(),
# and returned.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | mwittenb/ProgrammingAssignment2 | R | false | false | 1,094 | r | ## Put comments here that give an overall description of what your
## functions do
## returns a function that can store a matrix and the invertse of tha matrix
# set and get operate on the matrix
# setinverse and getinverse operte on the inverse
# setting the matrix will invalidate previous calculated inv.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(i) inv <<- i
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# returns a cached inv of a makeCacheMatrix if there is one
# if there isn't, calculates the inv, stores it with setinverse and returns inv
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
m <- x$get()
inv <- solve(m, ...)
x$setinverse(inv)
inv
}
|
###############################################################################
# Script to detect weak stationarity in electrophysiological recordings,
# by Enciso Alva, 2017.
# For citation and usage instructions, see the accompanying documentation.
#
###############################################################################
# Script parameters -- see documentation.  `nombre`, `dir_datos`, `dir_res`
# and `dur_epoca` are expected to be defined by the caller before sourcing.
#nombre = 'CLMN10SUE'
#etiqueta = 'CLMN'
#dir_datos = paste0(getwd(),'/CLMN10SUE')
#dir_res = paste0(getwd(),'/res_parciales')
extension <- '.txt'
reemplazar <- TRUE
fr_muestreo <- 512   # sampling frequency (Hz)
#dur_epoca = 30      # epoch length (s), set by the caller
canales <- 'PSG'     # channel set shorthand ('PSG', '10-20') or explicit vector
ver_avance <- TRUE
no_repetir <- FALSE
haz_carpeta <- TRUE
usar_loess <- FALSE
#################################################
# Package providing the PSR stationarity test.
# library() (not require()) so a missing package fails loudly here.
library(fractal)
#################################################
# Check whether required parameters are defined.
# BUG FIX: missing() only works on function arguments and errors at the top
# level of a script; exists() is the correct check for script variables.
if (!exists("fr_muestreo")) {
  warning('WARNING: No se ha indicado la frecuencia de muestreo')
}
if (!exists("dur_epoca")) {
  warning('WARNING: No se ha indicado el tamano de la epoca')
}
#################################################
# Optional parameters: expand the channel-set shorthand into channel names.
# identical() keeps each condition scalar; the original independent if-chain
# compared a 19-element vector to 'PSG' after the first branch reassigned
# `canales`, which is an error in current R.
if (reemplazar) {
  if (identical(canales, '10-20')) {
    canales <- c('C3','C4','CZ','F3','F4','F7','F8','FP1','FP2','FZ','O1','O2',
                 'P3','P4','PZ','T3','T4','T5','T6')
  } else if (identical(canales, 'PSG')) {
    canales <- c('C3','C4','CZ','F3','F4','F7','F8','FP1','FP2','FZ','O1','O2',
                 'P3','P4','PZ','T3','T4','T5','T6','LOG','ROG','EMG')
  }
}
if (length(canales) < 1) {
  stop('ERROR: Lista de canales tiene longitud cero')
}
# Directory handling kept commented out, as in the original:
# if(missing(dir_datos)){
#   dir_datos = getwd()
# }else{
#   if(!dir.exists(dir_datos)){
#     stop('ERROR: El directorio con datos no existe, o no puede ser leido.')
#   }
# }
# if(missing(dir_res)){
#   if(haz_carpeta){
#     dir_res = paste0(getwd(),'/est_',nombre)
#     dir.create(dir_res)
#   }else{
#     dir_res = getwd()
#   }
# }else{
#   if(!dir.exists(dir_res)){
#     stop('ERROR: El directorio para resultados no existe')
#   }
# }
if (!exists("etiqueta")) {
  etiqueta <- nombre
}
#################################################
# Parameters derived from the data
ventana <- fr_muestreo * dur_epoca   # samples per epoch
n_canales <- length(canales)
# STL decomposition needs epochs longer than 2 s; usar_loess is an explicit
# override that forces the loess detrending branch instead.
usar_stl <- dur_epoca > 2 && !usar_loess
#################################################
# Main loop over channels: for each channel, read the signal, split it into
# epochs, detrend each epoch (STL or loess), run the PSR stationarity test,
# and write the per-epoch p-values to one file per channel.
for (ch in seq_len(n_canales)) {
  # Build the input file name for this channel
  ch_actual <- canales[ch]
  nom_archivo <- paste0(nombre, '_', ch_actual, extension)
  if (no_repetir) {
    # Skip channels that already have results on disk
    setwd(dir_res)
    if (file.exists(paste0('EST_', nombre, '_', ch_actual, '_T.csv'))) {
      warning('El canal ', ch_actual,
              ' se ha omitido, pues se encontraron resultados previos')
      next()
    }
  }
  # Load the data
  # NOTE(review): setwd() inside a loop is fragile; prefer file.path() paths.
  setwd(dir_datos)
  if (!file.exists(nom_archivo)) {
    warning('ERROR: En canal ', ch_actual,
            ', no se encontro el archivo ', nom_archivo)
    next()
  }
  DATOS <- read.csv(nom_archivo)
  DATOS <- as.numeric(unlist(DATOS))
  # How many whole epochs can be formed
  max_epoca <- floor(length(DATOS) / ventana)
  if (max_epoca == 0) {
    warning(paste0('ERROR: En canal ', ch_actual,
                   ', no se pudieron leer datos'))
    next()
  }
  # Result containers: one p-value per epoch for each PSR sub-test
  pv.t <- rep(0, max_epoca)
  pv.ir <- rep(0, max_epoca)
  pv.tir <- rep(0, max_epoca)
  # Progress information, if requested
  if (ver_avance) {
    print( paste0(' Sujeto : ', etiqueta) )
    print( paste0(' Canal : ', ch_actual,
                  ' (', toString(ch), '/', toString(n_canales), ')') )
  }
  #################################################
  # Loop over epochs; epoch i covers samples (i-1)*ventana+1 .. i*ventana.
  # BUG FIX: the original ran i over 0:(max_epoca-1) and assigned pv.t[i],
  # so pv.t[0] was a silent no-op -- the first epoch's p-value was lost and
  # every later value was shifted by one.  The loop is now 1-based.
  for (i in seq_len(max_epoca)) {
    tmp <- DATOS[((i - 1) * ventana + 1):(i * ventana)]
    tmp.t <- ts(tmp, frequency = fr_muestreo, start = c(0, 0))
    if (usar_stl) {
      # Robust STL with a strictly periodic seasonal component; keep only
      # the remainder (detrended, deseasonalised signal)
      tmp.s <- stl(tmp.t, robust = TRUE, s.window = 'periodic')
      tmp.r <- tmp.s$time.series[, 'remainder']
      tmp <- as.numeric(unclass(tmp.r))
    } else {
      # Loess detrending for short epochs.
      # BUG FIX: predict.loess() returns a plain numeric vector here (se is
      # not requested), so the original `tmp.s$fit` failed; subtract the
      # predicted trend directly.
      tmp.l <- loess(tmp ~ time(tmp.t))
      tmp <- tmp - predict(tmp.l, time(tmp.t))
    }
    # PSR stationarity test; collect the three p-values
    z <- stationarity(tmp)
    pv.t[i] <- as.numeric( attr(z, 'pvals')[1])
    pv.ir[i] <- as.numeric( attr(z, 'pvals')[2])
    pv.tir[i] <- as.numeric( attr(z, 'pvals')[3])
  }
  # End of the epoch loop
  #################################################
  # Write results to one plain-text file per channel (IR/TIR outputs are kept
  # disabled, as in the original)
  setwd(dir_res)
  write.table(pv.t , paste0('EST_',nombre,'_',ch_actual,'_T.txt' ),
              row.names=FALSE,col.names=FALSE)
  #write.table(pv.ir , paste0('EST_',nombre,'_',ch_actual,'_IR.txt' ),
  #            row.names=FALSE,col.names=FALSE)
  #write.table(pv.tir, paste0('EST_',nombre,'_',ch_actual,'_TIR.txt'),
  #            row.names=FALSE,col.names=FALSE)
}
# End of the channel loop
#################################################
# fin del script
############################################################################### | /scripts_old/scripts170700/multipsr_curso01_personal.R | no_license | EncisoAlvaJC/TESIS | R | false | false | 5,276 | r | ###############################################################################
# Script para detectar estacionariedad debil en registros electrofisiologicos,
# por Enciso Alva, 2017
# Para citar y revisar instrucciones de uso, revisar documantacion anexa
#
###############################################################################
# parametros del script, ver documantacion
#nombre = 'CLMN10SUE'
#etiqueta = 'CLMN'
#dir_datos = paste0(getwd(),'/CLMN10SUE')
#dir_res = paste0(getwd(),'/res_parciales')
extension = '.txt'
reemplazar = TRUE # <-
fr_muestreo = 512
#dur_epoca = 30
canales = 'PSG'
# canales = c('C3','C4','CZ','F3','F4','F7','F8','FP1','FP2','FZ','O1','O2',
# 'P3','P4','PZ','ROG','T3','T4','T5','T6')
ver_avance = T
no_repetir = F
haz_carpeta = T
usar_loess = F
#################################################
# libreria que contiene la prueba de PSR
require('fractal')
#################################################
# revisar si faltan algunos parametros
if(missing(fr_muestreo)){
warning('WARNING: No se ha indicado la frecuencia de muestreo')
}
if(missing(dur_epoca)){
warning('WARNING: No se ha indicado el tamano de la epoca')
}
#################################################
# parametros opcionales
if(reemplazar){
if(canales=='10-20'){
canales = c('C3','C4','CZ','F3','F4','F7','F8','FP1','FP2','FZ','O1','O2',
'P3','P4','PZ','T3','T4','T5','T6')
}
if(canales=='PSG'){
canales = c('C3','C4','CZ','F3','F4','F7','F8','FP1','FP2','FZ','O1','O2',
'P3','P4','PZ','T3','T4','T5','T6','LOG','ROG','EMG')
}
}
if(length(canales)<1){
stop('ERROR: Lista de canales tiene longitud cero')
}
# if(missing(dir_datos)){
# dir_datos = getwd()
# }else{
# if(!dir.exists(dir_datos)){
# stop('ERROR: El directorio con datos no existe, o no puede ser leido.')
# }
# }
# if(missing(dir_res)){
# if(haz_carpeta){
# dir_res = paste0(getwd(),'/est_',nombre)
# dir.create(dir_res)
# }else{
# dir_res = getwd()
# }
# }else{
# if(!dir.exists(dir_res)){
# stop('ERROR: El directorio para resultados no existe')
# }
# }
if(missing(etiqueta)){
etiqueta = nombre
}
#################################################
# parametros dependientes de los datos
ventana = fr_muestreo*dur_epoca
n_canales = length(canales)
usar_stl = T
if(dur_epoca<=2){
usar_stl = F
}
if(usar_loess){
usar_stl = F
}
#################################################
# inicio del ciclo que recorre los canales
for(ch in 1:n_canales){
# construye el nombre del archivo
ch_actual = canales[ch]
nom_archivo = paste0(nombre,'_',ch_actual,extension)
if(no_repetir){
setwd(dir_res)
if(file.exists(paste0('EST_',nombre,'_',ch_actual,'_T.csv' ))){
warning('El canal ',ch_actual,
' se ha omitido, pues se encontraron resultados previos')
next()
}
}
# cargar los datos
setwd(dir_datos)
if(!file.exists(nom_archivo)){
warning('ERROR: En canal ',ch_actual,
', no se encontro el archivo ',nom_archivo)
next()
}
DATOS = read.csv(nom_archivo)
DATOS = as.numeric(unlist(DATOS))
# cuantas epocas pueden formarse
max_epoca = floor(length(DATOS)/ventana)
if(max_epoca==0){
warning(paste0('ERROR: En canal ',ch_actual,
', no se pudieron leer datos'))
next()
}
# contenedores de los resltados
pv.t = rep(0,max_epoca)
pv.ir = rep(0,max_epoca)
pv.tir = rep(0,max_epoca)
#informacion sobre el progreso, si fue requerida
if(ver_avance){
print( paste0(' Sujeto : ',etiqueta) )
print( paste0(' Canal : ',ch_actual,
' (',toString(ch),'/',toString(n_canales),')') )
}
#################################################
# inicio del ciclo que recorre las epocas
for ( i in 0:(max_epoca-1) ){
# filtro STL, robusto y forzado a periodico estandar
tmp = DATOS[ (i*ventana+1) : ((i+1)*ventana) ]
tmp.t = ts(tmp,frequency=fr_muestreo,start=c(0,0))
if(usar_stl){
tmp.s = stl(tmp.t,robust=T,s.window='periodic')
tmp.r = tmp.s$time.series[,'remainder']
tmp = as.numeric(unclass(tmp.r))
}else{
tmp.l = loess(tmp~time(tmp.t))
tmp.s = predict(tmp.l,time(tmp.t))
tmp = tmp - tmp.s$fit
}
# test de PSR, los archivos se recolectan
z = stationarity(tmp)
pv.t[i] = as.numeric( attr(z,'pvals')[1])
pv.ir[i] = as.numeric( attr(z,'pvals')[2])
pv.tir[i] = as.numeric( attr(z,'pvals')[3])
}
# End of the loop over epochs
#################################################
# Results are saved to a text file, one p-value per line.
# NOTE(review): the file written here ends in '_T.txt' while the resume
# check at the top of the channel loop tests for '_T.csv'; confirm the
# intended extension so the skip-existing logic can work.
setwd(dir_res)
write.table(pv.t , paste0('EST_',nombre,'_',ch_actual,'_T.txt' ),
row.names=FALSE,col.names=FALSE)
#write.table(pv.ir , paste0('EST_',nombre,'_',ch_actual,'_IR.txt' ),
# row.names=FALSE,col.names=FALSE)
#write.table(pv.tir, paste0('EST_',nombre,'_',ch_actual,'_TIR.txt'),
# row.names=FALSE,col.names=FALSE)
}
# End of the loop over channels
#################################################
# End of the script
############################################################################### |
library(occCite)
library(rfishbase)
library(robis)
library(dplyr)
library(rgbif)
library(rgdal)
library(raster)
library(ggplot2)
library(viridis)
library(rphylopic)
# Get lists of taxa from FishBase and OBIS -----
# Convenience infix operator: TRUE for elements of x that are NOT in y.
`%!in%` <- function(x, y) {
  !(x %in% y)
}
# Getting species lists
# For each of the three target clades: pull the FishBase list, pull the OBIS
# checklist restricted to accepted species, then record the set differences
# and the intersection between the two sources.
gadiformListFB <- species_list(Order = "Gadiformes")
gadiformListOBIS <- checklist("Gadiformes")
gadiformListOBIS <- gadiformListOBIS %>% filter(taxonRank == "Species", taxonomicStatus == "accepted")
inFBnotOBIS <- gadiformListFB[gadiformListFB %!in% gadiformListOBIS$acceptedNameUsage]
inOBISnotFB <- gadiformListOBIS[gadiformListOBIS$acceptedNameUsage %!in% gadiformListFB,]
inBoth <- gadiformListFB[gadiformListFB %in% gadiformListOBIS$acceptedNameUsage]
# Scombriformes are queried family-by-family (FishBase has no single order).
scombriformListFB <- species_list(Family = c("Amarsipidae", "Nomeidae", "Ariommatidae",
                                             "Stromateidae", "Pomatomidae", "Centrolophidae",
                                             "Icosteidae", "Arripidae", "Tetragonuridae",
                                             "Chiasmodontidae", "Scombridae", "Caristiidae",
                                             "Bramidae", "Scombrolabracidae", "Scombropidae",
                                             "Gempylidae", "Trichiuridae")) # From Betancur-R et al 2017; Eschmeyer was missing Scombropidae
scombriformListOBIS <- checklist(c("Amarsipidae", "Nomeidae", "Ariommatidae",
                                   "Stromateidae", "Pomatomidae", "Centrolophidae",
                                   "Icosteidae", "Arripidae", "Tetragonuridae",
                                   "Chiasmodontidae", "Scombridae", "Caristiidae",
                                   "Bramidae", "Scombrolabracidae", "Scombropidae",
                                   "Gempylidae", "Trichiuridae"))
scombriformListOBIS <- scombriformListOBIS %>% filter(taxonRank == "Species", taxonomicStatus == "accepted")
scombInFBnotOBIS <- scombriformListFB[scombriformListFB %!in% scombriformListOBIS$acceptedNameUsage]
scombInOBISnotFB <- scombriformListOBIS[scombriformListOBIS$acceptedNameUsage %!in% scombriformListFB,]
scombInBoth <- scombriformListFB[scombriformListFB %in% scombriformListOBIS$acceptedNameUsage]
belonListFB <- species_list(Family = c("Scomberesocidae", "Belonidae", "Hemiramphidae",
                                       "Zenarchopteridae", "Exocoetidae", "Adrianichthyidae"))
belonListOBIS <- checklist(c("Scomberesocidae", "Belonidae", "Hemiramphidae",
                             "Zenarchopteridae", "Exocoetidae", "Adrianichthyidae")) # Eschmeyer and Betancur-R agree (low boostrap support for the group in BR, though)
belonListOBIS <- belonListOBIS %>% filter(taxonRank == "Species", taxonomicStatus == "accepted")
belonInFBnotOBIS <- belonListFB[belonListFB %!in% belonListOBIS$acceptedNameUsage]
belonInOBISnotFB <- belonListOBIS[belonListOBIS$acceptedNameUsage %!in% belonListFB,]
belonInBoth <- belonListFB[belonListFB %in% belonListOBIS$acceptedNameUsage]
# Next, hand-curated lists, compared to Eschmeyer's Catalog, accessed September 2020
# Filter list for species found in the Atlantic -----
masterList <- read.csv("data/TaxonomicResolution.csv")
# Atlantic distribution according to Fishbase
fishbaseDistributions <- distribution(masterList$FBName)
# Species-by-FAO-area incidence table.
fishbaseDistributions <- table(fishbaseDistributions[,c("Species", "FAO")])
fishbaseDistributions <- fishbaseDistributions[,c("Atlantic, Antarctic", "Atlantic, Eastern Central",
                                                  "Atlantic, Northeast", "Atlantic, Northwest",
                                                  "Atlantic, Southeast", "Atlantic, Southwest",
                                                  "Atlantic, Western Central",
                                                  "Mediterranean and Black Sea")] # Area of interest
# Row sums: in how many Atlantic/Mediterranean FAO areas each species occurs.
fishbaseDistributions <- apply(fishbaseDistributions, 1, sum)
fbATLPresent <- fishbaseDistributions[masterList$FBName] > 0 # Presence/absence in area of interest
masterList <- cbind(masterList,fbATLPresent)
# Atlantic distribution according to OBIS
# Dissolve the FAO fishing-area polygons into a single Atlantic footprint,
# save it, then simplify and convert to WKT for use as an OBIS query geometry.
# NOTE(review): rgdal/rgeos/wicket are retired packages -- consider sf.
faoShapefile <- readOGR("data/FAO Fishing Areas 2005/FAO_AREAS.shp")
atlanticShapefile <- aggregate(faoShapefile[faoShapefile$OCEAN=="Atlantic",], dissolve = T)
OCEAN <- "Atlantic"
atlanticShapefile <- SpatialPolygonsDataFrame(atlanticShapefile, as.data.frame(OCEAN))
writeOGR(obj = atlanticShapefile, dsn = "data/", layer = "atlantic", driver = "ESRI Shapefile")
atlanticSimp <- rgeos::gSimplify(atlanticShapefile, tol = 3)
atlanticWKT <- wicket::sp_convert(atlanticSimp, group = T)
atlSpp <- NULL
index <- 1
# Because OBIS gets angry when you ask for more than 100 names at a time,
# page through the unique names in chunks of 100.
# FIX: the original condition (length > index) skipped the final name when
# the list length landed exactly on a chunk boundary + 1, and
# index:(index + 99) could run past the end of the vector, sending NA
# "names" to OBIS on the last chunk.
obisNames <- unique(masterList$OBISChecklist)
while (index <= length(obisNames)) {
  chunkEnd <- min(index + 99, length(obisNames))
  atlSpp <- append(atlSpp, checklist(scientificname = obisNames[index:chunkEnd],
                                     geometry = atlanticWKT)$scientificName)
  print(index)
  index <- index + 100
}
# Toss subspecific epithets: collapse "Genus species subsp" to "Genus species".
# (Arguments put in gsub's conventional positional order; the original passed
# atlSpp as a lone positional argument among named ones and relied on R's
# argument matching to route it to 'x'.)
atlSpp <- gsub("(\\w+\\s\\w+)(\\s\\w+)", "\\1", atlSpp, perl = TRUE)
# Flag OBIS-side Atlantic presence and agreement between the two sources.
OBIS_atlPresent <- masterList$OBISChecklist %in% atlSpp
masterList <- cbind(masterList, OBIS_atlPresent)
fbOBagree <- masterList$fbATLPresent == masterList$OBIS_atlPresent
masterList <- cbind(masterList, fbOBagree)
write.csv(masterList, "data/taxaWithinAreaOfInterest.csv", row.names = F)
# At this point, I did a manual rectification of the list in cases where there was no data or sources disagreed
# This list was finalized as of Sept 1, 2020
# Getting GBIF taxonomy to verify
# For each curated (Eschmeyer) name, look up the single best GBIF backbone
# match and record its scientific name (NA when GBIF has no match).
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest.csv")
GBIFnames <- vector(mode = "character", length = nrow(atlChecklist))
for(i in seq_len(nrow(atlChecklist))){
  nameCheck <- as.character(atlChecklist$EschmeyerName[i])
  GBIFname <- name_lookup(query = nameCheck, limit = 1)
  # FIX: length() on the returned tibble counts columns, not rows, so an
  # empty (0-row) result with columns present slipped past the original
  # check; test for missing/zero-row data instead.
  if(is.null(GBIFname$data) || nrow(GBIFname$data) == 0){
    GBIFnames[[i]] <- NA
  } else{
    GBIFnames[[i]] <- GBIFname$data$scientificName[1]
  }
}
atlChecklist <- cbind(atlChecklist, GBIFnames)
write.csv(atlChecklist, "data/taxaWithinAreaOfInterest.csv", row.names = F)
# Visualizing the checklist ----
# Stacked bar charts of Atlantic vs non-Atlantic species counts, one bar per
# source (FishBase, OBIS, hand-curated), first for all taxa and then per
# clade with a phylopic silhouette overlaid.
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest_curated8Jan.csv")
# All taxa
Source <- c(rep("Fishbase", 2), rep("OBIS", 2), rep("Curated", 2))
Location <- rep(c("Atlantic", "Not Atlantic"), 3)
Count <- c(length(unique(atlChecklist$FBName[atlChecklist$fbATLPresent])),
           length(unique(atlChecklist$FBName[!atlChecklist$fbATLPresent])),
           length(unique(atlChecklist$OBISChecklist[atlChecklist$OBIS_atlPresent])),
           length(unique(atlChecklist$OBISChecklist[!atlChecklist$OBIS_atlPresent])),
           length(unique(atlChecklist$EschmeyerName[atlChecklist$isAtlantic])),
           length(unique(atlChecklist$EschmeyerName[!atlChecklist$isAtlantic])))
data <- data.frame(Source, Location, Count)
ggplot(data, aes(fill=Location, y=Count, x=Source)) +
  geom_bar(position="stack", stat="identity") +
  ggtitle("Checklist of Cods, Tunas, Flyingfishes, and Their Allies") +
  scale_fill_brewer(palette="Dark2") +
  theme_minimal(base_size = 18)
# Gadiformes
gadChecklist <- atlChecklist[atlChecklist$Group == "G",]
Source <- c(rep("Fishbase", 2), rep("OBIS", 2), rep("Curated", 2))
Location <- rep(c("Atlantic", "Not Atlantic"), 3)
Count <- c(length(unique(gadChecklist$FBName[gadChecklist$fbATLPresent])),
           length(unique(gadChecklist$FBName[!gadChecklist$fbATLPresent])),
           length(unique(gadChecklist$OBISChecklist[gadChecklist$OBIS_atlPresent])),
           length(unique(gadChecklist$OBISChecklist[!gadChecklist$OBIS_atlPresent])),
           length(unique(gadChecklist$EschmeyerName[gadChecklist$isAtlantic])),
           length(unique(gadChecklist$EschmeyerName[!gadChecklist$isAtlantic])))
data <- data.frame(Source, Location, Count)
# Cod silhouette from phylopic (UUID refers to a phylopic image record).
cod <- image_data("bba1800a-dd86-451d-a79b-c5944cfe5231", size = 256)[[1]]
ggplot(data, aes(fill=Location, y=Count, x=Source)) +
  geom_bar(position="stack", stat="identity") +
  ggtitle("Checklist of Cods and Their Allies") +
  scale_fill_brewer(palette="Dark2") +
  theme_minimal(base_size = 18) +
  add_phylopic(cod, alpha = 1, x = 3, y = sum(data[1:2,3])-27.5+5, ysize = 55)
# Scombriformes
scombChecklist <- atlChecklist[atlChecklist$Group == "S",]
Source <- c(rep("Fishbase", 2), rep("OBIS", 2), rep("Curated", 2))
Location <- rep(c("Atlantic", "Not Atlantic"), 3)
Count <- c(length(unique(scombChecklist$FBName[scombChecklist$fbATLPresent])),
           length(unique(scombChecklist$FBName[!scombChecklist$fbATLPresent])),
           length(unique(scombChecklist$OBISChecklist[scombChecklist$OBIS_atlPresent])),
           length(unique(scombChecklist$OBISChecklist[!scombChecklist$OBIS_atlPresent])),
           length(unique(scombChecklist$EschmeyerName[scombChecklist$isAtlantic])),
           length(unique(scombChecklist$EschmeyerName[!scombChecklist$isAtlantic])))
data <- data.frame(Source, Location, Count)
tuna <- image_data("16989401-0080-4502-828d-e85a45a262be", size = 256)[[1]]
ggplot(data, aes(fill=Location, y=Count, x=Source)) +
  geom_bar(position="stack", stat="identity") +
  ggtitle("Checklist of Tunas and Their Allies") +
  scale_fill_brewer(palette="Dark2") +
  theme_minimal(base_size = 18) +
  add_phylopic(tuna, alpha = 1, x = 3, y = sum(data[5:6,3])-(35/2) + 5, ysize = 35)
# Beloniformes
belonChecklist <- atlChecklist[atlChecklist$Group == "B",]
Source <- c(rep("Fishbase", 2), rep("OBIS", 2), rep("Curated", 2))
Location <- rep(c("Atlantic", "Not Atlantic"), 3)
Count <- c(length(unique(belonChecklist$FBName[belonChecklist$fbATLPresent])),
           length(unique(belonChecklist$FBName[!belonChecklist$fbATLPresent])),
           length(unique(belonChecklist$OBISChecklist[belonChecklist$OBIS_atlPresent])),
           length(unique(belonChecklist$OBISChecklist[!belonChecklist$OBIS_atlPresent])),
           length(unique(belonChecklist$EschmeyerName[belonChecklist$isAtlantic])),
           length(unique(belonChecklist$EschmeyerName[!belonChecklist$isAtlantic])))
data <- data.frame(Source, Location, Count)
flyingFish <- image_data("b6626eff-b00e-428a-b3fa-51679a0cfaa2", size = 256)[[1]]
ggplot(data, aes(fill=Location, y=Count, x=Source)) +
  geom_bar(position="stack", stat="identity") +
  ggtitle("Checklist of Flyingfishes and Their Allies") +
  scale_fill_brewer(palette="Dark2") +
  theme_minimal(base_size = 18) +
  add_phylopic(flyingFish, alpha = 1, x = 3, y = sum(data[5:6,3])-7.5, ysize = 15)
# Query GBIF for occurrence data ----
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest_curated8Jan.csv", stringsAsFactors = F)
atlChecklist <- atlChecklist[atlChecklist$isAtlantic,]
ddList <- read.csv("data/postVisualizationOccurrenceCounts_withNotes.csv")
ddList <- ddList[!ddList$Remove.from.dataset == "T",]
# SECURITY: the GBIF password was committed here in plain text. Read it from
# the environment instead; the literal is kept only as a fallback so existing
# runs keep working -- rotate this credential and delete the fallback.
login <- GBIFLoginManager(user = "hannah0wens",
                          email = "hannah.owens@gmail.com",
                          pwd = Sys.getenv("GBIF_PWD", unset = "Llab7a3m!"))
# Keep only taxa that survived the data-deficiency screen: a row stays if any
# of its three name columns appears in the screened list.
inDDlist <- vapply(seq_len(nrow(atlChecklist)),
                   function(x) any(atlChecklist$EschmeyerName[[x]] %in% ddList$X,
                                   atlChecklist$OBISChecklist[[x]] %in% ddList$X,
                                   atlChecklist$GBIFnames[[x]] %in% ddList$X),
                   logical(1))
atlChecklist <- atlChecklist[inDDlist,]
# Search GBIF under both the matched backbone name and any recorded synonym.
GBIFsearchList <- c(atlChecklist$GBIFnames, atlChecklist$GBIF.Synonym)
GBIFsearchList <- unique(GBIFsearchList[!is.na(GBIFsearchList)])
GBIFsearchList <- studyTaxonList(GBIFsearchList, datasources = "GBIF Backbone Taxonomy")
myBridgeTreeObject <- occQuery(x = GBIFsearchList, GBIFLogin = login, datasources = "gbif",
                               GBIFDownloadDirectory = "data/GBIFDownloads/")
saveRDS(myBridgeTreeObject, file = "data/GBIFDownloads/myBridgeTreeObject")
myBridgeTreeObject <- readRDS(file = "data/GBIFDownloads/myBridgeTreeObject")
#Get and write citations
myOccCitations <- occCitation(myBridgeTreeObject)
sink("data/rawCitations.txt")
print(myOccCitations)
sink()
# Query OBIS for occurrence data ----
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest_curated8Jan.csv", stringsAsFactors = F)
atlChecklist <- atlChecklist[atlChecklist$isAtlantic,]
ddList <- read.csv("data/postVisualizationOccurrenceCounts_withNotes.csv")
ddList <- ddList[!ddList$Remove.from.dataset == "T",]
# Keep only taxa that survived the data-deficiency screen.
# FIX: the original indexed rows with the raw list (atlChecklist[inDDlist,]),
# unlike the GBIF section which unlist()ed first; rows must be selected with
# a logical vector.
inDDlist <- vapply(seq_len(nrow(atlChecklist)),
                   function(x) any(atlChecklist$EschmeyerName[[x]] %in% ddList$X,
                                   atlChecklist$OBISChecklist[[x]] %in% ddList$X,
                                   atlChecklist$GBIFnames[[x]] %in% ddList$X),
                   logical(1))
atlChecklist <- atlChecklist[inDDlist,]
OBISsearchList <- c(atlChecklist$OBISChecklist)
OBISsearchList <- unique(OBISsearchList[!is.na(OBISsearchList)])
OBISresults <- vector(mode = "list", length = length(OBISsearchList))
OBIScitations <- NULL
# Download OBIS occurrences per name; write each result to its own .csv and
# accumulate the dataset ids for citation later.
for(idx in seq_along(OBISsearchList)){
  name <- OBISsearchList[idx]
  print(name)
  OBISresults[[idx]] <- occurrence(scientificname = name, absence = F)
  print(nrow(OBISresults[[idx]]))
  # NOTE(review): '> 1' means species with exactly one record are never
  # written -- confirm whether '>= 1' was intended.
  if(nrow(OBISresults[[idx]]) > 1){
    write.csv(OBISresults[[idx]],
              file = paste0("data/OBISDownloads/", name, ".csv"))
    OBIScitations <- c(unique(OBISresults[[idx]]$dataset_id),
                       OBIScitations)
  }
}
# Get and save raw OBIS citation keys
# Pull the full OBIS dataset registry, then write (1) the raw dataset ids and
# (2) the formatted citations for the datasets actually used.
obisDatasetCitations <- robis::dataset()
OBIScitations <- unique(OBIScitations)
sink("data/OBISDownloads/citationKeys.txt", append = F)
OBIScitations
sink()
mardigrasOBIScitations <- obisDatasetCitations[obisDatasetCitations$id %in% OBIScitations,]$citation
mardigrasOBIScitations <- mardigrasOBIScitations[!is.na(mardigrasOBIScitations)]
sink("data/rawOBISCitations.txt", append = F)
paste0(mardigrasOBIScitations, " Accessed via OBIS on 2020-11-04.")
sink()
# Process all results into common by-species .csv occurrence files ----
# TRUE where x has no fractional part; used below to flag whole-number
# coordinates as likely low-precision records.
zeroDecCheck <- function(x) {
  (x %% 1) == 0
}
# Get data
# Reload the GBIF bridge object and list the per-species OBIS downloads, then
# rebuild the screened Atlantic checklist used to drive the merge loop below.
gbifData <- readRDS(file = "data/GBIFDownloads/myBridgeTreeObject")
obisData <- list.files(path = "data/OBISDownloads/", pattern = ".csv", full.names = T)
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest_curated8Jan.csv", stringsAsFactors = F)
atlChecklist <- atlChecklist[atlChecklist$isAtlantic,]
ddList <- read.csv("data/postVisualizationOccurrenceCounts_withNotes.csv")
ddList <- ddList[!ddList$Remove.from.dataset == "T",]
# Keep rows matching the data-deficiency screen on any of the three names.
inDDlist <- lapply(X = 1:nrow(atlChecklist),
                   FUN = function(x) any(atlChecklist$EschmeyerName[[x]] %in% ddList$X,
                                         atlChecklist$OBISChecklist[[x]] %in% ddList$X,
                                         atlChecklist$GBIFnames[[x]] %in% ddList$X))
atlChecklist <- atlChecklist[unlist(inDDlist),]
# Merging datasets and doing some preliminary cleaning
# For each curated species: pull its GBIF download (accepted name and, if
# present, a GBIF synonym) plus its OBIS download, coerce all three to a
# common column layout, drop records whose coordinates have no decimal part
# (a proxy for low-precision localities), and write one merged .csv.
for(sp in atlChecklist$EschmeyerName){
  GBIFrec <- NULL
  GBIFsynRec <- NULL
  OBISrec <- NULL
  print(sp)
  # --- GBIF, accepted name ---
  spGBIF <- atlChecklist$GBIFnames[match(sp, atlChecklist$EschmeyerName)] # Get GBIF record, if present
  if(!is.na(spGBIF)){
    spGBIF <- gbifData@cleanedTaxonomy$`Best Match`[match(spGBIF, gbifData@cleanedTaxonomy$`Input Name`)]
    GBIFrec <- gbifData@occResults[[match(spGBIF, names(gbifData@occResults))]]$GBIF$RawOccurrences
    GBIFrec <- occ_download_import(GBIFrec, path = GBIFrec[[1]]) %>%
      dplyr::select(scientificName, decimalLatitude, decimalLongitude,
                    coordinateUncertaintyInMeters, year, month, day,
                    depth, depthAccuracy, basisOfRecord, issue)
    if(nrow(GBIFrec) > 0){
      # FIX: rep() has no argument 'n' (its formals are times/length.out/each),
      # so rep("GBIF", n = ...) silently returned a length-1 vector that
      # cbind() happened to recycle; build the column at the intended length.
      GBIFrec <- cbind(GBIFrec, source = rep("GBIF", nrow(GBIFrec)))
      GBIFrec <- GBIFrec[!zeroDecCheck(GBIFrec$decimalLatitude),]
      GBIFrec <- GBIFrec[!zeroDecCheck(GBIFrec$decimalLongitude),]
    } else {
      GBIFrec <- NULL # a zero-row table lacks 'source' and would break rbind()
    }
  }
  # --- GBIF, synonym ---
  spGBIFSyn <- atlChecklist$GBIF.Synonym[match(sp, atlChecklist$EschmeyerName)] # Get synonymous record, if present
  if(!is.na(spGBIFSyn)){
    spGBIFSyn <- gbifData@cleanedTaxonomy$`Best Match`[match(spGBIFSyn, gbifData@cleanedTaxonomy$`Input Name`)]
    # FIX: the original read 'spGBIFsyn' (lower-case s) on the next line --
    # an undefined object -- so any species with a GBIF synonym errored out.
    GBIFsynRec <- gbifData@occResults[[match(spGBIFSyn, names(gbifData@occResults))]]$GBIF$RawOccurrences
    GBIFsynRec <- occ_download_import(GBIFsynRec, path = GBIFsynRec[[1]]) %>%
      dplyr::select(scientificName, decimalLatitude, decimalLongitude,
                    coordinateUncertaintyInMeters, year, month, day,
                    depth, depthAccuracy, basisOfRecord, issue)
    if(nrow(GBIFsynRec) > 0){
      GBIFsynRec <- cbind(GBIFsynRec, source = rep("GBIF", nrow(GBIFsynRec)))
      GBIFsynRec <- GBIFsynRec[!zeroDecCheck(GBIFsynRec$decimalLatitude),]
      GBIFsynRec <- GBIFsynRec[!zeroDecCheck(GBIFsynRec$decimalLongitude),]
    } else {
      GBIFsynRec <- NULL
    }
  }
  # --- OBIS ---
  requiredOBISData <- c("scientificName", "decimalLatitude", "decimalLongitude",
                        "coordinateUncertaintyInMeters",
                        "eventDate", "year", "month", "day",
                        "depth", "basisOfRecord", "flags")
  spOBIS <- atlChecklist$OBISChecklist[match(sp,atlChecklist$EschmeyerName)]
  if(!is.na(spOBIS) && any(grepl(pattern = spOBIS, obisData))){
    spOBIS <- paste0("data/OBISDownloads/", spOBIS, ".csv")
    OBISrec <- read.csv(spOBIS)
    OBISrec <- OBISrec %>% dplyr::select(any_of(requiredOBISData))
    # Harmonize dates: prefer explicit year/month/day columns; otherwise
    # parse eventDate; otherwise pad with NA.
    if (all(c("year", "month", "day", "eventDate") %in% colnames(OBISrec))){
      OBISrec <- OBISrec[,-match("eventDate", colnames(OBISrec))]
    } else if("eventDate" %in% colnames(OBISrec)){
      parsedDates <- lubridate::as_date(OBISrec$eventDate)
      dates <- cbind(format(parsedDates, format = "%Y"),
                     format(parsedDates, format = "%m"),
                     format(parsedDates, format = "%d"))
      colnames(dates) <- c("year", "month", "day")
      OBISrec <- cbind(OBISrec, dates)
    } else {
      dates <- matrix(NA, nrow = nrow(OBISrec), ncol = 3,
                      dimnames = list(NULL, c("year", "month", "day")))
      OBISrec <- cbind(OBISrec, dates)
    }
    # Pad any missing optional columns with NA so all sources share a schema.
    for (col in c("coordinateUncertaintyInMeters", "depth", "basisOfRecord", "flags")) {
      if (!(col %in% colnames(OBISrec))) {
        OBISrec[[col]] <- rep(NA, nrow(OBISrec))
      }
    }
    colnames(OBISrec)[match("flags", colnames(OBISrec))] <- "issue"
    OBISrec <- cbind(OBISrec, depthAccuracy = rep(NA, nrow(OBISrec)))
    OBISrec <- OBISrec[ , c("scientificName",
                            "decimalLatitude", "decimalLongitude",
                            "coordinateUncertaintyInMeters",
                            "year", "month", "day",
                            "depth", "depthAccuracy",
                            "basisOfRecord", "issue")]
    if(nrow(OBISrec) > 0){
      OBISrec <- cbind(OBISrec, source = rep("OBIS", nrow(OBISrec)))
      OBISrec <- OBISrec[!zeroDecCheck(OBISrec$decimalLatitude),]
      OBISrec <- OBISrec[!zeroDecCheck(OBISrec$decimalLongitude),]
    } else {
      OBISrec <- NULL
    }
  }
  AllRec <- rbind(GBIFrec, GBIFsynRec, OBISrec)
  # FIX: guard the all-sources-empty case; rbind(NULL, NULL, NULL) is NULL
  # and the original's '$<-' assignment then turned it into a malformed list.
  if (is.null(AllRec)) {
    print(paste0(sp, " has 0 points."))
    next
  }
  # Standardize on the curated (Eschmeyer) name for the merged file.
  AllRec$scientificName <- rep(sp, nrow(AllRec))
  print(paste0(sp, " has ", nrow(AllRec), " points."))
  write.csv(AllRec, file = paste0("data/MergedOccurrences/", sp, ".csv"))
}
| /OccurrenceSearch_DDSpRemoved.R | no_license | hannahlowens/empiricalBiodiversity | R | false | false | 20,523 | r | library(occCite)
library(rfishbase)
library(robis)
library(dplyr)
library(rgbif)
library(rgdal)
library(raster)
library(ggplot2)
library(viridis)
library(rphylopic)
# Get lists of taxa from FishBase and OBIS -----
# Convenience infix operator: TRUE for elements of x that are NOT in y.
`%!in%` <- function(x, y) {
  !(x %in% y)
}
# Getting species lists
# For each of the three target clades: pull the FishBase list, pull the OBIS
# checklist restricted to accepted species, then record the set differences
# and the intersection between the two sources.
gadiformListFB <- species_list(Order = "Gadiformes")
gadiformListOBIS <- checklist("Gadiformes")
gadiformListOBIS <- gadiformListOBIS %>% filter(taxonRank == "Species", taxonomicStatus == "accepted")
inFBnotOBIS <- gadiformListFB[gadiformListFB %!in% gadiformListOBIS$acceptedNameUsage]
inOBISnotFB <- gadiformListOBIS[gadiformListOBIS$acceptedNameUsage %!in% gadiformListFB,]
inBoth <- gadiformListFB[gadiformListFB %in% gadiformListOBIS$acceptedNameUsage]
# Scombriformes are queried family-by-family (FishBase has no single order).
scombriformListFB <- species_list(Family = c("Amarsipidae", "Nomeidae", "Ariommatidae",
                                             "Stromateidae", "Pomatomidae", "Centrolophidae",
                                             "Icosteidae", "Arripidae", "Tetragonuridae",
                                             "Chiasmodontidae", "Scombridae", "Caristiidae",
                                             "Bramidae", "Scombrolabracidae", "Scombropidae",
                                             "Gempylidae", "Trichiuridae")) # From Betancur-R et al 2017; Eschmeyer was missing Scombropidae
scombriformListOBIS <- checklist(c("Amarsipidae", "Nomeidae", "Ariommatidae",
                                   "Stromateidae", "Pomatomidae", "Centrolophidae",
                                   "Icosteidae", "Arripidae", "Tetragonuridae",
                                   "Chiasmodontidae", "Scombridae", "Caristiidae",
                                   "Bramidae", "Scombrolabracidae", "Scombropidae",
                                   "Gempylidae", "Trichiuridae"))
scombriformListOBIS <- scombriformListOBIS %>% filter(taxonRank == "Species", taxonomicStatus == "accepted")
scombInFBnotOBIS <- scombriformListFB[scombriformListFB %!in% scombriformListOBIS$acceptedNameUsage]
scombInOBISnotFB <- scombriformListOBIS[scombriformListOBIS$acceptedNameUsage %!in% scombriformListFB,]
scombInBoth <- scombriformListFB[scombriformListFB %in% scombriformListOBIS$acceptedNameUsage]
belonListFB <- species_list(Family = c("Scomberesocidae", "Belonidae", "Hemiramphidae",
                                       "Zenarchopteridae", "Exocoetidae", "Adrianichthyidae"))
belonListOBIS <- checklist(c("Scomberesocidae", "Belonidae", "Hemiramphidae",
                             "Zenarchopteridae", "Exocoetidae", "Adrianichthyidae")) # Eschmeyer and Betancur-R agree (low boostrap support for the group in BR, though)
belonListOBIS <- belonListOBIS %>% filter(taxonRank == "Species", taxonomicStatus == "accepted")
belonInFBnotOBIS <- belonListFB[belonListFB %!in% belonListOBIS$acceptedNameUsage]
belonInOBISnotFB <- belonListOBIS[belonListOBIS$acceptedNameUsage %!in% belonListFB,]
belonInBoth <- belonListFB[belonListFB %in% belonListOBIS$acceptedNameUsage]
# Next, hand-curated lists, compared to Eschmeyer's Catalog, accessed September 2020
# Filter list for species found in the Atlantic -----
masterList <- read.csv("data/TaxonomicResolution.csv")
# Atlantic distribution according to Fishbase
fishbaseDistributions <- distribution(masterList$FBName)
# Species-by-FAO-area incidence table.
fishbaseDistributions <- table(fishbaseDistributions[,c("Species", "FAO")])
fishbaseDistributions <- fishbaseDistributions[,c("Atlantic, Antarctic", "Atlantic, Eastern Central",
                                                  "Atlantic, Northeast", "Atlantic, Northwest",
                                                  "Atlantic, Southeast", "Atlantic, Southwest",
                                                  "Atlantic, Western Central",
                                                  "Mediterranean and Black Sea")] # Area of interest
# Row sums: in how many Atlantic/Mediterranean FAO areas each species occurs.
fishbaseDistributions <- apply(fishbaseDistributions, 1, sum)
fbATLPresent <- fishbaseDistributions[masterList$FBName] > 0 # Presence/absence in area of interest
masterList <- cbind(masterList,fbATLPresent)
# Atlantic distribution according to OBIS
# Dissolve the FAO fishing-area polygons into a single Atlantic footprint,
# save it, then simplify and convert to WKT for use as an OBIS query geometry.
# NOTE(review): rgdal/rgeos/wicket are retired packages -- consider sf.
faoShapefile <- readOGR("data/FAO Fishing Areas 2005/FAO_AREAS.shp")
atlanticShapefile <- aggregate(faoShapefile[faoShapefile$OCEAN=="Atlantic",], dissolve = T)
OCEAN <- "Atlantic"
atlanticShapefile <- SpatialPolygonsDataFrame(atlanticShapefile, as.data.frame(OCEAN))
writeOGR(obj = atlanticShapefile, dsn = "data/", layer = "atlantic", driver = "ESRI Shapefile")
atlanticSimp <- rgeos::gSimplify(atlanticShapefile, tol = 3)
atlanticWKT <- wicket::sp_convert(atlanticSimp, group = T)
atlSpp <- NULL
index <- 1
# Because OBIS gets angry when you ask for more than 100 names at a time,
# page through the unique names in chunks of 100.
# FIX: the original condition (length > index) skipped the final name when
# the list length landed exactly on a chunk boundary + 1, and
# index:(index + 99) could run past the end of the vector, sending NA
# "names" to OBIS on the last chunk.
obisNames <- unique(masterList$OBISChecklist)
while (index <= length(obisNames)) {
  chunkEnd <- min(index + 99, length(obisNames))
  atlSpp <- append(atlSpp, checklist(scientificname = obisNames[index:chunkEnd],
                                     geometry = atlanticWKT)$scientificName)
  print(index)
  index <- index + 100
}
# Toss subspecific epithets: collapse "Genus species subsp" to "Genus species".
# (Arguments put in gsub's conventional positional order; the original passed
# atlSpp as a lone positional argument among named ones and relied on R's
# argument matching to route it to 'x'.)
atlSpp <- gsub("(\\w+\\s\\w+)(\\s\\w+)", "\\1", atlSpp, perl = TRUE)
# Flag OBIS-side Atlantic presence and agreement between the two sources.
OBIS_atlPresent <- masterList$OBISChecklist %in% atlSpp
masterList <- cbind(masterList, OBIS_atlPresent)
fbOBagree <- masterList$fbATLPresent == masterList$OBIS_atlPresent
masterList <- cbind(masterList, fbOBagree)
write.csv(masterList, "data/taxaWithinAreaOfInterest.csv", row.names = F)
# At this point, I did a manual rectification of the list in cases where there was no data or sources disagreed
# This list was finalized as of Sept 1, 2020
# Getting GBIF taxonomy to verify
# For each curated (Eschmeyer) name, look up the single best GBIF backbone
# match and record its scientific name (NA when GBIF has no match).
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest.csv")
GBIFnames <- vector(mode = "character", length = nrow(atlChecklist))
for(i in seq_len(nrow(atlChecklist))){
  nameCheck <- as.character(atlChecklist$EschmeyerName[i])
  GBIFname <- name_lookup(query = nameCheck, limit = 1)
  # FIX: length() on the returned tibble counts columns, not rows, so an
  # empty (0-row) result with columns present slipped past the original
  # check; test for missing/zero-row data instead.
  if(is.null(GBIFname$data) || nrow(GBIFname$data) == 0){
    GBIFnames[[i]] <- NA
  } else{
    GBIFnames[[i]] <- GBIFname$data$scientificName[1]
  }
}
atlChecklist <- cbind(atlChecklist, GBIFnames)
write.csv(atlChecklist, "data/taxaWithinAreaOfInterest.csv", row.names = F)
# Visualizing the checklist ----
# Stacked bar charts of Atlantic vs non-Atlantic species counts, one bar per
# source (FishBase, OBIS, hand-curated), first for all taxa and then per
# clade with a phylopic silhouette overlaid.
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest_curated8Jan.csv")
# All taxa
Source <- c(rep("Fishbase", 2), rep("OBIS", 2), rep("Curated", 2))
Location <- rep(c("Atlantic", "Not Atlantic"), 3)
Count <- c(length(unique(atlChecklist$FBName[atlChecklist$fbATLPresent])),
           length(unique(atlChecklist$FBName[!atlChecklist$fbATLPresent])),
           length(unique(atlChecklist$OBISChecklist[atlChecklist$OBIS_atlPresent])),
           length(unique(atlChecklist$OBISChecklist[!atlChecklist$OBIS_atlPresent])),
           length(unique(atlChecklist$EschmeyerName[atlChecklist$isAtlantic])),
           length(unique(atlChecklist$EschmeyerName[!atlChecklist$isAtlantic])))
data <- data.frame(Source, Location, Count)
ggplot(data, aes(fill=Location, y=Count, x=Source)) +
  geom_bar(position="stack", stat="identity") +
  ggtitle("Checklist of Cods, Tunas, Flyingfishes, and Their Allies") +
  scale_fill_brewer(palette="Dark2") +
  theme_minimal(base_size = 18)
# Gadiformes
gadChecklist <- atlChecklist[atlChecklist$Group == "G",]
Source <- c(rep("Fishbase", 2), rep("OBIS", 2), rep("Curated", 2))
Location <- rep(c("Atlantic", "Not Atlantic"), 3)
Count <- c(length(unique(gadChecklist$FBName[gadChecklist$fbATLPresent])),
           length(unique(gadChecklist$FBName[!gadChecklist$fbATLPresent])),
           length(unique(gadChecklist$OBISChecklist[gadChecklist$OBIS_atlPresent])),
           length(unique(gadChecklist$OBISChecklist[!gadChecklist$OBIS_atlPresent])),
           length(unique(gadChecklist$EschmeyerName[gadChecklist$isAtlantic])),
           length(unique(gadChecklist$EschmeyerName[!gadChecklist$isAtlantic])))
data <- data.frame(Source, Location, Count)
# Cod silhouette from phylopic (UUID refers to a phylopic image record).
cod <- image_data("bba1800a-dd86-451d-a79b-c5944cfe5231", size = 256)[[1]]
ggplot(data, aes(fill=Location, y=Count, x=Source)) +
  geom_bar(position="stack", stat="identity") +
  ggtitle("Checklist of Cods and Their Allies") +
  scale_fill_brewer(palette="Dark2") +
  theme_minimal(base_size = 18) +
  add_phylopic(cod, alpha = 1, x = 3, y = sum(data[1:2,3])-27.5+5, ysize = 55)
# Scombriformes
scombChecklist <- atlChecklist[atlChecklist$Group == "S",]
Source <- c(rep("Fishbase", 2), rep("OBIS", 2), rep("Curated", 2))
Location <- rep(c("Atlantic", "Not Atlantic"), 3)
Count <- c(length(unique(scombChecklist$FBName[scombChecklist$fbATLPresent])),
           length(unique(scombChecklist$FBName[!scombChecklist$fbATLPresent])),
           length(unique(scombChecklist$OBISChecklist[scombChecklist$OBIS_atlPresent])),
           length(unique(scombChecklist$OBISChecklist[!scombChecklist$OBIS_atlPresent])),
           length(unique(scombChecklist$EschmeyerName[scombChecklist$isAtlantic])),
           length(unique(scombChecklist$EschmeyerName[!scombChecklist$isAtlantic])))
data <- data.frame(Source, Location, Count)
tuna <- image_data("16989401-0080-4502-828d-e85a45a262be", size = 256)[[1]]
ggplot(data, aes(fill=Location, y=Count, x=Source)) +
  geom_bar(position="stack", stat="identity") +
  ggtitle("Checklist of Tunas and Their Allies") +
  scale_fill_brewer(palette="Dark2") +
  theme_minimal(base_size = 18) +
  add_phylopic(tuna, alpha = 1, x = 3, y = sum(data[5:6,3])-(35/2) + 5, ysize = 35)
# Beloniformes
belonChecklist <- atlChecklist[atlChecklist$Group == "B",]
Source <- c(rep("Fishbase", 2), rep("OBIS", 2), rep("Curated", 2))
Location <- rep(c("Atlantic", "Not Atlantic"), 3)
Count <- c(length(unique(belonChecklist$FBName[belonChecklist$fbATLPresent])),
           length(unique(belonChecklist$FBName[!belonChecklist$fbATLPresent])),
           length(unique(belonChecklist$OBISChecklist[belonChecklist$OBIS_atlPresent])),
           length(unique(belonChecklist$OBISChecklist[!belonChecklist$OBIS_atlPresent])),
           length(unique(belonChecklist$EschmeyerName[belonChecklist$isAtlantic])),
           length(unique(belonChecklist$EschmeyerName[!belonChecklist$isAtlantic])))
data <- data.frame(Source, Location, Count)
flyingFish <- image_data("b6626eff-b00e-428a-b3fa-51679a0cfaa2", size = 256)[[1]]
ggplot(data, aes(fill=Location, y=Count, x=Source)) +
  geom_bar(position="stack", stat="identity") +
  ggtitle("Checklist of Flyingfishes and Their Allies") +
  scale_fill_brewer(palette="Dark2") +
  theme_minimal(base_size = 18) +
  add_phylopic(flyingFish, alpha = 1, x = 3, y = sum(data[5:6,3])-7.5, ysize = 15)
# Query GBIF for occurrence data ----
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest_curated8Jan.csv", stringsAsFactors = F)
atlChecklist <- atlChecklist[atlChecklist$isAtlantic,]
ddList <- read.csv("data/postVisualizationOccurrenceCounts_withNotes.csv")
ddList <- ddList[!ddList$Remove.from.dataset == "T",]
# SECURITY: the GBIF password was committed here in plain text. Read it from
# the environment instead; the literal is kept only as a fallback so existing
# runs keep working -- rotate this credential and delete the fallback.
login <- GBIFLoginManager(user = "hannah0wens",
                          email = "hannah.owens@gmail.com",
                          pwd = Sys.getenv("GBIF_PWD", unset = "Llab7a3m!"))
# Keep only taxa that survived the data-deficiency screen: a row stays if any
# of its three name columns appears in the screened list.
inDDlist <- vapply(seq_len(nrow(atlChecklist)),
                   function(x) any(atlChecklist$EschmeyerName[[x]] %in% ddList$X,
                                   atlChecklist$OBISChecklist[[x]] %in% ddList$X,
                                   atlChecklist$GBIFnames[[x]] %in% ddList$X),
                   logical(1))
atlChecklist <- atlChecklist[inDDlist,]
# Search GBIF under both the matched backbone name and any recorded synonym.
GBIFsearchList <- c(atlChecklist$GBIFnames, atlChecklist$GBIF.Synonym)
GBIFsearchList <- unique(GBIFsearchList[!is.na(GBIFsearchList)])
GBIFsearchList <- studyTaxonList(GBIFsearchList, datasources = "GBIF Backbone Taxonomy")
myBridgeTreeObject <- occQuery(x = GBIFsearchList, GBIFLogin = login, datasources = "gbif",
                               GBIFDownloadDirectory = "data/GBIFDownloads/")
saveRDS(myBridgeTreeObject, file = "data/GBIFDownloads/myBridgeTreeObject")
myBridgeTreeObject <- readRDS(file = "data/GBIFDownloads/myBridgeTreeObject")
#Get and write citations
myOccCitations <- occCitation(myBridgeTreeObject)
sink("data/rawCitations.txt")
print(myOccCitations)
sink()
# Query OBIS for occurrence data ----
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest_curated8Jan.csv", stringsAsFactors = F)
atlChecklist <- atlChecklist[atlChecklist$isAtlantic, ]
ddList <- read.csv("data/postVisualizationOccurrenceCounts_withNotes.csv")
ddList <- ddList[!ddList$Remove.from.dataset == "T", ]
# Keep only checklist rows whose name (under any of the three naming schemes)
# appears in the curated occurrence-count list.
inDDlist <- lapply(X = 1:nrow(atlChecklist),
                   FUN = function(x) any(atlChecklist$EschmeyerName[[x]] %in% ddList$X,
                                         atlChecklist$OBISChecklist[[x]] %in% ddList$X,
                                         atlChecklist$GBIFnames[[x]] %in% ddList$X))
# BUG FIX: inDDlist is a list; row subsetting needs a logical vector.
# The equivalent GBIF and merge sections both use unlist() here.
atlChecklist <- atlChecklist[unlist(inDDlist), ]
OBISsearchList <- c(atlChecklist$OBISChecklist)
OBISsearchList <- unique(OBISsearchList[!is.na(OBISsearchList)])
OBISresults <- vector(mode = "list", length = length(OBISsearchList))
OBIScitations <- NULL
# Download OBIS occurrences per name, saving each non-trivial result to CSV
# and accumulating its dataset ids for citation.
for (name in OBISsearchList) {
  print(name)
  i <- match(name, OBISsearchList)  # hoist the repeated match() lookup
  OBISresults[[i]] <- occurrence(scientificname = name, absence = F)
  print(nrow(OBISresults[[i]]))
  # NOTE(review): names returning exactly one record are silently skipped;
  # confirm that `>= 1` was not intended.
  if (nrow(OBISresults[[i]]) > 1) {
    write.csv(OBISresults[[i]],
              file = paste0("data/OBISDownloads/", name, ".csv"))
    OBIScitations <- c(unique(OBISresults[[i]]$dataset_id),
                       OBIScitations)
  }
}
# Get and save raw OBIS citation keys
obisDatasetCitations <- robis::dataset()
OBIScitations <- unique(OBIScitations)
sink("data/OBISDownloads/citationKeys.txt", append = F)
# BUG FIX: a bare symbol auto-prints when run interactively or via Rscript,
# but writes nothing into the sink when the script is source()d; the GBIF
# section above already prints explicitly.
print(OBIScitations)
sink()
mardigrasOBIScitations <- obisDatasetCitations[obisDatasetCitations$id %in% OBIScitations, ]$citation
mardigrasOBIScitations <- mardigrasOBIScitations[!is.na(mardigrasOBIScitations)]
sink("data/rawOBISCitations.txt", append = F)
print(paste0(mardigrasOBIScitations, " Accessed via OBIS on 2020-11-04."))
sink()
# Process all results into common by-species .csv occurrence files ----
# Define function to check for decimals that are all 0s.
# Flag values with no fractional part (suspiciously "round" coordinates).
#
# @param x Numeric vector of coordinates.
# @return Logical vector, TRUE where the value is integer-valued or NA.
#
# BUG FIX: NA inputs previously returned NA, and the call sites subset with
# `rec[!zeroDecCheck(rec$lat), ]`, so an NA coordinate injected an all-NA
# garbage row. NA is now flagged TRUE so missing coordinates are dropped.
zeroDecCheck <- function(x) {
  is.na(x) | x %% 1 == 0
}
# Get data
gbifData <- readRDS(file = "data/GBIFDownloads/myBridgeTreeObject")
# FIX: anchor the regex so only files *ending* in ".csv" are listed; the
# original pattern ".csv" was an unanchored regex where "." matched any
# character.
obisData <- list.files(path = "data/OBISDownloads/", pattern = "\\.csv$", full.names = T)
atlChecklist <- read.csv("data/taxaWithinAreaOfInterest_curated8Jan.csv", stringsAsFactors = F)
atlChecklist <- atlChecklist[atlChecklist$isAtlantic, ]
ddList <- read.csv("data/postVisualizationOccurrenceCounts_withNotes.csv")
ddList <- ddList[!ddList$Remove.from.dataset == "T", ]
# Keep only checklist rows whose name (under any of the three naming schemes)
# appears in the curated occurrence-count list.
inDDlist <- lapply(X = 1:nrow(atlChecklist),
                   FUN = function(x) any(atlChecklist$EschmeyerName[[x]] %in% ddList$X,
                                         atlChecklist$OBISChecklist[[x]] %in% ddList$X,
                                         atlChecklist$GBIFnames[[x]] %in% ddList$X))
atlChecklist <- atlChecklist[unlist(inDDlist), ]
# Merging datasets and doing some preliminary cleaning
# For each accepted (Eschmeyer) name: gather GBIF records under the accepted
# name and its GBIF synonym plus OBIS records, harmonise columns, drop
# integer-valued (likely rounded/placeholder) coordinates, and write one
# merged CSV per species.
for (sp in atlChecklist$EschmeyerName) {
  GBIFrec <- NULL
  GBIFsynRec <- NULL
  OBISrec <- NULL
  print(sp)

  # --- GBIF records under the accepted name, if present ---
  spGBIF <- atlChecklist$GBIFnames[match(sp, atlChecklist$EschmeyerName)]
  if (!is.na(spGBIF)) {
    spGBIF <- gbifData@cleanedTaxonomy$`Best Match`[match(spGBIF, gbifData@cleanedTaxonomy$`Input Name`)]
    GBIFrec <- gbifData@occResults[[match(spGBIF, names(gbifData@occResults))]]$GBIF$RawOccurrences
    GBIFrec <- occ_download_import(GBIFrec,
                                   path = GBIFrec[[1]]) %>%
      dplyr::select(scientificName,
                    decimalLatitude,
                    decimalLongitude,
                    coordinateUncertaintyInMeters,
                    year, month, day,
                    depth, depthAccuracy,
                    basisOfRecord, issue)
    # BUG FIX: rep() has no argument `n` (its argument is `times`); the
    # original rep("GBIF", n = ...) silently produced a length-1 vector that
    # cbind() then happened to recycle.
    source <- rep("GBIF", nrow(GBIFrec))
    if (nrow(GBIFrec) > 0) GBIFrec <- cbind(GBIFrec, source)
    # Drop coordinates with no fractional part.
    GBIFrec <- GBIFrec[!zeroDecCheck(GBIFrec$decimalLatitude), ]
    GBIFrec <- GBIFrec[!zeroDecCheck(GBIFrec$decimalLongitude), ]
  }

  # --- GBIF records under the synonymous name, if present ---
  spGBIFSyn <- atlChecklist$GBIF.Synonym[match(sp, atlChecklist$EschmeyerName)]
  if (!is.na(spGBIFSyn)) {
    spGBIFSyn <- gbifData@cleanedTaxonomy$`Best Match`[match(spGBIFSyn, gbifData@cleanedTaxonomy$`Input Name`)]
    # BUG FIX: the original indexed with `spGBIFsyn` (lower-case "s"), an
    # undefined object, so any species with a GBIF synonym aborted the loop
    # with an "object not found" error.
    GBIFsynRec <- gbifData@occResults[[match(spGBIFSyn, names(gbifData@occResults))]]$GBIF$RawOccurrences
    GBIFsynRec <- occ_download_import(GBIFsynRec,
                                     path = GBIFsynRec[[1]]) %>%
      dplyr::select(scientificName,
                    decimalLatitude, decimalLongitude,
                    coordinateUncertaintyInMeters,
                    year, month, day,
                    depth, depthAccuracy,
                    basisOfRecord, issue)
    source <- rep("GBIF", nrow(GBIFsynRec))  # BUG FIX: was rep(..., n = ...)
    if (nrow(GBIFsynRec) > 0) GBIFsynRec <- cbind(GBIFsynRec, source)
    GBIFsynRec <- GBIFsynRec[!zeroDecCheck(GBIFsynRec$decimalLatitude), ]
    GBIFsynRec <- GBIFsynRec[!zeroDecCheck(GBIFsynRec$decimalLongitude), ]
  }

  # --- OBIS records, harmonised to the GBIF column layout ---
  requiredOBISData <- c("scientificName", "decimalLatitude", "decimalLongitude",
                        "coordinateUncertaintyInMeters",
                        "eventDate", "year", "month", "day",
                        "depth", "basisOfRecord", "flags")
  spOBIS <- atlChecklist$OBISChecklist[match(sp, atlChecklist$EschmeyerName)]
  if (!is.na(spOBIS) && any(grepl(pattern = spOBIS, obisData))) {
    spOBIS <- paste0("data/OBISDownloads/", spOBIS, ".csv")
    OBISrec <- read.csv(spOBIS)
    OBISrec <- OBISrec %>% dplyr::select(any_of(requiredOBISData))
    # Fix dates: prefer explicit year/month/day columns, else derive them
    # from eventDate, else pad with NA.
    if (all(c("year", "month", "day", "eventDate") %in% colnames(OBISrec))) {
      OBISrec <- OBISrec[, -match("eventDate", colnames(OBISrec))]
    } else if ("eventDate" %in% colnames(OBISrec)) {
      dates <- cbind(format(lubridate::as_date(OBISrec$eventDate), format = "%Y"),
                     format(lubridate::as_date(OBISrec$eventDate), format = "%m"),
                     format(lubridate::as_date(OBISrec$eventDate), format = "%d"))
      colnames(dates) <- c("year", "month", "day")
      OBISrec <- cbind(OBISrec, dates)
    } else {
      dates <- cbind(rep(NA, nrow(OBISrec)),
                     rep(NA, nrow(OBISrec)),
                     rep(NA, nrow(OBISrec)))
      colnames(dates) <- c("year", "month", "day")
      OBISrec <- cbind(OBISrec, dates)
    }
    # Pad any missing optional columns with NA so rbind() with GBIF works.
    if (!("coordinateUncertaintyInMeters" %in% colnames(OBISrec))) {
      coordinateUncertaintyInMeters <- rep(NA, nrow(OBISrec))
      OBISrec <- cbind(OBISrec, coordinateUncertaintyInMeters)
    }
    if (!("depth" %in% colnames(OBISrec))) {
      depth <- rep(NA, nrow(OBISrec))
      OBISrec <- cbind(OBISrec, depth)
    }
    if (!("basisOfRecord" %in% colnames(OBISrec))) {
      basisOfRecord <- rep(NA, nrow(OBISrec))
      OBISrec <- cbind(OBISrec, basisOfRecord)
    }
    if (!("flags" %in% colnames(OBISrec))) {
      flags <- rep(NA, nrow(OBISrec))
      OBISrec <- cbind(OBISrec, flags)
    }
    # OBIS "flags" plays the role of GBIF's "issue" column.
    colnames(OBISrec)[match("flags", colnames(OBISrec))] <- "issue"
    depthAccuracy <- rep(NA, nrow(OBISrec))  # OBIS has no depth-accuracy field
    OBISrec <- cbind(OBISrec, depthAccuracy)
    OBISrec <- OBISrec[, c("scientificName",
                           "decimalLatitude", "decimalLongitude",
                           "coordinateUncertaintyInMeters",
                           "year", "month", "day",
                           "depth", "depthAccuracy",
                           "basisOfRecord", "issue")]
    source <- rep("OBIS", nrow(OBISrec))  # BUG FIX: was rep(..., n = ...)
    if (nrow(OBISrec) > 0) OBISrec <- cbind(OBISrec, source)
    OBISrec <- OBISrec[!zeroDecCheck(OBISrec$decimalLatitude), ]
    OBISrec <- OBISrec[!zeroDecCheck(OBISrec$decimalLongitude), ]
  }

  AllRec <- rbind(GBIFrec, GBIFsynRec, OBISrec)
  # Robustness: skip species with no records from any source; the original
  # would otherwise coerce NULL into a malformed list via `$<-` and write a
  # broken CSV.
  if (is.null(AllRec)) {
    print(paste0(sp, " has 0 points."))
    next
  }
  AllRec$scientificName <- rep(sp, nrow(AllRec))
  print(paste0(sp, " has ", nrow(AllRec), " points."))
  write.csv(AllRec, file = paste0("data/MergedOccurrences/", sp, ".csv"))
}
|
\name{getFileListing}
\alias{getFileListing}
\title{creates a list from the url}
\usage{
getFileListing(inURL, usePWD = "readonly:readonly")
}
\arguments{
\item{inURL}{the url to go fetch}
\item{usePWD}{the password for the svn server}
}
\description{
creates a list from the url
}
| /man/getFileListing.Rd | no_license | rmflight/RVignetteChanges | R | false | false | 292 | rd | \name{getFileListing}
\alias{getFileListing}
\title{creates a list from the url}
\usage{
getFileListing(inURL, usePWD = "readonly:readonly")
}
\arguments{
\item{inURL}{the url to go fetch}
\item{usePWD}{the password for the svn server}
}
\description{
creates a list from the url
}
|
# Create UI, set up list of charts to be used, and set up chart style options of quantmod
shinyUI(pageWithSidebar(
  headerPanel("Stock Charts"),
  sidebarPanel(
    wellPanel(
      p(strong("List of Stocks to choose from")),
      checkboxInput(inputId = "stock_aapl", label = "Apple (AAPL)", value = TRUE),
      checkboxInput(inputId = "stock_msft", label = "Microsoft (MSFT)", value = FALSE),
      checkboxInput(inputId = "stock_amzn", label = "Amazon (AMZN)", value = FALSE),
      checkboxInput(inputId = "stock_googl", label = "Google (GOOGL)", value = TRUE),
      # FIX: label had an unbalanced parenthesis ("(INTC))").
      checkboxInput(inputId = "stock_intc", label = "Intel (INTC)", value = FALSE),
      checkboxInput(inputId = "stock_IBM", label = "IBM (IBM)", value = FALSE),
      checkboxInput(inputId = "stock_ORCL", label = "Oracle (ORCL)", value = FALSE),
      # FIX: ticker typo "TLSA" -> "TSLA".
      checkboxInput(inputId = "stock_TSLA", label = "Tesla (TSLA)", value = FALSE),
      checkboxInput(inputId = "stock_EA", label = "Electronic Arts (EA)", value = FALSE),
      checkboxInput(inputId = "stock_fb", label = "Facebook (FB)", value = FALSE)
    ),
    selectInput(inputId = "chart_type",
                label = "Chart type",
                choices = c("Candlestick" = "candlesticks",
                            "Matchstick" = "matchsticks",
                            "Bar" = "bars",
                            "Line" = "line")
    ),
    dateRangeInput(inputId = "daterange", label = "Date range \n (Must be at least 10 open market days)",
                   start = Sys.Date() - 30, end = Sys.Date()),
    # FIX: typo "Trandform" -> "Transform".
    checkboxInput(inputId = "log_y", label = "Transform to log y axis", value = FALSE)
  ),
  # One plot per stock, shown only while its checkbox is ticked.
  mainPanel(
    conditionalPanel(condition = "input.stock_aapl",
                     div(plotOutput(outputId = "plot_aapl"))),
    conditionalPanel(condition = "input.stock_msft",
                     div(plotOutput(outputId = "plot_msft"))),
    conditionalPanel(condition = "input.stock_amzn",
                     div(plotOutput(outputId = "plot_amzn"))),
    conditionalPanel(condition = "input.stock_googl",
                     div(plotOutput(outputId = "plot_googl"))),
    conditionalPanel(condition = "input.stock_intc",
                     div(plotOutput(outputId = "plot_intc"))),
    conditionalPanel(condition = "input.stock_IBM",
                     div(plotOutput(outputId = "plot_IBM"))),
    conditionalPanel(condition = "input.stock_ORCL",
                     div(plotOutput(outputId = "plot_ORCL"))),
    conditionalPanel(condition = "input.stock_TSLA",
                     div(plotOutput(outputId = "plot_TSLA"))),
    conditionalPanel(condition = "input.stock_EA",
                     div(plotOutput(outputId = "plot_EA"))),
    conditionalPanel(condition = "input.stock_fb",
                     plotOutput(outputId = "plot_fb"))
  )
)) | /ui.R | no_license | mzebear/615_Final | R | false | false | 2,987 | r | # Create UI, set up list of charts to be used, and set up chart style options of quantmod
shinyUI(pageWithSidebar(
  headerPanel("Stock Charts"),
  sidebarPanel(
    wellPanel(
      p(strong("List of Stocks to choose from")),
      checkboxInput(inputId = "stock_aapl", label = "Apple (AAPL)", value = TRUE),
      checkboxInput(inputId = "stock_msft", label = "Microsoft (MSFT)", value = FALSE),
      checkboxInput(inputId = "stock_amzn", label = "Amazon (AMZN)", value = FALSE),
      checkboxInput(inputId = "stock_googl", label = "Google (GOOGL)", value = TRUE),
      # FIX: label had an unbalanced parenthesis ("(INTC))").
      checkboxInput(inputId = "stock_intc", label = "Intel (INTC)", value = FALSE),
      checkboxInput(inputId = "stock_IBM", label = "IBM (IBM)", value = FALSE),
      checkboxInput(inputId = "stock_ORCL", label = "Oracle (ORCL)", value = FALSE),
      # FIX: ticker typo "TLSA" -> "TSLA".
      checkboxInput(inputId = "stock_TSLA", label = "Tesla (TSLA)", value = FALSE),
      checkboxInput(inputId = "stock_EA", label = "Electronic Arts (EA)", value = FALSE),
      checkboxInput(inputId = "stock_fb", label = "Facebook (FB)", value = FALSE)
    ),
    selectInput(inputId = "chart_type",
                label = "Chart type",
                choices = c("Candlestick" = "candlesticks",
                            "Matchstick" = "matchsticks",
                            "Bar" = "bars",
                            "Line" = "line")
    ),
    dateRangeInput(inputId = "daterange", label = "Date range \n (Must be at least 10 open market days)",
                   start = Sys.Date() - 30, end = Sys.Date()),
    # FIX: typo "Trandform" -> "Transform".
    checkboxInput(inputId = "log_y", label = "Transform to log y axis", value = FALSE)
  ),
  # One plot per stock, shown only while its checkbox is ticked.
  mainPanel(
    conditionalPanel(condition = "input.stock_aapl",
                     div(plotOutput(outputId = "plot_aapl"))),
    conditionalPanel(condition = "input.stock_msft",
                     div(plotOutput(outputId = "plot_msft"))),
    conditionalPanel(condition = "input.stock_amzn",
                     div(plotOutput(outputId = "plot_amzn"))),
    conditionalPanel(condition = "input.stock_googl",
                     div(plotOutput(outputId = "plot_googl"))),
    conditionalPanel(condition = "input.stock_intc",
                     div(plotOutput(outputId = "plot_intc"))),
    conditionalPanel(condition = "input.stock_IBM",
                     div(plotOutput(outputId = "plot_IBM"))),
    conditionalPanel(condition = "input.stock_ORCL",
                     div(plotOutput(outputId = "plot_ORCL"))),
    conditionalPanel(condition = "input.stock_TSLA",
                     div(plotOutput(outputId = "plot_TSLA"))),
    conditionalPanel(condition = "input.stock_EA",
                     div(plotOutput(outputId = "plot_EA"))),
    conditionalPanel(condition = "input.stock_fb",
                     plotOutput(outputId = "plot_fb"))
  )
)) |
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' Amazon QLDB Session
#'
#' @description
#' The transactional data APIs for Amazon QLDB
#'
#' Instead of interacting directly with this API, we recommend using the
#' QLDB driver or the QLDB shell to execute data transactions on a ledger.
#'
#' - If you are working with an AWS SDK, use the QLDB driver. The driver
#' provides a high-level abstraction layer above this *QLDB Session*
#' data plane and manages [`send_command`][qldbsession_send_command]
#' API calls for you. For information and a list of supported
#' programming languages, see [Getting started with the
#' driver](https://docs.aws.amazon.com/qldb/latest/developerguide/getting-started-driver.html)
#' in the *Amazon QLDB Developer Guide*.
#'
#' - If you are working with the AWS Command Line Interface (AWS CLI),
#' use the QLDB shell. The shell is a command line interface that uses
#' the QLDB driver to interact with a ledger. For information, see
#' [Accessing Amazon QLDB using the QLDB
#' shell](https://docs.aws.amazon.com/qldb/latest/developerguide/data-shell.html).
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e., `http://s3.amazonaws.com/BUCKET/KEY`.}
#' }
#'
#' @section Service syntax:
#' ```
#' svc <- qldbsession(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- qldbsession()
#' svc$send_command(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=qldbsession_send_command]{send_command} \tab Sends a command to an Amazon QLDB ledger
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname qldbsession
#' @export
# Build the QLDB Session client: take the generated operation table and apply
# the user-supplied configuration. (Generated file -- do not hand-edit.)
# NOTE(review): `.qldbsession$operations` is initialised as an empty list
# below, and `.qldbsession$service()` is defined but never called here;
# presumably the generator appends operations elsewhere -- confirm against
# the make.paws templates before relying on this client.
qldbsession <- function(config = list()) {
svc <- .qldbsession$operations
svc <- set_config(svc, config)
return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
# Container for the service's generated internals.
.qldbsession <- list()
# Operation table. NOTE(review): left empty here; presumably populated by
# sibling generated files -- confirm.
.qldbsession$operations <- list()
# Service metadata; endpoint patterns are keyed by region glob, with {region}
# substituted at request time.
.qldbsession$metadata <- list(
service_name = "qldbsession",
endpoints = list("*" = list(endpoint = "session.qldb.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "session.qldb.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "session.qldb.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "session.qldb.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "QLDB Session",
api_version = "2019-07-11",
signing_name = "qldb",
json_version = "1.0",
target_prefix = "QLDBSession"
)
# Construct a paws.common service object using JSON-RPC handlers with
# SigV4 ("v4") request signing.
.qldbsession$service <- function(config = list()) {
handlers <- new_handlers("jsonrpc", "v4")
new_service(.qldbsession$metadata, handlers, config)
}
| /R/qldbsession_service.R | no_license | cran/paws.database | R | false | false | 4,215 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' Amazon QLDB Session
#'
#' @description
#' The transactional data APIs for Amazon QLDB
#'
#' Instead of interacting directly with this API, we recommend using the
#' QLDB driver or the QLDB shell to execute data transactions on a ledger.
#'
#' - If you are working with an AWS SDK, use the QLDB driver. The driver
#' provides a high-level abstraction layer above this *QLDB Session*
#' data plane and manages [`send_command`][qldbsession_send_command]
#' API calls for you. For information and a list of supported
#' programming languages, see [Getting started with the
#' driver](https://docs.aws.amazon.com/qldb/latest/developerguide/getting-started-driver.html)
#' in the *Amazon QLDB Developer Guide*.
#'
#' - If you are working with the AWS Command Line Interface (AWS CLI),
#' use the QLDB shell. The shell is a command line interface that uses
#' the QLDB driver to interact with a ledger. For information, see
#' [Accessing Amazon QLDB using the QLDB
#' shell](https://docs.aws.amazon.com/qldb/latest/developerguide/data-shell.html).
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e., `http://s3.amazonaws.com/BUCKET/KEY`.}
#' }
#'
#' @section Service syntax:
#' ```
#' svc <- qldbsession(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- qldbsession()
#' svc$send_command(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=qldbsession_send_command]{send_command} \tab Sends a command to an Amazon QLDB ledger
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname qldbsession
#' @export
# Build the QLDB Session client: take the generated operation table and apply
# the user-supplied configuration. (Generated file -- do not hand-edit.)
# NOTE(review): `.qldbsession$operations` is initialised as an empty list
# below, and `.qldbsession$service()` is defined but never called here;
# confirm against the make.paws templates before relying on this client.
qldbsession <- function(config = list()) {
svc <- .qldbsession$operations
svc <- set_config(svc, config)
return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
# Container for the service's generated internals.
.qldbsession <- list()
# Operation table. NOTE(review): left empty here; presumably populated by
# sibling generated files -- confirm.
.qldbsession$operations <- list()
# Service metadata; endpoint patterns are keyed by region glob, with {region}
# substituted at request time.
.qldbsession$metadata <- list(
service_name = "qldbsession",
endpoints = list("*" = list(endpoint = "session.qldb.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "session.qldb.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "session.qldb.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "session.qldb.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "QLDB Session",
api_version = "2019-07-11",
signing_name = "qldb",
json_version = "1.0",
target_prefix = "QLDBSession"
)
# Construct a paws.common service object using JSON-RPC handlers with
# SigV4 ("v4") request signing.
.qldbsession$service <- function(config = list()) {
handlers <- new_handlers("jsonrpc", "v4")
new_service(.qldbsession$metadata, handlers, config)
}
|
install.packages("PolynomF")
install.packages("MASS")
install.packages("mvtnorm")
install.packages("pracma")
| /sparse/repos/ehalley/Gaia-DR2-distances/install.R | permissive | yuvipanda/mybinder.org-analytics | R | false | false | 110 | r | install.packages("PolynomF")
install.packages("MASS")
install.packages("mvtnorm")
install.packages("pracma")
|
library(PopED)
### Name: diag_matlab
### Title: Function written to match MATLAB's diag function
### Aliases: diag_matlab
### Keywords: internal
### ** Examples
diag_matlab(3)
diag_matlab(c(1,2,3))
diag_matlab(cbind(1,2,3))
diag_matlab(rbind(1,2,3))
diag_matlab(matrix(c(1, 2, 3),6,6))
# here is where the R default does something different
diag(cbind(1,2,3))
diag(rbind(1,2,3))
| /data/genthat_extracted_code/PopED/examples/diag_matlab.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 390 | r | library(PopED)
### Name: diag_matlab
### Title: Function written to match MATLAB's diag function
### Aliases: diag_matlab
### Keywords: internal
### ** Examples
diag_matlab(3)
diag_matlab(c(1,2,3))
diag_matlab(cbind(1,2,3))
diag_matlab(rbind(1,2,3))
diag_matlab(matrix(c(1, 2, 3),6,6))
# here is where the R default does something different
diag(cbind(1,2,3))
diag(rbind(1,2,3))
|
#' @title Set grid row style
#'
#' @description Apply styles to an entire row identified by an expression.
#'
#' @param grid A grid created with [datagrid()].
#' @param expr An expression giving position of row. Must return a logical vector.
#' @param background Background color.
#' @param color Text color.
#' @param fontWeight Font weight, you can use \code{"bold"} for example.
#' @param ... Other CSS properties.
#' @param class CSS class to apply to the row.
#' @param cssProperties Alternative to specify CSS properties with a named list.
#'
#' @return A `datagrid` htmlwidget.
#' @export
#'
#' @importFrom rlang enquo eval_tidy
#'
#' @example examples/ex-grid_style_row.R
grid_style_row <- function(grid,
                           expr,
                           background = NULL,
                           color = NULL,
                           fontWeight = NULL,
                           ...,
                           class = NULL,
                           cssProperties = NULL) {
  expr <- enquo(expr)
  check_grid(grid, "grid_style_row")
  # Evaluate the row-selection expression against the grid's data frame.
  rowKey <- eval_tidy(expr, data = grid$x$data_df)
  if (!is.logical(rowKey))
    # CONSISTENCY FIX: sibling validators (grid_style_cell/_cells) use
    # call. = FALSE so the user sees a clean message without the call.
    stop("grid_style_row: expr must evaluate to a logical vector!", call. = FALSE)
  # The JavaScript widget expects zero-based row keys.
  rowKey <- which(rowKey) - 1
  # Generate a unique CSS class when the caller did not provide one.
  if (is.null(class)) {
    class <- paste0("datagrid-row-", genId())
  }
  styles <- make_styles(c(
    list(
      background = background,
      color = color,
      fontWeight = fontWeight,
      ...
    ), cssProperties
  ), class = class)
  # Record the row-class entry; applied client-side when the widget renders.
  grid$x$rowClass <- append(
    x = grid$x$rowClass,
    values = list(list(
      rowKey = list1(rowKey),
      class = class,
      styles = styles
    ))
  )
  return(grid)
}
#' @title Set grid cell(s) style
#'
#' @description Customize cell(s) appearance with CSS
#'  according to an expression in the data used in the grid.
#'
#' @param grid A grid created with [datagrid()].
#' @param expr An expression giving position of row. Must return a logical vector.
#' @param column Name of column (variable name) where to apply style.
#' @param background Background color.
#' @param color Text color.
#' @param fontWeight Font weight, you can use \code{"bold"} for example.
#' @param ... Other CSS properties.
#' @param class CSS class to apply to the row.
#' @param cssProperties Alternative to specify CSS properties with a named list.
#'
#' @return A `datagrid` htmlwidget.
#' @export
#'
#' @name grid-cell-style
#'
#' @importFrom rlang enquo eval_tidy as_function
#'
#' @example examples/ex-grid_style_cell.R
grid_style_cell <- function(grid,
expr,
column,
background = NULL,
color = NULL,
fontWeight = NULL,
...,
class = NULL,
cssProperties = NULL) {
check_grid(grid, "grid_style_cell")
if (!is.character(column) | length(column) != 1)
stop("grid_style_cell: `column` must be a character of length one.", call. = FALSE)
expr <- enquo(expr)
# Evaluate the row-selection expression against the grid's data frame.
rowKey <- eval_tidy(expr, data = grid$x$data_df)
if (is.list(rowKey)) {
# expr produced one logical vector per style set (e.g. via grid_style_column):
# replicate the CSS arguments to the same length and recurse once per element.
args <- dropNulls(list(
background = background,
color = color,
fontWeight = fontWeight,
...
))
args <- rep_list(args, length(rowKey))
if (!is.null(class))
class <- rep(class, times = length(rowKey))
for (i in seq_along(rowKey)) {
grid <- grid_style_cell(
grid = grid,
expr = rowKey[[i]],
column = column,
cssProperties = lapply(args, `[[`, i),
class = class[i]
)
}
return(grid)
}
# Base case: a single logical selection vector.
if (!is.logical(rowKey))
stop("grid_style_cell: expr must evaluate to a logical vector!")
# The JavaScript widget expects zero-based row keys.
rowKey <- which(rowKey) - 1
# Generate a unique CSS class when the caller did not provide one.
if (is.null(class)) {
class <- paste0("datagrid-cell-", genId())
}
styles <- make_styles(c(
list(
background = background,
color = color,
fontWeight = fontWeight,
...
), cssProperties
), class = class)
# Record the cell-class entry; applied client-side when the widget renders.
grid$x$cellClass <- append(
x = grid$x$cellClass,
values = list(list(
rowKey = list1(rowKey),
class = class,
column = column,
styles = styles
))
)
return(grid)
}
#' @param fun Function to apply to \code{columns} to identify rows to style.
#' @param columns Columns names to use with \code{fun}.
#'
#' @export
#'
#' @rdname grid-cell-style
grid_style_cells <- function(grid,
fun,
columns,
background = NULL,
color = NULL,
...,
class = NULL,
cssProperties = NULL) {
check_grid(grid, "grid_style_cells")
if (!is.character(columns))
stop("grid_style_cells: column must be character.", call. = FALSE)
fun <- as_function(fun)
# Apply `fun` column-wise: one logical selection vector per column.
rowKeys <- lapply(
X = grid$x$data_df[, columns, drop = FALSE],
FUN = fun
)
if (!all(vapply(rowKeys, is.logical, logical(1))))
stop("grid_style_cells: fun must evaluate to a logical vector!", call. = FALSE)
# Zero-based row indices for the JavaScript widget.
rowKeys <- lapply(rowKeys, function(x) {
which(x) - 1
})
# A single generated class is shared across all targeted columns.
if (is.null(class)) {
class <- paste0("datagrid-cells-", genId())
}
styles <- make_styles(c(
list(
background = background,
color = color, ...
), cssProperties
), class = class)
# Record one cells-class entry per column, dropping columns with no matches.
grid$x$cellsClass <- append(
x = grid$x$cellsClass,
values = dropNulls(lapply(
X = seq_along(rowKeys),
FUN = function(i) {
if (length(rowKeys[[i]]) > 0) {
list(
rowKey = list1(rowKeys[[i]]),
class = class,
column = columns[i],
styles = styles
)
} else {
NULL
}
}
))
)
return(grid)
}
#' Style cells with a color bar
#'
#' @param grid A grid created with [datagrid()].
#' @param column The name of the column where to create a color bar.
#' @param bar_bg Background color of the color bar.
#' @param color Color of the text.
#' @param background Background of the cell.
#' @param from Range of values of the variable to represent as a color bar.
#' @param prefix,suffix String to put in front of or after the value.
#' @param label_outside Show label outside of the color bar.
#' @param label_width Width of label in case it's displayed outside the color bar.
#' @param border_radius Border radius of color bar.
#' @param height Height in pixel of color bar.
#' @param align Alignment of label if it is displayed inside the color bar.
#'
#' @return A `datagrid` htmlwidget.
#' @export
#'
#' @importFrom htmlwidgets JS
#'
#' @example examples/ex-grid_colorbar.R
grid_colorbar <- function(grid,
column,
bar_bg = "#5E81AC",
color = "#ECEFF4",
background = "#ECEFF4",
from = NULL,
prefix = NULL,
suffix = NULL,
label_outside = FALSE,
label_width = "20px",
border_radius = "0px",
height = "16px",
align = c("left", "center", "right")) {
check_grid(grid, "grid_colorbar")
align <- match.arg(align)
stopifnot(is.character(column) & length(column) == 1)
if (!column %in% grid$x$colnames) {
stop(
"grid_colorbar: invalid 'column' supplied, can't find in data.",
call. = FALSE
)
}
# Default the bar's value range to a "pretty" range over the column values.
if (is.null(from)) {
from <- range(pretty(grid$x$data_df[[column]]), na.rm = TRUE)
}
# Empty strings so the JS side can concatenate unconditionally.
if (is.null(prefix))
prefix <- ""
if (is.null(suffix))
suffix <- ""
# Delegate rendering to the client-side "colorbar" renderer with the
# collected cosmetic options.
grid_columns(
grid = grid,
columns = column,
align = align,
renderer = list(
type = htmlwidgets::JS("datagrid.renderer.colorbar"),
options = list(
bar_bg = bar_bg,
color = color,
background = background,
from = from,
prefix = prefix,
suffix = suffix,
label_outside = label_outside,
label_width = label_width,
height = height,
border_radius = border_radius
)
)
)
}
#' @title Set column style
#'
#' @description Apply styles to a column according to CSS properties
#'  declared by expression based on data passed to grid..
#'
#' @param grid A grid created with [datagrid()].
#' @param column Name of column (variable name) where to apply style.
#' @param background Background color.
#' @param color Text color.
#' @param fontWeight Font weight, you can use \code{"bold"} for example.
#' @param ... Other CSS properties.
#'
#' @return A `datagrid` htmlwidget.
#' @export
#'
#' @importFrom rlang enexprs eval_tidy exec
#'
#' @example examples/ex-grid_style_column.R
grid_style_column <- function(grid,
column,
background = NULL,
color = NULL,
fontWeight = NULL,
...) {
check_grid(grid, "grid_style_column")
# Evaluate each CSS property expression against the grid's data, giving one
# value per row (or a scalar, recycled below).
props <- lapply(
X = enexprs(
background = background,
color = color,
fontWeight = fontWeight,
...
),
FUN = eval_tidy,
data = grid$x$data_df
)
props <- as.data.frame(dropNulls(props))
# Scalar properties: recycle the single value over every row.
if (identical(nrow(props), 1L)) {
props <- props[rep(1, times = nrow(grid$x$data_df)), , drop = FALSE]
}
props$datagridRowKey <- seq_len(nrow(grid$x$data_df))
# Group rows that share an identical combination of property values, then
# emit one grid_style_cell() call per group.
lprops <- split(props, props[, setdiff(names(props), "datagridRowKey"), drop = FALSE])
for (i in seq_along(lprops)) {
props_ <- lprops[[i]]
# One unique value per property within this group.
lprops_ <- lapply(
X = props_[, setdiff(names(props_), "datagridRowKey"), drop = FALSE],
FUN = unique
)
args <- c(
list(
grid = grid,
expr = props$datagridRowKey %in% props_$datagridRowKey,
column = column
),
lprops_
)
grid <- exec("grid_style_cell", !!!args)
}
return(grid)
}
| /R/grid-style.R | permissive | jeanantoinedasilva/toastui | R | false | false | 10,006 | r |
#' @title Set grid row style
#'
#' @description Apply styles to an entire row identified by an expression.
#'
#' @param grid A grid created with [datagrid()].
#' @param expr An expression giving position of row. Must return a logical vector.
#' @param background Background color.
#' @param color Text color.
#' @param fontWeight Font weight, you can use \code{"bold"} for example.
#' @param ... Other CSS properties.
#' @param class CSS class to apply to the row.
#' @param cssProperties Alternative to specify CSS properties with a named list.
#'
#' @return A `datagrid` htmlwidget.
#' @export
#'
#' @importFrom rlang enquo eval_tidy
#'
#' @example examples/ex-grid_style_row.R
grid_style_row <- function(grid,
                           expr,
                           background = NULL,
                           color = NULL,
                           fontWeight = NULL,
                           ...,
                           class = NULL,
                           cssProperties = NULL) {
  expr <- enquo(expr)
  check_grid(grid, "grid_style_row")
  # Evaluate the row-selection expression against the grid's data frame.
  rowKey <- eval_tidy(expr, data = grid$x$data_df)
  if (!is.logical(rowKey))
    # CONSISTENCY FIX: sibling validators (grid_style_cell/_cells) use
    # call. = FALSE so the user sees a clean message without the call.
    stop("grid_style_row: expr must evaluate to a logical vector!", call. = FALSE)
  # The JavaScript widget expects zero-based row keys.
  rowKey <- which(rowKey) - 1
  # Generate a unique CSS class when the caller did not provide one.
  if (is.null(class)) {
    class <- paste0("datagrid-row-", genId())
  }
  styles <- make_styles(c(
    list(
      background = background,
      color = color,
      fontWeight = fontWeight,
      ...
    ), cssProperties
  ), class = class)
  # Record the row-class entry; applied client-side when the widget renders.
  grid$x$rowClass <- append(
    x = grid$x$rowClass,
    values = list(list(
      rowKey = list1(rowKey),
      class = class,
      styles = styles
    ))
  )
  return(grid)
}
#' @title Set grid cell(s) style
#'
#' @description Customize cell(s) appearance with CSS
#'  according to an expression in the data used in the grid.
#'
#' @param grid A grid created with [datagrid()].
#' @param expr An expression giving position of row. Must return a logical vector.
#' @param column Name of column (variable name) where to apply style.
#' @param background Background color.
#' @param color Text color.
#' @param fontWeight Font weight, you can use \code{"bold"} for example.
#' @param ... Other CSS properties.
#' @param class CSS class to apply to the row.
#' @param cssProperties Alternative to specify CSS properties with a named list.
#'
#' @return A `datagrid` htmlwidget.
#' @export
#'
#' @name grid-cell-style
#'
#' @importFrom rlang enquo eval_tidy as_function
#'
#' @example examples/ex-grid_style_cell.R
grid_style_cell <- function(grid,
expr,
column,
background = NULL,
color = NULL,
fontWeight = NULL,
...,
class = NULL,
cssProperties = NULL) {
check_grid(grid, "grid_style_cell")
if (!is.character(column) | length(column) != 1)
stop("grid_style_cell: `column` must be a character of length one.", call. = FALSE)
expr <- enquo(expr)
# Evaluate the row-selection expression against the grid's data frame.
rowKey <- eval_tidy(expr, data = grid$x$data_df)
if (is.list(rowKey)) {
# expr produced one logical vector per style set: replicate the CSS
# arguments to the same length and recurse once per element.
args <- dropNulls(list(
background = background,
color = color,
fontWeight = fontWeight,
...
))
args <- rep_list(args, length(rowKey))
if (!is.null(class))
class <- rep(class, times = length(rowKey))
for (i in seq_along(rowKey)) {
grid <- grid_style_cell(
grid = grid,
expr = rowKey[[i]],
column = column,
cssProperties = lapply(args, `[[`, i),
class = class[i]
)
}
return(grid)
}
# Base case: a single logical selection vector.
if (!is.logical(rowKey))
stop("grid_style_cell: expr must evaluate to a logical vector!")
# The JavaScript widget expects zero-based row keys.
rowKey <- which(rowKey) - 1
# Generate a unique CSS class when the caller did not provide one.
if (is.null(class)) {
class <- paste0("datagrid-cell-", genId())
}
styles <- make_styles(c(
list(
background = background,
color = color,
fontWeight = fontWeight,
...
), cssProperties
), class = class)
# Record the cell-class entry; applied client-side when the widget renders.
grid$x$cellClass <- append(
x = grid$x$cellClass,
values = list(list(
rowKey = list1(rowKey),
class = class,
column = column,
styles = styles
))
)
return(grid)
}
#' @param fun Function to apply to \code{columns} to identify rows to style.
#' @param columns Columns names to use with \code{fun}.
#'
#' @export
#'
#' @rdname grid-cell-style
grid_style_cells <- function(grid,
                             fun,
                             columns,
                             background = NULL,
                             color = NULL,
                             ...,
                             class = NULL,
                             cssProperties = NULL) {
  check_grid(grid, "grid_style_cells")
  if (!is.character(columns))
    stop("grid_style_cells: column must be character.", call. = FALSE)
  fun <- as_function(fun)
  # Apply the predicate to each requested column of the grid's data.
  rowKeys <- lapply(
    X = grid$x$data_df[, columns, drop = FALSE],
    FUN = fun
  )
  all_logical <- vapply(rowKeys, is.logical, logical(1))
  if (!all(all_logical))
    stop("grid_style_cells: fun must evaluate to a logical vector!", call. = FALSE)
  # 0-based row indices for the JavaScript side
  rowKeys <- lapply(rowKeys, function(keys) which(keys) - 1)
  if (is.null(class))
    class <- paste0("datagrid-cells-", genId())
  styles <- make_styles(
    c(list(background = background, color = color, ...), cssProperties),
    class = class
  )
  # One entry per column that has at least one matching row.
  entries <- lapply(seq_along(rowKeys), function(i) {
    keys <- rowKeys[[i]]
    if (length(keys) < 1)
      return(NULL)
    list(
      rowKey = list1(keys),
      class = class,
      column = columns[i],
      styles = styles
    )
  })
  grid$x$cellsClass <- append(x = grid$x$cellsClass, values = dropNulls(entries))
  return(grid)
}
#' Style cells with a color bar
#'
#' @param grid A grid created with [datagrid()].
#' @param column The name of the column where to create a color bar.
#' @param bar_bg Background color of the color bar.
#' @param color Color of the text.
#' @param background Background of the cell.
#' @param from Range of values of the variable to represent as a color bar.
#' @param prefix,suffix String to put in front of or after the value.
#' @param label_outside Show label outside of the color bar.
#' @param label_width Width of label in case it's displayed outside the color bar.
#' @param border_radius Border radius of color bar.
#' @param height Height in pixel of color bar.
#' @param align Alignment of label if it is displayed inside the color bar.
#'
#' @return A `datagrid` htmlwidget.
#' @export
#'
#' @importFrom htmlwidgets JS
#'
#' @example examples/ex-grid_colorbar.R
grid_colorbar <- function(grid,
                          column,
                          bar_bg = "#5E81AC",
                          color = "#ECEFF4",
                          background = "#ECEFF4",
                          from = NULL,
                          prefix = NULL,
                          suffix = NULL,
                          label_outside = FALSE,
                          label_width = "20px",
                          border_radius = "0px",
                          height = "16px",
                          align = c("left", "center", "right")) {
  check_grid(grid, "grid_colorbar")
  align <- match.arg(align)
  # Explicit scalar check with an informative message, consistent with
  # grid_style_cell(); the previous `stopifnot(... & ...)` used the
  # vectorised `&` and produced an opaque deparse-based error.
  if (!is.character(column) || length(column) != 1)
    stop("grid_colorbar: `column` must be a character of length one.", call. = FALSE)
  if (!column %in% grid$x$colnames) {
    stop(
      "grid_colorbar: invalid 'column' supplied, can't find in data.",
      call. = FALSE
    )
  }
  # Default value range: a "pretty" extension of the observed values.
  if (is.null(from)) {
    from <- range(pretty(grid$x$data_df[[column]]), na.rm = TRUE)
  }
  if (is.null(prefix))
    prefix <- ""
  if (is.null(suffix))
    suffix <- ""
  # Delegate to grid_columns() with the JS colorbar renderer and options.
  grid_columns(
    grid = grid,
    columns = column,
    align = align,
    renderer = list(
      type = htmlwidgets::JS("datagrid.renderer.colorbar"),
      options = list(
        bar_bg = bar_bg,
        color = color,
        background = background,
        from = from,
        prefix = prefix,
        suffix = suffix,
        label_outside = label_outside,
        label_width = label_width,
        height = height,
        border_radius = border_radius
      )
    )
  )
}
#' @title Set column style
#'
#' @description Apply styles to a column according to CSS properties
#' declared by expression based on data passed to grid..
#'
#' @param grid A grid created with [datagrid()].
#' @param column Name of column (variable name) where to apply style.
#' @param background Background color.
#' @param color Text color.
#' @param fontWeight Font weight, you can use \code{"bold"} for example.
#' @param ... Other CSS properties.
#'
#' @return A `datagrid` htmlwidget.
#' @export
#'
#' @importFrom rlang enexprs eval_tidy exec
#'
#' @example examples/ex-grid_style_column.R
grid_style_column <- function(grid,
                              column,
                              background = NULL,
                              color = NULL,
                              fontWeight = NULL,
                              ...) {
  check_grid(grid, "grid_style_column")
  # Capture the CSS property expressions unevaluated, then evaluate each
  # against the grid's data so properties can be computed per row.
  props <- lapply(
    X = enexprs(
      background = background,
      color = color,
      fontWeight = fontWeight,
      ...
    ),
    FUN = eval_tidy,
    data = grid$x$data_df
  )
  props <- as.data.frame(dropNulls(props))
  # A single row means scalar property values: recycle them to one row per
  # data row so the split below works uniformly.
  if (identical(nrow(props), 1L)) {
    props <- props[rep(1, times = nrow(grid$x$data_df)), , drop = FALSE]
  }
  # Track each row's original position so the %in% test below can recover
  # which rows belong to each property combination.
  props$datagridRowKey <- seq_len(nrow(grid$x$data_df))
  # Group rows by their combination of property values: one
  # grid_style_cell() call (hence one CSS class) per distinct combination.
  lprops <- split(props, props[, setdiff(names(props), "datagridRowKey"), drop = FALSE])
  for (i in seq_along(lprops)) {
    props_ <- lprops[[i]]
    # Collapse each property column to its single unique value for this group.
    lprops_ <- lapply(
      X = props_[, setdiff(names(props_), "datagridRowKey"), drop = FALSE],
      FUN = unique
    )
    args <- c(
      list(
        grid = grid,
        # Logical mask selecting the rows of this property group.
        expr = props$datagridRowKey %in% props_$datagridRowKey,
        column = column
      ),
      lprops_
    )
    grid <- exec("grid_style_cell", !!!args)
  }
  return(grid)
}
|
# Univariate GARCH return innovations: Monte Carlo size study of predictive
# regression tests (t-test, infeasible Q-test, Bonferroni Q-test, pretest)
# when the regressor is near-integrated (rho = 1 + C/N) and the return
# innovations follow a GARCH process.
library(pr)        # presumably provides rho_ci(), sizeDistortionTest(), bonferroniQci() -- confirm
library(xtable)
library(latex2exp)
library(grDevices)
library(TSA)       # garch.sim()
library(Matrix)
library(MASS)      # mvrnorm()
library(zoo)

# Accumulators: one entry per (N, C, delta) grid cell
Cs <- NULL
rhos <- NULL
deltas <- NULL
obs <- NULL
t_rejection_rates <- NULL
q_rejection_rates <- NULL
bq_rejection_rates <- NULL
bq_rejection_rates_two <- NULL
pretest_rejection_rates <- NULL

# Unit innovation variances, no drift, beta = 0 (size study under the null)
sigma_u_sq <- 1
sigma_e_sq <- 1
gamma <- 0.0
alpha <- 0.0
beta <- 0.0
beta_0 <- 0
MC <- 1000
DEBUG <- FALSE

for (N in c(100)) {
  N <- N + 1  # correct for the loss of 1 observation due to lagging
  for (C in c(-2)) {
    print(C)
    for (delta in c(-0.95)) {
      sigma_ue <- delta
      Sigma <- matrix(c(sigma_u_sq, sigma_ue, sigma_ue, sigma_e_sq), 2, 2)
      rho <- 1 + C / N  # local-to-unity autoregressive root
      t_stats <- NULL
      q_stats <- NULL
      q_bonferroni_two_sided <- NULL
      q_bonferroni_right <- NULL
      infeasible_q <- NULL
      t_right <- NULL
      pretest_rejection <- NULL
      # Infeasible Q-statistic (uses the true rho and delta); reads x, r and
      # the parameters from the enclosing environment.
      q_test <- function() {
        beta_ue <- sigma_ue / sigma_e_sq
        # Explicit parentheses: the original `x[1:length(x)-1]` parsed as
        # `x[(1:length(x)) - 1]` and only worked because index 0 is dropped.
        x_lag <- x[1:(length(x) - 1)]
        x_lagged_demeaned <- x_lag - mean(x_lag)
        num <- sum(x_lagged_demeaned * (r[2:length(r)] - beta_0 * x_lag
                                        - beta_ue * (x[2:length(x)] - rho * x_lag)))
        den <- (sigma_u_sq * (1 - delta^2))^0.5 * (sum(x_lagged_demeaned^2))^0.5
        return(num / den)
      }
      garch_alpha <- 0.5
      garch_beta <- 0.4999999  # alpha + beta just below 1 (near-IGARCH)
      R <- matrix(c(1, delta, delta, 1), 2, 2)  # innovation correlation matrix
      for (j in 1:MC) {
        z <- mvrnorm(n = N, rep(0, 2), diag(c(1, 1)))
        h1 <- garch.sim(alpha = c(1, garch_alpha), beta = c(garch_beta), n = N)
        # BUG FIX: `h1_sq` was referenced below but never defined, so the
        # script stopped with "object 'h1_sq' not found". Define it as the
        # squared simulated GARCH series so sqrt() below is well defined.
        # TODO confirm this matches the intended volatility process.
        h1_sq <- h1^2
        x <- NULL
        r <- NULL
        x[1] <- 0.001
        for (t in 2:N) {
          # Correlated innovations; the return shock is scaled by the GARCH sd
          w <- diag(c(1, sqrt(h1_sq[t]))) %*% chol(R) %*% (z[t, ])
          x[t] <- gamma + rho * x[t - 1] + w[1]
          r[t] <- alpha + beta * x[t - 1] + w[2]
        }
        # Predictive regression and autoregression of the predictor
        linmod <- lm(r[2:N] ~ x[1:(N - 1)])
        s <- summary(linmod)
        linmod2 <- lm(x[2:N] ~ x[1:(N - 1)])
        s2 <- summary(linmod2)
        beta_hat <- s$coefficients[2, 1]
        SE_beta <- s$coefficients[2, 2]
        rho_hat <- s2$coefficients[2, 1]
        SE_rho <- s2$coefficients[2, 2]
        x_lag <- x[1:(N - 1)]
        std_error_theoretical <- sigma_u_sq^0.5 * sum((x_lag - mean(x_lag))^2)^-0.5
        t <- s$coefficients[2, 3]
        q <- q_test()
        rho_confidence_interval <- rho_ci(x, lags = 1, level = "0.95")
        t_test_reliable <- as.numeric(sizeDistortionTest(r, x, lags = 1))
        ci_beta <- bonferroniQci(r, x, lags = 1)
        q_test_outcome_right_sided <- as.numeric(0 < ci_beta[1])
        q_test_outcome_two_sided <- as.numeric((0 < ci_beta[1]) | (0 > ci_beta[2]))
        infeasible_q_outcome <- as.numeric(q > 1.645)
        if (DEBUG == TRUE) {
          # print(var(w))
          # plot.ts(x)
          # plot.ts(r)
          # print(s)
          # print(t)
          if (ci_beta[7] < (-5)) {
            print(ci_beta[7])
          }
        }
        t_stats[j] <- t
        q_stats[j] <- q
        q_bonferroni_right[j] <- q_test_outcome_right_sided
        t_right[j] <- as.numeric(t > 1.645)  # 5% right-tailed normal critical value
        q_bonferroni_two_sided[j] <- q_test_outcome_two_sided
        infeasible_q[j] <- infeasible_q_outcome
        pretest_rejection[j] <- t_test_reliable
      }
      # Record empirical rejection rates for this grid cell
      Cs <- c(Cs, C)
      rhos <- c(rhos, rho)
      deltas <- c(deltas, delta)
      obs <- c(obs, N - 1)
      bq_rejection_rates <- c(bq_rejection_rates, mean(q_bonferroni_right))
      bq_rejection_rates_two <- c(bq_rejection_rates_two, mean(q_bonferroni_two_sided))
      q_rejection_rates <- c(q_rejection_rates, mean(infeasible_q))
      t_rejection_rates <- c(t_rejection_rates, mean(t_right))
      pretest_rejection_rates <- c(pretest_rejection_rates, mean(pretest_rejection))
    }
  }
}

master_df <- data.frame("Obs" = obs,
                        "c" = Cs,
                        "rho" = rhos,
                        "delta" = deltas,
                        "T.test" = t_rejection_rates,
                        "Pretest" = pretest_rejection_rates,
                        "Bonf.Q.test" = bq_rejection_rates,
                        "Bonf.Q.test.two.sided" = bq_rejection_rates_two)
| /MonteCarlo/mc_uGARCH.R | no_license | jpwoeltjen/PersistentRegressors | R | false | false | 4,105 | r | # univarite GARCH return innovations
# Monte Carlo size study of predictive-regression tests with a
# near-integrated regressor (rho = 1 + C/N) and GARCH return innovations.
library(pr)
library(xtable)
library(latex2exp)
library(grDevices)
library(TSA)
library(Matrix)
library(MASS)
library(zoo)
# Accumulators: one entry per (N, C, delta) combination of the grid below
Cs <- NULL
rhos <- NULL
deltas <- NULL
obs <- NULL
t_rejection_rates <- NULL
q_rejection_rates <- NULL
bq_rejection_rates <- NULL
bq_rejection_rates_two <- NULL
pretest_rejection_rates <- NULL
sigma_u_sq <- 1
sigma_e_sq <- 1
gamma <- 0.0
alpha <- 0.0
beta <- 0.0
beta_0 <- 0
MC <- 1000
DEBUG <- FALSE
for (N in c(100)){
  N <- N+1 # correct for the loss of 1 observation due to lagging
  for (C in c(-2)){
    print(C)
    for (delta in c(-0.95)){
      sigma_ue <- delta
      Sigma <- matrix(c(sigma_u_sq, sigma_ue, sigma_ue, sigma_e_sq), 2, 2)
      rho <- 1 +C/N
      t_stats <- NULL
      q_stats <- NULL
      q_bonferroni_two_sided <- NULL
      q_bonferroni_right <- NULL
      infeasible_q <- NULL
      t_right <- NULL
      pretest_rejection <- NULL
      # Infeasible Q-statistic; reads x, r and parameters from the enclosing
      # environment. NOTE(review): `x[1:length(x)-1]` parses as
      # `x[(1:length(x)) - 1]` and only yields x[1:(n-1)] because index 0 is
      # silently dropped -- fragile but functional.
      q_test <- function() {
        beta_ue <- sigma_ue/sigma_e_sq
        x_lagged_demeaned <- x[1:length(x)-1] - mean(x[1:length(x)-1])
        return((sum(x_lagged_demeaned*(r[2:length(r)]-beta_0*x[1:length(x)-1]
                                       - beta_ue*(x[2:length(x)]-rho*x[1:length(x)-1])))
               /((sigma_u_sq*(1-delta^2))^0.5*((sum(x_lagged_demeaned^2))^0.5))
        ))
      }
      garch_alpha <- 0.5
      garch_beta <- 0.4999999
      R <- matrix(c(1, delta, delta, 1), 2, 2)
      for (j in 1:MC){
        z <- mvrnorm(n=N, rep(0, 2), diag(c(1,1)))
        h1 <- garch.sim(alpha=c(1, garch_alpha), beta=c(garch_beta), n=N)
        x <- NULL
        r <- NULL
        x[1] <- 0.001
        for(t in 2:N){
          # BUG: `h1_sq` is never defined in this script (only `h1` is), so
          # this line fails with "object 'h1_sq' not found". Probably
          # `h1_sq <- h1^2` was intended after garch.sim() -- confirm.
          w = diag(c(1, sqrt(h1_sq[t])))%*%chol(R)%*%(z[t,])
          x[t] <- gamma + rho*x[t-1] + w[1]
          r[t] <- alpha + beta*x[t-1] + w[2]
        }
        linmod <- lm(r[2:N] ~ x[1:N-1])
        s <- summary(linmod)
        linmod2 <- lm(x[2:N] ~ x[1:N-1])
        s2 <- summary(linmod2)
        beta_hat <- s$coefficients[2,1]
        SE_beta <- s$coefficients[2,2]
        rho_hat <- s2$coefficients[2,1]
        SE_rho <- s2$coefficients[2,2]
        std_error_theoretical <- sigma_u_sq^0.5*sum((x[1:N-1]-mean(x[1:N-1]))^2)^-0.5
        t <- s$coefficients[2,3]
        q <- q_test()
        rho_confidence_interval <- rho_ci(x, lags=1, level="0.95")
        t_test_reliable <- as.numeric(sizeDistortionTest(r, x, lags=1))
        ci_beta <- bonferroniQci(r,x, lags=1)
        q_test_outcome_right_sided <- as.numeric(0<ci_beta[1])
        q_test_outcome_two_sided <- as.numeric((0<ci_beta[1]) | (0>ci_beta[2]))
        infeasible_q_outcome <- as.numeric(q > 1.645)
        if (DEBUG==TRUE){
          # print(var(w))
          # plot.ts(x)
          # plot.ts(r)
          # print(s)
          # print(t)
          if (ci_beta[7] < (-5)){
            print(ci_beta[7])}
        }
        t_stats[j] <- t
        q_stats[j] <- q
        q_bonferroni_right[j] <- q_test_outcome_right_sided
        t_right[j] <- as.numeric(t > 1.645)
        q_bonferroni_two_sided[j] <- q_test_outcome_two_sided
        infeasible_q[j] <- infeasible_q_outcome
        pretest_rejection[j] <- t_test_reliable
      }
      # Append this grid cell's empirical rejection rates
      Cs <- c(Cs,C)
      rhos <- c(rhos,rho)
      deltas <- c(deltas,delta)
      obs <- c(obs,N-1)
      bq_rejection_rates <- c(bq_rejection_rates,(mean(q_bonferroni_right)))
      bq_rejection_rates_two <- c(bq_rejection_rates_two, (mean(q_bonferroni_two_sided)))
      q_rejection_rates <- c(q_rejection_rates,(mean(infeasible_q)))
      t_rejection_rates <- c(t_rejection_rates,(mean(t_right)))
      pretest_rejection_rates <- c(pretest_rejection_rates, (mean(pretest_rejection)))
    }
  }
}
master_df <- data.frame("Obs" = obs,
                        "c" = Cs,
                        "rho" = rhos,
                        "delta" = deltas,
                        "T.test" = t_rejection_rates,
                        "Pretest" = pretest_rejection_rates,
                        "Bonf.Q.test" = bq_rejection_rates,
                        "Bonf.Q.test.two.sided" = bq_rejection_rates_two)
|
# Fixture exercising chained assignments: three names bound to one function,
# plus a chained definition nested inside a function body. Presumably
# consumed by code-analysis tests that detect chained function definitions,
# so the non-standard `=` chaining style is intentional -- do not normalise.
x = y = z = function(a) a + 1
h = function(w) w + 2
f = function() {
  a = b = c = function(d) d + 1
}
| /tests/chainFunDefs.R | no_license | duncantl/CodeAnalysis | R | false | false | 106 | r | x = y = z = function(a) a + 1
h = function(w) w + 2
f = function() {
a = b = c = function(d) d + 1
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tsbox-deprecated.R
\name{tsbox-deprecated}
\alias{tsbox-deprecated}
\alias{ts_start}
\alias{ts_end}
\title{Start and end of time series}
\usage{
ts_start(x)
ts_end(x)
}
\arguments{
\item{x}{ts-boxable time series, an object of class \code{ts}, \code{xts}, \code{zoo}, \code{data.frame}, \code{data.table}, \code{tbl}, \code{tbl_ts}, \code{tbl_time}, or \code{timeSeries}.}
}
\description{
Start and end of time series
}
| /man/tsbox-deprecated.Rd | no_license | stefanfritsch/tsbox | R | false | true | 499 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tsbox-deprecated.R
\name{tsbox-deprecated}
\alias{tsbox-deprecated}
\alias{ts_start}
\alias{ts_end}
\title{Start and end of time series}
\usage{
ts_start(x)
ts_end(x)
}
\arguments{
\item{x}{ts-boxable time series, an object of class \code{ts}, \code{xts}, \code{zoo}, \code{data.frame}, \code{data.table}, \code{tbl}, \code{tbl_ts}, \code{tbl_time}, or \code{timeSeries}.}
}
\description{
Start and end of time series
}
|
# Join governorate and district names (plus country identifiers) onto `data`
# using the Yemen admin p-code lookup table.
#
# Args:
#   data: data.frame/tibble with `district_ID` and `governorate_ID` columns.
#   path: location of the p-code CSV (defaulted for backward compatibility).
# Returns: `data` with governorate_name, district_name, country_name and
#   country_ID columns added.
add.location <- function(data,
                         path = "Scripts/basic scripts/pcodes/yem_admin_20171007.csv") {
  # read.csv already returns a data.frame; the former as.data.frame() wrapper
  # was redundant.
  location <- read.csv(path, header = TRUE, sep = ",", encoding = "UTF-8",
                       check.names = FALSE, stringsAsFactors = FALSE)
  # admin1 (governorate): columns 4 and 3 hold admin1Pcode / admin1Name_en
  # (per the renames below).
  admin1_merge <- unique(location[, c(4, 3)])
  admin1_merge <- dplyr::rename(admin1_merge, governorate_ID = admin1Pcode)
  admin1_merge <- plyr::rename(admin1_merge, c("admin1Name_en" = "governorate_name"))
  # admin2 (district): columns 7 and 5 hold admin2Pcode / admin2Name_en.
  # BUG FIX: the original assigned the de-duplicated table to a typo'd name
  # (`lOcation_merge2`, capital O) and kept using the non-unique table.
  admin2_merge <- unique(location[, c(7, 5)])
  admin2_merge <- dplyr::rename(admin2_merge, district_ID = admin2Pcode)
  admin2_merge <- plyr::rename(admin2_merge, c("admin2Name_en" = "district_name"))
  # Join names onto the data, then add the country identifiers.
  data <- dplyr::left_join(data, admin2_merge, by = "district_ID")
  data <- dplyr::left_join(data, admin1_merge, by = "governorate_ID")
  data$country_name <- "Yemen"
  data$country_ID <- "YE"
  return(data)
}
| /Scripts/basic scripts/add_locations.R | no_license | agualtieri/yemen_jmmi | R | false | false | 1,086 | r | add.location<-function(data){
#library("dplyr")
location.data <- read.csv("Scripts/basic scripts/pcodes/yem_admin_20171007.csv",header=T,sep=",", encoding = "UTF-8", check.names=F, stringsAsFactors=FALSE)
location <- as.data.frame(location.data)
#admin1Name
location_merge1<-location[,c(colnames(location[c(4,3)]))]
location_merge1 <- unique(location_merge1)
dplyr::rename(location_merge1, governorate_ID = admin1Pcode) -> admin1_merge
plyr::rename(admin1_merge, c("admin1Name_en" = "governorate_name")) -> admin1_merge
#admin2Name
location_merge2<-location[,c(colnames(location[c(7,5)]))]
lOcation_merge2 <- unique(location_merge2)
dplyr::rename(location_merge2, district_ID = admin2Pcode) -> admin2_merge
plyr::rename(admin2_merge, c("admin2Name_en" = "district_name")) -> admin2_merge
#Merge
data <- dplyr::left_join(data, admin2_merge, by = "district_ID")
data <- dplyr::left_join(data, admin1_merge, by = "governorate_ID")
#Add Country name and code
data$country_name <- "Yemen"
data$country_ID <- "YE"
return(data)
}
|
# Jasmin Straube, Queensland Facility of Advanced Bioinformatics
# Part of this script was borrowed from the graphics and stats package.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Moleculesral Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Moleculesral Public License for more details.
#
# You should have received a copy of the GNU Moleculesral Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#' Plot of \code{lmmsde} objects
#'
#' Plot of the raw data the mean and the fitted \code{lmmsde} profile.
#'
#' @import ggplot2
#' @importFrom gridExtra grid.arrange
#' @importFrom stats spline na.omit
#' @param x An object of class \code{lmmsde}.
#' @param y \code{numeric} or \code{character} value. Either the row index or the row name determining which feature should be plotted.
#' @param data alternative \code{matrix} or \code{data.frame} containing the original data for visualisation purposes.
#' @param time alternative \code{numeric} indicating the sample time point. Vector of same length as row lenghth of data for visualisation purposes.
#' @param group alternative \code{numeric} indicating the sample group. Vector of same length as row length of data for visualisation purposes.
#' @param type a \code{character} indicating what model to plot. Default \code{'all'}, options: \code{'time'}, \code{'group'},\code{'group*time'}.
#' @param smooth an optional \code{logical} value. By default set to \code{FALSE}. If \code{TRUE} smooth representation of the fitted values.
#' @param mean alternative \code{logical} if the mean should be displayed. By default set to \code{TRUE}.
#' @param \ldots Additional arguments which are passed to \code{plot}.
#' @return plot showing raw data, mean profile and fitted profile.
#' @examples
#' \dontrun{
#' data(kidneySimTimeGroup)
#' lmmsDEtestl1 <-lmmsDE(data=kidneySimTimeGroup$data,time=kidneySimTimeGroup$time,
#' sampleID=kidneySimTimeGroup$sampleID,
#' group=kidneySimTimeGroup$group,
#' experiment="longitudinal1",basis="p-spline",keepModels=T)
#' plot(lmmsDEtestl1,y=2,type="all")
#' plot(lmmsDEtestl1,y=2,type="time")
#' plot(lmmsDEtestl1,y=2,type="group")
#' plot(lmmsDEtestl1,y=2,type="group*time",smooth=TRUE)
#'
#' #to save memory do not keep the models
#' lmmsDEtestl1 <-lmmsDE(data=kidneySimTimeGroup$data,time=kidneySimTimeGroup$time,
#' sampleID=kidneySimTimeGroup$sampleID,
#' group=kidneySimTimeGroup$group,
#' experiment="longitudinal1",basis="p-spline",keepModels=F)
#'# just the fitted trajectory
#' plot(lmmsDEtestl1,y=2,type="all")
#'
#' plot(lmmsDEtestl1,y=2,type="all",data=kidneySimTimeGroup$data,time=kidneySimTimeGroup$time,
#' group=kidneySimTimeGroup$group)}
#' @method plot lmmsde
#' @export
plot.lmmsde <- function(x, y, data, time, group, type, smooth, mean, ...) {
  # Decide which fitted model types to plot. BUG FIX: the original used the
  # non-short-circuiting `|`, which evaluates both operands; when `type` was
  # omitted the RHS errored with "argument 'type' is missing". `||` skips
  # the RHS whenever `missing(type)` is TRUE.
  if (missing(type) || sum(type %in% "all") > 0) {
    type <- c()
    # A model type is available either as a kept model object or as predictions.
    if (length(x@modelGroup) > 0 || ncol(x@predGroup) > 1)
      type <- c(type, 'group')
    if (length(x@modelTime) > 0 || ncol(x@predTime) > 1)
      type <- c(type, 'time')
    if (length(x@modelTimeGroup) > 0 || ncol(x@predTimeGroup) > 1)
      type <- c(type, 'group*time')
  }
  name <- y
  if (length(grep("all", type)) > 0) {
    type <- c('time', 'group', 'group*time')
  }
  # Resolve `y` into a row index plus a display name. is.numeric()/
  # is.character()/is.factor() replace fragile class(y) == "..." comparisons
  # (which miss integer indices and fail on multi-class objects).
  if (is.numeric(y))
    name <- as.character(x@DE$Molecule[y])
  if (is.character(y) || is.factor(y)) {
    nam <- as.character(x@DE$Molecule)
    if (sum(nam %in% as.character(y)) > 0) {
      name <- as.character(y)
      y <- which(nam %in% y)
    } else {
      stop(paste('Could not find feature', y, 'in rownames(x@pred.spline).'))
    }
  }
  # NOTE(review): the roxygen documents `mean` as defaulting to TRUE, but
  # the code defaults it to FALSE -- confirm which is intended.
  if (missing(mean))
    mean <- FALSE
  if (sum(type %in% 'time') > 0) {
    name2 <- paste(name, 'time')
    if (length(x@modelTime) > 0) {
      p1 <- plotLmms(x@modelTime[[y]], smooth = smooth, name2, mean = mean, ...)
    } else {
      p1 <- plotModel(x@predTime, y, smooth = smooth, name2, data = data, time2 = time, mean = mean)
    }
    if (length(type) < 3) {
      suppressWarnings(print(p1))
    }
  }
  if (sum(type %in% "group") > 0) {
    name2 <- paste(name, 'group')
    if (length(x@modelGroup) > 0) {
      p2 <- plotLmmsdeFunc(x@modelGroup, index = y, smooth = smooth, name2, mean, ...)
    } else {
      p2 <- plotModel(x@predGroup, y, smooth = smooth, name2, data = data, time2 = time, group = group, mean = mean)
    }
    if (length(type) < 3) {
      suppressWarnings(print(p2))
    }
  }
  if (sum(type %in% "group*time") > 0) {
    name2 <- paste(name, 'group*time')
    if (length(x@modelTimeGroup) > 0) {
      p3 <- plotLmmsdeFunc(x@modelTimeGroup, index = y, smooth = smooth, name2, mean, ...)
    } else {
      p3 <- plotModel(x@predTimeGroup, index = y, smooth = smooth, name2, data = data, time2 = time, group = group, mean = mean)
    }
    if (length(type) < 3) {
      suppressWarnings(print(p3))
    }
  }
  # With all three types requested, arrange the panels on one page.
  if (length(type) == 3 || sum(type %in% "all") > 0) {
    grid.arrange(p1, p2, p3, ncol = 2)
  }
}
# Internal helper: plot the raw data, optional group means and the level-1
# fitted trajectories of a two-group mixed model in one panel.
plotLmmsdeFunc <- function(object, index, smooth, name, mean, ...) {
  # Bind ggplot aesthetic names to avoid "no visible binding" NOTEs.
  Time <- Intensity <- Model <- Group <- NULL
  if (missing(smooth))
    smooth <- FALSE
  model <- object[[index]]
  if (is.null(model))
    stop("Requested model not available")
  # (removed `cl <- class(model)`: assigned but never used)
  group <- model$data$Group
  # Row indices and sorted labels of the two groups
  g1 <- which(group == unique(group)[1])
  g2 <- which(group == unique(group)[2])
  g1Label <- sort(unique(group))[1]
  g2Label <- sort(unique(group))[2]
  # Raw observations
  dfmain <- data.frame(Intensity = model$data$Expr, Time = model$data$time,
                       Group = group, size = 1)
  g <- ggplot() +
    geom_point(aes(x = Time, y = Intensity, shape = Group, color = Group),
               alpha = 0.5, size = 3, data = dfmain, na.rm = TRUE) +
    ggtitle(name)
  # NOTE(review): `fun.y` is deprecated in ggplot2 >= 3.3 (use `fun =`);
  # kept for compatibility with older ggplot2 -- confirm target version.
  if (mean)
    g <- g + stat_summary(aes(x = Time, y = Intensity, group = Group,
                              colour = Group, linetype = 'Mean'),
                          size = 1, data = dfmain,
                          fun.y = function(x) mean(x), geom = "line", na.rm = TRUE)
  # Level-1 fitted values, split by group
  f <- fitted(model, level = 1)
  f1 <- f[g1]
  f2 <- f[g2]
  dfmain <- data.frame(Intensity = f, Time = model$data$time, Group = group,
                       size = 1, Model = "Fitted")
  if (smooth) {
    # Natural-spline interpolation of the fitted curves for smooth display
    spl1 <- spline(x = model$data$time[g1], y = f1, n = 500, method = "natural")
    s1 <- data.frame(Time = spl1$x, Intensity = spl1$y, Model = "Smooth", Group = g1Label)
    spl2 <- spline(x = model$data$time[g2], y = f2, n = 500, method = "natural")
    s2 <- data.frame(Time = spl2$x, Intensity = spl2$y, Model = "Smooth", Group = g2Label)
  } else {
    s1 <- data.frame(Intensity = na.omit(f1),
                     Time = model$data$time[intersect(which(!is.na(f)), g1)],
                     Model = "Fitted", Group = g1Label)
    s2 <- data.frame(Intensity = na.omit(f2),
                     Time = model$data$time[intersect(which(!is.na(f)), g2)],
                     Model = "Fitted", Group = g2Label)
  }
  g <- g +
    geom_line(aes(x = Time, y = Intensity, color = Group, linetype = Model),
              data = s1[!is.na(s1$Intensity), ], size = 1) +
    geom_line(aes(x = Time, y = Intensity, color = Group, linetype = Model),
              size = 1, data = s2[!is.na(s2$Intensity), ])
  return(g)
}
# Internal helper: plot one feature's predicted profile(s) from a prediction
# matrix whose column names are either plain time points or "group time"
# pairs. Optionally overlays raw data and mean curves.
plotModel <- function(object, index, smooth, name, data, time2, group, mean, ...) {
  # Bind ggplot aesthetic names to avoid "no visible binding" NOTEs.
  Time <- Intensity <- Model <- Group <- NULL
  if (missing(smooth))
    smooth <- FALSE
  if (missing(mean))
    mean <- FALSE
  model <- object[index, ]
  if (sum(is.na(model)) == length(model))
    stop('Error plotting molecule')
  time <- suppressWarnings(as.numeric(names(model)))
  # Column names may encode "group time" pairs; split them apart.
  s <- strsplit(colnames(object), split = " ")
  s <- sapply(s, '[')
  if (!is.null(dim(s)) & !is.null(s)) {
    # Two-group layout: row 1 holds group labels, row 2 holds time points.
    group2 <- s[1, ]
    time <- suppressWarnings(as.numeric(s[2, ]))
    g1Label <- na.omit(sort(unique(group2)))[1]
    g2Label <- na.omit(sort(unique(group2)))[2]
    g1 <- which(group2 == g1Label)
    g2 <- which(group2 == g2Label)
    if (!missing(group)) {
      group2[g1] <- unique(group)[1]
      group2[g2] <- unique(group)[2]
    }
    if (!missing(data) & !missing(time2) & !missing(group)) {
      # BUG FIX: this assignment used `<<-`, leaking `dfmain` into the
      # caller's/global environment; a plain local `<-` is all that's needed.
      dfmain <- data.frame(Intensity = data[, index], Time = time2, Group = factor(group), size = 1, Model = "Mean")
      g <- ggplot() + geom_point(aes(x = Time, y = Intensity, shape = Group, color = Group), alpha = 0.5, size = 3, data = dfmain, na.rm = TRUE) + ggtitle(name)
      if (mean)
        g <- g + stat_summary(aes(x = Time, y = Intensity, linetype = Model, color = Group, group = Group), size = 1, data = dfmain, fun.y = function(x) mean(x), geom = "line", na.rm = TRUE)
    } else {
      dfmain <- data.frame(Intensity = model, Time = time, Group = factor(group2), size = 1, Model = "Mean")
      g <- ggplot() + geom_point(aes(x = Time, y = Intensity, shape = Group, color = Group), alpha = 0.5, size = 3, data = dfmain, na.rm = TRUE) + ggtitle(name)
      if (mean)
        g <- g + stat_summary(aes(x = Time, y = Intensity, linetype = Model, color = Group, group = Group), size = 1, data = dfmain, fun.y = function(x) mean(x), geom = "line", na.rm = TRUE)
    }
    if (smooth) {
      # Natural-spline interpolation for smooth display
      spl1 <- as.data.frame(spline(x = time[g1], y = model[g1], n = 500, method = "natural"))
      s1 <- data.frame(Time = spl1$x, Intensity = spl1$y, Model = "Smooth", Group = g1Label)
      spl2 <- as.data.frame(spline(x = time[g2], y = model[g2], n = 500, method = "natural"))
      s2 <- data.frame(Time = spl2$x, Intensity = spl2$y, Model = "Smooth", Group = g2Label)
    } else {
      s1 <- data.frame(Time = time[g1], Intensity = model[g1], Model = "Fitted", Group = g1Label)
      s2 <- data.frame(Time = time[g2], Intensity = model[g2], Model = "Fitted", Group = g2Label)
    }
    g <- g + geom_line(aes(x = Time, y = Intensity, color = Group, linetype = Model), data = s1[!is.na(s1$Intensity), ]) + geom_line(aes(x = Time, y = Intensity, color = Group, linetype = Model), data = s2[!is.na(s2$Intensity), ], size = 1)
  } else {
    # Single-group layout: column names are plain time points.
    if (!missing(data) & !missing(time2)) {
      dfmain <- data.frame(Intensity = data[, index], Time = time2)
    } else {
      dfmain <- data.frame(Intensity = model, Time = time)
    }
    g <- ggplot() + geom_point(aes(x = Time, y = Intensity), alpha = 0.5, size = 3, data = dfmain, na.rm = TRUE) + ggtitle(name)
    if (smooth) {
      sp1 <- as.data.frame(spline(x = time, y = model, n = 500, method = "natural"))
      dfModel <- data.frame(Intensity = sp1$y, Time = sp1$x, Model = "Smooth")
    } else {
      dfModel <- data.frame(Intensity = model, Time = time, Model = "Fitted")
    }
    g <- g + geom_line(aes(x = Time, y = Intensity, linetype = Model), data = dfModel[!is.na(dfModel$Intensity), ], size = 1)
    if (mean)
      g <- g + stat_summary(aes(x = Time, y = Intensity, linetype = 'Mean'), size = 1, data = dfmain, fun.y = function(x) mean(x), geom = "line", na.rm = TRUE)
  }
  return(g)
}
| /R/plot.lmmsde-method.R | no_license | cran/lmms | R | false | false | 10,968 | r | # Jasmin Straube, Queensland Facility of Advanced Bioinformatics
# Part of this script was borrowed from the graphics and stats package.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Moleculesral Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Moleculesral Public License for more details.
#
# You should have received a copy of the GNU Moleculesral Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#' Plot of \code{lmmsde} objects
#'
#' Plot of the raw data the mean and the fitted \code{lmmsde} profile.
#'
#' @import ggplot2
#' @importFrom gridExtra grid.arrange
#' @importFrom stats spline na.omit
#' @param x An object of class \code{lmmsde}.
#' @param y \code{numeric} or \code{character} value. Either the row index or the row name determining which feature should be plotted.
#' @param data alternative \code{matrix} or \code{data.frame} containing the original data for visualisation purposes.
#' @param time alternative \code{numeric} indicating the sample time point. Vector of same length as row lenghth of data for visualisation purposes.
#' @param group alternative \code{numeric} indicating the sample group. Vector of same length as row length of data for visualisation purposes.
#' @param type a \code{character} indicating what model to plot. Default \code{'all'}, options: \code{'time'}, \code{'group'},\code{'group*time'}.
#' @param smooth an optional \code{logical} value. By default set to \code{FALSE}. If \code{TRUE} smooth representation of the fitted values.
#' @param mean alternative \code{logical} if the mean should be displayed. By default set to \code{TRUE}.
#' @param \ldots Additional arguments which are passed to \code{plot}.
#' @return plot showing raw data, mean profile and fitted profile.
#' @examples
#' \dontrun{
#' data(kidneySimTimeGroup)
#' lmmsDEtestl1 <-lmmsDE(data=kidneySimTimeGroup$data,time=kidneySimTimeGroup$time,
#' sampleID=kidneySimTimeGroup$sampleID,
#' group=kidneySimTimeGroup$group,
#' experiment="longitudinal1",basis="p-spline",keepModels=T)
#' plot(lmmsDEtestl1,y=2,type="all")
#' plot(lmmsDEtestl1,y=2,type="time")
#' plot(lmmsDEtestl1,y=2,type="group")
#' plot(lmmsDEtestl1,y=2,type="group*time",smooth=TRUE)
#'
#' #to save memory do not keep the models
#' lmmsDEtestl1 <-lmmsDE(data=kidneySimTimeGroup$data,time=kidneySimTimeGroup$time,
#' sampleID=kidneySimTimeGroup$sampleID,
#' group=kidneySimTimeGroup$group,
#' experiment="longitudinal1",basis="p-spline",keepModels=F)
#'# just the fitted trajectory
#' plot(lmmsDEtestl1,y=2,type="all")
#'
#' plot(lmmsDEtestl1,y=2,type="all",data=kidneySimTimeGroup$data,time=kidneySimTimeGroup$time,
#' group=kidneySimTimeGroup$group)}
#' @method plot lmmsde
#' @export
# S3 plot method for lmmsde objects: draws raw data, optional means and
# fitted profiles for the requested model type(s).
plot.lmmsde <- function(x, y, data, time,group,type, smooth, mean,...){
  # library(graphics)
  # NOTE(review): `missing(type)|...` uses the non-short-circuiting `|`, so
  # the right operand still evaluates `type`; when `type` is omitted this
  # errors with "argument 'type' is missing". Should be `||`.
  if(missing(type)|sum(type%in%"all")>0){
    type <- c()
    # A model type is available as a kept model object or as predictions.
    if(length(x@modelGroup)>0|ncol(x@predGroup)>1)
      type <- c(type,'group')
    if(length(x@modelTime)>0|ncol(x@predTime)>1)
      type <- c(type,'time')
    if(length(x@modelTimeGroup)>0|ncol(x@predTimeGroup)>1)
      type <- c(type,'group*time')
  }
  name <- y
  if(length(grep("all",type))>0){
    type <- c('time','group','group*time')
  }
  # Resolve y (index or feature name) to both an index and a display name.
  # NOTE(review): class(y)=='numeric' misses integer indices; is.numeric()
  # would be more robust.
  if(class(y)=='numeric')
    name <- as.character(x@DE$Molecule[y])
  if(class(y)=='character'|class(y)=="factor"){
    nam <- as.character(x@DE$Molecule)
    if(sum(nam%in%as.character(y))>0){
      name <- as.character(y)
      y <-which(nam%in%y)
    }else{
      stop(paste('Could not find feature',y,'in rownames(x@pred.spline).'))
    }
  }
  # NOTE(review): roxygen documents `mean` as defaulting to TRUE, but the
  # code defaults it to FALSE -- confirm intent.
  if(missing(mean))
    mean <- F
  if(sum(type%in%'time')>0){
    name2 <- paste(name,'time')
    if(length(x@modelTime)>0){
      p1 <- plotLmms(x@modelTime[[y]],smooth=smooth,name2,mean=mean,...)
    }else{
      p1 <- plotModel(x@predTime,y,smooth=smooth,name2,data=data,time2=time,mean=mean)
    }
    if(length(type)<3){
      suppressWarnings(print(p1))
    }
  }
  if(sum(type%in%"group")>0){
    name2 <- paste(name,'group')
    if(length(x@modelGroup)>0){
      p2 <- plotLmmsdeFunc(x@modelGroup,index=y,smooth=smooth,name2,mean,...)
    }else{
      p2 <- plotModel(x@predGroup,y,smooth=smooth,name2,data=data,time2=time,group=group,mean=mean)
    }
    if(length(type)<3){
      suppressWarnings(print(p2))
    }
  }
  if(sum(type%in%"group*time")>0){
    name2 <- paste(name,'group*time')
    if(length(x@modelTimeGroup)>0){
      p3 <- plotLmmsdeFunc(x@modelTimeGroup,index=y,smooth=smooth,name2,mean,...)
    } else{
      p3 <- plotModel(x@predTimeGroup,index = y,smooth=smooth,name2,data=data,time2=time,group=group,mean=mean)
    }
    if(length(type)<3){
      suppressWarnings(print(p3))
    }
  }
  # All three types requested: arrange the panels on one page.
  if(length(type)==3|sum(type%in%"all")>0){
    grid.arrange(p1, p2, p3, ncol=2)
  }
}
# Internal helper: plot raw data, optional group means and level-1 fitted
# curves for a two-group mixed model.
plotLmmsdeFunc <- function(object,index,smooth,name,mean,...){
  # Bind ggplot aesthetic names (avoids "no visible binding" NOTEs)
  Time <- Intensity <- Model <-Group<- NULL
  if(missing(smooth))
    smooth <- F
  model <- object[[index]]
  if(is.null(model))
    stop("Requested model not available")
  # NOTE(review): `cl` is assigned but never used.
  cl <- class(model)
  group <- model$data$Group
  # Row indices and sorted labels of the two groups
  g1 <-which(group==unique(group)[1])
  g2 <- which(group==unique(group)[2])
  g1Label <-sort(unique(group))[1]
  g2Label <- sort(unique(group))[2]
  # Raw observations
  dfmain <- data.frame(Intensity=model$data$Expr,Time=model$data$time,Group=group,size=1)
  g <- ggplot()+ geom_point(aes(x=Time,y=Intensity,shape=Group,color=Group),alpha=0.5,size=3,data = dfmain,na.rm = T) +ggtitle(name)
  if(mean)
    g <- g + stat_summary(aes(x=Time,y=Intensity,group=Group,colour=Group,linetype='Mean'),size=1,data=dfmain,fun.y=function(x)mean(x), geom="line",na.rm = T)
  # Level-1 fitted values, split by group
  f <- fitted(model,level=1)
  f1 <- f[g1]
  f2<- f[g2]
  dfmain <- data.frame(Intensity=f,Time=model$data$time,Group=group,size=1,Model="Fitted")
  if(smooth){
    # Natural-spline interpolation of the fitted curves
    spl1 <- spline(x = model$data$time[g1], y = f1, n = 500, method = "natural")
    s1 <- data.frame(Time=spl1$x,Intensity=spl1$y,Model="Smooth",Group=g1Label)
    spl2 <- spline(x = model$data$time[g2], y = f2, n = 500, method = "natural")
    s2 <- data.frame(Time=spl2$x,Intensity=spl2$y,Model="Smooth",Group=g2Label)
  }else{
    s1 <- data.frame(Intensity=na.omit(f1),Time=model$data$time[intersect(which(!is.na(f)),g1)],Model="Fitted",Group=g1Label)
    s2 <- data.frame(Intensity=na.omit(f2),Time=model$data$time[intersect(which(!is.na(f)),g2)],Model="Fitted",Group=g2Label)
  }
  g <- g+ geom_line(aes(x = Time,y=Intensity,color=Group,linetype=Model),data = s1[!is.na(s1$Intensity),],size=1)+ geom_line(aes(x = Time,y=Intensity,color=Group,linetype=Model),size=1,data = s2[!is.na(s2$Intensity),])
  return(g)
}
# Internal helper: plot one feature's predicted profile(s) from a prediction
# matrix (column names are plain time points or "group time" pairs).
plotModel <- function(object,index,smooth,name,data,time2,group,mean,...){
  # Bind ggplot aesthetic names (avoids "no visible binding" NOTEs)
  Time <- Intensity <- Model <- Group<- NULL
  if(missing(smooth))
    smooth <- F
  if(missing(mean))
    mean <- F
  model <- object[index,]
  if(sum(is.na(model))==length(model))
    stop('Error plotting molecule')
  time <- suppressWarnings(as.numeric(names(model)))
  # Column names may encode "group time"; split to recover both parts
  s <- strsplit(colnames(object),split = " ")
  s <- sapply(s,'[')
  if(!is.null(dim(s)) & !is.null(s)){
    # Two-group layout: row 1 = group labels, row 2 = time points
    group2 <- s[1,]
    time <- suppressWarnings(as.numeric(s[2,]))
    g1Label <-na.omit(sort(unique(group2)))[1]
    g2Label <- na.omit(sort(unique(group2)))[2]
    g1 <- which(group2==g1Label)
    g2 <- which(group2==g2Label)
    if(!missing(group)){
      group2[g1] <- unique(group)[1]
      group2[g2] <- unique(group)[2]
    }
    if(!missing(data) & !missing(time2) & !missing(group)){
      # NOTE(review): `<<-` leaks dfmain into the enclosing/global
      # environment; a plain local `<-` appears intended.
      dfmain <<- data.frame(Intensity=data[,index],Time=time2,Group=factor(group),size=1,Model="Mean")
      g <- ggplot()+ geom_point(aes(x=Time,y=Intensity,shape=Group,color=Group),alpha=0.5,size=3,data = dfmain,na.rm = T) + ggtitle(name)
      if(mean)
        g <- g + stat_summary(aes(x=Time,y=Intensity,linetype=Model,color=Group,group=Group),size=1,data=dfmain,fun.y=function(x)mean(x), geom="line",na.rm = T)
    }else{
      dfmain <- data.frame(Intensity=model,Time=time,Group=factor(group2),size=1,Model="Mean")
      g <- ggplot()+ geom_point(aes(x=Time,y=Intensity,shape=Group,color=Group),alpha=0.5,size=3,data = dfmain,na.rm = T) + ggtitle(name)
      if(mean)
        g <- g + stat_summary(aes(x=Time,y=Intensity,linetype=Model,color=Group,group=Group),size=1,data=dfmain,fun.y=function(x)mean(x), geom="line",na.rm = T)
    }
    if(smooth){
      # Natural-spline interpolation for smooth display
      spl1 <- as.data.frame(spline(x = time[g1], y = model[g1], n = 500, method = "natural"))
      s1 <- data.frame(Time=spl1$x,Intensity=spl1$y,Model="Smooth",Group=g1Label)
      spl2 <- as.data.frame(spline(x = time[g2], y = model[g2], n = 500, method = "natural"))
      s2 <- data.frame(Time=spl2$x,Intensity=spl2$y,Model="Smooth",Group=g2Label)
    }else{
      s1 <- data.frame(Time = time[g1], Intensity = model[g1],Model="Fitted",Group=g1Label)
      s2 <- data.frame(Time = time[g2], Intensity = model[g2],Model="Fitted",Group=g2Label)
    }
    g <- g+ geom_line(aes(x = Time,y=Intensity,color=Group,linetype=Model),data = s1[!is.na(s1$Intensity),])+ geom_line(aes(x = Time,y=Intensity,color=Group,linetype=Model),data = s2[!is.na(s2$Intensity),],size=1)
  }else{
    # Single-group layout: column names are plain time points
    # time <- as.numeric(as.character(s))
    if(!missing(data) & !missing(time2)){
      dfmain <- data.frame(Intensity=data[,index],Time=time2)
    }else{
      dfmain <- data.frame(Intensity=model,Time=time)
    }
    g <- ggplot()+ geom_point(aes(x=Time,y=Intensity),alpha=0.5,size=3,data = dfmain,na.rm = T) +ggtitle(name)
    if(smooth){
      sp1 <- as.data.frame(spline(x = time, y = model, n = 500, method = "natural"))
      dfModel <- data.frame(Intensity=sp1$y,Time=sp1$x,Model="Smooth")
    }else{
      dfModel <- data.frame(Intensity=model,Time=time,Model="Fitted")
    }
    g<-g+ geom_line(aes(x = Time,y=Intensity,linetype=Model),data =dfModel[!is.na(dfModel$Intensity),],size=1)
    if(mean)
      g <- g + stat_summary(aes(x=Time,y=Intensity,linetype='Mean'),size=1,data=dfmain,fun.y=function(x)mean(x), geom="line",na.rm = T)
  }
  return(g)
}
|
#source("slow-sim.R")
source("slow-sim-sol.R")
#source("slow-sim-parallel-sol.R")

# BUG FIX: the original line `set.seed <- 232323` created a variable named
# `set.seed` instead of calling the function, so the RNG was never seeded
# here. (The simulate() calls below pass their own `seed`, but the rnorm()
# draw for `testdata` relied on this.)
set.seed(232323)

observations <- 5000
covariates <- 10
# Gaussian design matrix used as the common input for every benchmark below.
testdata <- as.data.frame(
  matrix(rnorm(observations * covariates),
         nrow = observations
  ))

# Warm-up run, then a timed run of the currently sourced implementation.
test <- simulate(reps = 100, seed = 20141028, data = testdata)
system.time(test <- simulate(reps = 100, seed = 20141028, data = testdata))
#debugonce(simulate)

# Each wrapper re-sources one implementation and runs the identical workload,
# so bench::mark() compares like with like.
slow <- function() {
  source("slow-sim.R")
  simulate(reps = 100, seed = 20141028, data = testdata)
}
better <- function() {
  source("slow-sim-sol.R")
  simulate(reps = 100, seed = 20141028, data = testdata)
}
parallel <- function() {
  source("slow-sim-parallel-sol.R")
  simulate(reps = 100, seed = 20141028, data = testdata)
}

# NOTE(review): `x` is neither a parameter nor defined in this file, so
# testfun() errors unless a global `x` exists when it is called. It is never
# called below -- appears to be leftover scratch code.
testfun <- function() {
  sqrt(x)
}

bench::mark(slow = slow(),
            better = better(),
            parallel = parallel(), check = FALSE)
| /test_sim.R | no_license | fort-w2021/performance-ex-corneliagru | R | false | false | 897 | r | #source("slow-sim.R")
source("slow-sim-sol.R")
#source("slow-sim-parallel-sol.R")
set.seed <- 232323
observations <- 5000
covariates <- 10
testdata <- as.data.frame(
matrix(rnorm(observations * covariates),
nrow = observations
))
test <- simulate(reps = 100, seed = 20141028, data = testdata)
system.time(test <- simulate(reps = 100, seed = 20141028, data = testdata))
#debugonce(simulate)
slow <- function() {
source("slow-sim.R")
simulate(reps = 100, seed = 20141028, data = testdata)
}
better <- function() {
source("slow-sim-sol.R")
simulate(reps = 100, seed = 20141028, data = testdata)
}
parallel <- function() {
source("slow-sim-parallel-sol.R")
simulate(reps = 100, seed = 20141028, data = testdata)
}
testfun <- function() {
sqrt(x)
}
bench::mark(slow = slow(),
better = better(),
parallel = parallel(), check = FALSE)
|
library(interpretR)
### Name: parDepPlot
### Title: Model interpretation functions: Partial Dependence Plots
### Aliases: parDepPlot

### ** Examples

library(randomForest)

# Prepare data: keep the first 100 iris rows and binarize the label
# (setosa -> 0, everything else -> 1).
data(iris)
iris <- iris[1:100, ]
iris$Species <- as.factor(ifelse(factor(iris$Species) == "setosa", 0, 1))

# Cross-validated models: fit 10 forests, each on a random 50-row training
# split, and keep the complementary rows as that model's test set.
data <- vector("list", 10)
rf <- vector("list", 10)
for (i in seq_len(10)) {
  ind <- sample(nrow(iris), 50)
  rf[[i]] <- randomForest(Species ~ ., iris[ind, ])
  data[[i]] <- iris[-ind, ]
}
parDepPlot(x.name = "Petal.Width", object = rf, data = data)

# Single model: one random train/test split.
ind <- sample(nrow(iris), 50)
rf <- randomForest(Species ~ ., iris[ind, ])
parDepPlot(x.name = "Petal.Width", object = rf, data = iris[-ind, ])
### Name: parDepPlot
### Title: Model interpretation functions: Partial Dependence Plots
### Aliases: parDepPlot
### ** Examples
library(randomForest)
#Prepare data
data(iris)
iris <- iris[1:100,]
iris$Species <- as.factor(ifelse(factor(iris$Species)=="setosa",0,1))
#Cross-validated models
#Estimate 10 models and create 10 test sets
data <- list()
rf <- list()
for (i in 1:10) {
ind <- sample(nrow(iris),50)
rf[[i]] <- randomForest(Species~., iris[ind,])
data[[i]] <- iris[-ind,]
}
parDepPlot(x.name="Petal.Width", object=rf, data=data)
#Single model
#Estimate a single model
ind <- sample(nrow(iris),50)
rf <- randomForest(Species~., iris[ind,])
parDepPlot(x.name="Petal.Width", object=rf, data=iris[-ind,])
|
#Using shazam to generate mutation frequency
#slancast@scg4.stanford.edu:/srv/gsfs0/projects/snyder/slancast/IMGT_analyses/IMGTIgM/analyses/mutation_frequency
#
library(alakazam)
library(shazam)
library(ggplot2)

# Subject identifier is supplied through the `var` environment variable.
Subject <- Sys.getenv(c("var"))

# Read one timepoint's Change-O table and tag every row with a
# subject+timepoint ID column (used as the x axis in the plots below).
read_timepoint <- function(timepoint) {
  print(timepoint)
  db <- readChangeoDb(paste("../../", Subject, timepoint,
                            "_db-pass_clone-pass_germ-pass.tab", sep = ""))
  # rep() builds the whole label column at once; this replaces the original
  # grow-in-a-loop append() over every row.
  db$ID <- rep(paste(Subject, timepoint, sep = ""), nrow(db))
  db
}

db <- rbind(read_timepoint("T1"), read_timepoint("T2"), read_timepoint("T3"))

# Count observed replacement/silent mutations per IMGT V region.
db_new <- calcDBObservedMutations(db, sequenceColumn="SEQUENCE_IMGT", germlineColumn="GERMLINE_IMGT_D_MASK", frequency=FALSE,regionDefinition=IMGT_V_BY_REGIONS)
print("mutations_calculated")
print("db type")
print(typeof(db_new))
print(ncol(db_new))
write.table(db_new, file=paste(Subject,"db_new.csv", sep=""), sep=",")

# Draw a violin plot of the R (replacement) mutation counts for one IMGT
# region, split by sample ID, and save it to a PDF. Factors out the 12
# copy-pasted plotting blocks of the original script.
# `prefix` distinguishes the all-sequences plots ("_0") from the
# CDR3-mutated-only plots ("_").
# Requires ggplot2 >= 3.0 for the .data pronoun in aes().
plot_region_violin <- function(df, region, prefix) {
  col <- paste("OBSERVED_", region, "_R", sep = "")
  pdf(paste("Mutation_frequency_", Subject, prefix, region, ".pdf", sep = ""))
  p <- ggplot(df, aes(x = ID, y = .data[[col]])) +
    geom_violin(aes(fill = ID)) +
    ggtitle(col) +
    xlab("ID") +
    ylab("Mutation_Frequency") +
    theme(panel.background = element_rect(fill = "white"))
  print(p)  # inside a function, ggplot objects must be printed explicitly
  dev.off()
}

regions <- c("FWR1", "CDR1", "FWR2", "CDR2", "FWR3", "CDR3")

# Plots over all sequences (filenames carry the original "_0" marker).
for (region in regions) plot_region_violin(db_new, region, "_0")

print("db type")
print(typeof(db_new))
print(ncol(db_new))

# Keep only sequences with at least one CDR3 replacement mutation.
# Vectorized which()-subset replaces the original row-by-row rbind() loop
# onto an empty matrix (which was O(n^2) and copied the table every row).
CDR3_Mut <- db_new[which(as.integer(db_new$OBSERVED_CDR3_R) >= 1), ]
write.table(CDR3_Mut, "CDR3_Mut.tab", sep="\t")
CDR3_Mut <- data.frame(CDR3_Mut)

print("making plots without no mutations")
# Same six plots, restricted to CDR3-mutated sequences (no "_0" marker).
for (region in regions) plot_region_violin(CDR3_Mut, region, "_")
#slancast@scg4.stanford.edu:/srv/gsfs0/projects/snyder/slancast/IMGT_analyses/IMGTIgM/analyses/mutation_frequency
#
library(alakazam)
library(shazam)
library(ggplot2)
Subject <- Sys.getenv(c("var"))
print("T1")
IgMT1 <- readChangeoDb(paste("../../",Subject,"T1_db-pass_clone-pass_germ-pass.tab",sep=""))
ID <- c()
Isotype <- c()
for (i in 1:nrow(IgMT1)) { ID <- append(ID,paste(Subject,"T1",sep="")) }
IgMT1 <- cbind(IgMT1, ID)
print("T2")
IgMT2 <- readChangeoDb(paste("../../",Subject,"T2_db-pass_clone-pass_germ-pass.tab",sep=""))
ID <- c()
Isotype <- c()
for (i in 1:nrow(IgMT2)) { ID <- append(ID,paste(Subject,"T2",sep="")) }
IgMT2 <- cbind(IgMT2, ID)
print("T3")
IgMT3 <- readChangeoDb(paste("../../",Subject,"T3_db-pass_clone-pass_germ-pass.tab",sep=""))
ID <- c()
Isotype <- c()
for (i in 1:nrow(IgMT3)) { ID <- append(ID,paste(Subject,"T3",sep="")) }
IgMT3 <- cbind(IgMT3, ID)
db <- rbind(IgMT1, IgMT2, IgMT3)
db_new <- calcDBObservedMutations(db, sequenceColumn="SEQUENCE_IMGT", germlineColumn="GERMLINE_IMGT_D_MASK", frequency=FALSE,regionDefinition=IMGT_V_BY_REGIONS)
print("mutations_calculated")
print("db type")
print(typeof(db_new))
#CDR3_Mut <- matrix(nrow = 0,ncol = 63)
#colnames(CDR3_Mut) <- c(colnames(db_new), "Mutations") #I don't think I use this anymore.
print(ncol(db_new))
write.table(db_new, file=paste(Subject,"db_new.csv", sep=""), sep=",")
pdf(paste("Mutation_frequency_",Subject,"_0FWR1.pdf",sep=""))
ggplot(db_new, aes(x=ID, y=OBSERVED_FWR1_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_FWR1_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_0CDR1.pdf",sep=""))
ggplot(db_new, aes(x=ID, y=OBSERVED_CDR1_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_CDR1_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_0FWR2.pdf",sep=""))
ggplot(db_new, aes(x=ID, y=OBSERVED_FWR2_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_FWR2_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_0CDR2.pdf",sep=""))
ggplot(db_new, aes(x=ID, y=OBSERVED_CDR2_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_CDR2_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_0FWR3.pdf",sep=""))
ggplot(db_new, aes(x=ID, y=OBSERVED_FWR3_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_FWR3_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_0CDR3.pdf",sep=""))
ggplot(db_new, aes(x=ID, y=OBSERVED_CDR3_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_CDR3_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
print("db type")
print(typeof(db_new))
CDR3_Mut <- matrix(nrow = 0,ncol = 62)
colnames(CDR3_Mut) <- colnames(db_new)
print(CDR3_Mut)
print(ncol(db_new))
print("beginning for loop")
for (i in 1:nrow(db_new)){
if (as.integer(db_new[i,]$OBSERVED_CDR3_R) >= 1){
print(i)
CDR3_Mut <- rbind(CDR3_Mut, db_new[i,]) }}
write.table(CDR3_Mut, "CDR3_Mut.tab", sep="\t")
CDR3_Mut <- data.frame(CDR3_Mut)
print("making plots without no mutations")
pdf(paste("Mutation_frequency_",Subject,"_FWR1.pdf",sep=""))
ggplot(CDR3_Mut, aes(x=ID, y=OBSERVED_FWR1_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_FWR1_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_CDR1.pdf",sep=""))
ggplot(CDR3_Mut, aes(x=ID, y=OBSERVED_CDR1_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_CDR1_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_FWR2.pdf",sep=""))
ggplot(CDR3_Mut, aes(x=ID, y=OBSERVED_FWR2_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_FWR2_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_CDR2.pdf",sep=""))
ggplot(CDR3_Mut, aes(x=ID, y=OBSERVED_CDR2_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_CDR2_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_FWR3.pdf",sep=""))
ggplot(CDR3_Mut, aes(x=ID, y=OBSERVED_FWR3_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_FWR3_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off()
pdf(paste("Mutation_frequency_",Subject,"_CDR3.pdf",sep=""))
ggplot(CDR3_Mut, aes(x=ID, y=OBSERVED_CDR3_R)) +
geom_violin(aes(fill=ID)) +
ggtitle("OBSERVED_CDR3_R") +
xlab("ID") +
ylab("Mutation_Frequency") +
theme(panel.background = element_rect(fill = "white"))
dev.off() |
################################### Array data ###################################
rm(list = ls())
library(pracma)
setwd("/Volumes/Data1/PROJECTS/PersistentHomology_ASD_Brain/")
source("SCRIPTS/FinalScripts/supplementary/Functions.R")

# Microarray expression table and KEGG gene-set definitions.
expr <- read.csv("DATA/Voineagu2011_Array.csv")
kegg <- read.csv("DATA/c2.cp.kegg.v6.2.symbols.csv")

# Keep gene sets with at least half of their (non-empty) member genes present
# in the expression data; vapply replaces the original index loop.
frac_present <- vapply(seq_len(ncol(kegg)), function(j) {
  genes <- kegg[, j]
  length(intersect(expr$Symbol, genes)) / (nrow(kegg) - sum(genes %in% ""))
}, numeric(1))
kegg <- kegg[, which(frac_present > 0.5)]

symbols <- expr$Symbol
expr <- expr[, -c(1, 2)]  # drop the gene annotation columns

results_kegg_MD_Array <- run_MD_geneSets(expData = expr, symbols = symbols,
                                         kegg = kegg, num_trials = 100,
                                         transposed = TRUE,
                                         asd_label = "A_", con_label = "C_")
names(results_kegg_MD_Array) <- colnames(kegg)
results_kegg_MD_Array[which(results_kegg_MD_Array < 0.01)]  # echo significant sets
save(results_kegg_MD_Array, file = "RESULTS//FINAL/Mahalanobis_Kegg/results_kegg_MD_Array.rda")

################################### Replication in RNA-seq data ###################################
rm(list = ls())
setwd("/Volumes/Data1/PROJECTS/PersistentHomology_ASD_Brain/")
source("SCRIPTS/FinalScripts/supplementary/Functions.R")

expr <- read.csv("DATA/Parikshak2016_RnaSeq.csv")
kegg <- read.csv("DATA/c2.cp.kegg.v6.2.symbols.csv")

# Restrict to the gene sets that were significant in the array analysis.
load("RESULTS//FINAL/Mahalanobis_Kegg/results_kegg_MD_Array.rda")
sig_sets <- names(results_kegg_MD_Array[which(results_kegg_MD_Array < 0.01)])
kegg <- kegg[, which(colnames(kegg) %in% sig_sets)]

# Translate Ensembl gene IDs to HGNC symbols.
hgnc <- read.table("/Volumes/MacintoshHD_RNA/Users/rna/REFERENCE/HUMAN/Ensembl_GRCh37_hg19/ANNOTATION/Ensembl_to_other/HGNC_EnsemblGeneID.txt", sep = "\t", header = TRUE)
symbols <- hgnc$Approved.Symbol[match(expr[, 1], hgnc$EnsemblGeneID)]
expr <- expr[, -1]  # drop the gene annotation column

results_kegg_MD_RNAseq <- run_MD_geneSets(expData = expr, symbols = symbols,
                                          kegg = kegg, num_trials = 100,
                                          transposed = TRUE,
                                          asd_label = "ASD_", con_label = "CTL_")
save(results_kegg_MD_RNAseq, file = "RESULTS/FINAL/Mahalanobis_Kegg/results_kegg_MD_RNAseq.rda")
| /6..MahalanobisDistance_KEGG_geneSets.R | no_license | Voineagulab/Persistent_Homology_ASD_Brain | R | false | false | 2,038 | r | ################################### Array data ###################################
rm(list=ls())
library(pracma)
setwd("/Volumes/Data1/PROJECTS/PersistentHomology_ASD_Brain/")
source("SCRIPTS/FinalScripts/supplementary/Functions.R")
data=read.csv("DATA/Voineagu2011_Array.csv") # read in data
kegg=read.csv("DATA/c2.cp.kegg.v6.2.symbols.csv")
# select gene sets with at least half of the genes present in the dataset
l=rep(0, ncol(kegg))
for (j in c(1:length(l))) l[j]=length(intersect(data$Symbol, kegg[,j]))/(nrow(kegg)-length(which(kegg[,j]%in%"")))
kegg=kegg[, which(l>0.5)]
symbols=data$Symbol
data=data[,-c(1,2)] # remove gene annotation columns
results_kegg_MD_Array=run_MD_geneSets(expData=data,symbols=symbols, kegg=kegg,num_trials=100, transposed=TRUE, asd_label="A_", con_label="C_")
names(results_kegg_MD_Array)=colnames(kegg)
results_kegg_MD_Array[which(results_kegg_MD_Array< 0.01)]
save(results_kegg_MD_Array, file="RESULTS//FINAL/Mahalanobis_Kegg/results_kegg_MD_Array.rda")
################################### Replication in RNA-seq data ###################################
rm(list=ls())
setwd("/Volumes/Data1/PROJECTS/PersistentHomology_ASD_Brain/")
source("SCRIPTS/FinalScripts/supplementary/Functions.R")
data=read.csv("DATA/Parikshak2016_RnaSeq.csv") # read in data
kegg=read.csv("DATA/c2.cp.kegg.v6.2.symbols.csv")
load("RESULTS//FINAL/Mahalanobis_Kegg/results_kegg_MD_Array.rda")
kegg=kegg[,which(colnames(kegg)%in%names(results_kegg_MD_Array[which(results_kegg_MD_Array < 0.01)])) ]
hgnc=read.table("/Volumes/MacintoshHD_RNA/Users/rna/REFERENCE/HUMAN/Ensembl_GRCh37_hg19/ANNOTATION/Ensembl_to_other/HGNC_EnsemblGeneID.txt", sep="\t", header=TRUE)
symbols=hgnc$Approved.Symbol[match(data[,1], hgnc$EnsemblGeneID)]
data=data[,-1] # remove gene annotation columns
results_kegg_MD_RNAseq=run_MD_geneSets(expData=data,symbols=symbols, kegg=kegg,num_trials=100, transposed=TRUE, asd_label="ASD_", con_label="CTL_")
save(results_kegg_MD_RNAseq, file="RESULTS/FINAL/Mahalanobis_Kegg/results_kegg_MD_RNAseq.rda")
|
# Fuzzer-generated regression input for ggforce:::enclose_points: zero-length
# `id` and `y` paired with a 78-element `x` vector containing huge doubles and
# denormals -- exercises the C++ routine's handling of mismatched lengths.
testlist <- list(id = integer(0), x = c(1.91374883209651e+214, 1.83300721970325e+267, 1.91216362379663e+214, 1.91374883209651e+214, 1.91374883209651e+214, 1.91374883209651e+214, 1.37132860659696e-319, 0, 8.94258818972656e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
# Invoke the internal entry point with the fuzz case and show the result's
# structure (the check is simply that the call does not crash).
result <- do.call(ggforce:::enclose_points,testlist)
str(result)
result <- do.call(ggforce:::enclose_points,testlist)
str(result) |
approxVarEst <- function(se0, sg0, D1, D2, r, n1, n2, control = NULL) {
  # Approximate (RE)ML estimation of the error variance (s2e) and the
  # random-effect variance (s2g) in a two-group variance-components model.
  #
  # Args:
  #   se0, sg0: starting values for the error / random-effect variances.
  #   D1, D2:   p x p design matrices for group 1 and group 2.
  #   r:        p x (n1+n2) residual matrix; columns 1..n1 are group 1,
  #             columns (n1+1)..(n1+n2) are group 2.
  #   n1, n2:   per-group sample sizes.
  #   control:  optional list with tol (convergence threshold on the summed
  #             absolute change), s2profile ("se" or "sg": which variance is
  #             profiled out), and lklMethod ("REML" or "ML").
  #
  # Returns a list with s2e, s2g, tau (ratio of the non-profiled to the
  # profiled variance), finalgap (last change in the estimates) and niter.
  if (is.null(control)) {
    tol <- 0.01
    s2profile <- "se"
    lklMethod <- "REML"
  } else {
    tol <- control$tol
    s2profile <- control$s2profile
    lklMethod <- control$lklMethod
  }
  matTr <- function(z) sum(diag(z))
  p <- dim(D1)[1]
  Ip <- diag(rep(1, p))
  n <- n1 + n2
  N <- n * p
  ## Per-group residual cross-product matrices. tcrossprod(X) equals
  ## sum_i x_i %*% t(x_i) over the columns of X, replacing the original
  ## loop of n rank-1 outer-product accumulations.
  R1 <- tcrossprod(r[, seq_len(n1), drop = FALSE])
  R2 <- tcrossprod(r[, (n1 + 1):n, drop = FALSE])
  D1D1 <- tcrossprod(D1)
  D2D2 <- tcrossprod(D2)
  gap <- 1
  sg <- sg0
  se <- se0
  cnt <- 0
  ## Define tau up front so it exists even if tol >= the initial gap and the
  ## loop below never executes (the original would then error in return()).
  tau <- if (s2profile == "se") sg / se else se / sg
  ## REML reduces the effective sample size by the 2*p estimated fixed effects.
  lklConst <- ifelse(lklMethod == "REML", N - 2 * p, N)
  while (gap > tol) {
    sg0 <- sg
    se0 <- se
    cnt <- cnt + 1
    ## Trace term of the profiled likelihood equations under the current
    ## per-group covariances sg*D D' + se*I.
    tmp0 <- matTr(solve(sg * D1D1 + se * Ip) %*% R1) +
      matTr(solve(sg * D2D2 + se * Ip) %*% R2)
    if (s2profile == "se") {
      se <- tmp0 / lklConst
      ## Method-of-moments update for sg: remove (at most) the error variance
      ## from the diagonal of each group's sample covariance, map back through
      ## the design, and average the diagonals across groups.
      tmp1 <- cov(t(r[, 1:n1]))
      tmp <- min(diag(tmp1))
      tmp <- min(se, tmp)                 # never subtract more than is there
      tmp1 <- tmp1 - diag(rep(tmp, p))
      tmp1 <- solve(D1) %*% tmp1
      tmp2 <- cov(t(r[, (n1 + 1):n]))
      tmp <- min(diag(tmp2))
      tmp <- min(se, tmp)
      tmp2 <- tmp2 - diag(rep(tmp, p))
      tmp2 <- solve(D2) %*% tmp2
      sg <- (mean(diag(tmp1)) + mean(diag(tmp2))) / 2
      tau <- sg / se
    } else {
      sg <- tmp0 / lklConst
      ## NOTE(review): unlike the "se" branch, no solve(D) mapping is applied
      ## here -- preserved from the original; verify the asymmetry is intended.
      tmp1 <- cov(t(r[, 1:n1]))
      tmp <- min(diag(tmp1))
      tmp <- min(sg, tmp)
      tmp1 <- tmp1 - diag(rep(tmp, p))
      tmp2 <- cov(t(r[, (n1 + 1):n]))
      tmp <- min(diag(tmp2))
      tmp <- min(sg, tmp)
      tmp2 <- tmp2 - diag(rep(tmp, p))
      se <- (mean(diag(tmp1)) + mean(diag(tmp2))) / 2
      tau <- se / sg
    }
    gap <- abs(sg - sg0) + abs(se - se0)
  }
  return(list(s2e = se, s2g = sg, tau = tau, finalgap = gap, niter = cnt))
}
| /netgsa/R/approxVarEst.R | no_license | ingted/R-Examples | R | false | false | 1,906 | r | approxVarEst <-
function(se0, sg0, D1, D2, r, n1, n2, control=NULL){
if (is.null(control)) {
tol = 0.01
s2profile = "se"
lklMethod = "REML"
} else {
tol = control$tol
s2profile = control$s2profile
lklMethod = control$lklMethod
}
matTr <- function(z) sum(diag(z))
p = dim(D1)[1]
Ip = diag(rep(1, p))
n = n1 + n2
N = n * p
##residual matrices\n
R1 = matrix(0, p, p)
R2 = matrix(0, p, p)
for (i in 1:n1) {
R1 = R1 + r[, i] %o% r[, i]
}
for (i in (n1 + 1):n) {
R2 = R2 + r[, i] %o% r[, i]
}
D1D1 = (D1 %*% t(D1))
D2D2 = (D2 %*% t(D2))
gap = 1
sg = sg0
se = se0
cnt = 0
## Whether it's ML or REML
lklConst = ifelse(lklMethod == "REML", N - 2*p, N)
while (gap > tol) {
sg0 = sg
se0 = se
cnt = cnt + 1
tmp0 = matTr(solve(sg * D1D1 + se * Ip) %*% R1) + matTr(solve(sg * D2D2 + se * Ip) %*% R2)
if (s2profile == "se") {
se = tmp0 / lklConst
tmp1 = cov(t(r[, 1:n1]))
tmp = min(diag(tmp1))
tmp = min(se, tmp)
tmp1 = tmp1 - diag(rep(tmp, p))
tmp1 = solve(D1) %*% tmp1
tmp2 = cov(t(r[, (n1 + 1):n]))
tmp = min(diag(tmp2))
tmp = min(se, tmp)
tmp2 = tmp2 - diag(rep(tmp, p))
tmp2 = solve(D2) %*% tmp2
sg = (mean(diag(tmp1)) + mean(diag(tmp2)))/2
tau = sg/se
} else {
sg = tmp0/lklConst
tmp1 = cov(t(r[, 1:n1]))
tmp = min(diag(tmp1))
tmp = min(sg, tmp)
tmp1 = tmp1 - diag(rep(tmp, p))
tmp2 = cov(t(r[, (n1 + 1):n]))
tmp = min(diag(tmp2))
tmp = min(sg, tmp)
tmp2 = tmp2 - diag(rep(tmp, p))
se = (mean(diag(tmp1)) + mean(diag(tmp2)))/2
tau = se/sg
}
gap = abs(sg - sg0) + abs(se - se0)
}
return(list(s2e = se, s2g = sg, tau=tau, finalgap = gap, niter = cnt))
}
|
# Orchestrate the full platform comparison: read Illumina (Kallisto) and
# long-read (TALON) transcript counts, run an edgeR exact test between the
# two platforms, draw an MA plot, and write the annotated result table.
# All inputs and outputs are supplied via command-line options (see
# parse_options()).
main <-function() {
set.seed(100)
load_packages()
opt <- parse_options()
# Get colors
if (opt$color_scheme == "red") {
fill_color <- "red2"
} else if (opt$color_scheme == "blue") {
fill_color <- "navy"
} else if (opt$color_scheme == "green") {
fill_color <- "#009E73"
}
# NOTE(review): no else branch -- an unrecognized --color value leaves
# fill_color undefined and ma_plot() will error later.
# Get the names of the first and second dataset that we will be working with
data_names <- str_split(opt$datasets, ",")[[1]]
dataset1 <- data_names[1]
dataset2 <- data_names[2]
# datatype label (ONT or PacBio)
dtype <- opt$dtype
# Get transcripts expressed in the Illumina data from the Kallisto
# abundance files
illumina_1 <- filter_kallisto_illumina_transcripts(opt$illumina_kallisto_1)
illumina_2 <- filter_kallisto_illumina_transcripts(opt$illumina_kallisto_2)
colnames(illumina_1) <- c("annot_transcript_id", "illumina_counts_1")
colnames(illumina_2) <- c("annot_transcript_id", "illumina_counts_2")
# Full outer join: keep transcripts detected in either replicate.
illumina_table <- merge(illumina_1, illumina_2, by = "annot_transcript_id",
all.x = T, all.y = T)
print(paste0("Illumina transcripts: ", nrow(illumina_table)))
# Transcripts missing from one replicate get a count of zero; edgeR needs
# integer-like counts, hence the rounding of Kallisto's estimated counts.
illumina_table[is.na(illumina_table)] <- 0
illumina_table$illumina_counts_1 <- round(illumina_table$illumina_counts_1)
illumina_table$illumina_counts_2 <- round(illumina_table$illumina_counts_2)
# Read PacBio abundance file
pb_abundance_orig <- as.data.frame(read_delim(opt$infile, "\t", escape_double = FALSE,
col_names = TRUE, trim_ws = TRUE, na = "NA"))
# Keep known transcripts only
pb_abundance <- subset(pb_abundance_orig, transcript_novelty == "Known")
# Cut out unnecessary cols
pb_abundance <- pb_abundance[, c("annot_transcript_id", dataset1, dataset2)]
# Merge PacBio with Illumina on annot_transcript_name
merged_illumina_pacbio <- merge(illumina_table, pb_abundance, by = "annot_transcript_id",
all.x = T, all.y = T)
merged_illumina_pacbio[is.na(merged_illumina_pacbio)] <- 0
merged_illumina_pacbio <- merged_illumina_pacbio[, c("annot_transcript_id",
"illumina_counts_1",
"illumina_counts_2",
dataset1, dataset2)]
# Now, format table for edgeR by setting the rownames to the gene names
rownames(merged_illumina_pacbio) <- merged_illumina_pacbio$annot_transcript_id
merged_illumina_pacbio$annot_transcript_id <- NULL
# Remove rows that only contain zeros
merged_illumina_pacbio <- merged_illumina_pacbio[rowSums(merged_illumina_pacbio) > 0, ]
print(head(merged_illumina_pacbio))
# edgeR basics:
group <- factor(c("Illumina","Illumina","PacBio","PacBio")) # Indicate which group each col belongs to
y <- DGEList(counts=merged_illumina_pacbio, group = group) # Create a DGEList object
design <- model.matrix(~group)
# Filter out lowly expressed
keep <- filterByExpr(y, design)
y <- y[keep, , keep.lib.sizes=FALSE]
y <- calcNormFactors(y) # Normalize counts in the object
y <- estimateDisp(y,design)
# Pairwise testing approach for DE Genes. "classic" edgeR
et <- exactTest(y, pair=c("Illumina","PacBio"))
# Extract exact test table for plotting
illumina_PB_et <- et$table
illumina_PB_et$transcript_id <- rownames(illumina_PB_et)
# Adjust p-values
illumina_PB_et$adj_pval <- p.adjust(illumina_PB_et$PValue, method = "bonferroni")
# Significant = |log2 fold change| > 1 AND Bonferroni-adjusted p < 0.01.
illumina_PB_et$status <- as.factor(ifelse(abs(illumina_PB_et$logFC) > 1 & illumina_PB_et$adj_pval < 0.01,
"significant", "not_sig"))
# MA plot
ma_plot(illumina_PB_et, fill_color, opt$outdir, dtype, opt$xmax, opt$ymax)
# Merge the EdgeR table with the other information
illumina_PB_et <- merge(illumina_PB_et, merged_illumina_pacbio,
by.x = "transcript_id", by.y = "row.names",
all.x = T, all.y = F)
# Merge in human-readable gene names
transcript_names <- get_transcript_names(opt$illumina_kallisto_1)
illumina_PB_et <- merge(illumina_PB_et, transcript_names, by.x = "transcript_id",
by.y = "t_ID", all.x = T, all.y = F)
# Merge in transcript lengths
transcript_lengths <- get_transcript_lengths(opt$illumina_kallisto_1)
illumina_PB_et <- merge(illumina_PB_et, transcript_lengths, by.x = "transcript_id",
by.y = "t_ID", all.x = T, all.y = F)
# Write the final table sorted by adjusted p-value.
illumina_PB_et <- illumina_PB_et[order(illumina_PB_et$adj_pval),]
write.table(illumina_PB_et,
paste(opt$outdir, "/edgeR_", dtype, "_illumina_transcript_counts.tsv", sep=""),
row.names=F, col.names=T, quote=F, sep = "\t")
}
# Draw and save an MA plot (logCPM vs. logFC) of the edgeR exact-test table.
#
# Args:
#   data:       exact-test table with logCPM, logFC and a `status` column
#               taking values "significant" / "not_sig".
#   fillcolor:  point color for the non-significant class (significant
#               points are always orange).
#   outdir:     output directory for the PNG.
#   dtype:      long-read platform label used in the filename and y-axis.
#   xmax, ymax: axis limits; y runs symmetrically from -ymax to ymax.
# Side effects: writes a PNG under outdir and prints head(data) to stdout.
ma_plot <- function(data, fillcolor, outdir, dtype, xmax, ymax) {
# Per-class counts shown in the legend labels.
n_sig <- length(data$status[data$status == "significant"])
n_no_sig <- length(data$status[data$status == "not_sig"])
fname <- paste(outdir, "/edgeR_", dtype, "_illumina_transcript_counts_MA_plot.png", sep="")
xlabel <- "log2(CPM)"
ylabel <- paste0(dtype, " to Illumina log2-fold change")
png(filename = fname,
width = 2500, height = 2500, units = "px",
bg = "white", res = 300)
print(head(data))
# Fix the factor order so "significant" always maps to the first color/label.
data$status <- factor(data$status, levels = c("significant", "not_sig"))
g <- ggplot(data, aes(x=logCPM, y=logFC, color = status)) +
geom_point(alpha = 0.4, size = 2) +
xlab(xlabel) + ylab(ylabel) + theme_bw() +
coord_cartesian(xlim=c(-5,xmax), ylim = c(-1*ymax,ymax)) +
scale_color_manual(values = c("orange", fillcolor),
labels = c(paste0("Significant (n=", n_sig, ")"),
paste0("Not sig. (n=", n_no_sig, ")"))) +
theme(axis.text.x = element_text(color="black", size=24),
axis.text.y = element_text(color="black", size=24),
axis.title.x = element_text(color="black", size=24),
axis.title.y = element_text(color="black", size=24)) +
guides(colour = guide_legend(override.aes = list(alpha=1,size=3))) +
theme(legend.position=c(0.27,0.08),
legend.title = element_blank(),
legend.background = element_rect(fill="white", color = "black"),
legend.key = element_rect(fill="transparent"),
legend.text = element_text(colour = 'black', size = 24))
print(g)
dev.off()
}
filter_kallisto_illumina_transcripts <- function(kallisto_file) {
  # Read a Kallisto abundance file whose target IDs are GENCODE-style
  # '|'-delimited identifiers and keep only transcripts with TPM > 0,
  # returning their Ensembl transcript IDs and estimated counts.
  abundance <- as.data.frame(read_delim(kallisto_file, "\t",
                                        escape_double = FALSE,
                                        col_names = TRUE, trim_ws = TRUE,
                                        na = "NA"))
  # Pull the annotation fields of interest out of the multi-part target_id.
  id_fields <- str_split_fixed(abundance$target_id, "\\|", 9)[, c(5, 6, 8, 1, 2)]
  colnames(id_fields) <- c("transcript", "gene_name", "class", "t_ID", "g_ID")
  abundance <- cbind(id_fields, abundance)
  expressed <- subset(abundance, tpm > 0)
  expressed[, c("t_ID", "est_counts")]
}
get_transcript_names <- function(kallisto_file) {
  # Build a lookup from Ensembl transcript IDs to human-readable transcript
  # names out of a Kallisto abundance file with GENCODE-style target IDs.
  abundance <- as.data.frame(read_delim(kallisto_file, "\t",
                                        escape_double = FALSE,
                                        col_names = TRUE, trim_ws = TRUE,
                                        na = "NA"))
  id_fields <- str_split_fixed(abundance$target_id, "\\|", 9)[, c(5, 6, 8, 1, 2)]
  colnames(id_fields) <- c("transcript", "gene", "class", "t_ID", "g_ID")
  unique(id_fields[, c("t_ID", "transcript")])
}
get_transcript_lengths <- function(kallisto_file) {
  # Map each Ensembl transcript ID to its transcript length as reported in a
  # Kallisto abundance file with GENCODE-style '|'-delimited target IDs.
  #
  # Returns a data frame with columns t_ID and length.
  data <- as.data.frame(read_delim(kallisto_file, "\t", escape_double = FALSE,
                                   col_names = TRUE, trim_ws = TRUE, na = "NA"))
  # Field 1 of the multi-part target_id is the Ensembl transcript ID.
  t_ID <- str_split_fixed(data$target_id, "\\|", 9)[, 1]
  # Build the two-column result directly instead of cbind-ing the full
  # annotation matrix onto the table only to discard it (as before).
  data.frame(t_ID = t_ID, length = data$length, stringsAsFactors = FALSE)
}
load_packages <- function() {
  # Attach every library this script depends on, silencing startup chatter.
  suppressPackageStartupMessages(library("ggplot2"))
  suppressPackageStartupMessages(library("dplyr"))
  suppressPackageStartupMessages(library("plyr"))
  suppressPackageStartupMessages(library("Hmisc"))
  suppressPackageStartupMessages(library("optparse"))
  suppressPackageStartupMessages(library("readr"))
  suppressPackageStartupMessages(library("reshape"))
  suppressPackageStartupMessages(library("stringr"))
  suppressPackageStartupMessages(library("data.table"))
  suppressPackageStartupMessages(library("preprocessCore"))
  suppressPackageStartupMessages(library("edgeR"))
  # BUG FIX: the original ended with a bare `return` (no parentheses), which
  # evaluates to -- and returns -- the base `return` function object itself.
  # Return NULL invisibly instead; callers ignore the value.
  invisible(NULL)
}
# Define and parse this script's command-line interface with optparse.
# Returns the parsed options list; the `help` string of each flag documents
# its meaning. --f, --datasets, --ik1/--ik2, --color, and --outdir have no
# defaults and are effectively required; --xmax/--ymax/--dtype have defaults.
parse_options <- function() {
option_list <- list(
make_option(c("--f"), action = "store", dest = "infile",
default = NULL, help = "TALON abundance file (not filtered)"),
make_option(c("--datasets"), action = "store", dest = "datasets",
default = NULL, help = "Comma-delimited list of two dataset names to include in the analysis."),
make_option(c("--ik1"), action = "store", dest = "illumina_kallisto_1",
default = NULL, help = "Rep1 Illumina Kallisto file."),
make_option(c("--ik2"), action = "store", dest = "illumina_kallisto_2",
default = NULL, help = "Rep2 Illumina Kallisto file."),
make_option(c("--color"), action = "store", dest = "color_scheme",
default = NULL, help = "blue, red, or green"),
make_option(c("--xmax"), action = "store", dest = "xmax",
default = 16, help = "Max x-value for plot (default = 16)"),
make_option(c("--ymax"), action = "store", dest = "ymax",
default = 20, help = "Max y-value for plot (default = 20)"),
make_option(c("-o","--outdir"), action = "store", dest = "outdir",
default = NULL, help = "Output directory for plots and outfiles"),
make_option(c("--dtype"), action = "store", dest = "dtype",
default = "PacBio", help = "Datatype label to display on plot"))
opt <- parse_args(OptionParser(option_list=option_list))
return(opt)
}
# Script entry point: run the full edgeR comparison when executed.
main()
| /plotting_scripts/longread_v_illumina_transcript_counts_edgeR.R | permissive | mortazavilab/TALON-paper-2020 | R | false | false | 10,546 | r | main <-function() {
set.seed(100)
load_packages()
opt <- parse_options()
# Get colors
if (opt$color_scheme == "red") {
fill_color <- "red2"
} else if (opt$color_scheme == "blue") {
fill_color <- "navy"
} else if (opt$color_scheme == "green") {
fill_color <- "#009E73"
}
# Get the names of the first and second dataset that we will be working with
data_names <- str_split(opt$datasets, ",")[[1]]
dataset1 <- data_names[1]
dataset2 <- data_names[2]
# datatype label (ONT or PacBio)
dtype <- opt$dtype
# Get transcripts expressed in the Illumina data from the Kallisto
# abundance files
illumina_1 <- filter_kallisto_illumina_transcripts(opt$illumina_kallisto_1)
illumina_2 <- filter_kallisto_illumina_transcripts(opt$illumina_kallisto_2)
colnames(illumina_1) <- c("annot_transcript_id", "illumina_counts_1")
colnames(illumina_2) <- c("annot_transcript_id", "illumina_counts_2")
illumina_table <- merge(illumina_1, illumina_2, by = "annot_transcript_id",
all.x = T, all.y = T)
print(paste0("Illumina transcripts: ", nrow(illumina_table)))
illumina_table[is.na(illumina_table)] <- 0
illumina_table$illumina_counts_1 <- round(illumina_table$illumina_counts_1)
illumina_table$illumina_counts_2 <- round(illumina_table$illumina_counts_2)
# Read PacBio abundance file
pb_abundance_orig <- as.data.frame(read_delim(opt$infile, "\t", escape_double = FALSE,
col_names = TRUE, trim_ws = TRUE, na = "NA"))
# Keep known transcripts only
pb_abundance <- subset(pb_abundance_orig, transcript_novelty == "Known")
# Cut out unnecessary cols
pb_abundance <- pb_abundance[, c("annot_transcript_id", dataset1, dataset2)]
# Merge PacBio with Illumina on annot_transcript_name
merged_illumina_pacbio <- merge(illumina_table, pb_abundance, by = "annot_transcript_id",
all.x = T, all.y = T)
merged_illumina_pacbio[is.na(merged_illumina_pacbio)] <- 0
merged_illumina_pacbio <- merged_illumina_pacbio[, c("annot_transcript_id",
"illumina_counts_1",
"illumina_counts_2",
dataset1, dataset2)]
# Now, format table for edgeR by setting the rownames to the gene names
rownames(merged_illumina_pacbio) <- merged_illumina_pacbio$annot_transcript_id
merged_illumina_pacbio$annot_transcript_id <- NULL
# Remove rows that only contain zeros
merged_illumina_pacbio <- merged_illumina_pacbio[rowSums(merged_illumina_pacbio) > 0, ]
print(head(merged_illumina_pacbio))
# edgeR basics:
group <- factor(c("Illumina","Illumina","PacBio","PacBio")) # Indicate which group each col belongs to
y <- DGEList(counts=merged_illumina_pacbio, group = group) # Create a DGEList object
design <- model.matrix(~group)
# Filter out lowly expressed
keep <- filterByExpr(y, design)
y <- y[keep, , keep.lib.sizes=FALSE]
y <- calcNormFactors(y) # Normalize counts in the object
y <- estimateDisp(y,design)
# Pairwise testing approach for DE Genes. "classic" edgeR
et <- exactTest(y, pair=c("Illumina","PacBio"))
# Extract exact test table for plotting
illumina_PB_et <- et$table
illumina_PB_et$transcript_id <- rownames(illumina_PB_et)
# Adjust p-values
illumina_PB_et$adj_pval <- p.adjust(illumina_PB_et$PValue, method = "bonferroni")
illumina_PB_et$status <- as.factor(ifelse(abs(illumina_PB_et$logFC) > 1 & illumina_PB_et$adj_pval < 0.01,
"significant", "not_sig"))
# MA plot
ma_plot(illumina_PB_et, fill_color, opt$outdir, dtype, opt$xmax, opt$ymax)
# Merge the EdgeR table with the other information
illumina_PB_et <- merge(illumina_PB_et, merged_illumina_pacbio,
by.x = "transcript_id", by.y = "row.names",
all.x = T, all.y = F)
# Merge in human-readable gene names
transcript_names <- get_transcript_names(opt$illumina_kallisto_1)
illumina_PB_et <- merge(illumina_PB_et, transcript_names, by.x = "transcript_id",
by.y = "t_ID", all.x = T, all.y = F)
# Merge in transcript lengths
transcript_lengths <- get_transcript_lengths(opt$illumina_kallisto_1)
illumina_PB_et <- merge(illumina_PB_et, transcript_lengths, by.x = "transcript_id",
by.y = "t_ID", all.x = T, all.y = F)
illumina_PB_et <- illumina_PB_et[order(illumina_PB_et$adj_pval),]
write.table(illumina_PB_et,
paste(opt$outdir, "/edgeR_", dtype, "_illumina_transcript_counts.tsv", sep=""),
row.names=F, col.names=T, quote=F, sep = "\t")
}
# Render an MA plot (logCPM vs logFC) of the edgeR exact-test results and
# write it to a PNG in `outdir`.
#
# Args:
#   data:      edgeR exact-test table with columns logCPM, logFC, and a
#              `status` factor of "significant"/"not_sig"
#   fillcolor: color used for the non-significant points
#   outdir:    output directory for the PNG
#   dtype:     datatype label (e.g. "PacBio") used in the filename and y-axis
#   xmax/ymax: axis limits; y-axis is symmetric about zero
ma_plot <- function(data, fillcolor, outdir, dtype, xmax, ymax) {
# Counts shown in the legend labels below
n_sig <- length(data$status[data$status == "significant"])
n_no_sig <- length(data$status[data$status == "not_sig"])
fname <- paste(outdir, "/edgeR_", dtype, "_illumina_transcript_counts_MA_plot.png", sep="")
xlabel <- "log2(CPM)"
ylabel <- paste0(dtype, " to Illumina log2-fold change")
png(filename = fname,
width = 2500, height = 2500, units = "px",
bg = "white", res = 300)
print(head(data))
# Fix the factor level order so "significant" always maps to orange below
data$status <- factor(data$status, levels = c("significant", "not_sig"))
g <- ggplot(data, aes(x=logCPM, y=logFC, color = status)) +
geom_point(alpha = 0.4, size = 2) +
xlab(xlabel) + ylab(ylabel) + theme_bw() +
coord_cartesian(xlim=c(-5,xmax), ylim = c(-1*ymax,ymax)) +
scale_color_manual(values = c("orange", fillcolor),
labels = c(paste0("Significant (n=", n_sig, ")"),
paste0("Not sig. (n=", n_no_sig, ")"))) +
theme(axis.text.x = element_text(color="black", size=24),
axis.text.y = element_text(color="black", size=24),
axis.title.x = element_text(color="black", size=24),
axis.title.y = element_text(color="black", size=24)) +
# Override point alpha/size inside the legend keys only
guides(colour = guide_legend(override.aes = list(alpha=1,size=3))) +
theme(legend.position=c(0.27,0.08),
legend.title = element_blank(),
legend.background = element_rect(fill="white", color = "black"),
legend.key = element_rect(fill="transparent"),
legend.text = element_text(colour = 'black', size = 24))
# print() is required for ggplot objects inside a function/device context
print(g)
dev.off()
}
filter_kallisto_illumina_transcripts <- function(kallisto_file) {
# Read a Kallisto abundance table and keep only transcripts with tpm > 0,
# making the Illumina-detected transcript set comparable to what can be
# detected with PacBio. Returns a data frame with columns t_ID and
# est_counts.
abund <- as.data.frame(read_delim(kallisto_file, "\t", escape_double = FALSE,
col_names = TRUE, trim_ws = TRUE, na = "NA"))
# GENCODE target IDs pack several fields separated by '|'; extract the
# ones of interest and prepend them as named columns.
id_fields <- str_split_fixed(abund$target_id, "\\|", 9)[, c(5, 6, 8, 1, 2)]
colnames(id_fields) <- c("transcript", "gene_name", "class", "t_ID", "g_ID")
abund <- cbind(id_fields, abund)
# Keep expressed transcripts only; which() also drops NA tpm rows, matching
# the behavior of subset().
abund <- abund[which(abund$tpm > 0), ]
abund[, c("t_ID", "est_counts")]
}
get_transcript_names <- function(kallisto_file) {
# Map Ensembl transcript IDs (t_ID) to human-readable transcript names by
# splitting the '|'-delimited GENCODE target_id field of a Kallisto
# abundance file. Returns a unique two-column character matrix:
# t_ID, transcript.
kallisto <- as.data.frame(read_delim(kallisto_file, "\t", escape_double = FALSE,
col_names = TRUE, trim_ws = TRUE, na = "NA"))
fields <- str_split_fixed(kallisto$target_id, "\\|", 9)[, c(5, 6, 8, 1, 2)]
colnames(fields) <- c("transcript", "gene", "class", "t_ID", "g_ID")
unique(fields[, c("t_ID", "transcript")])
}
get_transcript_lengths <- function(kallisto_file) {
# Extract transcript lengths keyed by Ensembl transcript ID (t_ID) from a
# Kallisto abundance file. The `length` column comes from Kallisto itself;
# t_ID is parsed out of the '|'-delimited GENCODE target_id. Returns a data
# frame with columns t_ID and length (one row per input row).
kallisto <- as.data.frame(read_delim(kallisto_file, "\t", escape_double = FALSE,
col_names = TRUE, trim_ws = TRUE, na = "NA"))
fields <- str_split_fixed(kallisto$target_id, "\\|", 9)[, c(5, 6, 8, 1, 2)]
colnames(fields) <- c("transcript", "gene", "class", "t_ID", "g_ID")
kallisto <- cbind(fields, kallisto)
kallisto[, c("t_ID", "length")]
}
# Attach every package this script depends on, silencing startup messages.
# Called once from main(); attaches for side effect only and returns NULL
# invisibly.
load_packages <- function() {
suppressPackageStartupMessages(library("ggplot2"))
suppressPackageStartupMessages(library("dplyr"))
suppressPackageStartupMessages(library("plyr"))
suppressPackageStartupMessages(library("Hmisc"))
suppressPackageStartupMessages(library("optparse"))
suppressPackageStartupMessages(library("readr"))
suppressPackageStartupMessages(library("reshape"))
suppressPackageStartupMessages(library("stringr"))
suppressPackageStartupMessages(library("data.table"))
suppressPackageStartupMessages(library("preprocessCore"))
suppressPackageStartupMessages(library("edgeR"))
# BUGFIX: the original ended with a bare `return`, which does not exit the
# function -- it evaluates to (and returns) the `return` builtin itself.
# Return NULL invisibly instead; callers ignore the value either way.
invisible(NULL)
}
parse_options <- function() {
# Define and parse the command-line interface for this script. Returns the
# optparse result list with fields: infile, datasets, illumina_kallisto_1,
# illumina_kallisto_2, color_scheme, xmax, ymax, outdir, dtype.
opts <- list(
make_option(c("--f"), action = "store", dest = "infile", default = NULL,
help = "TALON abundance file (not filtered)"),
make_option(c("--datasets"), action = "store", dest = "datasets", default = NULL,
help = "Comma-delimited list of two dataset names to include in the analysis."),
make_option(c("--ik1"), action = "store", dest = "illumina_kallisto_1", default = NULL,
help = "Rep1 Illumina Kallisto file."),
make_option(c("--ik2"), action = "store", dest = "illumina_kallisto_2", default = NULL,
help = "Rep2 Illumina Kallisto file."),
make_option(c("--color"), action = "store", dest = "color_scheme", default = NULL,
help = "blue, red, or green"),
make_option(c("--xmax"), action = "store", dest = "xmax", default = 16,
help = "Max x-value for plot (default = 16)"),
make_option(c("--ymax"), action = "store", dest = "ymax", default = 20,
help = "Max y-value for plot (default = 20)"),
make_option(c("-o", "--outdir"), action = "store", dest = "outdir", default = NULL,
help = "Output directory for plots and outfiles"),
make_option(c("--dtype"), action = "store", dest = "dtype", default = "PacBio",
help = "Datatype label to display on plot"))
parse_args(OptionParser(option_list = opts))
}
main()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docx_add.R
\name{body_add_fpar}
\alias{body_add_fpar}
\title{add fpar}
\usage{
body_add_fpar(x, value, style = NULL, pos = "after")
}
\arguments{
\item{x}{a docx device}
\item{value}{a character}
\item{style}{paragraph style. If NULL, paragraph settings from \code{fpar} will be used. If not
NULL, it must be a paragraph style name (located in the template
provided as \code{read_docx(path = ...)}); in that case, paragraph settings from \code{fpar} will be
ignored.}
\item{pos}{where to add the new element relative to the cursor,
one of "after", "before", "on".}
}
\description{
add an \code{fpar} (a formatted paragraph) into an rdocx object
}
\examples{
library(magrittr)
bold_face <- shortcuts$fp_bold(font.size = 30)
bold_redface <- update(bold_face, color = "red")
fpar_ <- fpar(ftext("Hello ", prop = bold_face),
ftext("World", prop = bold_redface ),
ftext(", how are you?", prop = bold_face ) )
doc <- read_docx() \%>\% body_add_fpar(fpar_)
print(doc, target = tempfile(fileext = ".docx"))
# a way of using fpar to center an image in a Word doc ----
rlogo <- file.path( R.home("doc"), "html", "logo.jpg" )
img_in_par <- fpar(
external_img(src = rlogo, height = 1.06/2, width = 1.39/2),
fp_p = fp_par(text.align = "center") )
read_docx() \%>\% body_add_fpar(img_in_par) \%>\%
print(target = tempfile(fileext = ".docx") )
}
\seealso{
\code{\link{fpar}}
Other functions for adding content:
\code{\link{body_add_blocks}()},
\code{\link{body_add_break}()},
\code{\link{body_add_docx}()},
\code{\link{body_add_gg}()},
\code{\link{body_add_img}()},
\code{\link{body_add_par}()},
\code{\link{body_add_table}()},
\code{\link{body_add_toc}()}
}
\concept{functions for adding content}
| /man/body_add_fpar.Rd | no_license | ldbruce-2018/officer | R | false | true | 1,804 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docx_add.R
\name{body_add_fpar}
\alias{body_add_fpar}
\title{add fpar}
\usage{
body_add_fpar(x, value, style = NULL, pos = "after")
}
\arguments{
\item{x}{a docx device}
\item{value}{a character}
\item{style}{paragraph style. If NULL, paragraph settings from \code{fpar} will be used. If not
NULL, it must be a paragraph style name (located in the template
provided as \code{read_docx(path = ...)}); in that case, paragraph settings from \code{fpar} will be
ignored.}
\item{pos}{where to add the new element relative to the cursor,
one of "after", "before", "on".}
}
\description{
add an \code{fpar} (a formatted paragraph) into an rdocx object
}
\examples{
library(magrittr)
bold_face <- shortcuts$fp_bold(font.size = 30)
bold_redface <- update(bold_face, color = "red")
fpar_ <- fpar(ftext("Hello ", prop = bold_face),
ftext("World", prop = bold_redface ),
ftext(", how are you?", prop = bold_face ) )
doc <- read_docx() \%>\% body_add_fpar(fpar_)
print(doc, target = tempfile(fileext = ".docx"))
# a way of using fpar to center an image in a Word doc ----
rlogo <- file.path( R.home("doc"), "html", "logo.jpg" )
img_in_par <- fpar(
external_img(src = rlogo, height = 1.06/2, width = 1.39/2),
fp_p = fp_par(text.align = "center") )
read_docx() \%>\% body_add_fpar(img_in_par) \%>\%
print(target = tempfile(fileext = ".docx") )
}
\seealso{
\code{\link{fpar}}
Other functions for adding content:
\code{\link{body_add_blocks}()},
\code{\link{body_add_break}()},
\code{\link{body_add_docx}()},
\code{\link{body_add_gg}()},
\code{\link{body_add_img}()},
\code{\link{body_add_par}()},
\code{\link{body_add_table}()},
\code{\link{body_add_toc}()}
}
\concept{functions for adding content}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help.R
\name{docker_container}
\alias{docker_container}
\title{Management commands for working with a particular docker container}
\description{
Methods for working with a particular docker container. Container
objects are returned by creating or running a docker container, or
by using \code{$container$get} to fetch an existing container by
name or id.
}
\details{
\Sexpr[results=rd,stage=render]{stevedore:::generate_help("docker_container")}
}
\seealso{
\code{\link{docker_container_collection}} for other
container management methods.
}
| /man/docker_container.Rd | no_license | cran/stevedore | R | false | true | 623 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help.R
\name{docker_container}
\alias{docker_container}
\title{Management commands for working with a particular docker container}
\description{
Methods for working with a particular docker container. Container
objects are returned by creating or running a docker container, or
by using \code{$container$get} to fetch an existing container by
name or id.
}
\details{
\Sexpr[results=rd,stage=render]{stevedore:::generate_help("docker_container")}
}
\seealso{
\code{\link{docker_container_collection}} for other
container management methods.
}
|
#!/usr/bin/env Rscript
# This script will run the resampling through the grid
# For each derivative image listed in z_df_filepaths.csv, a bash job script
# is generated under sge/scripts and submitted to the SGE cluster via qsub.
library(plyr)
# When TRUE, existing output files are removed and regenerated
to.force <- FALSE
# Reference image defining the 3mm target grid for 3dresample
master <- "/data/Projects/ABIDE_Initiative/CPAC/abide/for_grant/rois/standard_3mm.nii.gz"
dir.create("sge", showWarnings=F)
dir.create("sge/scripts", showWarnings=F)
dir.create("sge/logs", showWarnings=F)
# Get the paths and their parts
# Expected columns: deriv_paths, resamp_paths, strats, subjects, derivs
df <- read.csv("z_df_filepaths.csv")
n <- nrow(df)
# Create and run all the qsubs
# For each row of df: validate input/output paths, write a one-line bash job
# script that resamples the derivative onto the master grid with AFNI's
# 3dresample (nearest-neighbour), and submit it to SGE via qsub.
l_ply(seq_len(n), function(i) {
dfi <- df[i,]
dfi$deriv_paths <- as.character(dfi$deriv_paths)
dfi$resamp_paths <- as.character(dfi$resamp_paths)
# check input
if (!file.exists(dfi$deriv_paths)) {
cat("Input path", dfi$deriv_paths, "doesn't exist\n")
# BUGFIX: was a bare `return`, which does NOT exit the function in R --
# missing inputs were not actually skipped.
return(NULL)
}
# check output
if (file.exists(dfi$resamp_paths)) {
if (to.force) {
cat("Output path", dfi$resamp_paths, "already exists...removing!\n")
file.remove(dfi$resamp_paths)
} else {
cat("Output path", dfi$resamp_paths, "already exists...skipping\n")
# BUGFIX: same bare-`return` bug -- existing outputs were resubmitted
# instead of being skipped.
return(NULL)
}
}
# Per-job names for the log file and the generated bash script
prefix <- sprintf("resample_derivs_%s_%s_%s", dfi$strats, dfi$subjects, dfi$derivs)
lfn <- sprintf("sge/logs/%s.log", prefix)
sfn <- sprintf("sge/scripts/%s.bash", prefix)
raw_cmd <- "3dresample -input %s -master %s -prefix %s -rmode NN"
cmd <- sprintf(raw_cmd, dfi$deriv_paths, master, dfi$resamp_paths)
# Write the job script. BUGFIX: shebang was "#!/usr/bash", which is not a
# valid interpreter path (qsub's -S /bin/bash masked this).
cat(
"#!/bin/bash",
"",
paste("echo", cmd),
cmd,
sep="\n",
file=sfn
)
qcmd <- sprintf("qsub -S /bin/bash -V -cwd -o %s -j y %s", lfn, sfn)
cat(qcmd, "\n")
system(qcmd)
})
| /config/50_niak/37_resample_derivatives.R | no_license | fitrialif/abide-1 | R | false | false | 1,714 | r | #!/usr/bin/env Rscript
# This script will run the resampling through the grid
library(plyr)
to.force <- FALSE
master <- "/data/Projects/ABIDE_Initiative/CPAC/abide/for_grant/rois/standard_3mm.nii.gz"
dir.create("sge", showWarnings=F)
dir.create("sge/scripts", showWarnings=F)
dir.create("sge/logs", showWarnings=F)
# Get the paths and their parts
df <- read.csv("z_df_filepaths.csv")
n <- nrow(df)
# Create and run all the qsubs
l_ply(1:n, function(i) {
dfi <- df[i,]
dfi$deriv_paths <- as.character(dfi$deriv_paths)
dfi$resamp_paths <- as.character(dfi$resamp_paths)
# check input
if (!file.exists(dfi$deriv_paths)) {
cat("Input path", dfi$deriv_paths, "doesn't exist\n")
return
}
# check output
if (file.exists(dfi$resamp_paths)) {
if (to.force) {
cat("Output path", dfi$resamp_paths, "already exists...removing!\n")
file.remove(dfi$resamp_paths)
} else {
cat("Output path", dfi$resamp_paths, "already exists...skipping\n")
return
}
}
prefix <- sprintf("resample_derivs_%s_%s_%s", dfi$strats, dfi$subjects, dfi$derivs)
lfn <- sprintf("sge/logs/%s.log", prefix)
sfn <- sprintf("sge/scripts/%s.bash", prefix)
raw_cmd <- "3dresample -input %s -master %s -prefix %s -rmode NN"
cmd <- sprintf(raw_cmd, dfi$deriv_paths, master, dfi$resamp_paths)
cat(
"#!/usr/bash",
"",
paste("echo", cmd),
cmd,
sep="\n",
file=sfn
)
qcmd <- sprintf("qsub -S /bin/bash -V -cwd -o %s -j y %s", lfn, sfn)
cat(qcmd, "\n")
system(qcmd)
})
|
library(mbbefd)
# Summarise a sample: six-number summary plus standard deviation and the
# expected total loss (etl, from mbbefd).
testfunc <- function(x)
c(summary(x), sd=sd(x), tl=etl(x))
# Manual toggle: flip to TRUE to run the large-sample (n = 1e6) comparisons
# between the C++ and R RNG implementations; the second assignment keeps the
# quick mode as the default.
extensive <- TRUE
extensive <- FALSE
# test invalid param
# Both implementations should reject these parameter combinations the same way.
n <- 5
a <- 0
b <- -1/2
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
g <- 1/2
b <- 3
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
#test of MBBEFD(a,b) distribution
n <- 10
a <- 0
b <- 1/2
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rmbbefdCpp(n, a, b)))
print(testfunc(mbbefd:::rmbbefdR(n, a, b)))
}
a <- 1/2
b <- 1
n <- 10
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rmbbefdCpp(n, a, b)))
print(testfunc(mbbefd:::rmbbefdR(n, a, b)))
}
a <- -1/2
b <- 3
n <- 10
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rmbbefdCpp(n, a, b)))
print(testfunc(mbbefd:::rmbbefdR(n, a, b)))
}
a <- Inf
b <- 1/3
n <- 10
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rmbbefdCpp(n, a, b)))
print(testfunc(mbbefd:::rmbbefdR(n, a, b)))
}
#test of MBBEFD(g,b) distribution
n <- 10
g <- 1
b <- 1/2
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
}
n <- 10
g <- 2
b <- 0
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
}
n <- 10
g <- 2
b <- 1/2
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
}
n <- 10
g <- 2
b <- 1
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
}
n <- 10
g <- 2
b <- 3
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
} | /mbbefd/tests/test-rng-mbbefd.R | no_license | ingted/R-Examples | R | false | false | 2,360 | r | library(mbbefd)
testfunc <- function(x)
c(summary(x), sd=sd(x), tl=etl(x))
extensive <- TRUE
extensive <- FALSE
# test invalid param
n <- 5
a <- 0
b <- -1/2
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
g <- 1/2
b <- 3
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
#test of MBBEFD(a,b) distribution
n <- 10
a <- 0
b <- 1/2
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rmbbefdCpp(n, a, b)))
print(testfunc(mbbefd:::rmbbefdR(n, a, b)))
}
a <- 1/2
b <- 1
n <- 10
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rmbbefdCpp(n, a, b)))
print(testfunc(mbbefd:::rmbbefdR(n, a, b)))
}
a <- -1/2
b <- 3
n <- 10
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rmbbefdCpp(n, a, b)))
print(testfunc(mbbefd:::rmbbefdR(n, a, b)))
}
a <- Inf
b <- 1/3
n <- 10
mbbefd:::rmbbefdCpp(n, a, b)
mbbefd:::rmbbefdR(n, a, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rmbbefdCpp(n, a, b)))
print(testfunc(mbbefd:::rmbbefdR(n, a, b)))
}
#test of MBBEFD(g,b) distribution
n <- 10
g <- 1
b <- 1/2
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
}
n <- 10
g <- 2
b <- 0
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
}
n <- 10
g <- 2
b <- 1/2
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
}
n <- 10
g <- 2
b <- 1
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
}
n <- 10
g <- 2
b <- 3
mbbefd:::rMBBEFDCpp(n, g, b)
mbbefd:::rMBBEFDR(n, g, b)
if(extensive)
{
n <- 1e6
print(testfunc(mbbefd:::rMBBEFDCpp(n, g, b)))
print(testfunc(mbbefd:::rMBBEFDR(n, g, b)))
} |
#You must have the source file "household_power_consumption.txt" in your workdir; getwd()
#Read Data (semicolon-separated; missing values are coded as "?" in this
#dataset, which is why Global_active_power arrives as a factor/character)
Data <- read.table ("household_power_consumption.txt",header=T,sep=";")
#Create one column called Date2 and Change Datatype
Data$Date2 <- as.Date(Data$Date,"%d/%m/%Y")
#Create a subset of data: keep only 2007-02-01 and 2007-02-02
SubsetData <- subset(Data, Date2 == "2007-02-01" | Date2 == "2007-02-02")
#Convert Column "Global_active_power" as numeric
#(as.character first so factor levels are parsed, not their integer codes)
SubsetData$Global_active_power <- as.numeric(as.character(SubsetData$Global_active_power))
#Create Graphic
hist(SubsetData$Global_active_power , col="red", main="Global Active Power",xlab="Global Active Power (kilowatts)")
# Copy my plot to a plot1.PNG file (copies the on-screen device to a
# 480x480 PNG, then closes the PNG device)
dev.copy(png, file = "plot1.png" ,width = 480, height = 480)
dev.off()
#Read Data
Data <- read.table ("household_power_consumption.txt",header=T,sep=";")
#Create one column called Date2 and Change Datatype
Data$Date2 <- as.Date(Data$Date,"%d/%m/%Y")
#Create a subset of data
SubsetData <- subset(Data, Date2 == "2007-02-01" | Date2 == "2007-02-02")
#Convert Column "Global_active_power" as numeric
SubsetData$Global_active_power <- as.numeric(as.character(SubsetData$Global_active_power))
#Create Graphic
hist(SubsetData$Global_active_power , col="red", main="Global Active Power",xlab="Global Active Power (kilowatts)")
# Copy my plot to a plot1.PNG file
dev.copy(png, file = "plot1.png" ,width = 480, height = 480)
dev.off() |
# Project: Getting and Cleaning Data -- UCI HAR dataset tidy-up
# 2015-02-22
# Bryan Urban
library(tidyr)
library(data.table)
## GET RAW DATA
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
"dataset.zip", mode="wb")
# Unzip all files to "data" folder in working directory
# NOTE(review): the unzip step itself is not scripted here -- dataset.zip
# appears to require manual extraction into ./data before the reads below.
## 0. Read Test Data
# sep="" splits on any run of whitespace, matching the fixed-width-ish files
test <- read.csv("data/test/X_test.txt", header=FALSE, sep="")
test_labels <- read.csv("data/test/y_test.txt", header=FALSE, sep="")
test_subject <- read.csv("data/test/subject_test.txt", header=FALSE, sep="")
train <- read.csv("data/train/X_train.txt", header=FALSE, sep="")
train_labels <- read.csv("data/train/y_train.txt", header=FALSE, sep="")
train_subject <- read.csv("data/train/subject_train.txt", header=FALSE, sep="")
features <- read.csv("data/features.txt", header=FALSE, sep="")
## PROCESS DATA
## 1. Merge Data into One Big Data Set (test rows first, then train)
all_data <- rbind(test, train)
all_labels <- rbind(test_labels, train_labels) # activity labels
all_subjects <- rbind(test_subject, train_subject) # subject IDs
## 2. Extract measurements of mean and standard deviation on each measurement
# find columns whose feature name (features column 2) includes a mean or sd
mean_ind <- grep("*mean*", features[,2], ignore.case=TRUE)
std_ind <- grep("*std*", features[,2], ignore.case=TRUE)
keep_ind <- c(mean_ind, std_ind)
ind_labels <- as.character(features[keep_ind,2])
# keep only the selected columns
sub_data <- all_data[,keep_ind] # 86 variables kept
sub_data$subject <- unlist(all_subjects) # add subject ID
## 3. Use descriptive activity names
sub_data$activity <- all_labels
# convert numeric activity labels to text
# (index of activity_key corresponds to the numeric code 1-6 in y_*.txt)
activity_key <-
c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
"SITTING", "STANDING", "LAYING")
sub_data$activity <- activity_key[unlist(sub_data$activity)]
## 4. Label the dataset with descriptive column names
# make.names turns characters like "(" and "-" into dots
colnames <- make.names(names=ind_labels,unique=TRUE,allow_=TRUE)
# rid triple, double, and trailing dots
colnames <- gsub("[.]{3}", ".", colnames)
colnames <- gsub("[.]{2}", ".", colnames) #
colnames <- gsub("[.]$", "", colnames) #
# assign names to dataset (measurement columns only; subject/activity keep theirs)
names(sub_data)[1:length(ind_labels)] <- colnames
## 5. Make tidy dataset
td <- data.table(sub_data)
# calculate mean for each subject and activity
td <- td[, lapply(.SD,mean), by=list(subject, activity)]
write.table(td, "tidy.txt" ,row.name=FALSE) | /run_analysis.R | no_license | bjurban/getdata-p3 | R | false | false | 2,360 | r | # Project
# 2015-02-22
# Bryan Urban
library(tidyr)
library(data.table)
## GET RAW DATA
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
"dataset.zip", mode="wb")
# Unzip all files to "data" folder in working directory
## 0. Read Test Data
test <- read.csv("data/test/X_test.txt", header=FALSE, sep="")
test_labels <- read.csv("data/test/y_test.txt", header=FALSE, sep="")
test_subject <- read.csv("data/test/subject_test.txt", header=FALSE, sep="")
train <- read.csv("data/train/X_train.txt", header=FALSE, sep="")
train_labels <- read.csv("data/train/y_train.txt", header=FALSE, sep="")
train_subject <- read.csv("data/train/subject_train.txt", header=FALSE, sep="")
features <- read.csv("data/features.txt", header=FALSE, sep="")
## PROCESS DATA
## 1. Merge Data into One Big Data Set
all_data <- rbind(test, train)
all_labels <- rbind(test_labels, train_labels) # activity labels
all_subjects <- rbind(test_subject, train_subject) # subject IDs
## 2. Extract measurements of mean and standard deviation on each measurement
# find columns that inclue a mean or sd
mean_ind <- grep("*mean*", features[,2], ignore.case=TRUE)
std_ind <- grep("*std*", features[,2], ignore.case=TRUE)
keep_ind <- c(mean_ind, std_ind)
ind_labels <- as.character(features[keep_ind,2])
# keep only the selected columns
sub_data <- all_data[,keep_ind] # 86 variables kept
sub_data$subject <- unlist(all_subjects) # add subject ID
## 3. Use descriptive activity names
sub_data$activity <- all_labels
# convert numeric activity labels to text
activity_key <-
c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
"SITTING", "STANDING", "LAYING")
sub_data$activity <- activity_key[unlist(sub_data$activity)]
## 4. Label the dataset with descriptive column names
colnames <- make.names(names=ind_labels,unique=TRUE,allow_=TRUE)
# rid triple, double, and trailing dots
colnames <- gsub("[.]{3}", ".", colnames)
colnames <- gsub("[.]{2}", ".", colnames) #
colnames <- gsub("[.]$", "", colnames) #
# assign names to dataset
names(sub_data)[1:length(ind_labels)] <- colnames
## 5. Make tidy dataset
td <- data.table(sub_data)
# calculate mean for each subject and activity
td <- td[, lapply(.SD,mean), by=list(subject, activity)]
write.table(td, "tidy.txt" ,row.name=FALSE) |
# Build the packaged KICH dataset: read the TCGA KICH splice-PSI table
# (read_tsv transparently decompresses the .zip) and save it as an .rda for
# lazy-loading from data/.
# NOTE(review): assumes readr is attached by the data-raw workflow -- confirm.
KICH <- read_tsv("data-raw/PSI_download_KICH.zip")
save(KICH, file = "data/KICH.rda")
| /data-raw/KICH.R | no_license | SiYangming/TCGAspliceseqData | R | false | false | 86 | r | KICH <- read_tsv("data-raw/PSI_download_KICH.zip")
save(KICH, file = "data/KICH.rda")
|
\name{phewasMetaModels}
\alias{phewasMetaModels}
\title{
Perform meta-analysis of PheWAS results
}
\description{
This function wraps the \code{\link[meta:metagen]{meta package's metagen}} function to meta analyze \code{phewas} results.
}
\usage{
phewasMetaModels(results, ...)
}
\arguments{
\item{results}{
Data frame containing \code{phewas} results. This data frame should include an additional column, \code{study}, identifying which study the results are for. See details and examples for more information.
}
\item{\dots}{
Additional parameters to be passed to \code{metagen}.
}
}
\details{
This function uses \code{by} to split \code{results} into groups of studies based on \code{phenotype, snp, } and \code{adjustment}. \code{phewasMeta} forces \code{NA} adjustment values to be a character string "NA" due to \code{by} restrictions on the \code{INDICES}.
Note that \code{update.meta} can be used to alter individual \code{metagen} objects.
}
\value{
A \code{by} object with named entries containing the meta analysis models. Each entry is named in the form `phenotype snp adjustment`.
}
\seealso{
\code{\link[PheWAS:phewasMeta]{phewasMeta}} will return a data frame of results similar to \code{phewas} output.
\code{\link[meta:update.meta]{update.meta}} will allow users to alter returned \code{metagen} objects.
}
\examples{
#Generate some example data
ex=generateExample(hit="335")
#Extract the two parts from the returned list
id.icd9.count=ex$id.icd9.count
genotypes=ex$genotypes
#Create the PheWAS code table- translates the icd9s, adds exclusions, and reshapes to a wide format
phenotypes=createPhewasTable(id.icd9.count,fast=TRUE)
#Run the PheWAS
results.1=phewas(phenotypes,genotypes,cores=4,significance.threshold=c("bonferroni"))
#Set up a study identifier
results.1$study="335"
#Perform another PheWAS
ex=generateExample(hit="250.2")
id.icd9.count=ex$id.icd9.count
genotypes=ex$genotypes
phenotypes=createPhewasTable(id.icd9.count,fast=TRUE)
results.2=phewas(phenotypes,genotypes,cores=4,significance.threshold=c("bonferroni"))
results.2$study="250.2"
#Join the two sets of PheWAS results
results=rbind(results.1,results.2)
#Perform the meta analysis, and do not assume fixed effects.
results.meta=phewasMetaModels(results, fixed=FALSE, keep.both=FALSE)
results.meta$`250.2 rsEXAMPLE NA`
}
\keyword{ htest }
| /man/phewasMetaModels.Rd | no_license | yaomin/PheWAS | R | false | false | 2,333 | rd | \name{phewasMetaModels}
\alias{phewasMetaModels}
\title{
Perform meta-analysis of PheWAS results
}
\description{
This function wraps the \code{\link[meta:metagen]{meta package's metagen}} function to meta analyze \code{phewas} results.
}
\usage{
phewasMetaModels(results, ...)
}
\arguments{
\item{results}{
Data frame containing \code{phewas} results. This data frame should include an additional column, \code{study}, identifying which study the results are for. See details and examples for more information.
}
\item{\dots}{
Additional parameters to be passed to \code{metagen}.
}
}
\details{
This function uses \code{by} to split \code{results} into groups of studies based on \code{phenotype, snp, } and \code{adjustment}. \code{phewasMeta} forces \code{NA} adjustment values to be a character string "NA" due to \code{by} restrictions on the \code{INDICES}.
Note that \code{update.meta} can be used to alter individual \code{metagen} objects.
}
\value{
A \code{by} object with named entries containing the meta analysis models. Each entry is named in the form `phenotype snp adjustment`.
}
\seealso{
\code{\link[PheWAS:phewasMeta]{phewasMeta}} will return a data frame of results similar to \code{phewas} output.
\code{\link[meta:update.meta]{update.meta}} will allow users to alter returned \code{metagen} objects.
}
\examples{
#Generate some example data
ex=generateExample(hit="335")
#Extract the two parts from the returned list
id.icd9.count=ex$id.icd9.count
genotypes=ex$genotypes
#Create the PheWAS code table- translates the icd9s, adds exclusions, and reshapes to a wide format
phenotypes=createPhewasTable(id.icd9.count,fast=TRUE)
#Run the PheWAS
results.1=phewas(phenotypes,genotypes,cores=4,significance.threshold=c("bonferroni"))
#Set up a study identifier
results.1$study="335"
#Perform another PheWAS
ex=generateExample(hit="250.2")
id.icd9.count=ex$id.icd9.count
genotypes=ex$genotypes
phenotypes=createPhewasTable(id.icd9.count,fast=TRUE)
results.2=phewas(phenotypes,genotypes,cores=4,significance.threshold=c("bonferroni"))
results.2$study="250.2"
#Join the two sets of PheWAS results
results=rbind(results.1,results.2)
#Perform the meta analysis, and do not assume fixed effects.
results.meta=phewasMetaModels(results, fixed=FALSE, keep.both=FALSE)
results.meta$`250.2 rsEXAMPLE NA`
}
\keyword{ htest }
|
density.a2 <- function(a2, methyl.level.pos)
{
  # Log of the marginal distribution of the emission hyper-parameter a2,
  # evaluated against the methylation levels of the control samples.
  #
  # a2               -- emission hyper-parameter; the expression vectorizes,
  #                     so a vector of candidate values is also accepted
  # methyl.level.pos -- vector of methylation levels; NA entries are dropped
  #
  # Returns m*log(a2) + (a2 - 1)*sum(log(x)), where x are the non-NA
  # methylation levels and m = length(x).
  x <- methyl.level.pos[!is.na(methyl.level.pos)]
  length(x) * log(a2) + (a2 - 1) * sum(log(x))
}
| /HMM.DM.code/density.a2.R | no_license | Maria-831/HMM-DM | R | false | false | 719 | r |
density.a2 <- function (a2, methyl.level.pos )
{
  ###############################################################
  # Log of the marginal distribution of the emission hyper-parameter a2.
  #
  # Arguments:
  #
  #   a2               Emission hyper-parameter (the arithmetic below
  #                    vectorizes over a2, so a grid of candidate values
  #                    can be passed as well)
  #   methyl.level.pos Vector of methylation levels of the control
  #                    samples; NA entries are dropped before use
  #
  # Returns m*log(a2) + (a2-1)*sum(log(x)), where x are the non-NA
  # levels and m = length(x).
  ###############################################################
  methyl.nonNA.pos<-methyl.level.pos[!is.na(methyl.level.pos)]  # drop NAs
  m <- length( methyl.nonNA.pos)  # number of usable observations
  first.part <- m*log(a2)
  second.part <- (a2-1)*sum(log(methyl.nonNA.pos))
  log.prob <- first.part + second.part
  return (log.prob)
}
|
# Seasonal plots of two classic monthly time series; ggseasonplot() comes
# from the 'forecast' package, the series from R's built-in 'datasets'.
library(ggplot2)
library(forecast)
theme_set(theme_classic())
# Subset data
nottem_small <- window(nottem, start=c(1920, 1), end=c(1925, 12)) # subset a smaller timewindow
# Plot
ggseasonplot(AirPassengers) + labs(title="Seasonal plot: International Airline Passengers")
ggseasonplot(nottem_small) + labs(title="Seasonal plot: Air temperatures at Nottingham Castle")
| /seasonal plot.R | no_license | y1220/R-practice | R | false | false | 370 | r | library(ggplot2)
# Seasonal plots of two classic monthly time series.
# NOTE(review): theme_set()/labs() are ggplot2 functions but library(ggplot2)
# is not visible here -- presumably attached upstream; confirm.
library(forecast)
theme_set(theme_classic())
# Subset data
nottem_small <- window(nottem, start=c(1920, 1), end=c(1925, 12)) # subset a smaller timewindow
# Plot
ggseasonplot(AirPassengers) + labs(title="Seasonal plot: International Airline Passengers")
ggseasonplot(nottem_small) + labs(title="Seasonal plot: Air temperatures at Nottingham Castle")
|
# Various functions
# Change log:
# 2015-07-15: lineplot now accepts factors as the first effect.
# Stat functions---------
#scaled mass index as a measure for body condition according to Peig 2009
# Scaled mass index (Peig & Green 2009): standardizes each mass M to the
# reference size L0 = median(L) using the slope bSMA of the standardized
# major axis (SMA) regression of log(M) on log(L).
#
#   M -- vector of body masses
#   L -- vector of the corresponding linear size measurements
#
# Returns a vector of condition scores of the same length as M.  Also draws
# a diagnostic plot of log(M) against log(L) as a side effect.
smi <- function (M, L)
{
  plot(log(M) ~ log(L))  # diagnostic: the relationship should look linear
  if (requireNamespace("smatr", quietly = TRUE)) {
    # preferred: fit the SMA slope directly (requireNamespace avoids the
    # attach side effect of require() inside a function)
    SMA <- smatr::sma(log(M) ~ log(L))
    bSMA <- coef(SMA)[2]
  } else {
    # fallback: SMA slope = OLS slope divided by Pearson's r
    OLS <- lm(log(M) ~ log(L))
    bOLS <- coef(OLS)[2]
    r <- cor.test(~ log(M) + log(L), method = "pearson")$estimate
    bSMA <- bOLS / r
  }
  L0 <- median(L, na.rm = TRUE)  # reference body size (was na.rm = T)
  M * ((L0 / L)^bSMA)
}
# standard error of mean
se <- function(x) {
  # SE of the mean = sd / sqrt(n), counting only non-missing values.
  n <- length(na.omit(x))
  sd(x, na.rm = TRUE) / sqrt(n)
}
#z scores, aka standardized values with 0 mean and unit SD
z <- function(x) {
  # Center to mean 0 and scale to unit SD, ignoring NAs.
  mu <- mean(x, na.rm = TRUE)
  sigma <- sd(x, na.rm = TRUE)
  (x - mu) / sigma
}
#Repeatability calculation----------
# following Lessells & Boag 1994
# S3 generic; dispatches to rpt.formula (formula interface) or rpt.default
# (data/response/effect interface) defined below.
rpt <- function(x, ...) UseMethod("rpt")
# Repeatability (intraclass correlation) from a one-way ANOVA, following
# Lessells & Boag (1994).  Formula interface: rpt(response ~ group, data).
# The grouping variable is coerced to a factor.  Called for its printed
# output (r plus the ANOVA F, df and p); the cat() result (NULL) is returned.
rpt.formula <- function(formula, data)
{
  # Require exactly "response ~ one term".
  if (missing(formula) || (length(formula) != 3L) || (length(attr(terms(formula[-2L]),
    "term.labels")) != 1L))
    stop("'formula' missing or incorrect")
  mf <- model.frame(formula = formula, data = data)
  resp.index <- attr(attr(mf, "terms"), "response")
  eff <- factor(mf[[-resp.index]])
  resp <- mf[[resp.index]]
  #resp <- model.response(mf) #the same
  model = lm(resp~eff)
  # Among- and within-group mean squares from the one-way ANOVA.
  MSa <- anova(model)["Mean Sq"]["eff",]
  MSw <- anova(model)["Mean Sq"]["Residuals",]
  n <- table(eff)           # group sizes
  K <- length(levels(eff))  # number of groups
  M <- sum(n)               # total sample size
  # n0: effective (weighted) group size for unbalanced designs.
  n0 <- (1/(K-1))* (M - (sum(n^2)/M))
  s2 <- MSw                 # within-group variance component
  s2A <- (MSa-MSw)/n0       # among-group variance component
  r <- s2A/(s2 + s2A)       # repeatability
  p <- round(anova(model)["Pr(>F)"]["eff",],3)
  F <- round(anova(model)["F value"]["eff",], 3)
  Fdf1 <- anova(model)["Df"]["eff",]
  Fdf2 <- anova(model)["Df"]["Residuals",]
  # p is rounded to 3 digits, so p == 0 here means p < 0.001.
  {if (p == 0) cat("r = ", round(r,3), ", F", Fdf1, ",", Fdf2, "= ", F, ", p < 0.001", "\n", sep = "")
    else cat("r = ", round(r,3), ", F", Fdf1, ",", Fdf2, "= ", F, ", p = ", p, "\n", sep = "")
  }
}
# Default method: rpt(data, response, effect) with unquoted column names,
# evaluated inside `data`.  Same Lessells & Boag (1994) calculation and
# printed output as rpt.formula.
rpt.default <- function (data, response, effect)
{
  m <- match.call(expand.dots = FALSE)
  # Evaluate the unquoted column names inside the supplied data frame.
  eff = eval(m$effect, data)
  resp = eval(m$response, data)
  if (is.factor(eff) != TRUE) eff = factor(eff)
  model = lm(resp~eff, data = data)
  # Among- and within-group mean squares from the one-way ANOVA.
  MSa <- anova(model)["Mean Sq"]["eff",]
  MSw <- anova(model)["Mean Sq"]["Residuals",]
  n <- table(eff)           # group sizes
  K <- length(levels(eff))  # number of groups
  M <- sum(n)               # total sample size
  # n0: effective (weighted) group size for unbalanced designs.
  n0 <- (1/(K-1))* (M - (sum(n^2)/M))
  s2 <- MSw                 # within-group variance component
  s2A <- (MSa-MSw)/n0       # among-group variance component
  r <- s2A/(s2 + s2A)       # repeatability
  p <- round(anova(model)["Pr(>F)"]["eff",],3)
  F <- round(anova(model)["F value"]["eff",], 3)
  Fdf1 <- anova(model)["Df"]["eff",]
  Fdf2 <- anova(model)["Df"]["Residuals",]
  # p is rounded to 3 digits, so p == 0 here means p < 0.001.
  {if (p == 0) cat("r = ", round(r,3), ", F", Fdf1, ",", Fdf2, "= ", F, ", p < 0.001", "\n", sep = "")
    else cat("r = ", round(r,3), ", F", Fdf1, ",", Fdf2, "= ", F, ", p = ", p, "\n", sep = "")
  }
}
#--------------Errorbar function to display mean+CI----------------------------
# Plot group means of `response` with error bars, for one grouping factor
# (means along the x axis) or two (levels of factor2 drawn as horizontally
# offset series with a legend).  `error.bars` selects the bar half-width:
# standard error, standard deviation, a `level` confidence interval, or
# none.  `levs`/`levs2` override axis/legend labels; x.posn/y.posn place
# the legend (0 = automatic).  Called for its plot side effect; returns
# invisible(NULL).  The pch/lty/col defaults refer to n.levs.2, which is
# computed inside the two-factor branch before those defaults are first
# used (R lazy evaluation).
errorbars<-function (response, factor1, factor2, error.bars = c("se", "sd",
  "conf.int", "none"), level = 0.95, xlab = deparse(substitute(factor1)),
  ylab = paste("mean of", deparse(substitute(response))),
  legend.lab = deparse(substitute(factor2)), main = "",
  pch = 1:n.levs.2, lty = 1:n.levs.2, col= 1:n.levs.2,
  levs="", levs2="", x.posn=0, y.posn=0, bty="l", l.bty="n", hatwidth = 0.3, ...)
{
  if (!is.numeric(response))
    stop("Argument response must be numeric.")
  # Force the default labels now, before the arguments they deparse are
  # subset below.
  xlab
  ylab
  legend.lab
  error.bars <- match.arg(error.bars)
  if (missing(factor2)) {
    # --- one-factor case ---
    if (!is.factor(factor1))
      stop("Argument factor1 must be a factor.")
    valid <- complete.cases(factor1, response)  # drop rows with NAs
    factor1 <- factor1[valid]
    response <- response[valid]
    means <- tapply(response, factor1, mean)
    sds <- tapply(response, factor1, sd)
    ns <- tapply(response, factor1, length)
    if (error.bars == "se")
      sds <- sds/sqrt(ns)
    if (error.bars == "conf.int")
      sds <- qt((1 - level)/2, df = ns - 1, lower.tail = FALSE) *
        sds/sqrt(ns)
    # y range padded by 10% beyond the outermost error bars.
    yrange <- if (error.bars != "none")
      c(0.9*min(means - sds),1.1*max(means + sds))
    else range(means)
    if (levs[1] == "")
      levs <- levels(factor1)
    n.levs <- length(levels(factor1))
    plot(c(1, n.levs), xlim=c(0.5,n.levs+.5),yrange, type = "n",
      xlab = xlab, ylab = ylab,
      axes = FALSE, main = main)
    points(1:n.levs, means, type = "p")
    box(bty=bty)
    axis(2)
    axis(1, at = 1:n.levs, labels = levs)
    if (error.bars != "none")
      arrows(1:n.levs, means - sds, 1:n.levs, means + sds,
        angle = 90, lty = 1, code = 3, length = hatwidth)
  }
  else {
    # --- two-factor case: one offset series per level of factor2 ---
    if (!(is.factor(factor1) | is.factor(factor2)))
      stop("Arguments factor1 and factor2 must be factors.")
    valid <- complete.cases(factor1, factor2, response)
    factor1 <- factor1[valid]
    factor2 <- factor2[valid]
    response <- response[valid]
    means <- tapply(response, list(factor1, factor2), mean)
    sds <- tapply(response, list(factor1, factor2), sd)
    ns <- tapply(response, list(factor1, factor2), length)
    if (error.bars == "se")
      sds <- sds/sqrt(ns)
    if (error.bars == "conf.int")
      sds <- qt((1 - level)/2, df = ns - 1, lower.tail = FALSE) *
        sds/sqrt(ns)
    yrange <- if (error.bars != "none")
      c(0.9*min(means - sds), 1.1*max(means + sds))
    else range(means)
    if (levs[1] == "")
      levs=levels(factor1)
    levs.1 <- levels(factor1)
    levs.2 <- levels(factor2)
    n.levs.1 <- length(levs.1)
    n.levs.2 <- length(levs.2)
    plot(c(1, n.levs.1 + .5), yrange, type = "n", xlab = xlab,
      ylab = ylab, axes = FALSE, main = main)
    box(bty=bty)
    axis(2)
    axis(1, at = 1:n.levs.1+n.levs.2*.05, labels = levs)
    for (i in 1:n.levs.2) {
      # Series i is shifted right by 0.1*(i-1) so the bars do not overlap.
      points(1:n.levs.1+0.1*(i-1), means[, i], type = "p", pch = pch[i],
        col = col[i],lty = lty[i])
      if (error.bars != "none")
        arrows(1:n.levs.1+.1*(i-1), means[, i] - sds[, i],
          1:n.levs.1+.1*(i-1), col = col[i],
          means[, i] + sds[, i], angle = 90, code = 3,
          lty = lty[i], length = hatwidth)
    }
    # Legend position defaults to just right of the last group, 90% up the
    # plotting region.
    if (x.posn==0)
      x.posn <- n.levs.1 + 0.3
    if (y.posn==0)
      y.posn <- sum(c(0.1, 0.9) * par("usr")[c(3, 4)])
    text(x.posn, y.posn, legend.lab, adj = c(0, -0.5))
    if (levs2[1]=="") levs2=levs.2
    legend(x.posn, y.posn, levs2, pch = pch, col=col,
      lty = lty, bty=l.bty)
  }
  invisible(NULL)
}
#Matched plot--------
# where the response is plotted with connected lines by grouping variable
# Usage: matched(response ~ effect + id, data).  Each level of `id` becomes
# one line connecting that case's response across the levels of `effect`.
# Overlapping identical segments are drawn thicker (lwd = count * lwd.cor)
# unless enlarge.overlapped = FALSE; line ends are labelled with the id
# unless label = FALSE.  Called for its plot side effect.
matched <- function(formula, data, subset = NULL, space1 = 0.25, space2 = 0.25, label = TRUE, xlim = NULL, ylab = NULL, xlab = NULL, enlarge.overlapped = TRUE, lwd.cor = 1, ...)
{
  if (missing(formula)) stop("'formula' missing or incorrect")
  modf <- match.call()
  if (missing(data)) data <- environment(formula)
  ## evaluate and install the model frame
  m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"),
    names(modf), 0)
  modf <- modf[c(1, m)]
  modf$drop.unused.levels <- TRUE
  modf[[1]] <- as.name("model.frame")
  mf <- eval(modf, parent.frame())
  # Sort by id, then by the x-axis factor, so each id's rows are in order.
  mf <- mf[order(mf[[3]], mf[[2]]),]
  resp <- model.response(mf)
  effect <- factor(mf[[2]])
  group <- as.numeric(effect)  # numeric x positions 1..nlev
  nlev <- length(levels(effect))
  levs <- levels(factor(group))
  id <- factor(mf[[3]])
  if (label == TRUE) space2 = space2 + 0.25  # leave room for end labels
  {if (is.null(xlim)) xlm <- c(1-space1, nlev+space2)
    else xlm <- xlim}
  {if (is.null(xlab)) xlb <- names(mf)[2]
    else xlb <- xlab}
  {if (is.null(ylab)) ylb <- names(mf)[1]
    else ylb <- ylab}
  plot(resp~group, type = "n", xaxt = "n",
    xlim = xlm, ylab = ylb, xlab = xlb, ...)
  axis(1, at = levs, labels = levels(effect))
  D <- data.frame(id, x = group, y = resp)
  if (any(duplicated(D)))
  {warning("ID does not define unique cases, changing the line width for overlapping segments will not work.
If data contains replicated cases, it may be better to use an ID that defines unique cases, i.e.
'ID-replicate1' 'ID-replicate2', possibly using the function 'interaction'")
    enlarge.overlapped <- FALSE
  }
  Dspl <- split(D, D$id)  # one data frame per id
  sapply(Dspl, function(x) points(x$x, x$y, ...))
  # One data frame of line segments per adjacent pair of effect levels.
  segments <- lapply(1: (nlev-1), function(x) data.frame(x0 = NA, x1 = NA, y0 = NA, y1 = NA))
  for (i in 1:(nlev-1))
  {
    wide <- data.frame(x0 = sapply(Dspl, function(x) x$x[i]), x1 = sapply(Dspl, function(x) x$x[i+1]),
      y0 = sapply(Dspl, function(x) x$y[i]), y1 = sapply(Dspl, function(x) x$y[i+1]))
    recs <- do.call("paste", c(wide, sep = "\r"))
    same <- table(recs) #indicates the number of identical records (i.e. segments to draw)
    wide$same <- as.numeric(same)[match(recs,names(same))]
    wide$same[is.na(wide$x0) | is.na(wide$x1)] <- NA
    segments[[i]] <- wide
  }
  {if (enlarge.overlapped)
    sapply(segments, function(x)
      segments(x$x0, x$y0, x$x1, x$y1, lwd = x$same*lwd.cor, ...))
    else
      sapply(segments, function(x)
        segments(x$x0, x$y0, x$x1, x$y1, ...))
  }
  # Label each line at its right-hand end, jittering duplicated heights so
  # the labels stay readable.
  label.y <- sapply(Dspl, function(x) tail(x$y, 1L))
  label.y <- ifelse(duplicated(label.y), jitter(label.y), label.y)
  if (label) text(x=(nlev + space2/10), y = label.y, labels = names(Dspl), pos = 4)
}
# Bar and line plots with se error bars-------
# bars(response ~ effect [+ effect2], data): barplot of group means with
# +/- 1 standard-error bars.  With two effects the bars are grouped
# (beside = TRUE) by the second factor.  Relies on se() defined above.
# Called for its plot side effect.
bars <- function(formula, data, subset = NULL, hatwidth = 0.5, ylim = NULL,
  xlab = NULL, ylab = NULL, ...)
{
  dots <- list(...)
  mf <- match.call()
  if (missing(data)) data <- environment(formula)
  ## evaluate and install the model frame
  m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"),
    names(mf), 0)
  mf <- mf[c(1, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1]] <- as.name("model.frame")
  fr <- eval(mf, parent.frame())
  resp <- model.response(fr)
  effect <- factor(fr[[2]])
  {if (is.null(xlab)) xlb = names(fr)[2]
    else xlb = xlab}
  {if (is.null(ylab)) ylb = names(fr)[1]
    else ylb = ylab}
  {if (dim(fr)[2] == 3)
  {
    # two-factor case: grouped bars, one group per level of effect
    eff2 <- factor(fr[[3]])
    y = tapply(resp, list(eff2, effect), mean, na.rm = T)
    sem = tapply(resp, list(eff2, effect), se)
    ymax = max(y+sem)*1.1  # headroom above the tallest error bar
    {if (is.null(ylim)) yl = c(0, ymax)
      else yl = ylim
    }
    bp = barplot(y, beside = T, ylim = yl, xlab = xlb, ylab = ylb, ...)
    hw = hatwidth*0.25
    arrows(bp, y+sem, bp, y-sem, angle = 90, code = 3, length = hw)
  }
    else if (dim(fr)[2] == 2)
    {
      # one-factor case
      y = tapply(resp, effect, mean, na.rm = T)
      sem = tapply(resp, effect, se)
      ymax = max(y+sem)*1.1
      {if (is.null(ylim)) yl = c(0, ymax)
        else yl = ylim
      }
      bp = barplot(y, ylim = yl, xlab = xlb, ylab = ylb, ...)
      arrows(bp, y+sem, bp, y-sem, angle = 90, code = 3,
        length = hatwidth*1)
    }
    else stop("please use formula with one response and one or two effects, e.g. response~effect + effect2")
  }
  box(bty = "l")
}
# Lineplot now accepts factors as the first effect argument. However, when the
# first factor is factor, but has numeric values, the function uses these values instead
# of the factor levels. So for example, if time is like 1,2,4, then it will plot as 1,2,3,4
# on the x axis with no values for 3. Otherwise, it would skip 3. If this is the desired effect
# than it makes sense to rename the x variable such as something1,2,4.
# lineplot(response ~ effect [+ effect2], data): group means joined by
# lines, with +/- 1 SE bars (uses se() defined above).  With two effects,
# one line per level of effect2; `spread` offsets the series horizontally
# by `off` * mean x-step so error bars do not overlap.  Plot side effect.
lineplot <- function(formula, data, subset = NULL, hatwidth = 0.5, type = "b", pch = 19, ylim = NULL, xlab = NULL, ylab = NULL, spread = TRUE, off = 0.1, ...)
{
  dots <- list(...)
  mf <- match.call()
  if (missing(data)) data <- environment(formula)
  ## evaluate and install the model frame
  m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"),
    names(mf), 0)
  mf <- mf[c(1, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1]] <- as.name("model.frame")
  fr <- eval(mf, parent.frame())
  resp <- model.response(fr)
  effect <- fr[[2]]
  nlev1 = length(unique(effect))
  {if (is.null(xlab)) xlb = names(fr)[2]
    else xlb = xlab}
  {if (is.null(ylab)) ylb = names(fr)[1]
    else ylb = ylab}
  {if (dim(fr)[2] == 3)
  {
    # two-effect case: matrix of means, one column per level of effect2
    eff2 <- factor(fr[[3]])
    nlev2 <- length(levels(eff2))
    matr = tapply(resp, list(effect, eff2), mean, na.rm = T)
    sem = tapply(resp, list(effect, eff2), se)
  }
    else if (dim(fr)[2] == 2) {
      matr = tapply(resp, effect, mean, na.rm = T)
      sem = tapply(resp, effect, se)
      nlev2 = 1
    }
    else stop("please use formula with one response and one or two effects, e.g. response~effect + effect2")
  }
  # Numeric-looking level names are used directly as x coordinates;
  # otherwise levels are plotted at 1..nlevels and the axis is drawn by hand.
  if (!is.numeric(type.convert(dimnames(matr)[[1]], as.is = T))){
    if (is.factor(effect)) {
      x = 1:nlevels(effect)
      names(x) = levels(effect)
      xaxt = "n"
    }
    else {
      effect = factor(effect)
      x = 1:nlevels(effect)
      names(x) = levels(effect)
      xaxt = "n"
    }
  }
  else {
    x = as.numeric(dimnames(matr)[[1]])
    xaxt = "s"
  }
  y = matr
  ymin = min(y-sem)*0.9  # 10% padding beyond the outermost error bars
  ymax = max(y+sem)*1.1
  hw = hatwidth*0.25
  #check if col was specified in the main call
  if (is.null(dots$col)) color = rep(1:nlev2, each = nlev1)
  else color = rep(dots$col, each = nlev1)
  #check if ylim was specified in the main call
  if (is.null(ylim)) yl = c(ymin, ymax)
  else yl = ylim
  {if (spread)
  {
    # offset each series symmetrically around the true x positions
    dif = mean(diff(x))*off
    posit = 1:nlev2 - median(1:nlev2)
    offs = posit * dif
    X = sapply(offs, function(o) o+x)
    matplot(x=X, y=y, type = type, ylim = yl, pch = pch,
      xlab = xlb, ylab = ylb, xaxt = xaxt, ...)
    arrows(X, y+sem, X, y-sem, angle = 90, code = 3, length = hw,
      col = color)
  }
    else
    {
      matplot(x=x, y=y, type = type, ylim = yl, pch = pch,
        xlab = xlb, ylab = ylb, xaxt = xaxt, ...)
      arrows(x, y+sem, x, y-sem, angle = 90, code = 3, length = hw,
        col = color)
    }
  }
  if (xaxt == "n") {
    axis(1, at = x, labels = names(x))
  }
  box(bty = "l")
}
# Scatterplot matrix with histograms on the diagonal and Pearson
# correlations (optionally starred by p-value) in the lower triangle.
# NOTE(review): the `y` argument is accepted but never used -- presumably a
# leftover; confirm before removing.  Also note `use=` is not a cor.test()
# argument; it is silently absorbed by cor.test's `...`.
pairs2 <- function (x,y,smooth=TRUE, digits=2, prefix="", cex.cor=NULL, stars = TRUE, ...)
{
  # lower-triangle panel: correlation coefficient with significance stars
  panel.cor <- function(x, y, ...)
  {
    usr <- par("usr"); on.exit(par(usr))
    par(usr = c(0, 1, 0, 1))
    r.obj = cor.test(x, y,use="pairwise",...)
    r = as.numeric(r.obj$estimate)
    p = r.obj$p.value
    {if (stars) mystars <-
      ifelse(p < .001, "***", ifelse(p < .01, "** ", ifelse(p < .05, "* ", ifelse(p < 0.1, ".", " "))))
      else mystars <- NULL }
    # format() against a 9-decimal reference keeps digit counts consistent
    txt <- format(c(r, 0.123456789), digits=digits)[1]
    txt <- paste(prefix, txt, mystars, sep="")
    cex = ifelse(is.null(cex.cor), 0.8/strwidth(txt), cex.cor)
    cexfinal = ifelse(stars, 1.2, cex * abs(r)+0.5)
    text(0.5, 0.5, txt, cex = cexfinal)
  }
  # diagonal panel: histogram rescaled to fit the panel height
  panel.hist <- function(x)
  {
    usr <- par("usr"); on.exit(par(usr))
    par(usr = c(usr[1:2], 0, 1.5) )
    h <- hist(x, plot = FALSE)
    breaks <- h$breaks; nB <- length(breaks)
    y <- h$counts; y <- y/max(y)
    rect(breaks[-nB], 0, breaks[-1], y, col="cyan")
  }
  {if (smooth)
    pairs(x,diag.panel=panel.hist,lower.panel=panel.cor,upper.panel=panel.smooth)
    else #smooth is not true
      pairs(x,diag.panel=panel.hist,lower.panel=panel.cor)}
}
# Table formatting -----
# Format numbers as fixed-point character strings with n decimal places.
fixed_digits <- function(xs, n = 2) {
  formatC(xs, format = "f", digits = n)
}
# nice p-values for tables...
nice_pval <- function(ps, html = FALSE) {
  # Render p-values with 3 decimals; values below 0.001 are floored to
  # "<0.001" (or "< 0.001" when html = TRUE).
  floor_lab <- if (html) "< 0.001" else "<0.001"
  out <- formatC(ps, format = "f", digits = 3)
  out[ps < 0.001] <- floor_lab
  out
}
# ...and for in-text
nice_pval2 <- function(ps, html = FALSE) {
  # In-text variant: "= 0.040" style, with the same "<0.001" floor.
  floor_lab <- if (html) "< 0.001" else "<0.001"
  out <- paste0("= ", formatC(ps, format = "f", digits = 3))
  out[ps < 0.001] <- floor_lab
  out
}
# Pretty-print a model summary table: the first column is renamed "term",
# every other non-p-value column is rounded to `stat.digit` decimals (as
# character), and the p-value column is formatted with nice_pval().
# Relies on fixed_digits() and nice_pval() defined above.
nice_modtable <- function(table, stat.digit = 2, pval = "p.value"){
  if (is.matrix(table)) table <- as.data.frame(table)
  names(table)[1] <- "term"
  col.d2 <- which(!names(table) %in% c("term", pval))  # columns to round
  for (i in col.d2){
    table[,i] <- fixed_digits(table[,i], stat.digit)
  }
  table[,names(table) %in% pval] <- nice_pval(table[,names(table) %in% pval])
  table
}
nicetab <- function(x) knitr::kable(nice_modtable(broom::tidy(x)))
| /R/OwnFunctions.r | no_license | kolorado/ownr | R | false | false | 16,513 | r | # Various functions
# Change log:
# 2015-07-15: lineplot now accepts factors as the first effect.
# Stat functions---------
#scaled mass index as a measure for body condition according to Peig 2009
# Scaled mass index (Peig & Green 2009): standardizes each mass M to the
# reference size L0 = median(L) using the slope bSMA of the standardized
# major axis (SMA) regression of log(M) on log(L).
#
#   M -- vector of body masses
#   L -- vector of the corresponding linear size measurements
#
# Returns a vector of condition scores of the same length as M.  Also draws
# a diagnostic plot of log(M) against log(L) as a side effect.
smi <- function (M, L)
{
  plot(log(M) ~ log(L))  # diagnostic: the relationship should look linear
  if (requireNamespace("smatr", quietly = TRUE)) {
    # preferred: fit the SMA slope directly (requireNamespace avoids the
    # attach side effect of require() inside a function)
    SMA <- smatr::sma(log(M) ~ log(L))
    bSMA <- coef(SMA)[2]
  } else {
    # fallback: SMA slope = OLS slope divided by Pearson's r
    OLS <- lm(log(M) ~ log(L))
    bOLS <- coef(OLS)[2]
    r <- cor.test(~ log(M) + log(L), method = "pearson")$estimate
    bSMA <- bOLS / r
  }
  L0 <- median(L, na.rm = TRUE)  # reference body size (was na.rm = T)
  M * ((L0 / L)^bSMA)
}
# standard error of mean
se <- function(x) {
  # SE of the mean = sd / sqrt(n), counting only non-missing values.
  sd(x, na.rm = TRUE) / sqrt(length(na.omit(x)))
}
#z scores, aka standardized values with 0 mean and unit SD
z <- function(x) {
  # Subtract the (NA-ignoring) mean, divide by the (NA-ignoring) SD.
  (x - mean(x, na.rm = TRUE)) / sd(x, na.rm = TRUE)
}
#Repeatability calculation----------
# following Lessells & Boag 1994
# S3 generic; dispatches to rpt.formula (formula interface) or rpt.default
# (data/response/effect interface) defined below.
rpt <- function(x, ...) UseMethod("rpt")
rpt.formula <- function(formula, data)
{
if (missing(formula) || (length(formula) != 3L) || (length(attr(terms(formula[-2L]),
"term.labels")) != 1L))
stop("'formula' missing or incorrect")
mf <- model.frame(formula = formula, data = data)
resp.index <- attr(attr(mf, "terms"), "response")
eff <- factor(mf[[-resp.index]])
resp <- mf[[resp.index]]
#resp <- model.response(mf) #the same
model = lm(resp~eff)
MSa <- anova(model)["Mean Sq"]["eff",]
MSw <- anova(model)["Mean Sq"]["Residuals",]
n <- table(eff)
K <- length(levels(eff))
M <- sum(n)
n0 <- (1/(K-1))* (M - (sum(n^2)/M))
s2 <- MSw
s2A <- (MSa-MSw)/n0
r <- s2A/(s2 + s2A)
p <- round(anova(model)["Pr(>F)"]["eff",],3)
F <- round(anova(model)["F value"]["eff",], 3)
Fdf1 <- anova(model)["Df"]["eff",]
Fdf2 <- anova(model)["Df"]["Residuals",]
{if (p == 0) cat("r = ", round(r,3), ", F", Fdf1, ",", Fdf2, "= ", F, ", p < 0.001", "\n", sep = "")
else cat("r = ", round(r,3), ", F", Fdf1, ",", Fdf2, "= ", F, ", p = ", p, "\n", sep = "")
}
}
rpt.default <- function (data, response, effect)
{
m <- match.call(expand.dots = FALSE)
eff = eval(m$effect, data)
resp = eval(m$response, data)
if (is.factor(eff) != TRUE) eff = factor(eff)
model = lm(resp~eff, data = data)
MSa <- anova(model)["Mean Sq"]["eff",]
MSw <- anova(model)["Mean Sq"]["Residuals",]
n <- table(eff)
K <- length(levels(eff))
M <- sum(n)
n0 <- (1/(K-1))* (M - (sum(n^2)/M))
s2 <- MSw
s2A <- (MSa-MSw)/n0
r <- s2A/(s2 + s2A)
p <- round(anova(model)["Pr(>F)"]["eff",],3)
F <- round(anova(model)["F value"]["eff",], 3)
Fdf1 <- anova(model)["Df"]["eff",]
Fdf2 <- anova(model)["Df"]["Residuals",]
{if (p == 0) cat("r = ", round(r,3), ", F", Fdf1, ",", Fdf2, "= ", F, ", p < 0.001", "\n", sep = "")
else cat("r = ", round(r,3), ", F", Fdf1, ",", Fdf2, "= ", F, ", p = ", p, "\n", sep = "")
}
}
#--------------Errorbar function to display mean+CI----------------------------
errorbars<-function (response, factor1, factor2, error.bars = c("se", "sd",
"conf.int", "none"), level = 0.95, xlab = deparse(substitute(factor1)),
ylab = paste("mean of", deparse(substitute(response))),
legend.lab = deparse(substitute(factor2)), main = "",
pch = 1:n.levs.2, lty = 1:n.levs.2, col= 1:n.levs.2,
levs="", levs2="", x.posn=0, y.posn=0, bty="l", l.bty="n", hatwidth = 0.3, ...)
{
if (!is.numeric(response))
stop("Argument response must be numeric.")
xlab
ylab
legend.lab
error.bars <- match.arg(error.bars)
if (missing(factor2)) {
if (!is.factor(factor1))
stop("Argument factor1 must be a factor.")
valid <- complete.cases(factor1, response)
factor1 <- factor1[valid]
response <- response[valid]
means <- tapply(response, factor1, mean)
sds <- tapply(response, factor1, sd)
ns <- tapply(response, factor1, length)
if (error.bars == "se")
sds <- sds/sqrt(ns)
if (error.bars == "conf.int")
sds <- qt((1 - level)/2, df = ns - 1, lower.tail = FALSE) *
sds/sqrt(ns)
yrange <- if (error.bars != "none")
c(0.9*min(means - sds),1.1*max(means + sds))
else range(means)
if (levs[1] == "")
levs <- levels(factor1)
n.levs <- length(levels(factor1))
plot(c(1, n.levs), xlim=c(0.5,n.levs+.5),yrange, type = "n",
xlab = xlab, ylab = ylab,
axes = FALSE, main = main)
points(1:n.levs, means, type = "p")
box(bty=bty)
axis(2)
axis(1, at = 1:n.levs, labels = levs)
if (error.bars != "none")
arrows(1:n.levs, means - sds, 1:n.levs, means + sds,
angle = 90, lty = 1, code = 3, length = hatwidth)
}
else {
if (!(is.factor(factor1) | is.factor(factor2)))
stop("Arguments factor1 and factor2 must be factors.")
valid <- complete.cases(factor1, factor2, response)
factor1 <- factor1[valid]
factor2 <- factor2[valid]
response <- response[valid]
means <- tapply(response, list(factor1, factor2), mean)
sds <- tapply(response, list(factor1, factor2), sd)
ns <- tapply(response, list(factor1, factor2), length)
if (error.bars == "se")
sds <- sds/sqrt(ns)
if (error.bars == "conf.int")
sds <- qt((1 - level)/2, df = ns - 1, lower.tail = FALSE) *
sds/sqrt(ns)
yrange <- if (error.bars != "none")
c(0.9*min(means - sds), 1.1*max(means + sds))
else range(means)
if (levs[1] == "")
levs=levels(factor1)
levs.1 <- levels(factor1)
levs.2 <- levels(factor2)
n.levs.1 <- length(levs.1)
n.levs.2 <- length(levs.2)
plot(c(1, n.levs.1 + .5), yrange, type = "n", xlab = xlab,
ylab = ylab, axes = FALSE, main = main)
box(bty=bty)
axis(2)
axis(1, at = 1:n.levs.1+n.levs.2*.05, labels = levs)
for (i in 1:n.levs.2) {
points(1:n.levs.1+0.1*(i-1), means[, i], type = "p", pch = pch[i],
col = col[i],lty = lty[i])
if (error.bars != "none")
arrows(1:n.levs.1+.1*(i-1), means[, i] - sds[, i],
1:n.levs.1+.1*(i-1), col = col[i],
means[, i] + sds[, i], angle = 90, code = 3,
lty = lty[i], length = hatwidth)
}
if (x.posn==0)
x.posn <- n.levs.1 + 0.3
if (y.posn==0)
y.posn <- sum(c(0.1, 0.9) * par("usr")[c(3, 4)])
text(x.posn, y.posn, legend.lab, adj = c(0, -0.5))
if (levs2[1]=="") levs2=levs.2
legend(x.posn, y.posn, levs2, pch = pch, col=col,
lty = lty, bty=l.bty)
}
invisible(NULL)
}
#Matched plot--------
# where the response is plotted with connected lines by grouping variable
matched <- function(formula, data, subset = NULL, space1 = 0.25, space2 = 0.25, label = TRUE, xlim = NULL, ylab = NULL, xlab = NULL, enlarge.overlapped = TRUE, lwd.cor = 1, ...)
{
if (missing(formula)) stop("'formula' missing or incorrect")
modf <- match.call()
if (missing(data)) data <- environment(formula)
## evaluate and install the model frame
m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"),
names(modf), 0)
modf <- modf[c(1, m)]
modf$drop.unused.levels <- TRUE
modf[[1]] <- as.name("model.frame")
mf <- eval(modf, parent.frame())
mf <- mf[order(mf[[3]], mf[[2]]),]
resp <- model.response(mf)
effect <- factor(mf[[2]])
group <- as.numeric(effect)
nlev <- length(levels(effect))
levs <- levels(factor(group))
id <- factor(mf[[3]])
if (label == TRUE) space2 = space2 + 0.25
{if (is.null(xlim)) xlm <- c(1-space1, nlev+space2)
else xlm <- xlim}
{if (is.null(xlab)) xlb <- names(mf)[2]
else xlb <- xlab}
{if (is.null(ylab)) ylb <- names(mf)[1]
else ylb <- ylab}
plot(resp~group, type = "n", xaxt = "n",
xlim = xlm, ylab = ylb, xlab = xlb, ...)
axis(1, at = levs, labels = levels(effect))
D <- data.frame(id, x = group, y = resp)
if (any(duplicated(D)))
{warning("ID does not define unique cases, changing the line width for overlapping segments will not work.
If data contains replicated cases, it may be better to use an ID that defines unique cases, i.e.
'ID-replicate1' 'ID-replicate2', possibly using the function 'interaction'")
enlarge.overlapped <- FALSE
}
Dspl <- split(D, D$id)
sapply(Dspl, function(x) points(x$x, x$y, ...))
segments <- lapply(1: (nlev-1), function(x) data.frame(x0 = NA, x1 = NA, y0 = NA, y1 = NA))
for (i in 1:(nlev-1))
{
wide <- data.frame(x0 = sapply(Dspl, function(x) x$x[i]), x1 = sapply(Dspl, function(x) x$x[i+1]),
y0 = sapply(Dspl, function(x) x$y[i]), y1 = sapply(Dspl, function(x) x$y[i+1]))
recs <- do.call("paste", c(wide, sep = "\r"))
same <- table(recs) #indicates the number of identical records (i.e. segments to draw)
wide$same <- as.numeric(same)[match(recs,names(same))]
wide$same[is.na(wide$x0) | is.na(wide$x1)] <- NA
segments[[i]] <- wide
}
{if (enlarge.overlapped)
sapply(segments, function(x)
segments(x$x0, x$y0, x$x1, x$y1, lwd = x$same*lwd.cor, ...))
else
sapply(segments, function(x)
segments(x$x0, x$y0, x$x1, x$y1, ...))
}
label.y <- sapply(Dspl, function(x) tail(x$y, 1L))
label.y <- ifelse(duplicated(label.y), jitter(label.y), label.y)
if (label) text(x=(nlev + space2/10), y = label.y, labels = names(Dspl), pos = 4)
}
# Bar and line plots with se error bars-------
bars <- function(formula, data, subset = NULL, hatwidth = 0.5, ylim = NULL,
xlab = NULL, ylab = NULL, ...)
{
dots <- list(...)
mf <- match.call()
if (missing(data)) data <- environment(formula)
## evaluate and install the model frame
m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"),
names(mf), 0)
mf <- mf[c(1, m)]
mf$drop.unused.levels <- TRUE
mf[[1]] <- as.name("model.frame")
fr <- eval(mf, parent.frame())
resp <- model.response(fr)
effect <- factor(fr[[2]])
{if (is.null(xlab)) xlb = names(fr)[2]
else xlb = xlab}
{if (is.null(ylab)) ylb = names(fr)[1]
else ylb = ylab}
{if (dim(fr)[2] == 3)
{
eff2 <- factor(fr[[3]])
y = tapply(resp, list(eff2, effect), mean, na.rm = T)
sem = tapply(resp, list(eff2, effect), se)
ymax = max(y+sem)*1.1
{if (is.null(ylim)) yl = c(0, ymax)
else yl = ylim
}
bp = barplot(y, beside = T, ylim = yl, xlab = xlb, ylab = ylb, ...)
hw = hatwidth*0.25
arrows(bp, y+sem, bp, y-sem, angle = 90, code = 3, length = hw)
}
else if (dim(fr)[2] == 2)
{
y = tapply(resp, effect, mean, na.rm = T)
sem = tapply(resp, effect, se)
ymax = max(y+sem)*1.1
{if (is.null(ylim)) yl = c(0, ymax)
else yl = ylim
}
bp = barplot(y, ylim = yl, xlab = xlb, ylab = ylb, ...)
arrows(bp, y+sem, bp, y-sem, angle = 90, code = 3,
length = hatwidth*1)
}
else stop("please use formula with one response and one or two effects, e.g. response~effect + effect2")
}
box(bty = "l")
}
# Lineplot now accepts factors as the first effect argument. However, when the
# first factor is factor, but has numeric values, the function uses these values instead
# of the factor levels. So for example, if time is like 1,2,4, then it will plot as 1,2,3,4
# on the x axis with no values for 3. Otherwise, it would skip 3. If this is the desired effect
# than it makes sense to rename the x variable such as something1,2,4.
lineplot <- function(formula, data, subset = NULL, hatwidth = 0.5, type = "b", pch = 19, ylim = NULL, xlab = NULL, ylab = NULL, spread = TRUE, off = 0.1, ...)
{
dots <- list(...)
mf <- match.call()
if (missing(data)) data <- environment(formula)
## evaluate and install the model frame
m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"),
names(mf), 0)
mf <- mf[c(1, m)]
mf$drop.unused.levels <- TRUE
mf[[1]] <- as.name("model.frame")
fr <- eval(mf, parent.frame())
resp <- model.response(fr)
effect <- fr[[2]]
nlev1 = length(unique(effect))
{if (is.null(xlab)) xlb = names(fr)[2]
else xlb = xlab}
{if (is.null(ylab)) ylb = names(fr)[1]
else ylb = ylab}
{if (dim(fr)[2] == 3)
{
eff2 <- factor(fr[[3]])
nlev2 <- length(levels(eff2))
matr = tapply(resp, list(effect, eff2), mean, na.rm = T)
sem = tapply(resp, list(effect, eff2), se)
}
else if (dim(fr)[2] == 2) {
matr = tapply(resp, effect, mean, na.rm = T)
sem = tapply(resp, effect, se)
nlev2 = 1
}
else stop("please use formula with one response and one or two effects, e.g. response~effect + effect2")
}
if (!is.numeric(type.convert(dimnames(matr)[[1]], as.is = T))){
if (is.factor(effect)) {
x = 1:nlevels(effect)
names(x) = levels(effect)
xaxt = "n"
}
else {
effect = factor(effect)
x = 1:nlevels(effect)
names(x) = levels(effect)
xaxt = "n"
}
}
else {
x = as.numeric(dimnames(matr)[[1]])
xaxt = "s"
}
y = matr
ymin = min(y-sem)*0.9
ymax = max(y+sem)*1.1
hw = hatwidth*0.25
#check if col was specified in the main call
if (is.null(dots$col)) color = rep(1:nlev2, each = nlev1)
else color = rep(dots$col, each = nlev1)
#check if ylim was specified in the main call
if (is.null(ylim)) yl = c(ymin, ymax)
else yl = ylim
{if (spread)
{
dif = mean(diff(x))*off
posit = 1:nlev2 - median(1:nlev2)
offs = posit * dif
X = sapply(offs, function(o) o+x)
matplot(x=X, y=y, type = type, ylim = yl, pch = pch,
xlab = xlb, ylab = ylb, xaxt = xaxt, ...)
arrows(X, y+sem, X, y-sem, angle = 90, code = 3, length = hw,
col = color)
}
else
{
matplot(x=x, y=y, type = type, ylim = yl, pch = pch,
xlab = xlb, ylab = ylb, xaxt = xaxt, ...)
arrows(x, y+sem, x, y-sem, angle = 90, code = 3, length = hw,
col = color)
}
}
if (xaxt == "n") {
axis(1, at = x, labels = names(x))
}
box(bty = "l")
}
pairs2 <- function (x,y,smooth=TRUE, digits=2, prefix="", cex.cor=NULL, stars = TRUE, ...)
{
panel.cor <- function(x, y, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r.obj = cor.test(x, y,use="pairwise",...)
r = as.numeric(r.obj$estimate)
p = r.obj$p.value
{if (stars) mystars <-
ifelse(p < .001, "***", ifelse(p < .01, "** ", ifelse(p < .05, "* ", ifelse(p < 0.1, ".", " "))))
else mystars <- NULL }
txt <- format(c(r, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, mystars, sep="")
cex = ifelse(is.null(cex.cor), 0.8/strwidth(txt), cex.cor)
cexfinal = ifelse(stars, 1.2, cex * abs(r)+0.5)
text(0.5, 0.5, txt, cex = cexfinal)
}
panel.hist <- function(x)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5) )
h <- hist(x, plot = FALSE)
breaks <- h$breaks; nB <- length(breaks)
y <- h$counts; y <- y/max(y)
rect(breaks[-nB], 0, breaks[-1], y, col="cyan")
}
{if (smooth)
pairs(x,diag.panel=panel.hist,lower.panel=panel.cor,upper.panel=panel.smooth)
else #smooth is not true
pairs(x,diag.panel=panel.hist,lower.panel=panel.cor)}
}
# Table formatting -----
# Fixed-point formatting of numbers with n decimal places (as character).
fixed_digits <- function(xs, n = 2) {
  formatC(xs, format = "f", digits = n)
}
# nice p-values for tables...
nice_pval <- function(ps, html = FALSE) {
  # Three-decimal p-values with a "<0.001" (or "< 0.001" in HTML) floor.
  tiny_label <- if (html) "< 0.001" else "<0.001"
  formatted <- formatC(ps, format = "f", digits = 3)
  formatted[ps < 0.001] <- tiny_label
  formatted
}
# ...and for in-text
nice_pval2 <- function(ps, html = FALSE) {
  # In-text variant: prefixes "= ", same "<0.001" floor as nice_pval.
  tiny_label <- if (html) "< 0.001" else "<0.001"
  formatted <- paste0("= ", formatC(ps, format = "f", digits = 3))
  formatted[ps < 0.001] <- tiny_label
  formatted
}
# Pretty-print a model summary table: the first column is renamed "term",
# every other non-p-value column is rounded to `stat.digit` decimals (as
# character), and the p-value column is formatted with nice_pval().
# Relies on fixed_digits() and nice_pval() defined above.
nice_modtable <- function(table, stat.digit = 2, pval = "p.value"){
  if (is.matrix(table)) table <- as.data.frame(table)
  names(table)[1] <- "term"
  col.d2 <- which(!names(table) %in% c("term", pval))  # columns to round
  for (i in col.d2){
    table[,i] <- fixed_digits(table[,i], stat.digit)
  }
  table[,names(table) %in% pval] <- nice_pval(table[,names(table) %in% pval])
  table
}
nicetab <- function(x) knitr::kable(nice_modtable(broom::tidy(x)))
|
#' rfm_model function
#'
#' Score each row of a customer data frame on Recency, Frequency and
#' Monetary value (RFM).  Recency is cut at its quartiles into scores
#' 4 (most recent) down to 1, frequency into scores 1-3, and monetary
#' value at its quintiles into scores 1-5; the combined score is the
#' string "RFM", e.g. "435".
#'
#' @param df a data frame loaded with rfm_load or created previously;
#'   must contain numeric columns \code{recency}, \code{frequency} and
#'   \code{monetary}
#' @return the input data.frame with factor columns \code{Re}, \code{Fr},
#'   \code{Mo} and a character \code{score} column appended
#'
#' @export
rfm_model <- function(df){
  # Recency: smaller recency (more recent purchase) scores higher.
  r <- quantile(df$recency, c(0.25,0.5,0.75))
  df$Re <- ifelse(df$recency<r[1],4,
                  ifelse(df$recency>=r[1] & df$recency<r[2],3,
                         ifelse(df$recency>=r[2] & df$recency<r[3],2,1)))
  # Frequency: 1 purchase -> 1, 2 -> 2, 3 or more -> 3.
  df$Fr <- ifelse(df$frequency<=1,1,
                  ifelse(df$frequency>1 & df$frequency<3,2,3))
  # Monetary: quintile cut points m[1..4] give scores 1-5.
  # Fix: the original never compared against m[4], so score 5 was
  # unreachable; the top 20% of spenders now score 5 as the five
  # quintile cut points imply.
  m <- quantile(df$monetary, c(0.2,0.4,0.6,0.8))
  df$Mo <- ifelse(df$monetary<m[1],1,
                  ifelse(df$monetary>=m[1] & df$monetary<m[2],2,
                         ifelse(df$monetary>=m[2] & df$monetary<m[3],3,
                                ifelse(df$monetary>=m[3] & df$monetary<m[4],4,5))))
  df$Re <- as.factor(df$Re)
  df$Mo <- as.factor(df$Mo)
  df$Fr <- as.factor(df$Fr)
  # Combined RFM score string, e.g. "435".
  df$score <- paste(df$Re,df$Fr,df$Mo, sep = '')
  return(df)
}
| /R/rfm_model.R | no_license | CJB2014/rfm | R | false | false | 980 | r | #' rfm_model fucntion
#'
#' Score a customer data frame on Recency, Frequency and Monetary value
#'
#' @param df a data frame loaded with rfm_load or created previously; must
#'   contain numeric columns recency, frequency and monetary
#' @return a Data.frame object with factor score columns Re, Fr, Mo and a
#'   combined character score column appended
#'
#'
#' @export
rfm_model <- function(df){
  # Recency score: quartile cut points, 4 = most recent ... 1 = least recent.
  r <- quantile(df$recency, c(0.25,0.5,0.75))
  df$Re <- ifelse(df$recency<r[1],4,
                  ifelse(df$recency>=r[1] & df$recency<r[2],3,
                         ifelse(df$recency>=r[2] & df$recency<r[3],2,1)))
  # Frequency score: 1 purchase -> 1, 2 -> 2, 3 or more -> 3.
  df$Fr <- ifelse(df$frequency<=1,1,
                  ifelse(df$frequency>1 & df$frequency<3,2,3))
  # NOTE(review): m holds the 0.2/0.4/0.6/0.8 quantiles but m[4] is never
  # used below, so the final "5" branch is unreachable and Mo only ever
  # takes values 1-4.  Presumably the last test was meant to be
  # monetary < m[4] -- confirm before relying on 5-level monetary scores.
  m <- quantile(df$monetary, c(0.2,0.4,0.6,0.8))
  df$Mo <- ifelse(df$monetary<m[1],1,
                  ifelse(df$monetary>=m[1] & df$monetary<m[2],2,
                         ifelse(df$monetary>=m[2] & df$monetary<m[3],3,
                                ifelse(df$monetary>=m[3],4,5))))
  df$Re <- as.factor(df$Re)
  df$Mo <- as.factor(df$Mo)
  df$Fr <- as.factor(df$Fr)
  # Combined RFM score string, e.g. "431".
  df$score <- paste(df$Re,df$Fr,df$Mo, sep = '')
  return(df)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/returns.R
\name{get_quarterly_log_returns}
\alias{get_quarterly_log_returns}
\title{get_quarterly_log_returns}
\usage{
get_quarterly_log_returns(tibble_obj)
}
\arguments{
\item{tibble_obj}{most simply, returned from a call to tq_get()}
}
\value{
a tibble with the quarterly log returns
}
\description{
Calculates the quarterly log returns using xts's built-in periodReturn
functionality. Returns are quarter over quarter, using adjusted close prices.
}
| /man/get_quarterly_log_returns.Rd | no_license | gmahjub/steal-basis-r | R | false | true | 530 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/returns.R
\name{get_quarterly_log_returns}
\alias{get_quarterly_log_returns}
\title{get_quarterly_log_returns}
\usage{
get_quarterly_log_returns(tibble_obj)
}
\arguments{
\item{tibble_obj}{most simply, returned from a call to tq_get()}
}
\value{
a tibble with the quarterly log returns
}
\description{
Calculates the quarterly log returns using xts's built-in periodReturn
functionality. Returns are quarter over quarter, using adjusted close prices.
}
|
# bird traits
# Attach Elton diet / body-mass traits to the bird species in the phenology
# data sets and write clean_data/bird_traits.csv.
library(plyr)
all_data <- fread("clean_data/all_data_standardized.csv")
# name reassignments to avoid problems
#problem_species <- read.csv("Edited Data/problem_species.csv", stringsAsFactors = FALSE)
problem_birds <- read.csv("raw_data/problem_birds.csv", stringsAsFactors = FALSE)
#all_data$species_synon <- mapvalues(all_data$species, problem_species$original_species, problem_species$fixed_species)
all_data$species_synon <- mapvalues(all_data$species, problem_birds$manomet_species, problem_birds$elton_species)
# limiting analysis to just 1958-2016, because that's the range of TerraClimate
# BUG FIX: the original `-which(is.na(...))` drops ALL rows when nothing is
# missing (`x[-integer(0), ]` selects zero rows); a logical subset is safe.
all_data <- all_data[!is.na(all_data$tmax),]
bird_species <- unique(all_data[which(all_data$dataset %in% c("manomet", "korea", "japan", "ussr")),c("species","species_synon")]) # species from datasets with potentially birds
bird_species$species_synon_clean <- sapply(bird_species$species_synon, name.cleanup)
# species <- fread("clean_data/species.csv", stringsAsFactors = F)
# bird_species <- species[tax_group == "bird",]
# bird_species$species_synon <- mapvalues(bird_species$species, problem_birds$manomet_species, problem_birds$elton_species)
# bird_species$species_synon_clean <- sapply(bird_species$species_synon, name.cleanup)
bird_trait_data <- fread("raw_data/traits/BirdFuncDat.txt")
write.csv(bird_trait_data, "raw_data/Eltontraits.csv", row.names = FALSE) #just to look at it more easily outside of R
# keep Elton rows matching any of the raw / synonym / cleaned names
bird_traits <- bird_trait_data[Scientific %in% bird_species$species | Scientific %in% bird_species$species_synon | Scientific %in% bird_species$species_synon_clean,]
bird_traits <- bird_traits[, c("Scientific","Diet-5Cat", "BodyMass-Value")] # I'm just pulling out mass & diet category
# paste0(round(nrow(bird_traits)/nrow(bird_species), 2)*100,"% of species have Elton trait data") #It's supposed to be 100% if you have only birds in the species list (Manomet)
# bird_species[which(bird_species$species_synon_clean %!in% bird_trait_data$Scientific)] # Ok, I got all species! (when looking at just Manomet - I threw in all species including non-birds into the species list for korea, japan, and kivach)
bird_species_traits <- data.frame(species = as.character(bird_species$species),
                                  species_synon = as.character(bird_species$species_synon),
                                  species_synon_clean = as.character(bird_species$species_synon_clean),
                                  stringsAsFactors = FALSE) # keep characters on R < 4.0 too
# look traits up by cleaned synonym; NA when the species has no Elton row
bird_species_traits$diet <- sapply(bird_species_traits$species_synon_clean, function(x) ifelse(x %!in% bird_traits$Scientific, NA , as.character(bird_traits[Scientific == x, "Diet-5Cat"])))
bird_species_traits$mass <- sapply(bird_species_traits$species_synon_clean, function(x) ifelse(x %!in% bird_traits$Scientific, NA , as.character(bird_traits[Scientific == x, "BodyMass-Value"])))
bird_species_traits[,c("species_synon", "diet", "mass")] # interactive inspection (auto-prints when run via Rscript)
bird_species_traits <- bird_species_traits[which(complete.cases(bird_species_traits)),]
# BUG FIX: `-which(duplicated(...))` empties the table when there are no
# duplicates; `!duplicated(...)` keeps the first occurrence safely.
bird_species_traits <- bird_species_traits[!duplicated(bird_species_traits$species_synon_clean),] # deleting a few duplicates that arose from the name cleaning and synonym finding
write.csv(bird_species_traits, "clean_data/bird_traits.csv", row.names = FALSE) # done!
| /scripts/bird_traits.R | no_license | stemkov/pheno_variance | R | false | false | 3,229 | r | # bird traits
library(plyr)
all_data <- fread("clean_data/all_data_standardized.csv")
# name reassignments to avoid problems
#problem_species <- read.csv("Edited Data/problem_species.csv", stringsAsFactors = FALSE)
problem_birds <- read.csv("raw_data/problem_birds.csv", stringsAsFactors = FALSE)
#all_data$species_synon <- mapvalues(all_data$species, problem_species$original_species, problem_species$fixed_species)
all_data$species_synon <- mapvalues(all_data$species, problem_birds$manomet_species, problem_birds$elton_species)
# limitting analysis to just 1958-2016, because that's the range of TerraClimate
all_data <- all_data[-which(is.na(all_data$tmax)),]
bird_species <- unique(all_data[which(all_data$dataset %in% c("manomet", "korea", "japan", "ussr")),c("species","species_synon")]) # species from datasets with potentially birds
bird_species$species_synon_clean <- sapply(bird_species$species_synon, name.cleanup)
# species <- fread("clean_data/species.csv", stringsAsFactors = F)
# bird_species <- species[tax_group == "bird",]
# bird_species$species_synon <- mapvalues(bird_species$species, problem_birds$manomet_species, problem_birds$elton_species)
# bird_species$species_synon_clean <- sapply(bird_species$species_synon, name.cleanup)
bird_trait_data <- fread("raw_data/traits/BirdFuncDat.txt")
write.csv(bird_trait_data, "raw_data/Eltontraits.csv", row.names = FALSE) #just to look at it more easily outside of R
bird_traits <- bird_trait_data[Scientific %in% bird_species$species | Scientific %in% bird_species$species_synon | Scientific %in% bird_species$species_synon_clean,]
bird_traits <- bird_traits[, c("Scientific","Diet-5Cat", "BodyMass-Value")] # I'm just pulling out mass & diet category
# paste0(round(nrow(bird_traits)/nrow(bird_species), 2)*100,"% of species have Elton trait data") #It's supposed to be 100% if you have only birds in the species list (Manomet)
# bird_species[which(bird_species$species_synon_clean %!in% bird_trait_data$Scientific)] # Ok, I got all species! (when looking at just Manomet - I threw in all species including non-birds into the species list for korea, japan, and kivach)
bird_species_traits <- data.frame(species = as.character(bird_species$species),
species_synon = as.character(bird_species$species_synon),
species_synon_clean = as.character(bird_species$species_synon_clean))
bird_species_traits$diet <- sapply(bird_species_traits$species_synon_clean, function(x) ifelse(x %!in% bird_traits$Scientific, NA , as.character(bird_traits[Scientific == x, "Diet-5Cat"])))
bird_species_traits$mass <- sapply(bird_species_traits$species_synon_clean, function(x) ifelse(x %!in% bird_traits$Scientific, NA , as.character(bird_traits[Scientific == x, "BodyMass-Value"])))
bird_species_traits[,c("species_synon", "diet", "mass")]
bird_species_traits <- bird_species_traits[which(complete.cases(bird_species_traits)),]
bird_species_traits <- bird_species_traits[-which(duplicated(bird_species_traits$species_synon_clean)),] # deleting a few duplicates that arose from the name cleaning and synonym finding
write.csv(bird_species_traits, "clean_data/bird_traits.csv", row.names = FALSE) # done!
|
#' Given rectangle code return perimeter as a polygon in lat lon
#'
#'
#' The outline/boundary of a statistical rectangle is returned as 5 positions,
#' the first and last of which are the same.
#'
#' @name rectPeri
#' @aliases rPeri srPeri mrPeri drPeri
#' @param r,sr,mr,dr Rectangle codes.
#' @param dlat,dlon Dimensions of latitude and longitude given in minutes and
#' degrees for \code{mrPeri} and \code{drPeri}, respectively.
#' @return Rectangle outline as 5 positions.
#' @note Should perhaps be extended to give a list or dataframe of polygons for
#' more than one \code{r, sr, mr} or \code{dr}.
#' @seealso \code{\link{deg2rect}}, \code{\link{rectArea}},
#' \code{\link{geoarea}}.
#' @keywords arith manip
#' @examples
#'
#' geoplot(island, type = "n", grid = FALSE)
#' geolines(rPeri(468))
#' geolines(srPeri(4681))
#'
#' @export rPeri
#' @rdname rectPeri
rPeri <-
function(r)
{
  ## Convert the rectangle code to its center once (the original called
  ## r2d() twice for the same code).
  ctr <- r2d(r)
  ## Corner offsets: half-height 1/4 deg lat, half-width 1/2 deg lon.
  ## Five points so the polygon closes on itself.
  lat <- ctr$lat + c(1/4, 1/4, - 1/4, - 1/4, 1/4)
  lon <- ctr$lon + c(0.5, - 0.5, - 0.5, 0.5, 0.5)
  data.frame(lat = lat, lon = lon)
}
#' @export srPeri
#' @rdname rectPeri
srPeri <-
function(sr)
{
  ## Convert the subrectangle code to its center once (the original called
  ## sr2d() twice for the same code).
  ctr <- sr2d(sr)
  ## Corner offsets: half-height 1/8 deg lat, half-width 1/4 deg lon.
  lat <- ctr$lat + c(1/8, 1/8, - 1/8, - 1/8, 1/8)
  lon <- ctr$lon + c(0.25, - 0.25, - 0.25, 0.25, 0.25)
  data.frame(lat = lat, lon = lon)
}
#' @export mrPeri
#' @rdname rectPeri
mrPeri <-
function(mr, dlat = 5, dlon = 10)
{
  ## Convert the code to the rectangle center once (the original called
  ## mr2d() twice with identical arguments).
  ctr <- mr2d(mr, dlat = dlat, dlon = dlon)
  ## dlat/dlon are in minutes; /120 gives half the dimension in degrees.
  lat <- ctr$lat + c(dlat/120, dlat/120, - dlat/120, - dlat/120, dlat/120)
  lon <- ctr$lon + c(dlon/120, - dlon/120, - dlon/120, dlon/120, dlon/120)
  data.frame(lat = lat, lon = lon)
}
#' @export drPeri
#' @rdname rectPeri
drPeri <-
function(dr, dlat = 1, dlon = 2)
{
  ## Convert the code to the rectangle center once (the original called
  ## dr2d() twice with identical arguments).
  ctr <- dr2d(dr, dlat = dlat, dlon = dlon)
  ## dlat/dlon are in degrees; /2 gives half the dimension.
  lat <- ctr$lat + c(dlat/2, dlat/2, - dlat/2, - dlat/2, dlat/2)
  lon <- ctr$lon + c(dlon/2, - dlon/2, - dlon/2, dlon/2, dlon/2)
  data.frame(lat = lat, lon = lon)
}
| /R/rectPeri.R | no_license | Hafro/geo | R | false | false | 2,029 | r | #' Given rectangle code return perimeter as a polygon in lat lon
#'
#'
#' The outline/boundary of a statistical rectangle is returned as 5 positions,
#' the first and last of which are the same.
#'
#' @name rectPeri
#' @aliases rPeri srPeri mrPeri drPeri
#' @param r,sr,mr,dr Rectangle codes.
#' @param dlat,dlon Dimensions of latitude and longitude given in minutes and
#' degrees for \code{mrPeri} and \code{drPeri}, respectively.
#' @return Rectangle outline as 5 positions.
#' @note Should perhaps be extended to give a list or dataframe of polygons for
#' more than one \code{r, sr, mr} or \code{dr}.
#' @seealso \code{\link{deg2rect}}, \code{\link{rectArea}},
#' \code{\link{geoarea}}.
#' @keywords arith manip
#' @examples
#'
#' geoplot(island, type = "n", grid = FALSE)
#' geolines(rPeri(468))
#' geolines(srPeri(4681))
#'
#' @export rPeri
#' @rdname rectPeri
rPeri <-
function(r)
{
lat <- r2d(r)$lat
lon <- r2d(r)$lon
lat <- lat + c(1/4, 1/4, - 1/4, - 1/4, 1/4)
lon <- lon + c(0.5, - 0.5, - 0.5, 0.5, 0.5)
data.frame(lat = lat, lon = lon)
}
#' @export srPeri
#' @rdname rectPeri
srPeri <-
function(sr)
{
lat <- sr2d(sr)$lat
lon <- sr2d(sr)$lon
lat <- lat + c(1/8, 1/8, - 1/8, - 1/8, 1/8)
lon <- lon + c(0.25, - 0.25, - 0.25, 0.25, 0.25)
data.frame(lat = lat, lon = lon)
}
#' @export mrPeri
#' @rdname rectPeri
mrPeri <-
function(mr, dlat = 5, dlon = 10)
{
lat <- mr2d(mr, dlat = dlat, dlon = dlon)$lat
lon <- mr2d(mr, dlat = dlat, dlon = dlon)$lon
lat <- lat + c(dlat/120, dlat/120, - dlat/120, - dlat/120, dlat/120)
lon <- lon + c(dlon/120, - dlon/120, - dlon/120, dlon/120, dlon/120)
data.frame(lat = lat, lon = lon)
}
#' @export drPeri
#' @rdname rectPeri
drPeri <-
function(dr, dlat = 1, dlon = 2)
{
lat <- dr2d(dr, dlat = dlat, dlon = dlon)$lat
lon <- dr2d(dr, dlat = dlat, dlon = dlon)$lon
lat <- lat + c(dlat/2, dlat/2, - dlat/2, - dlat/2, dlat/2)
lon <- lon + c(dlon/2, - dlon/2, - dlon/2, dlon/2, dlon/2)
data.frame(lat = lat, lon = lon)
}
|
\name{pandadem}
\alias{pandadem}
\docType{data}
\title{
A RasterLayer containing a digital elevation model (DEM) for the panda data
}
\description{
Digital elevation model of the terrain within the panda's range with a cell resolution of 30 meters (ASTER GDEM version 2, Tachikawa et. al. 2011).
}
\usage{data(pandadem)}
\format{
A raster package RasterLayer object with 58 rows, 78 columns, and
a cell size of 30 by 30 meters. Each cell contains an elevation in meters.
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
\source{
URL: http://asterweb.jpl.nasa.gov/gdem.asp
}
\references{
Tachikawa, T., Hato, M., Kaku, M. & Iwasaki, A. (2011). The characteristics of ASTER GDEM version 2, IGARSS.
}
\examples{
library(raster)
data(pandadem)
plot(pandadem)
}
\keyword{datasets}
| /man/pandadem.Rd | no_license | cran/mkde | R | false | false | 815 | rd | \name{pandadem}
\alias{pandadem}
\docType{data}
\title{
A RasterLayer containing a digital elevation model (DEM) for the panda data
}
\description{
Digital elevation model of the terrain within the panda's range with a cell resolution of 30 meters (ASTER GDEM version 2, Tachikawa et. al. 2011).
}
\usage{data(pandadem)}
\format{
A raster package RasterLayer object with 58 rows, 78 columns, and
a cell size of 30 by 30 meters. Each cell contains an elevation in meters.
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
\source{
URL: http://asterweb.jpl.nasa.gov/gdem.asp
}
\references{
Tachikawa, T., Hato, M., Kaku, M. & Iwasaki, A. (2011). The characteristics of ASTER GDEM version 2, IGARSS.
}
\examples{
library(raster)
data(pandadem)
plot(pandadem)
}
\keyword{datasets}
|
#' Read numbering.xml in the docx
#'
#' Unpacks the docx (a zip archive) into a temporary directory, parses
#' \code{word/numbering.xml}, and removes the temporary directory.
#'
#' @param file docx file path
#'
#' @return numbering, as an \code{xml2} document
#' @export
#'
#' @examples
#' \dontrun{
#' read_numbering("reference.docx")
#' }
read_numbering <- function(file){
  tmpdir <- fs::file_temp()
  officer::unpack_folder(file = file, folder = tmpdir)
  ## Clean up even if read_xml() fails (the original leaked tmpdir on error).
  on.exit(fs::dir_delete(tmpdir), add = TRUE)
  xml2::read_xml(fs::path_abs(fs::path(tmpdir, "word/numbering.xml")))
}
#' Write numbering.xml into a copy of a docx
#'
#' Unpacks \code{org_docx}, overwrites its \code{word/numbering.xml} with
#' \code{numbering_xml}, and repacks the result as \code{new_docx}.
#'
#' @param numbering_xml an \code{xml2} document, e.g. from \code{read_numbering()}
#' @param org_docx path of the source docx
#' @param new_docx path of the docx to create
write_numbering <- function(numbering_xml, org_docx, new_docx){
  tmpdir <- fs::file_temp()
  ## Clean up the scratch dir even if writing or packing fails.
  on.exit(fs::dir_delete(tmpdir), add = TRUE)
  officer::unpack_folder(file = org_docx, folder = tmpdir)
  ## BUG FIX: xml2::write_xml() has no `format` argument -- the original
  ## `format = "format_whitespace"` was silently absorbed by `...` and
  ## ignored.  The save option is passed via `options =`.
  xml2::write_xml(numbering_xml,
                  fs::path_abs(fs::path(tmpdir, "word/numbering.xml")),
                  options = "format_whitespace")
  officer::pack_folder(new_docx, folder = tmpdir)
}
| /R/files.R | permissive | niszet/numberingx | R | false | false | 782 | r | #' Read numbering.xml in the docx
#'
#' @param file docx file path
#'
#' @return numbering
#' @export
#'
#' @examples
#' \dontrun{
#' read_numbering("reference.docx")
#' }
read_numbering <- function(file){
tmpdir = fs::file_temp()
officer::unpack_folder(file = file, folder = tmpdir)
x <- xml2::read_xml(fs::path_abs(fs::path(tmpdir, "word/numbering.xml")))
fs::dir_delete(tmpdir)
x
}
write_numbering <- function(numbering_xml, org_docx, new_docx){
tmpdir = fs::file_temp()
officer::unpack_folder(file = org_docx, folder = tmpdir)
xml2::write_xml(numbering_xml,
fs::path_abs(fs::path(tmpdir, "word/numbering.xml")),
format="format_whitespace")
officer::pack_folder(new_docx, folder = tmpdir)
fs::dir_delete(tmpdir)
}
|
## Copyright 2013 Nick Polson, James Scott, and Jesse Windle.
## This file is part of BayesLogit, distributed under the GNU General Public
## License version 3 or later and without ANY warranty, implied or otherwise.
## library("BayesLogit")
library("coda")
source("Benchmark-Utilities.R")
source("DynNBPG.R")
source("DynNBFS-2009.R")
source("DynNBCUBS.R")
source("DynNBOmegaBlock.R")
################################################################################
## SETUP ##
################################################################################
## Toggle which benchmark data sets / modes to run below.
run <- list("flu"   =FALSE,
            "synth1"=FALSE,
            "synth2"=FALSE,
            "synth3"=FALSE,
            "allsynth"=FALSE,
            "allview"=FALSE)
write.dir = "Bench-Dyn-06" # Used in file.path
## Output / plotting / reload switches.
write.it = TRUE
plot.it = FALSE
read.it = FALSE
## Available samplers; run.idc indexes into this vector.
methods = c("PG", "FS", "CUBS", "OmegaBlock")
## The methods to run from the sequence above.
run.idc = 1:3
## MCMC settings: kept samples, burn-in, and progress-report interval.
samp = 10000
burn = 2000
verbose = 1000
ntrials = 10 ## Number of MCMC repetitions per benchmark ("trials" here means
             ## repeated runs, not binomial trials).
options = list("just.max"=FALSE, "starts"=1); # extra settings for OmegaBlock
################################################################################
## Dyn NB Benchmark ##
################################################################################
benchmark.dyn.NB <- function(y, X.dyn, X.stc=NULL,
                             samp=1000, burn=100, ntrials=1, verbose=100,
                             method="PG", var.names=c("beta"), dset.name="NULL",
                             m.0=NULL, C.0=NULL,
                             mu.m0=NULL, mu.P0=NULL,
                             phi.m0=NULL, phi.P0=NULL,
                             W.a0=NULL, W.b0=NULL,
                             d.true=NULL,
                             beta.true=NULL, iota.true=NULL,
                             mu.true=NULL, phi.true=NULL, W.true=NULL, options=NULL)
{
  ## Benchmark one dynamic negative-binomial sampler ("PG", "FS", "CUBS",
  ## or "OmegaBlock"): run it `ntrials` times, collecting per-trial summary
  ## statistics for each variable in `var.names`, acceptance rates, and
  ## sampling times.  Returns a list(gb, sstat, arate, ess.time), where gb
  ## is the LAST sampler output; returns NA for an unknown method or after
  ## more than 10 sampler errors.
  cat("Will benchmark", method[1], "using", dset.name, "dataset for variable(s)", var.names, "\n");
  sstat = list(); for (nm in var.names) sstat[[nm]] = list();
  arate = rep(0, ntrials);
  ess.time = rep(0, ntrials);
  gb = list();
  i = 0
  num.errors = 0
  ## Loop until ntrials *successful* runs: i is decremented on a failed run.
  while (i < ntrials) {
    i = i + 1
    if (method=="PG") {
      gb <- dyn.NB.PG(y=y, X.dyn=X.dyn, X.stc=X.stc,
                      samp=samp, burn=burn, verbose=verbose,
                      m.0=m.0, C.0=C.0,
                      mu.m0=mu.m0, mu.P0=mu.P0,
                      phi.m0=phi.m0, phi.P0=phi.P0,
                      W.a0=W.a0, W.b0=W.b0,
                      d.true=d.true, w.true=NULL,
                      beta.true=beta.true, iota.true=iota.true,
                      mu.true=mu.true, phi.true=phi.true, W.true=W.true);
      ## Drop the initial (time-0) slice so dimensions match across methods.
      gb$beta = gb$beta[,,-1,drop=FALSE]
      ## Gibbs sampler: every draw is accepted.
      gb$a.rate = 1
    } else if (method=="FS") {
      gb <- dyn.NB.FS(y=y, X.dyn=X.dyn, X.stc=X.stc,
                      samp=samp, burn=burn, verbose=verbose,
                      m.0=m.0, C.0=C.0,
                      mu.m0=mu.m0, mu.P0=mu.P0,
                      phi.m0=phi.m0, phi.P0=phi.P0,
                      W.a0=W.a0, W.b0=W.b0,
                      d.true=d.true, lambda.true=NULL, r.true=NULL,
                      beta.true=beta.true, iota.true=iota.true,
                      mu.true=mu.true, phi.true=phi.true, W.true=W.true)
      gb$beta = gb$beta[,,-1,drop=FALSE]
      gb$a.rate=1
    } else if (method=="CUBS") {
      gb <- dyn.NB.CUBS(y=y, X.dyn=X.dyn, m0=m.0, C0=C.0,
                        samp=samp, burn=burn, verbose=verbose,
                        mu.m0=mu.m0, mu.P0=mu.P0,
                        phi.m0=phi.m0, phi.P0=phi.P0,
                        W.a0=W.a0, W.b0=W.b0, X.stc=X.stc,
                        mu.true = mu.true, phi.true=phi.true, W.true=W.true, d.true=d.true)
      gb$beta = gb$beta[,,-1,drop=FALSE]
      ## CUBS uses Metropolis steps; keep the final running acceptance rate.
      gb$a.rate = gb$ac.rate[samp]
    } else if (method=="OmegaBlock") {
      ## Split the joint prior (m.0, C.0) into static (alpha) and dynamic
      ## (beta) pieces; static coefficients come first when X.stc is given.
      m0.stc = NULL
      C0.stc = NULL
      m0.dyn = m.0
      C0.dyn = C.0
      if (!is.null(X.stc)) {
        P.a = ncol(X.stc)
        P.b = ncol(X.dyn)
        idc.a = 1:P.a
        idc.b = (P.a+1):(P.a+P.b)
        m0.stc = m.0[idc.a]
        C0.stc = C.0[idc.a, idc.a]
        m0.dyn = m.0[idc.b]
        C0.dyn = C.0[idc.b,idc.b]
      }
      ## NOTE(review): beta.true/iota.true arguments are intentionally not
      ## forwarded here (alpha.true/beta.true are passed as NULL) -- confirm
      ## against dyn.nb.om if truth-conditioning is ever needed for this
      ## method.
      gb <- dyn.nb.om(y=y, X.dyn=X.dyn, m0=m0.dyn, C0=C0.dyn,
                      samp=samp, burn=burn, verbose=verbose, starts=options$starts,
                      mu.m0=mu.m0, mu.P0=mu.P0,
                      phi.m0=phi.m0, phi.P0=phi.P0,
                      W.a0=W.a0, W.b0=W.b0,
                      X.stc=X.stc, m0.stc=m0.stc, C0.stc=C0.stc,
                      mu.true = mu.true, phi.true=phi.true, W.true=W.true,
                      alpha.true=NULL, beta.true = NULL, d.true=d.true,
                      just.max=options$just.max)
      gb$options = options
    } else {
      print("Unknown method.")
      return(NA);
    }
    if (gb$error) {
      ## Sampler failed: log the dump, undo the trial counter, retry.
      num.errors = num.errors + 1
      i = i-1
      cat("Error. Dump:\n", gb$dump, "\n");
    }
    else {
      for (nm in var.names) {
        ## 3-d draws (sample x dim x time) and 2-d draws get different
        ## summary routines.
        if (length(dim(gb[[nm]])) > 2) sstat[[nm]][[i]] = sum.stat.dyn(gb[[nm]], gb$ess.time[3], thin=1);
        if (length(dim(gb[[nm]])) ==2) sstat[[nm]][[i]] = sum.stat(gb[[nm]], gb$ess.time[3], thin=1);
      }
      arate[i] = gb$a.rate[1]
      ## NOTE(review): ess.time[3] looks like the elapsed element of a
      ## system.time() result -- confirm against the sampler implementations.
      ess.time[i] = gb$ess.time[3]
    }
    ## Give up entirely if the sampler keeps failing.
    if (num.errors > 10) return(NA)
  }
  ## Collapse each variable's list of per-trial summaries into an array.
  for (nm in var.names)
    sstat[[nm]] = simplify2array(sstat[[nm]]);
  out <- list("gb"=gb, "sstat"=sstat, "arate"=arate, "ess.time"=ess.time);
  out
} ## benchmark.dyn.NB
################################################################################
## BENCHMARK DATA SETS ##
################################################################################
##------------------------------------------------------------------------------
## FLU DATA ##
##------------------------------------------------------------------------------
if (run$flu) {
flu = read.csv("../../fludata/flu5years.csv", header=TRUE)
## Can use any of these
y1 = flu$"A.H3."
y2 = flu$"A.H1."
y3 = flu$B
y4 = flu$Total.number.positive
## plot(y1)
rawX = flu[,10:109]
N = nrow(rawX)
T = length(y1)
## Naive PCA
## Create a reduced-dimension representation of the search terms
NPC = 4 ## Can play with this
mypc = princomp(rawX)
X = mypc$scores[,1:NPC]
## plot(X[,2])
A = rawX - rep(1, N) %*% t(colMeans(rawX));
sv = svd(A)
Z = sv$u %*% diag(sv$d);
cor(mypc$scores, Z)[1:4,1:4];
## you won't lose rank unless a column vector = alpha 1.
## So if we want to do a dynamic regression with intercept then we should use
## X.dyn = as.matrix(Z[,1:NPC])
## X.stc = matrix(1, nrow=T, ncol=1)
## X.stc = NULL
## Evolving intercept
X.dyn = matrix(1, nrow=T, ncol=1)
X.stc = Z[,1:NPC];
N.b = ncol(X.dyn)
if (!is.null(X.stc)) N.a = ncol(X.stc) else N.a = 0
N = N.b + N.a
## Prior
mu.m0 = rep(0.0, N.b)
mu.P0 = rep(0.1, N.b)
phi.m0 = rep(0.95, N.b)
phi.P0 = rep(100, N.b)
m0 = rep(0.0, N);
C0 = diag(3.0, N);
W.gs = 0.01
W.a0 = rep(100, N.b);
W.b0 = W.a0 * W.gs;
bench.flu = list();
## source("Benchmark-DynNB.R")
for (i in 1:3) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.flu[[nm]] <- benchmark.dyn.NB(y=y1, X.dyn=X.dyn, X.stc=X.stc,
samp=samp, burn=burn, ntrials=1, verbose=verbose,
method=nm, var.names="beta", dset.name="Flu",
m.0=m0, C.0=C0,
mu.m0=mu.m0, mu.P0=mu.P0,
phi.m0=phi.m0, phi.P0=phi.P0,
W.a0=W.a0, W.b0=W.b0, d.true=NULL);
## mu.true=rep(0.0, N.b), phi.true=rep(1, N.b)
}
flu.table = setup.table.dyn(bench.flu, "beta")
}
##------------------------------------------------------------------------------
## SYNTHETIC 1 ##
##------------------------------------------------------------------------------
if (run$synth1) {
load("Benchmark-DataSets/DynNB-synth-1.RData")
y = dyn.nb.1$y;
X.dyn = dyn.nb.1$X;
P = 1
N.y = length(y)
options$starts=seq(1, N.y, 2)
## Prior
b.m0 = 3.0;
b.C0 = 3.0;
W = 0.1;
W.a0 = 300;
W.b0 = W.a0 * W;
bench.synth1 = list();
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.synth1[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=NULL,
samp=samp, burn=burn, ntrials=ntrials, verbose=verbose,
method=nm, var.names="beta", dset.name="Synth1",
m.0=b.m0, C.0=b.C0,
W.a0=W.a0, W.b0=W.b0,
mu.true=0.0, phi.true=1.0, d.true=4, options=options);
}
synth1.table = setup.table.dyn(bench.synth1, "beta")
if (plot.it) { plot.bench(pg, fs); plot.check.NB(y, X.dyn, bmark1=pg, bmark2=fs); }
if (write.it) save(bench.synth1, synth1.table, file=file.path(write.dir, "bmark-synth1.RData"))
}
##------------------------------------------------------------------------------
## SYNTH 2 ##
##------------------------------------------------------------------------------
if (run$synth2) {
load("Benchmark-DataSets/DynNB-synth-2.RData")
y = dyn.nb.2$y;
X.dyn = dyn.nb.2$X;
## Prior
b.m0 = 3.0;
b.C0 = 3.0;
W = 0.1;
W.a0 = 300;
W.b0 = W.a0 * W;
bench.synth2 = list();
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.synth2[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=NULL,
samp=samp, burn=burn, ntrials=1, verbose=verbose,
method=nm, var.names="beta", dset.name="Synth2",
m.0=b.m0, C.0=b.C0,
W.a0=W.a0, W.b0=W.b0,
mu.true=0.0, phi.true=1.0, options=options);
}
synth2.table = setup.table.dyn(bench.synth2, "beta")
if (plot.it) { plot.bench(pg, fs); plot.check.NB(y, X.dyn, bmark1=pg, bmark2=fs); }
if (write.it) save(bench.synth2, synth2.table, file=file.path(write.dir, "bmark-synth2.RData"))
}
##------------------------------------------------------------------------------
## SYNTH 3 ##
##------------------------------------------------------------------------------
if (run$synth3) {
load("Benchmark-DataSets/nb.synth3.RData")
y = y;
X.dyn = X.dyn;
Ny = length(y)
options$starts = c(1, Ny, 50)
## Prior
b.m0 = 0.0;
b.C0 = 3.0;
W = 0.1;
W.a0 = 10;
W.b0 = W.a0 * W;
mu.m0 = 0.0
mu.P0 = 1.0
phi.m0 = 0.95
phi.P0 = 100
bench.synth3 = list();
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.synth3[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=NULL,
samp=samp, burn=burn, ntrials=ntrials, verbose=verbose,
method=nm, var.names="beta", dset.name="Synth3",
m.0=b.m0, C.0=b.C0,
W.a0=W.a0, W.b0=W.b0,
mu.true=0.0, phi.true=1.0, d.true = d, options=options);
}
synth3.table = setup.table.dyn(bench.synth3, "beta")
if (write.it) save(bench.synth3, synth3.table, file=file.path(write.dir, "bmark-synth3.RData"))
}
##------------------------------------------------------------------------------
## GENERIC SYNTH ##
##------------------------------------------------------------------------------
if (run$allsynth)
{
## source("Benchmark-DynNB.R")
sstats = list()
tables = list()
ids = list()
iter = 0
P = 2
nb.mean = 100
corr.type = "low"
## est.ar = "with.ar"
est.ar = "wout.ar"
for (est.ar in c("wout.ar", "with.ar")) {
## for (P in c(2,4)) {
for (nb.mean in c(10,100)) {
for (corr.type in c("low", "high")) {
iter = iter + 1
cat("AR:", est.ar, "\n");
dset.name = paste(corr.type, "-", P, "-mu-", nb.mean, sep="");
source.file = paste("DynNB-synth-", dset.name, ".RData", sep="")
load(file.path("Benchmark-DataSets", source.file))
filename = paste("bench-dynnb-", dset.name, "-", est.ar, ".RData", sep="")
T = length(y)
X.dyn = X
X.stc = matrix(1, nrow=T, ncol=1)
P = ncol(X.dyn)
options$starts = seq(1,T,50)
## Prior
m0 = rep(0, P+1)
C0 = diag(100, P+1)
phi.m0 = rep(0.95, P);
phi.V0 = rep(0.1, P);
W.guess = 0.1
W.a0 = rep(300, P)
W.b0 = W.a0 * W.guess
mu.true = rep(0.0, P)
if (est.ar=="with.ar") {
phi.true = NULL
W.true = NULL
}
bench.synth = list();
if (read.it) load(file.path(write.dir, filename))
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.synth[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=X.stc,
samp=samp, burn=burn, ntrials=ntrials, verbose=verbose,
method=nm, var.names=c("beta", "alpha", "phi", "W"),
dset.name=dset.name,
m.0=m0, C.0=C0,
phi.m0=phi.m0, phi.P0=1/phi.V0,
W.a0=W.a0, W.b0=W.b0,
mu.true=mu.true, phi.true=phi.true, W.true=W.true, d.true=d.true, options=options);
}
synth.table = setup.table.dyn(bench.synth, "beta")
## if (plot.it) { plot.bench(pg, fs); plot.check.logit(y, X, n=n, bmark1=pg, bmark2=fs); }
if (write.it) save(bench.synth, synth.table, dset.name, file=file.path(write.dir, filename))
sstats[[iter]] = synth.table$ave.sstat
tables[[iter]] = synth.table$table
ids[[iter]] = filename
}}}#}
}
## Write tables.
if (FALSE) {
for (i in 1:length(ids)) {
tempname = sub("RData", "table", ids[[i]])
write.table(tables[[i]], tempname)
}
}
if (run$allview) {
P = 2
nb.mean = 10
corr.type = "high"
## est.ar = "with.ar"
est.ar = "wout.ar"
for (est.ar in c("wout.ar", "with.ar")) {
## for (P in c(2,4)) {
for (nb.mean in c(10,100)) {
for (corr.type in c("low", "high")) {
cat("AR:", est.ar, "P:", P, "nb.mean:", nb.mean, "corr:", corr.type, "\n");
dset.name = paste(corr.type, "-", P, "-mu-", nb.mean, sep="");
source.file = paste("DynNB-synth-", dset.name, ".RData", sep="")
bench.base = paste("bench-dynnb-", dset.name, "-", est.ar, sep="");
bench.data = paste(bench.base, ".RData", sep="")
table.file = paste("table.", bench.base, sep="");
load(file.path("Benchmark-DataSets", source.file))
load(file.path(write.dir, bench.data))
## cat("phi.true", phi.true, "\n");
## cat("W.true:", W.true, "\n");
the.table = synth.table$table
the.table[3,2] = mean(bench.synth$CUBS$arate) ## CUBS
write.table(the.table, file=table.file, row.names=TRUE, col.names=TRUE);
par(mfrow=c(P+1,1))
for (i in 1:P) {
plot(beta[i,], col=1, type="l")
## plot(synth.table$ave.sstat[i,,1,1], type="l", col=2)
lines(synth.table$ave.sstat[i,,1,1], col=2, lty=2)
lines(synth.table$ave.sstat[i,,1,2], col=3, lty=3)
lines(synth.table$ave.sstat[i,,1,3], col=4, lty=4) ## CUBS
}
alpha.pg = mean(bench.synth$PG$gb$alpha);
alpha.fs = mean(bench.synth$FS$gb$alpha);
alpha.cb = mean(bench.synth$CUBS$gb$alpha); ## CUBS
lmean.pg = apply((synth.table$ave.sstat[,,1,1]) * t(X), 2, sum) + alpha.pg
lmean.fs = apply((synth.table$ave.sstat[,,1,2]) * t(X), 2, sum) + alpha.fs
lmean.cb = apply((synth.table$ave.sstat[,,1,3]) * t(X), 2, sum) + alpha.cb ## CUBS
plot(y, cex=0.5)
lines(exp(log.mean))
lines(exp(lmean.pg), col=2, lty=2)
lines(exp(lmean.fs), col=3, lty=3)
lines(exp(lmean.cb), col=4, lty=4) ## CUBS
readline("<ENTER>")
}}}
}
################################################################################
#-------------------------------------------------------------------------------
## GENERATE AND TEST ##
#-------------------------------------------------------------------------------
################################################################################
if (FALSE)
{
T = 500
P = 2
corr.type = "low"
nb.mean = 24
## for (P in c(2,4)) {
## for (corr.type in c("low", "high")) {
## for (nb.mean in c(10, 100)) {
c = 0.5
d.true = 4
marg.V = 5 / sqrt(P) * c
phi.true = rep(0.95, P)
W.true = marg.V * (1 - phi.true^2)
beta = matrix(0, nrow=P, ncol=T+1)
beta[,1] = 0
for (i in 2:(T+1))
beta[,i] = phi.true * (beta[,i-1]) + rnorm(P, 0, sqrt(W.true))
xgrid = seq(-1, 1, length.out=T)
tX = matrix(0, nrow=P, ncol=T);
if (corr.type=="low") freq = c(1, 2, 3, 4)
if (corr.type=="high") freq = c(1, 1.1, 1.2, 1.3)
for (i in 1:P)
tX[i,] = cos(freq[i] * pi * xgrid);
tX = tX / sqrt(P) * (1-c)
X = t(tX)
log.mean = log(nb.mean) + colSums(beta[,-1] * tX)
psi = log.mean - log(d.true)
p.success = 1 / (1 + exp(-psi))
y = rnbinom(T, d.true, 1-p.success) ## p.success is prob of registering a single count.
## filename = paste("DynNB-synth-", corr.type, "-", P, "-mu-", nb.mean, ".RData", sep="")
## if (FALSE) {
## save(d.true, nb.mean, marg.V, phi.true, W.true, beta, tX, X, log.mean, psi, p.success, y, freq, xgrid,
## file=filename, compress=TRUE)
## }
#-----------------------------------------------------------------------------
X.dyn = X;
T = length(y)
X.dyn = X
X.stc = matrix(1, nrow=T, ncol=1)
P = ncol(X.dyn)
## Prior
m0 = rep(0, P+1)
C0 = diag(100, P+1)
phi.m0 = rep(0.95, P);
phi.V0 = rep(0.1, P);
W.guess = 0.1
W.a0 = rep(300, P)
W.b0 = W.a0 * W.guess
mu.true = rep(0.0, P)
if (est.ar=="with.ar") {
phi.true = NULL
W.true = NULL
}
out = list()
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
out[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=X.stc,
samp=samp, burn=burn, ntrials=ntrials, verbose=verbose,
method=nm, var.names="beta", dset.name="Synth1",
m.0=m0, C.0=C0,
W.a0=W.a0, W.b0=W.b0,
mu.true=mu.true, phi.true=phi.true, W.true=W.true, d.true=NULL);
}
synth.table = setup.table.dyn(out, "beta")
}
| /Code/R/Benchmark-DynNB.R | no_license | lawmurray/BayesLogit | R | false | false | 19,417 | r | ## Copyright 2013 Nick Polson, James Scott, and Jesse Windle.
## This file is part of BayesLogit, distributed under the GNU General Public
## License version 3 or later and without ANY warranty, implied or otherwise.
## library("BayesLogit")
library("coda")
source("Benchmark-Utilities.R")
source("DynNBPG.R")
source("DynNBFS-2009.R")
source("DynNBCUBS.R")
source("DynNBOmegaBlock.R")
################################################################################
## SETUP ##
################################################################################
run <- list("flu" =FALSE,
"synth1"=FALSE,
"synth2"=FALSE,
"synth3"=FALSE,
"allsynth"=FALSE,
"allview"=FALSE)
write.dir = "Bench-Dyn-06" # Used in file.path
write.it = TRUE
plot.it = FALSE
read.it = FALSE
methods = c("PG", "FS", "CUBS", "OmegaBlock")
## The methods to run from the sequence above.
run.idc = 1:3
samp = 10000
burn = 2000
verbose = 1000
ntrials = 10 ## Ambiguous languge. Repetitions of MCMC.
options = list("just.max"=FALSE, "starts"=1);
################################################################################
## Dyn NB Benchmark ##
################################################################################
## Benchmark one MCMC sampler for a dynamic negative-binomial regression.
##
## Runs 'ntrials' independent chains of the requested sampler and records, per
## trial, summary statistics of the monitored variables, the acceptance rate
## and the effective sampling time.  The samplers (dyn.NB.PG, dyn.NB.FS,
## dyn.NB.CUBS, dyn.nb.om) and the summary helpers (sum.stat, sum.stat.dyn)
## are defined elsewhere in this project.
##
## y          : vector of counts (response).
## X.dyn      : design matrix for the dynamic (time-varying) coefficients.
## X.stc      : optional design matrix for static coefficients.
## samp, burn : retained / burn-in MCMC iterations.
## ntrials    : number of independent chains to benchmark.
## method     : sampler to use: "PG", "FS", "CUBS" or "OmegaBlock".
## var.names  : names of sampler output components to summarize (e.g. "beta").
## m.0, C.0   : prior mean and covariance of the regression coefficients; for
##              "OmegaBlock" with static covariates these are split below into
##              a static block (first ncol(X.stc) entries) and a dynamic block.
## mu.m0, mu.P0, phi.m0, phi.P0, W.a0, W.b0 : priors on the AR(1) state
##              evolution parameters (level, autocorrelation, innovation var).
## d.true, beta.true, iota.true, mu.true, phi.true, W.true : when non-NULL the
##              corresponding quantity is held fixed rather than sampled --
##              presumably; confirm against the individual sampler functions.
## options    : extra options; only used by "OmegaBlock" ($starts, $just.max).
##
## Returns list(gb, sstat, arate, ess.time) where 'gb' is the raw output of
## the last chain; returns NA for an unknown method or after more than 10
## sampler errors.
benchmark.dyn.NB <- function(y, X.dyn, X.stc=NULL,
samp=1000, burn=100, ntrials=1, verbose=100,
method="PG", var.names=c("beta"), dset.name="NULL",
m.0=NULL, C.0=NULL,
mu.m0=NULL, mu.P0=NULL,
phi.m0=NULL, phi.P0=NULL,
W.a0=NULL, W.b0=NULL,
d.true=NULL,
beta.true=NULL, iota.true=NULL,
mu.true=NULL, phi.true=NULL, W.true=NULL, options=NULL)
{
## n is total number of trials per observation - not vectorized, i.e. the same for each i.
cat("Will benchmark", method[1], "using", dset.name, "dataset for variable(s)", var.names, "\n");
## One list of per-trial summaries for each monitored variable.
sstat = list(); for (nm in var.names) sstat[[nm]] = list();
arate = rep(0, ntrials);
ess.time = rep(0, ntrials);
gb = list();
i = 0
num.errors = 0
## Failed trials are re-run: 'i' is rolled back on error (see below), so the
## loop runs until ntrials successful chains have been collected.
while (i < ntrials) {
i = i + 1
if (method=="PG") {
## Polya-Gamma data-augmentation sampler.
gb <- dyn.NB.PG(y=y, X.dyn=X.dyn, X.stc=X.stc,
samp=samp, burn=burn, verbose=verbose,
m.0=m.0, C.0=C.0,
mu.m0=mu.m0, mu.P0=mu.P0,
phi.m0=phi.m0, phi.P0=phi.P0,
W.a0=W.a0, W.b0=W.b0,
d.true=d.true, w.true=NULL,
beta.true=beta.true, iota.true=iota.true,
mu.true=mu.true, phi.true=phi.true, W.true=W.true);
## Drop the initial (time-0) state from the beta trajectory array.
gb$beta = gb$beta[,,-1,drop=FALSE]
## Gibbs-style sampler: every draw is accepted.
gb$a.rate = 1
} else if (method=="FS") {
## Mixture-approximation (Fruehwirth-Schnatter style) sampler.
gb <- dyn.NB.FS(y=y, X.dyn=X.dyn, X.stc=X.stc,
samp=samp, burn=burn, verbose=verbose,
m.0=m.0, C.0=C.0,
mu.m0=mu.m0, mu.P0=mu.P0,
phi.m0=phi.m0, phi.P0=phi.P0,
W.a0=W.a0, W.b0=W.b0,
d.true=d.true, lambda.true=NULL, r.true=NULL,
beta.true=beta.true, iota.true=iota.true,
mu.true=mu.true, phi.true=phi.true, W.true=W.true)
gb$beta = gb$beta[,,-1,drop=FALSE]
gb$a.rate=1
} else if (method=="CUBS") {
gb <- dyn.NB.CUBS(y=y, X.dyn=X.dyn, m0=m.0, C0=C.0,
samp=samp, burn=burn, verbose=verbose,
mu.m0=mu.m0, mu.P0=mu.P0,
phi.m0=phi.m0, phi.P0=phi.P0,
W.a0=W.a0, W.b0=W.b0, X.stc=X.stc,
mu.true = mu.true, phi.true=phi.true, W.true=W.true, d.true=d.true)
gb$beta = gb$beta[,,-1,drop=FALSE]
## CUBS has a Metropolis step: take the acceptance rate at the final draw.
gb$a.rate = gb$ac.rate[samp]
} else if (method=="OmegaBlock") {
## Split the coefficient prior into static (alpha) and dynamic (beta)
## parts when static covariates are present; the static block occupies the
## first ncol(X.stc) entries of m.0 / C.0.
m0.stc = NULL
C0.stc = NULL
m0.dyn = m.0
C0.dyn = C.0
if (!is.null(X.stc)) {
P.a = ncol(X.stc)
P.b = ncol(X.dyn)
idc.a = 1:P.a
idc.b = (P.a+1):(P.a+P.b)
m0.stc = m.0[idc.a]
C0.stc = C.0[idc.a, idc.a]
m0.dyn = m.0[idc.b]
C0.dyn = C.0[idc.b,idc.b]
}
gb <- dyn.nb.om(y=y, X.dyn=X.dyn, m0=m0.dyn, C0=C0.dyn,
samp=samp, burn=burn, verbose=verbose, starts=options$starts,
mu.m0=mu.m0, mu.P0=mu.P0,
phi.m0=phi.m0, phi.P0=phi.P0,
W.a0=W.a0, W.b0=W.b0,
X.stc=X.stc, m0.stc=m0.stc, C0.stc=C0.stc,
mu.true = mu.true, phi.true=phi.true, W.true=W.true,
alpha.true=NULL, beta.true = NULL, d.true=d.true,
just.max=options$just.max)
gb$options = options
} else {
print("Unknown method.")
return(NA);
}
if (gb$error) {
## Sampler reported a failure: log it and retry this trial (the run is
## abandoned once more than 10 errors accumulate, see below).
num.errors = num.errors + 1
i = i-1
cat("Error. Dump:\n", gb$dump, "\n");
}
else {
for (nm in var.names) {
## 3-d arrays hold dynamic trajectories; 2-d matrices hold static draws.
if (length(dim(gb[[nm]])) > 2) sstat[[nm]][[i]] = sum.stat.dyn(gb[[nm]], gb$ess.time[3], thin=1);
if (length(dim(gb[[nm]])) ==2) sstat[[nm]][[i]] = sum.stat(gb[[nm]], gb$ess.time[3], thin=1);
}
arate[i] = gb$a.rate[1]
ess.time[i] = gb$ess.time[3]
}
if (num.errors > 10) return(NA)
}
## Collapse the per-trial summary lists into arrays, one per variable.
for (nm in var.names)
sstat[[nm]] = simplify2array(sstat[[nm]]);
out <- list("gb"=gb, "sstat"=sstat, "arate"=arate, "ess.time"=ess.time);
out
} ## benchmark.dyn.NB
################################################################################
## BENCHMARK DATA SETS ##
################################################################################
##------------------------------------------------------------------------------
## FLU DATA ##
##------------------------------------------------------------------------------
## FLU benchmark: dynamic intercept plus static PCA scores of search-term
## counts.  Relies on workspace objects defined elsewhere in the driver
## script: run, methods, samp, burn, verbose and setup.table.dyn.
if (run$flu) {
flu = read.csv("../../fludata/flu5years.csv", header=TRUE)
## Can use any of these
y1 = flu$"A.H3."
y2 = flu$"A.H1."
y3 = flu$B
y4 = flu$Total.number.positive
## plot(y1)
## Columns 10:109 hold the raw search-term covariates.
rawX = flu[,10:109]
N = nrow(rawX)
## NOTE(review): assigning to T masks the base shortcut for TRUE for the rest
## of the session; harmless here but a known R footgun.
T = length(y1)
## Naive PCA
## Create a reduced-dimension representation of the search terms
NPC = 4 ## Can play with this
mypc = princomp(rawX)
X = mypc$scores[,1:NPC]
## plot(X[,2])
## Same reduction via SVD of the column-centered matrix, for comparison.
A = rawX - rep(1, N) %*% t(colMeans(rawX));
sv = svd(A)
Z = sv$u %*% diag(sv$d);
cor(mypc$scores, Z)[1:4,1:4];
## you won't lose rank unless a column vector = alpha 1.
## So if we want to do a dynamic regression with intercept then we should use
## X.dyn = as.matrix(Z[,1:NPC])
## X.stc = matrix(1, nrow=T, ncol=1)
## X.stc = NULL
## Evolving intercept
X.dyn = matrix(1, nrow=T, ncol=1)
X.stc = Z[,1:NPC];
N.b = ncol(X.dyn)
if (!is.null(X.stc)) N.a = ncol(X.stc) else N.a = 0
N = N.b + N.a
## Prior
mu.m0 = rep(0.0, N.b)
mu.P0 = rep(0.1, N.b)
phi.m0 = rep(0.95, N.b)
phi.P0 = rep(100, N.b)
m0 = rep(0.0, N);
C0 = diag(3.0, N);
## Prior on the state innovation variance W (shape W.a0, rate W.b0).
W.gs = 0.01
W.a0 = rep(100, N.b);
W.b0 = W.a0 * W.gs;
bench.flu = list();
## source("Benchmark-DynNB.R")
## Benchmark the first three entries of 'methods', one trial each.
for (i in 1:3) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.flu[[nm]] <- benchmark.dyn.NB(y=y1, X.dyn=X.dyn, X.stc=X.stc,
samp=samp, burn=burn, ntrials=1, verbose=verbose,
method=nm, var.names="beta", dset.name="Flu",
m.0=m0, C.0=C0,
mu.m0=mu.m0, mu.P0=mu.P0,
phi.m0=phi.m0, phi.P0=phi.P0,
W.a0=W.a0, W.b0=W.b0, d.true=NULL);
## mu.true=rep(0.0, N.b), phi.true=rep(1, N.b)
}
flu.table = setup.table.dyn(bench.flu, "beta")
}
##------------------------------------------------------------------------------
## SYNTHETIC 1 ##
##------------------------------------------------------------------------------
## SYNTH1 benchmark: first synthetic data set; the .RData file provides
## 'dyn.nb.1' with components $y and $X.  Uses workspace globals run.idc,
## methods, samp, burn, ntrials, verbose, plot.it, write.it, write.dir.
if (run$synth1) {
load("Benchmark-DataSets/DynNB-synth-1.RData")
y = dyn.nb.1$y;
X.dyn = dyn.nb.1$X;
P = 1
N.y = length(y)
## Block starting points used by the "OmegaBlock" sampler.
options$starts=seq(1, N.y, 2)
## Prior
b.m0 = 3.0;
b.C0 = 3.0;
W = 0.1;
W.a0 = 300;
W.b0 = W.a0 * W;
bench.synth1 = list();
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.synth1[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=NULL,
samp=samp, burn=burn, ntrials=ntrials, verbose=verbose,
method=nm, var.names="beta", dset.name="Synth1",
m.0=b.m0, C.0=b.C0,
W.a0=W.a0, W.b0=W.b0,
mu.true=0.0, phi.true=1.0, d.true=4, options=options);
}
synth1.table = setup.table.dyn(bench.synth1, "beta")
## NOTE(review): 'pg' and 'fs' are not defined in this block; this plotting
## call looks stale -- confirm before enabling plot.it.
if (plot.it) { plot.bench(pg, fs); plot.check.NB(y, X.dyn, bmark1=pg, bmark2=fs); }
if (write.it) save(bench.synth1, synth1.table, file=file.path(write.dir, "bmark-synth1.RData"))
}
##------------------------------------------------------------------------------
## SYNTH 2 ##
##------------------------------------------------------------------------------
## SYNTH2 benchmark: same structure as SYNTH1 but the NB shape d is estimated
## (no d.true passed) and only one trial per method is run.
if (run$synth2) {
load("Benchmark-DataSets/DynNB-synth-2.RData")
y = dyn.nb.2$y;
X.dyn = dyn.nb.2$X;
## Prior
b.m0 = 3.0;
b.C0 = 3.0;
W = 0.1;
W.a0 = 300;
W.b0 = W.a0 * W;
bench.synth2 = list();
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.synth2[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=NULL,
samp=samp, burn=burn, ntrials=1, verbose=verbose,
method=nm, var.names="beta", dset.name="Synth2",
m.0=b.m0, C.0=b.C0,
W.a0=W.a0, W.b0=W.b0,
mu.true=0.0, phi.true=1.0, options=options);
}
synth2.table = setup.table.dyn(bench.synth2, "beta")
## NOTE(review): 'pg' and 'fs' are not defined in this block; this plotting
## call looks stale -- confirm before enabling plot.it.
if (plot.it) { plot.bench(pg, fs); plot.check.NB(y, X.dyn, bmark1=pg, bmark2=fs); }
if (write.it) save(bench.synth2, synth2.table, file=file.path(write.dir, "bmark-synth2.RData"))
}
##------------------------------------------------------------------------------
## SYNTH 3 ##
##------------------------------------------------------------------------------
## SYNTH3 benchmark.  The .RData file is expected to provide 'y', 'X.dyn' and
## 'd' (the true NB shape parameter, passed as d.true below).  Uses workspace
## globals run.idc, methods, samp, burn, ntrials, verbose, write.it, write.dir.
if (run$synth3) {
  load("Benchmark-DataSets/nb.synth3.RData")
  ## (The original re-assigned y = y and X.dyn = X.dyn here; both were no-ops
  ## because load() already places the objects in the workspace.)
  Ny = length(y)
  ## Block starting points used by the "OmegaBlock" sampler.
  options$starts = c(1, Ny, 50)
  ## Prior
  b.m0 = 0.0;
  b.C0 = 3.0;
  W = 0.1;
  W.a0 = 10;
  W.b0 = W.a0 * W;
  mu.m0 = 0.0
  mu.P0 = 1.0
  phi.m0 = 0.95
  phi.P0 = 100
  bench.synth3 = list();
  ## source("Benchmark-DynNB.R")
  for (i in run.idc) {
    nm = methods[i];
    bench.synth3[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=NULL,
                                           samp=samp, burn=burn, ntrials=ntrials, verbose=verbose,
                                           method=nm, var.names="beta", dset.name="Synth3",
                                           m.0=b.m0, C.0=b.C0,
                                           W.a0=W.a0, W.b0=W.b0,
                                           mu.true=0.0, phi.true=1.0, d.true = d, options=options);
  }
  synth3.table = setup.table.dyn(bench.synth3, "beta")
  if (write.it) save(bench.synth3, synth3.table, file=file.path(write.dir, "bmark-synth3.RData"))
}
##------------------------------------------------------------------------------
## GENERIC SYNTH ##
##------------------------------------------------------------------------------
## GENERIC SYNTH: sweep over estimation mode (AR parameters fixed vs sampled),
## NB mean and covariate correlation level, benchmarking each combination.
## Relies on workspace objects: run, run.idc, methods, samp, burn, ntrials,
## verbose, read.it, write.it, write.dir, options, setup.table.dyn, and the
## per-data-set .RData files (which supply y, X, d.true, phi.true, W.true).
if (run$allsynth)
{
## source("Benchmark-DynNB.R")
sstats = list()
tables = list()
ids = list()
iter = 0
## Defaults; the loops below overwrite nb.mean / corr.type / est.ar.
P = 2
nb.mean = 100
corr.type = "low"
## est.ar = "with.ar"
est.ar = "wout.ar"
for (est.ar in c("wout.ar", "with.ar")) {
## for (P in c(2,4)) {
for (nb.mean in c(10,100)) {
for (corr.type in c("low", "high")) {
iter = iter + 1
cat("AR:", est.ar, "\n");
dset.name = paste(corr.type, "-", P, "-mu-", nb.mean, sep="");
source.file = paste("DynNB-synth-", dset.name, ".RData", sep="")
## Reloading each iteration restores phi.true / W.true after the "with.ar"
## branch below NULLs them.
load(file.path("Benchmark-DataSets", source.file))
filename = paste("bench-dynnb-", dset.name, "-", est.ar, ".RData", sep="")
T = length(y)
X.dyn = X
X.stc = matrix(1, nrow=T, ncol=1)
P = ncol(X.dyn)
options$starts = seq(1,T,50)
## Prior
m0 = rep(0, P+1)
C0 = diag(100, P+1)
phi.m0 = rep(0.95, P);
phi.V0 = rep(0.1, P);
W.guess = 0.1
W.a0 = rep(300, P)
W.b0 = W.a0 * W.guess
mu.true = rep(0.0, P)
if (est.ar=="with.ar") {
## Sample phi and W instead of conditioning on their true values.
phi.true = NULL
W.true = NULL
}
bench.synth = list();
if (read.it) load(file.path(write.dir, filename))
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
bench.synth[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=X.stc,
samp=samp, burn=burn, ntrials=ntrials, verbose=verbose,
method=nm, var.names=c("beta", "alpha", "phi", "W"),
dset.name=dset.name,
m.0=m0, C.0=C0,
phi.m0=phi.m0, phi.P0=1/phi.V0,
W.a0=W.a0, W.b0=W.b0,
mu.true=mu.true, phi.true=phi.true, W.true=W.true, d.true=d.true, options=options);
}
synth.table = setup.table.dyn(bench.synth, "beta")
## if (plot.it) { plot.bench(pg, fs); plot.check.logit(y, X, n=n, bmark1=pg, bmark2=fs); }
if (write.it) save(bench.synth, synth.table, dset.name, file=file.path(write.dir, filename))
sstats[[iter]] = synth.table$ave.sstat
tables[[iter]] = synth.table$table
ids[[iter]] = filename
}}}#}
}
## Write tables.
## Disabled utility: dump each benchmark table next to its .RData file,
## swapping the "RData" suffix for "table".  Flip FALSE to TRUE to run after
## the allsynth sweep has populated 'ids' and 'tables'.
if (FALSE) {
for (i in 1:length(ids)) {
tempname = sub("RData", "table", ids[[i]])
write.table(tables[[i]], tempname)
}
}
## ALLVIEW: interactively inspect the saved allsynth benchmarks -- write each
## result table to disk and plot true vs estimated beta paths and fitted
## means.  Assumes the loaded data files provide beta, X, y and log.mean, and
## that bench.synth / synth.table were saved by the allsynth section above.
if (run$allview) {
P = 2
nb.mean = 10
corr.type = "high"
## est.ar = "with.ar"
est.ar = "wout.ar"
for (est.ar in c("wout.ar", "with.ar")) {
## for (P in c(2,4)) {
for (nb.mean in c(10,100)) {
for (corr.type in c("low", "high")) {
cat("AR:", est.ar, "P:", P, "nb.mean:", nb.mean, "corr:", corr.type, "\n");
dset.name = paste(corr.type, "-", P, "-mu-", nb.mean, sep="");
source.file = paste("DynNB-synth-", dset.name, ".RData", sep="")
bench.base = paste("bench-dynnb-", dset.name, "-", est.ar, sep="");
bench.data = paste(bench.base, ".RData", sep="")
table.file = paste("table.", bench.base, sep="");
load(file.path("Benchmark-DataSets", source.file))
load(file.path(write.dir, bench.data))
## cat("phi.true", phi.true, "\n");
## cat("W.true:", W.true, "\n");
the.table = synth.table$table
## NOTE(review): hard-coded cell [3,2] assumes a fixed table layout with CUBS
## in row 3 -- confirm against setup.table.dyn.
the.table[3,2] = mean(bench.synth$CUBS$arate) ## CUBS
write.table(the.table, file=table.file, row.names=TRUE, col.names=TRUE);
## One panel per dynamic coefficient, plus one for the fitted mean below.
par(mfrow=c(P+1,1))
for (i in 1:P) {
plot(beta[i,], col=1, type="l")
## plot(synth.table$ave.sstat[i,,1,1], type="l", col=2)
lines(synth.table$ave.sstat[i,,1,1], col=2, lty=2)
lines(synth.table$ave.sstat[i,,1,2], col=3, lty=3)
lines(synth.table$ave.sstat[i,,1,3], col=4, lty=4) ## CUBS
}
## Posterior-mean linear predictors: static intercept + dynamic part.
alpha.pg = mean(bench.synth$PG$gb$alpha);
alpha.fs = mean(bench.synth$FS$gb$alpha);
alpha.cb = mean(bench.synth$CUBS$gb$alpha); ## CUBS
lmean.pg = apply((synth.table$ave.sstat[,,1,1]) * t(X), 2, sum) + alpha.pg
lmean.fs = apply((synth.table$ave.sstat[,,1,2]) * t(X), 2, sum) + alpha.fs
lmean.cb = apply((synth.table$ave.sstat[,,1,3]) * t(X), 2, sum) + alpha.cb ## CUBS
plot(y, cex=0.5)
lines(exp(log.mean))
lines(exp(lmean.pg), col=2, lty=2)
lines(exp(lmean.fs), col=3, lty=3)
lines(exp(lmean.cb), col=4, lty=4) ## CUBS
readline("<ENTER>")
}}}
}
################################################################################
#-------------------------------------------------------------------------------
## GENERATE AND TEST ##
#-------------------------------------------------------------------------------
################################################################################
## GENERATE AND TEST (disabled): simulate a dynamic NB data set -- AR(1)
## coefficient paths over cosine covariates -- then benchmark the samplers on
## it.  Flip FALSE to TRUE to run interactively.
if (FALSE)
{
T = 500
P = 2
corr.type = "low"
nb.mean = 24
## for (P in c(2,4)) {
## for (corr.type in c("low", "high")) {
## for (nb.mean in c(10, 100)) {
## 'c' splits marginal variance between coefficient paths and covariates.
## NOTE(review): this assignment also masks base::c for the rest of the block.
c = 0.5
d.true = 4
marg.V = 5 / sqrt(P) * c
phi.true = rep(0.95, P)
## Innovation variance chosen so the AR(1) paths have marginal variance marg.V.
W.true = marg.V * (1 - phi.true^2)
beta = matrix(0, nrow=P, ncol=T+1)
beta[,1] = 0
for (i in 2:(T+1))
beta[,i] = phi.true * (beta[,i-1]) + rnorm(P, 0, sqrt(W.true))
## Cosine covariates; closer frequencies give more correlated columns.
xgrid = seq(-1, 1, length.out=T)
tX = matrix(0, nrow=P, ncol=T);
if (corr.type=="low") freq = c(1, 2, 3, 4)
if (corr.type=="high") freq = c(1, 1.1, 1.2, 1.3)
for (i in 1:P)
tX[i,] = cos(freq[i] * pi * xgrid);
tX = tX / sqrt(P) * (1-c)
X = t(tX)
log.mean = log(nb.mean) + colSums(beta[,-1] * tX)
psi = log.mean - log(d.true)
p.success = 1 / (1 + exp(-psi))
y = rnbinom(T, d.true, 1-p.success) ## p.success is prob of registering a single count.
## filename = paste("DynNB-synth-", corr.type, "-", P, "-mu-", nb.mean, ".RData", sep="")
## if (FALSE) {
## save(d.true, nb.mean, marg.V, phi.true, W.true, beta, tX, X, log.mean, psi, p.success, y, freq, xgrid,
## file=filename, compress=TRUE)
## }
#-----------------------------------------------------------------------------
## Benchmark setup (mirrors the allsynth section above).
X.dyn = X;
T = length(y)
X.dyn = X
X.stc = matrix(1, nrow=T, ncol=1)
P = ncol(X.dyn)
## Prior
m0 = rep(0, P+1)
C0 = diag(100, P+1)
phi.m0 = rep(0.95, P);
phi.V0 = rep(0.1, P);
W.guess = 0.1
W.a0 = rep(300, P)
W.b0 = W.a0 * W.guess
mu.true = rep(0.0, P)
## NOTE(review): 'est.ar' is not defined in this block; it must come from the
## workspace (e.g. the allsynth section) -- confirm before running.
if (est.ar=="with.ar") {
phi.true = NULL
W.true = NULL
}
out = list()
## source("Benchmark-DynNB.R")
for (i in run.idc) {
## source("Benchmark-DynNB.R")
nm = methods[i];
out[[nm]] <- benchmark.dyn.NB(y, X.dyn=X.dyn, X.stc=X.stc,
samp=samp, burn=burn, ntrials=ntrials, verbose=verbose,
method=nm, var.names="beta", dset.name="Synth1",
m.0=m0, C.0=C0,
W.a0=W.a0, W.b0=W.b0,
mu.true=mu.true, phi.true=phi.true, W.true=W.true, d.true=NULL);
}
synth.table = setup.table.dyn(out, "beta")
}
|
#GaussianGenerator, Adrian Stetco, University of Manchester, 2016
#Generates clouds of Gaussians
#Uses mvtnorm library, install it with "install.packages("mvtnorm")"
library(mvtnorm)
# Generate 'points' observations arranged in 'kcenters' Gaussian clouds.
#
#   kcenters : number of clusters.
#   dim      : dimensionality of each point.
#   variance : variance of the zero-mean Gaussian the cluster centers are
#              drawn from (the cloud spread itself is fixed at diag(10, dim),
#              as in the original implementation).
#   points   : total number of rows returned (centers + cloud points).
# Returns a points x dim matrix whose first kcenters rows are the centers.
gaussianGenerator <- function(kcenters, dim, variance, points){
  stopifnot(kcenters >= 1, dim >= 1, points >= kcenters)
  # Draw a large candidate pool and keep kcenters rows as cluster centers.
  # (Direct indexing replaces the original O(n) scan over every pool row.)
  pool <- rmvnorm(n = kcenters * 1000,
                  mean = rep(0, dim),
                  sigma = diag(variance, dim))
  sampl <- sample(nrow(pool), kcenters, replace = FALSE)
  centers <- pool[sort(sampl), , drop = FALSE]
  # Points per cluster.  The original silently left all-zero rows when
  # (points - kcenters) was not divisible by kcenters; distribute the
  # remainder over the first clusters so exactly 'points' rows are filled.
  n.cloud <- points - kcenters
  per.cluster <- rep(n.cloud %/% kcenters, kcenters)
  remainder <- n.cloud %% kcenters
  if (remainder > 0) {
    per.cluster[seq_len(remainder)] <- per.cluster[seq_len(remainder)] + 1
  }
  sampleData <- matrix(0, points, dim)
  sampleData[seq_len(kcenters), ] <- centers
  k <- kcenters + 1
  for (i in seq_len(kcenters)) {
    if (per.cluster[i] > 0) {
      # Cloud around center i; spread fixed at diag(10, dim).
      y <- rmvnorm(n = per.cluster[i], mean = centers[i, ], sigma = diag(10, dim))
      sampleData[k:(k + per.cluster[i] - 1), ] <- y
      k <- k + per.cluster[i]
    }
  }
  return(sampleData)
}
#for testing, uncomment the following lines
#g<-gaussianGenerator(5,2,4000, 1000)
#plot(g)
| /gaussianGenerator.r | no_license | adrianstetco/fcmppi | R | false | false | 1,010 | r | #GaussianGenerator, Adrian Stetco, University of Manchester, 2016
#Generates clouds of Gaussians
#Uses mvtnorm library, install it with "install.packages("mvtnorm")"
library(mvtnorm)
# Generate 'points' observations grouped into 'kcenters' Gaussian clouds.
# Cluster centers are kcenters rows sampled from a zero-mean Gaussian pool
# with covariance diag(variance, dim); each cloud is then drawn around its
# center with covariance diag(10, dim).  Returns a points x dim matrix whose
# first kcenters rows are the centers themselves.
gaussianGenerator <- function(kcenters, dim, variance, points){
  pool <- rmvnorm(n = kcenters * 1000,
                  mean = rep(0, dim),
                  sigma = diag(variance, dim))
  chosen <- sample(nrow(pool), kcenters, replace = FALSE)
  # Rows taken in ascending index order -- same result as the original scan.
  centers <- pool[sort(chosen), , drop = FALSE]
  perCluster <- (points - kcenters) / kcenters
  out <- matrix(0, points, dim)
  out[1:kcenters, ] <- centers
  nextRow <- kcenters + 1
  for (i in 1:kcenters) {
    # Cloud of perCluster draws around center i (spread fixed at 10).
    cloud <- rmvnorm(n = perCluster, mean = centers[i, ], sigma = diag(10, dim)) #runif(1, 5.0, 7.5)
    out[nextRow:(nextRow + perCluster - 1), ] <- cloud
    nextRow <- nextRow + perCluster
  }
  return(out)
}
#for testing, uncomment the following lines
#g<-gaussianGenerator(5,2,4000, 1000)
#plot(g)
|
# Purpose : resampling with GDAL;
# Maintainer : Tomislav Hengl (tom.hengl@wur.nl)
# Contributions : ;
# Dev Status : Pre-Alpha
# Note : ;
## Resolve the shell-quoted path to a GDAL command-line utility (e.g.
## "gdalwarp").  On Windows the default "Program Files" installation is tried
## first, then any installation auto-detected via gdalUtils; on other
## platforms the bare utility name is returned and assumed to be on the PATH.
##   path    : optional directory containing the GDAL executables.
##   utility : name of the executable, without extension.
.programPath <- function(path, utility){
  if(missing(path)){
    if(!file.exists("C:/PROGRA~1/GDAL/")&.Platform$OS.type == "windows"){
      if(requireNamespace("gdalUtils", quietly = TRUE)){
        path <- getOption("gdalUtils_gdalPath")[[1]]$path
        if(is.null(path)){
          ## force gdal installation:
          gdalUtils::gdal_setInstallation()
          message("Forcing installation of GDAL utilities... this might take time.")
          path <- getOption("gdalUtils_gdalPath")[[1]]$path
        }
      }
    }
    if(file.exists(paste0("C:/PROGRA~1/GDAL/", utility, ".exe"))&.Platform$OS.type == "windows"){
      ## BUG FIX: return immediately.  The original fell through to the block
      ## below, which rebuilt 'program' from the still-missing 'path' argument
      ## and so errored on a standard Windows GDAL installation.
      return(shQuote(shortPathName(normalizePath(file.path("C:/PROGRA~1/GDAL/", paste0(utility, ".exe"))))))
    }
  }
  if(.Platform$OS.type == "windows") {
    program = shQuote(shortPathName(normalizePath(file.path(path, paste(utility, ".exe", sep="")))))
  } else {
    ## Non-Windows: rely on the utility being on the PATH.
    program = utility
  }
  return(program)
}
## Reproject/resample every layer of a SpatialPixelsDataFrame (or a single
## RasterLayer) to a new CRS / cell size by shelling out to GDAL's gdalwarp.
## Depends on sp / rgdal / raster (writeGDAL, readGDAL, proj4string, ...) and
## on GSIF's normalizeFilename and GSIF.opts -- defined elsewhere in the
## package, not in this file.
##   obj               : SpatialPixelsDataFrame or RasterLayer to warp.
##   proj4s            : target CRS as a PROJ.4 string.
##   GridTopology      : optional target grid; overrides pixsize when given.
##   pixsize           : target cell size (used when GridTopology is NULL).
##   resampling_method : gdalwarp -r option for numeric layers (factor layers
##                       always use nearest neighbour).
##   NAflag            : nodata value for numeric layers.
##   tmp.file          : write intermediate GeoTIFFs to tempfile()?
##   program           : path to the gdalwarp executable; auto-detected when
##                       missing.
## Each layer is written to GeoTIFF, warped via system(), and read back in;
## returns the warped object.
.gdalwarp.SpatialPixels <- function(obj, proj4s = proj4string(obj), GridTopology = NULL, pixsize, resampling_method = "bilinear", NAflag = get("NAflag", envir = GSIF.opts), tmp.file = FALSE, show.output.on.console = FALSE, program){
if(missing(program)){
program <- .programPath(utility="gdalwarp")
}
## (!nchar(program)==0 parses as !(nchar(program)==0), i.e. "program found".
if(!nchar(program)==0){
if(requireNamespace("stringr", quietly = TRUE)){
message(paste('Resampling', length(names(obj)), 'layers to CRS(\"', stringr::str_trim(substr(proj4s, 1, 20)), ' ... ', '\") with grid cell size:', pixsize, '...'))
if(any(class(obj)=="SpatialPixelsDataFrame")){
size = ncol(obj)
} else { if(any(class(obj)=="RasterLayer")){
size = 1
}}
pb <- txtProgressBar(min=0, max=size, style=3)
## Warp one layer per iteration.
for(i in 1:size){
## name the temp file:
if(any(class(obj)=="RasterLayer")){
if(raster::inMemory(obj)==TRUE & tmp.file==TRUE){
tf <- tempfile()
extension <- ".tif"
} else {
## file extension:
if(requireNamespace("tools", quietly = TRUE)){
extension <- paste(".", tools::file_ext(raster::filename(obj)), sep="")
tf <- strsplit(raster::filename(obj), extension)[[1]]
}
}
}
if(any(class(obj)=="SpatialPixelsDataFrame")){
extension <- ".tif"
if(tmp.file==TRUE){
tf <- tempfile()
} else {
## Persistent file named after the object and the layer.
tf <- paste(normalizeFilename(deparse(substitute(obj, env = parent.frame()))), names(obj)[i], sep="_")
}
}
## check if it is factor or numeric:
if(any(class(obj)=="RasterLayer")){
isfactor <- is.factor(obj)
} else {
if(any(class(obj)=="SpatialPixelsDataFrame")){
isfactor <- is.factor(obj@data[,i])
}
}
## write to a file if necessary:
if(any(class(obj)=="SpatialPixelsDataFrame")){
if(isfactor){
x <- writeRaster(raster(obj[i]), filename=paste(tf, extension, sep=""), format="GTiff", overwrite=TRUE)
if(raster::maxValue(x)==0) { warning(paste("Layer", names(obj[i]), "is of type 'factor' but contains no levels")) }
} else {
writeGDAL(obj[i], paste(tf, extension, sep=""), "GTiff", mvFlag = NAflag)
}
} else {
if(any(class(obj)=="RasterLayer")){
if(raster::inMemory(obj)==TRUE){
x <- writeRaster(obj, filename=paste(tf, extension, sep=""), format="GTiff", overwrite=TRUE)
}
if(isfactor){
if(raster::maxValue(x)==0) { warning(paste("Layer", names(obj), "is of type 'factor' but contains no levels")) }
}
}}
## resample to WGS84 system:
## Factor layers are warped as Byte with nearest-neighbour so that class
## codes survive the resampling; numeric layers use resampling_method.
## NOTE(review): class(GridTopology)=="GridTopology" below would be safer
## as inherits(); kept as-is.
if(isfactor){
if(is.null(GridTopology)){
if(is.na(proj4string(obj))|proj4string(obj)=="NA"){
system(paste(program, ' ', tf, extension, ' ', tf, '_ll.tif -dstnodata \"255\" -r near -ot \"Byte\" -tr ', pixsize, ' ', pixsize, sep=""), show.output.on.console = show.output.on.console)
} else {
system(paste(program, ' ', tf, '.tif', ' -t_srs \"', proj4s, '\" ', tf, '_ll.tif -dstnodata \"255\" -r near -ot \"Byte\" -tr ', pixsize, ' ', pixsize, sep=""), show.output.on.console = show.output.on.console)
}
} else {
if(class(GridTopology)=="GridTopology"){
bbox = bbox(SpatialGrid(GridTopology))
if(is.na(proj4string(obj))|proj4string(obj)=="NA"){
system(paste(program, ' ', tf, extension, ' ', tf, '_ll.tif -dstnodata \"255\" -ot \"Byte\" -r near', ' -te ', bbox[1,1], ' ', bbox[2,1], ' ', bbox[1,2], ' ', bbox[2,2], ' -ts ', GridTopology@cells.dim[1], ' ', GridTopology@cells.dim[2], sep=""), show.output.on.console = show.output.on.console)
} else {
system(paste(program, ' ', tf, '.tif', ' -t_srs \"', proj4s, '\" ', tf, '_ll.tif -dstnodata \"255\" -ot \"Byte\" -r near', ' -te ', bbox[1,1], ' ', bbox[2,1], ' ', bbox[1,2], ' ', bbox[2,2], ' -ts ', GridTopology@cells.dim[1], ' ', GridTopology@cells.dim[2], sep=""), show.output.on.console = show.output.on.console)
}
} else {
stop("'GridTopology-class' object required")
}
}
}
else {
if(is.null(GridTopology)){
if(is.na(proj4string(obj))|proj4string(obj)=="NA"){
system(paste(program, ' ', tf, extension, ' ', tf, '_ll.tif -dstnodata \"', NAflag, '\" -r ', resampling_method, ' -tr ', pixsize, ' ', pixsize, sep=""), show.output.on.console = show.output.on.console)
} else {
system(paste(program, ' ', tf, '.tif', ' -t_srs \"', proj4s, '\" ', tf, '_ll.tif -dstnodata \"', NAflag, '\" -r ', resampling_method, ' -tr ', pixsize, ' ', pixsize, sep=""), show.output.on.console = show.output.on.console)
}
} else {
if(class(GridTopology)=="GridTopology"){
bbox = bbox(SpatialGrid(GridTopology))
if(is.na(proj4string(obj))|proj4string(obj)=="NA"){
system(paste(program, ' ', tf, extension, ' ', tf, '_ll.tif -dstnodata \"', NAflag, '\" -r ', resampling_method, ' -te ', bbox[1,1], ' ', bbox[2,1], ' ', bbox[1,2], ' ', bbox[2,2], ' -ts ', GridTopology@cells.dim[1], ' ', GridTopology@cells.dim[2], sep=""), show.output.on.console = show.output.on.console)
} else {
system(paste(program, ' ', tf, '.tif', ' -t_srs \"', proj4s, '\" ', tf, '_ll.tif -dstnodata \"', NAflag, '\" -r ', resampling_method, ' -te ', bbox[1,1], ' ', bbox[2,1], ' ', bbox[1,2], ' ', bbox[2,2], ' -ts ', GridTopology@cells.dim[1], ' ', GridTopology@cells.dim[2], sep=""), show.output.on.console = show.output.on.console)
}
} else {
stop("'GridTopology-class' object required")
}
}
}
## read images back to R:
## First layer creates 'res'; later layers are appended as extra columns.
if(i==1){
if(any(class(obj)=="RasterLayer")){
if(raster::inMemory(obj)==TRUE){
res <- readGDAL(paste(tf, "_ll.tif", sep=""), silent = FALSE)
names(res) <- names(obj)[i]
} else {
res <- raster(paste(tf, "_ll.tif", sep=""))
}
} else {
if(any(class(obj)=="SpatialPixelsDataFrame")) {
res <- readGDAL(paste(tf, "_ll.tif", sep=""), silent = FALSE)
names(res) <- names(obj)[i]
}
}
} else{
if(any(class(obj)=="SpatialPixelsDataFrame")) {
res@data[,names(obj)[i]] <- readGDAL(paste(tf, "_ll.tif", sep=""), silent = FALSE)$band1
}
}
# reformat to the original factors:
if(isfactor & any(class(obj)=="SpatialPixelsDataFrame")){
res@data[,i] <- as.factor(res@data[,i])
levels(res@data[,i]) = levels(obj@data[,i])
}
## clean up:
## File-backed rasters are kept on disk (only the path is reported);
## intermediate files for in-memory objects are deleted.
if(any(class(obj)=="RasterLayer")){
if(raster::inMemory(obj)==FALSE){
message(paste("\n", paste(tf, "_ll.tif", sep="")))
}
} else {
unlink(paste(tf, "_ll.tif", sep=""))
unlink(paste(tf, extension, sep=""))
}
setTxtProgressBar(pb, i)
}
close(pb)
cat(i, "\r")
flush.console()
}} else {
stop("Could not locate GDAL. For more info see package 'gdalUtils'.")
}
return(res)
}
## Register as methods for the generic 'warp' (defined elsewhere in GSIF).
setMethod("warp", signature(obj = "SpatialPixelsDataFrame"), .gdalwarp.SpatialPixels)
setMethod("warp", signature(obj = "RasterLayer"), .gdalwarp.SpatialPixels)
## end of script;
| /R/warp.R | no_license | GreatEmerald/GSIF | R | false | false | 9,193 | r | # Purpose : resampling with GDAL;
# Maintainer : Tomislav Hengl (tom.hengl@wur.nl)
# Contributions : ;
# Dev Status : Pre-Alpha
# Note : ;
## Resolve the shell-quoted path to a GDAL command-line utility (e.g.
## "gdalwarp").  On Windows tries the default "Program Files" install and a
## gdalUtils-detected installation; elsewhere returns the bare utility name
## and assumes it is on the PATH.
##   path    : optional directory containing the GDAL executables.
##   utility : name of the executable, without extension.
## NOTE(review): when 'path' is missing and the default C:/PROGRA~1/GDAL
## install exists, 'program' is set in the branch below but the unconditional
## Windows branch afterwards rebuilds it from the still-missing 'path' -- this
## looks like it errors on a standard Windows install; confirm and consider an
## early return.
.programPath <- function(path, utility){
if(missing(path)){
if(!file.exists("C:/PROGRA~1/GDAL/")&.Platform$OS.type == "windows"){
if(requireNamespace("gdalUtils", quietly = TRUE)){
path <- getOption("gdalUtils_gdalPath")[[1]]$path
if(is.null(path)){
## force gdal installation:
gdalUtils::gdal_setInstallation()
message("Forcing installation of GDAL utilities... this might take time.")
path <- getOption("gdalUtils_gdalPath")[[1]]$path
}
}
}
if(file.exists(paste0("C:/PROGRA~1/GDAL/", utility, ".exe"))&.Platform$OS.type == "windows"){
program = shQuote(shortPathName(normalizePath(file.path("C:/PROGRA~1/GDAL/", paste0(utility, ".exe")))))
}
}
if(.Platform$OS.type == "windows") {
program = shQuote(shortPathName(normalizePath(file.path(path, paste(utility, ".exe", sep="")))))
} else {
## Non-Windows: rely on the utility being on the PATH.
program = utility
}
return(program)
}
## Reproject/resample every layer of a SpatialPixelsDataFrame (or a single
## RasterLayer) to a new CRS / cell size by shelling out to GDAL's gdalwarp.
## Depends on sp / rgdal / raster (writeGDAL, readGDAL, proj4string, ...) and
## on GSIF's normalizeFilename and GSIF.opts -- defined elsewhere in the
## package, not in this file.
##   obj               : SpatialPixelsDataFrame or RasterLayer to warp.
##   proj4s            : target CRS as a PROJ.4 string.
##   GridTopology      : optional target grid; overrides pixsize when given.
##   pixsize           : target cell size (used when GridTopology is NULL).
##   resampling_method : gdalwarp -r option for numeric layers (factor layers
##                       always use nearest neighbour).
##   NAflag            : nodata value for numeric layers.
##   tmp.file          : write intermediate GeoTIFFs to tempfile()?
##   program           : path to the gdalwarp executable; auto-detected when
##                       missing.
## Each layer is written to GeoTIFF, warped via system(), and read back in;
## returns the warped object.
.gdalwarp.SpatialPixels <- function(obj, proj4s = proj4string(obj), GridTopology = NULL, pixsize, resampling_method = "bilinear", NAflag = get("NAflag", envir = GSIF.opts), tmp.file = FALSE, show.output.on.console = FALSE, program){
if(missing(program)){
program <- .programPath(utility="gdalwarp")
}
## (!nchar(program)==0 parses as !(nchar(program)==0), i.e. "program found".
if(!nchar(program)==0){
if(requireNamespace("stringr", quietly = TRUE)){
message(paste('Resampling', length(names(obj)), 'layers to CRS(\"', stringr::str_trim(substr(proj4s, 1, 20)), ' ... ', '\") with grid cell size:', pixsize, '...'))
if(any(class(obj)=="SpatialPixelsDataFrame")){
size = ncol(obj)
} else { if(any(class(obj)=="RasterLayer")){
size = 1
}}
pb <- txtProgressBar(min=0, max=size, style=3)
## Warp one layer per iteration.
for(i in 1:size){
## name the temp file:
if(any(class(obj)=="RasterLayer")){
if(raster::inMemory(obj)==TRUE & tmp.file==TRUE){
tf <- tempfile()
extension <- ".tif"
} else {
## file extension:
if(requireNamespace("tools", quietly = TRUE)){
extension <- paste(".", tools::file_ext(raster::filename(obj)), sep="")
tf <- strsplit(raster::filename(obj), extension)[[1]]
}
}
}
if(any(class(obj)=="SpatialPixelsDataFrame")){
extension <- ".tif"
if(tmp.file==TRUE){
tf <- tempfile()
} else {
## Persistent file named after the object and the layer.
tf <- paste(normalizeFilename(deparse(substitute(obj, env = parent.frame()))), names(obj)[i], sep="_")
}
}
## check if it is factor or numeric:
if(any(class(obj)=="RasterLayer")){
isfactor <- is.factor(obj)
} else {
if(any(class(obj)=="SpatialPixelsDataFrame")){
isfactor <- is.factor(obj@data[,i])
}
}
## write to a file if necessary:
if(any(class(obj)=="SpatialPixelsDataFrame")){
if(isfactor){
x <- writeRaster(raster(obj[i]), filename=paste(tf, extension, sep=""), format="GTiff", overwrite=TRUE)
if(raster::maxValue(x)==0) { warning(paste("Layer", names(obj[i]), "is of type 'factor' but contains no levels")) }
} else {
writeGDAL(obj[i], paste(tf, extension, sep=""), "GTiff", mvFlag = NAflag)
}
} else {
if(any(class(obj)=="RasterLayer")){
if(raster::inMemory(obj)==TRUE){
x <- writeRaster(obj, filename=paste(tf, extension, sep=""), format="GTiff", overwrite=TRUE)
}
if(isfactor){
if(raster::maxValue(x)==0) { warning(paste("Layer", names(obj), "is of type 'factor' but contains no levels")) }
}
}}
## resample to WGS84 system:
## Factor layers are warped as Byte with nearest-neighbour so that class
## codes survive the resampling; numeric layers use resampling_method.
## NOTE(review): class(GridTopology)=="GridTopology" below would be safer
## as inherits(); kept as-is.
if(isfactor){
if(is.null(GridTopology)){
if(is.na(proj4string(obj))|proj4string(obj)=="NA"){
system(paste(program, ' ', tf, extension, ' ', tf, '_ll.tif -dstnodata \"255\" -r near -ot \"Byte\" -tr ', pixsize, ' ', pixsize, sep=""), show.output.on.console = show.output.on.console)
} else {
system(paste(program, ' ', tf, '.tif', ' -t_srs \"', proj4s, '\" ', tf, '_ll.tif -dstnodata \"255\" -r near -ot \"Byte\" -tr ', pixsize, ' ', pixsize, sep=""), show.output.on.console = show.output.on.console)
}
} else {
if(class(GridTopology)=="GridTopology"){
bbox = bbox(SpatialGrid(GridTopology))
if(is.na(proj4string(obj))|proj4string(obj)=="NA"){
system(paste(program, ' ', tf, extension, ' ', tf, '_ll.tif -dstnodata \"255\" -ot \"Byte\" -r near', ' -te ', bbox[1,1], ' ', bbox[2,1], ' ', bbox[1,2], ' ', bbox[2,2], ' -ts ', GridTopology@cells.dim[1], ' ', GridTopology@cells.dim[2], sep=""), show.output.on.console = show.output.on.console)
} else {
system(paste(program, ' ', tf, '.tif', ' -t_srs \"', proj4s, '\" ', tf, '_ll.tif -dstnodata \"255\" -ot \"Byte\" -r near', ' -te ', bbox[1,1], ' ', bbox[2,1], ' ', bbox[1,2], ' ', bbox[2,2], ' -ts ', GridTopology@cells.dim[1], ' ', GridTopology@cells.dim[2], sep=""), show.output.on.console = show.output.on.console)
}
} else {
stop("'GridTopology-class' object required")
}
}
}
else {
if(is.null(GridTopology)){
if(is.na(proj4string(obj))|proj4string(obj)=="NA"){
system(paste(program, ' ', tf, extension, ' ', tf, '_ll.tif -dstnodata \"', NAflag, '\" -r ', resampling_method, ' -tr ', pixsize, ' ', pixsize, sep=""), show.output.on.console = show.output.on.console)
} else {
system(paste(program, ' ', tf, '.tif', ' -t_srs \"', proj4s, '\" ', tf, '_ll.tif -dstnodata \"', NAflag, '\" -r ', resampling_method, ' -tr ', pixsize, ' ', pixsize, sep=""), show.output.on.console = show.output.on.console)
}
} else {
if(class(GridTopology)=="GridTopology"){
bbox = bbox(SpatialGrid(GridTopology))
if(is.na(proj4string(obj))|proj4string(obj)=="NA"){
system(paste(program, ' ', tf, extension, ' ', tf, '_ll.tif -dstnodata \"', NAflag, '\" -r ', resampling_method, ' -te ', bbox[1,1], ' ', bbox[2,1], ' ', bbox[1,2], ' ', bbox[2,2], ' -ts ', GridTopology@cells.dim[1], ' ', GridTopology@cells.dim[2], sep=""), show.output.on.console = show.output.on.console)
} else {
system(paste(program, ' ', tf, '.tif', ' -t_srs \"', proj4s, '\" ', tf, '_ll.tif -dstnodata \"', NAflag, '\" -r ', resampling_method, ' -te ', bbox[1,1], ' ', bbox[2,1], ' ', bbox[1,2], ' ', bbox[2,2], ' -ts ', GridTopology@cells.dim[1], ' ', GridTopology@cells.dim[2], sep=""), show.output.on.console = show.output.on.console)
}
} else {
stop("'GridTopology-class' object required")
}
}
}
## read images back to R:
## First layer creates 'res'; later layers are appended as extra columns.
if(i==1){
if(any(class(obj)=="RasterLayer")){
if(raster::inMemory(obj)==TRUE){
res <- readGDAL(paste(tf, "_ll.tif", sep=""), silent = FALSE)
names(res) <- names(obj)[i]
} else {
res <- raster(paste(tf, "_ll.tif", sep=""))
}
} else {
if(any(class(obj)=="SpatialPixelsDataFrame")) {
res <- readGDAL(paste(tf, "_ll.tif", sep=""), silent = FALSE)
names(res) <- names(obj)[i]
}
}
} else{
if(any(class(obj)=="SpatialPixelsDataFrame")) {
res@data[,names(obj)[i]] <- readGDAL(paste(tf, "_ll.tif", sep=""), silent = FALSE)$band1
}
}
# reformat to the original factors:
if(isfactor & any(class(obj)=="SpatialPixelsDataFrame")){
res@data[,i] <- as.factor(res@data[,i])
levels(res@data[,i]) = levels(obj@data[,i])
}
## clean up:
## File-backed rasters are kept on disk (only the path is reported);
## intermediate files for in-memory objects are deleted.
if(any(class(obj)=="RasterLayer")){
if(raster::inMemory(obj)==FALSE){
message(paste("\n", paste(tf, "_ll.tif", sep="")))
}
} else {
unlink(paste(tf, "_ll.tif", sep=""))
unlink(paste(tf, extension, sep=""))
}
setTxtProgressBar(pb, i)
}
close(pb)
cat(i, "\r")
flush.console()
}} else {
stop("Could not locate GDAL. For more info see package 'gdalUtils'.")
}
return(res)
}
## Register as methods for the generic 'warp' (defined elsewhere in GSIF).
setMethod("warp", signature(obj = "SpatialPixelsDataFrame"), .gdalwarp.SpatialPixels)
setMethod("warp", signature(obj = "RasterLayer"), .gdalwarp.SpatialPixels)
## end of script;
|
# Download OML DB snapshot
## Fetches a gzipped MySQL dump of the OpenML experiment database and pipes
## it into a local MySQL server.  Requires the 'zcat' and 'mysql' command-line
## tools on the PATH; assumes the 'root' MySQL user needs no password and that
## a database named 'oml' already exists -- TODO confirm for your setup.
dest.file = "omlsnapshot.sql.gz"
db.user = "root"
db.name = "oml"
download.file("https://www.openml.org/downloads/ExpDB_SNAPSHOT.sql.gz", destfile = dest.file, quiet = FALSE)
## NOTE(review): db.user/db.name are interpolated into a shell command; fine
## for these fixed literals, but unsafe if ever taken from untrusted input.
system(sprintf("zcat %s | mysql -u '%s' %s", normalizePath(dest.file), db.user, db.name), wait = TRUE)
| /jr_stuff/getOMLSnap.R | no_license | jakob-r/OMLbots | R | false | false | 306 | r | # Download OML DB snapshot
## Fetch a gzipped MySQL dump of the OpenML experiment database and pipe it
## into a local MySQL server.  Requires 'zcat' and 'mysql' on the PATH and
## assumes the 'root' user needs no password and the 'oml' database exists --
## TODO confirm for your setup.
dest.file = "omlsnapshot.sql.gz"
db.user = "root"
db.name = "oml"
download.file("https://www.openml.org/downloads/ExpDB_SNAPSHOT.sql.gz", destfile = dest.file, quiet = FALSE)
## NOTE(review): db.user/db.name are interpolated into a shell command; fine
## for these fixed literals, but unsafe if ever taken from untrusted input.
system(sprintf("zcat %s | mysql -u '%s' %s", normalizePath(dest.file), db.user, db.name), wait = TRUE)
|
#Chapter 9 Importing, saving and managing data (YaRrr! The Pirate's Guide to R, Nathaniel D. Phillips)
# Tutorial walk-through of getwd/setwd, save/load and read/write.table.
# NOTE(review): this script is deliberately destructive -- it calls
# rm(list=ls()) several times and writes files into the working directory.
#Get working directory on your system
getwd()
#Set working directory to your code portfolio folder
# NOTE(review): machine-specific absolute path; adjust before running elsewhere.
setwd("C:/Users/subha/Desktop/ANLY506/ANLY506")
#List of objects in current workspace that you have set
ls()
#Create some sample dataframes for reference
study1.df <- data.frame(id = 1:5,
sex = c("m", "m", "f", "f", "m"),
score = c(51, 20, 67, 52, 42))
# Mean score per sex level.
score.by.sex <- aggregate(score ~ sex,
FUN = mean,
data = study1.df)
# Two-sample t-test of score between the sexes.
study1.htest <- t.test(score ~ sex,
data = study1.df)
# The new objects now show up in the list when you use ls() function
ls()
#Save the objects in the working directory that you created
save(study1.df, score.by.sex, study1.htest,
file = "study1.RData")
#If you want to save all the objects in the workspace use the following command
save.image(file = "projectimage.RData")
#load objects in the datafile into workspace
load(file = "study1.RData")
#load objects in image file into the workspace
load(file = "projectimage.RData")
#Remove specific objects
rm(score.by.sex)
#Remove all objects
rm(list=ls())
#load objects in the datafile into workspace
load(file = "study1.RData")
#Write data to text file
write.table(x = study1.df,
file = "study1.txt", # Save the file as study1.txt
sep = "\t") # Make the columns tab-delimited
#Remove all objects
rm(list=ls())
#Read table from txt file
mydata <- read.table(file = 'study1.txt', # file location from working directory
sep = '\t', # file is tab--delimited
header = TRUE, # This piece of code states that there is a header to the dataset
stringsAsFactors = FALSE) # Very important to not convert strings to factors
mydata
str(mydata)
#Read directly from web
fromweb <- read.table(file = 'http://goo.gl/jTNf6P',
sep = '\t',
header = TRUE)
fromweb
# Practice
rm(list=ls())
#4
a <- data.frame("sex" = c("m", "f", "m"),
"age" = c(19, 43, 25),
"favorite.movie" = c("Moon", "The Goonies", "Spice World"))
b <- mean(a$age)
# NOTE(review): this assignment masks base::c for the rest of the session.
c <- table(a$sex)
ls()
#Read directly from web
club.df <- read.table(file = 'http://nathanieldphillips.com/wp-content/uploads/2015/12/club.txt',
sep = '\t',
header = TRUE)
#Write data to text file
write.table(x = club.df,
file = "club.txt", # Save the file as txt
sep = "\t") # Make the columns tab-delimited
save.image(file = "myobjects.RData")
rm(list=ls())
| /Exploratory Data Analysis - Code Portfolio/Week 2/Week 2.R | no_license | spemmaraju/MS-Analytics | R | false | false | 2,799 | r | #Chapter 9 Importing, saving and managing data (YaRrr! The Pirate's Guide to R, Nathaniel D. Phillips)
# Chapter 9 practice (YaRrr! The Pirate's Guide to R): importing, saving and
# managing data in R.
# NOTE(review): setwd() with an absolute path and rm(list=ls()) are fine in a
# course exercise, but are anti-patterns in reusable scripts.
#Get working directory on your system
getwd()
#Set working directory to your code portfolio folder
setwd("C:/Users/subha/Desktop/ANLY506/ANLY506")
#List of objects in current workspace that you have set
ls()
#Create some sample dataframes for reference
study1.df <- data.frame(id = 1:5,
sex = c("m", "m", "f", "f", "m"),
score = c(51, 20, 67, 52, 42))
# Mean score per sex, and a two-sample t-test of score by sex
score.by.sex <- aggregate(score ~ sex,
FUN = mean,
data = study1.df)
study1.htest <- t.test(score ~ sex,
data = study1.df)
# The new objects now show up in the list when you use ls() function
ls()
#Save the objects in the working directory that you created
save(study1.df, score.by.sex, study1.htest,
file = "study1.RData")
#If you want to save all the objects in the workspace use the following command
save.image(file = "projectimage.RData")
#load objects in the datafile into workspace
load(file = "study1.RData")
#load objects in image file into the workspace
load(file = "projectimage.RData")
#Remove specific objects
rm(score.by.sex)
#Remove all objects
rm(list=ls())
#load objects in the datafile into workspace
load(file = "study1.RData")
#Write data to text file
write.table(x = study1.df,
file = "study1.txt", # Save the file as study1.txt
sep = "\t") # Make the columns tab-delimited
#Remove all objects
rm(list=ls())
#Read table from txt file
mydata <- read.table(file = 'study1.txt', # file location from working directory
sep = '\t', # file is tab-delimited
header = TRUE, # This piece of code states that there is a header to the dataset
stringsAsFactors = FALSE) # Very important to not convert strings to factors
mydata
str(mydata)
#Read directly from web
fromweb <- read.table(file = 'http://goo.gl/jTNf6P',
sep = '\t',
header = TRUE)
fromweb
# Practice
rm(list=ls())
#4 Build a small data frame and compute summaries of it
a <- data.frame("sex" = c("m", "f", "m"),
"age" = c(19, 43, 25),
"favorite.movie" = c("Moon", "The Goonies", "Spice World"))
b <- mean(a$age)
# NOTE(review): this `c` shadows base::c() until rm(list=ls()) runs below
c <- table(a$sex)
ls()
#Read directly from web
club.df <- read.table(file = 'http://nathanieldphillips.com/wp-content/uploads/2015/12/club.txt',
sep = '\t',
header = TRUE)
#Write data to text file
write.table(x = club.df,
file = "club.txt", # Save the file as txt
sep = "\t") # Make the columns tab-delimited
save.image(file = "myobjects.RData")
rm(list=ls())
|
# AFL Brownlow-style vote prediction for the 2018 season: load player game
# records from SQLite, engineer features, scale them per game and per
# game/team, score with a pre-trained xgboost model, then allocate the 3-2-1
# votes per game from the predicted class probabilities.
# NOTE(review): setwd() with an absolute path makes this machine-specific.
setwd("C:/Users/Chris Zucchet/Documents/AFL-Prediction-task")
library(DMwR);library(tidyverse);library(DBI);library(xgboost);library(recipes);library(rsample);library(RSQLite);library(purrr);library(e1071);library(digest);library(mlr);library(parallelMap);options(dplyr.width = Inf)
# Min-max scale a vector to [0, 1] (yields NaN when the vector is constant).
range01 <- function(x){(x-min(x))/(max(x)-min(x))}
con = dbConnect(SQLite(), "PlayerRecords.sqlite")
# Columns to coerce to numeric, and the subset of them (plus derived scores
# computed below) that gets [0, 1]-scaled.
num_cols = c("KI","MK","HB","GL","BH","HO","TK","RB","IF","CL","CG","FF","FA","BR","CP","UP","CM","MI","one_pc","BO","GA","game_played","Year","Round","Diff","Age_Years","Games","PercentWon")
scale_cols = c("KI","MK","HB","GL","BH","HO","TK","RB","IF","CL","CG","FF","FA","CP","UP","CM","MI","one_pc","BO","GA","game_played","Age_Years","Games","PercentWon","dt_score","error_rate","fairness","impact_plays")
# NOTE(review): the second xgb.load() immediately overwrites the first, so
# only 'afl_model_pre2017' is ever used -- confirm that is intended.
afl_model = xgb.load('afl_model')
afl_model = xgb.load('afl_model_pre2017')
# 2018 player rows, de-duplicated per player/game; rename awkward SQL columns.
records = dbGetQuery(con, "SELECT * FROM Player_Detail WHERE year = 2018") %>%group_by(Player,KI,MK,CP,Team,ID) %>%
filter(row_number(HB) == 1) %>% ungroup() %>% rename(number = "#", one_pc = "1%", game_played = "%P")
# Replace "Â" cells (presumably an encoding artifact in the source data)
# with "0" so the numeric coercion below does not produce NAs.
records[records == "Â"] <- "0"
records2 = records
records_t2 = records2 %>% mutate_if(names(records) %in% num_cols, as.numeric)%>% na.omit() ;rm(records)
# Team -> home-state lookup tables; `state` is listed in sorted-team order.
teams = unique(dbGetQuery(con, "SELECT Team, GL FROM Player_Detail"));teams = sort(unique(teams[,"Team"]));state = c("SA","QLD","VIC","VIC","VIC","WA","VIC","QLD","NSW","VIC","VIC","VIC","SA","VIC","VIC","NSW","WA","VIC")
home_team = data.frame(Team = teams,home_state = state) %>% mutate_if(is.factor, as.character);away_team = data.frame(Opponent = teams,away_state = state) %>% mutate_if(is.factor, as.character)
# Feature engineering: win/margin indicators, fantasy-style scores, and
# per-game/team proportions of goals, clangers and marks inside 50.
records_t3 = records_t2 %>%
mutate(is_winner = ifelse(Diff >0,1,0),
is_1_6_margin = ifelse(abs(Diff) <= 6,1,0),is_7_30_margin = ifelse(abs(Diff) > 6 & abs(Diff) <= 30,1,0),is_30_margin = ifelse(abs(Diff) >= 31,1,0),
# NOTE(review): is_30_margin is defined twice in this mutate(); the
# abs(Diff) <= 30 definition below overwrites abs(Diff) >= 31 above.
is_6_margin = ifelse(abs(Diff) <= 6,1,0),is_30_margin = ifelse(abs(Diff) <= 30,1,0),is_50_margin = ifelse(abs(Diff) <= 50,1,0),is_100_margin = ifelse(abs(Diff) <= 100,1,0),
games_1_10 = ifelse(Games <= 10, 1,0), games_0_50 = ifelse(Games <= 50, 1,0), #games_51_more = ifelse(Games > 50, 1,0),
dt_score = (KI*2.5)+(HB*1.5)+(MK*3)+(GL*6)+(BH*2)+(TK*2)+(HO*1.5)+(FF*1)+(FA*-3),
error_rate = CG/(KI+HB),
fairness = FF-FA,
impact_plays = (IF*0.5)+(TK*0.25)+(MI*1)+(one_pc*3)+(BO*0.25)+(GA*1.5)+(RB*1)+(CG*-1)+(CL*1)+(CM*1),
# CP_score =(CP*1.5)+(UP*0.75),
gl_dt =(GL*6)/((KI*2.5)+(HB*1.5)+(MK*3)+(GL*6)+(BH*2)+(TK*4)+(HO*1)+(FF*1)+(FA*-3))) %>%
group_by(ID,Team) %>%
mutate(GL_Sum = sum(GL),CG_Sum = sum(CG),MI_Sum = sum(MI)) %>%
ungroup(ID) %>%
mutate(GL_prop = GL/GL_Sum,CG_prop = CG/CG_Sum,MI_prop = MI/MI_Sum) %>% select(-GL_Sum,-CG_Sum,-MI_Sum)
records_t4 = records_t3 %>% left_join(home_team) %>% left_join(away_team);rm(records_t3);rm(home_team);rm(away_team)
records_t5 = records_t4 %>% mutate(interstate = ifelse(home_state != away_state, 1,0))
# Scale the feature columns to [0, 1] within each game (ID)...
rec_split = records_t5 %>% split(.$ID)
for(i in 1:length(rec_split)){
for(j in scale_cols){
rec_split[[i]][,j] = range01(rec_split[[i]][,j])
}
}
rec_all_temp = bind_rows(rec_split)
rec_all_temp$ID_Team = paste0(rec_all_temp$ID,"_",rec_all_temp$Team)
# ...and again within each game/team, appended as "Team_"-prefixed copies.
rec_split2 = rec_all_temp %>% split(.$ID_Team)
for(i in 1:length(rec_split2)){
for(j in scale_cols){
rec_split2[[i]][,j] = range01(rec_split2[[i]][,j])
}
}
rec_all2 = bind_rows(rec_split2)
rec_all2 = rec_all2 %>% select(scale_cols)
names(rec_all2) = paste0("Team","_", names(rec_all2))
rec_allt2 = cbind(rec_all_temp, rec_all2)
rec_all = rec_allt2
# One-hot encode categoricals with recipes; BR (the vote count) is the outcome.
rec_train = recipe(BR ~., rec_all) %>%
step_dummy(Team,Home_Away,Opponent, home_state, away_state) %>%
step_other(Venue, threshold = .2) %>%
step_dummy(Venue) %>%
prep() %>%
bake(newdata = rec_all)
# Score each game. The model appears to emit 4 values per player row
# (probabilities for 0/1/2/3 votes) in one flat vector, so the seq() calls
# de-interleave it by taking every 4th element -- TODO confirm against the
# model's training objective.
rec_lines_split = rec_train %>% split(.$ID)
for(i in 1:length(rec_lines_split)){
rec_temp = rec_lines_split[[i]] %>% select(-number,-Player,-DI,-ID,-BR,-ID_Team)%>% data.frame()
train_matrix <- xgb.DMatrix(data = as.matrix(rec_temp))
afl_pred = round(predict(afl_model,train_matrix),6)
vote_0_seq = seq(1,nrow(rec_lines_split[[i]])*4,4);vote_1_seq = seq(2,nrow(rec_lines_split[[i]])*4,4);vote_2_seq = seq(3,nrow(rec_lines_split[[i]])*4,4);vote_3_seq = seq(4,nrow(rec_lines_split[[i]])*4,4)
vote_0 = afl_pred[vote_0_seq];vote_1 = afl_pred[vote_1_seq];vote_2 = afl_pred[vote_2_seq];vote_3 = afl_pred[vote_3_seq]
rec_lines_split[[i]]$vote_3 = vote_3;rec_lines_split[[i]]$vote_2 = vote_2;rec_lines_split[[i]]$vote_1 = vote_1;rec_lines_split[[i]]$vote_0 = vote_0
}
Player_Team = records_t5 %>% select(Player,ID,Team) %>% unique()
afl_predict = bind_rows(rec_lines_split) %>% mutate(Player = as.character(Player),number = as.character(number),ID = as.character(ID)) %>% data.frame() %>% left_join(Player_Team)
player_votes = afl_predict %>% select(ID, Player,Team, BR,vote_3,vote_2,vote_1,vote_0)
player_votes$vote_3 = round(afl_predict$vote_3,6);player_votes$vote_2 = round(afl_predict$vote_2,6)
player_votes$vote_1 = round(afl_predict$vote_1,6);player_votes$vote_0 = round(afl_predict$vote_0,6)
# Allocate the 3-2-1 votes per game: best vote_3 probability gets 3 votes;
# of the remaining players, best (vote_2 + vote_3) gets 2; then 1 vote.
player_votes_split = player_votes %>% split(.$ID)
for(i in 1:length(player_votes_split)){
player_votes_split[[i]]$vote_3_pred = ifelse(max(player_votes_split[[i]]$vote_3) == player_votes_split[[i]]$vote_3,3,0)
BR_3 = player_votes_split[[i]] %>% filter(vote_3_pred == 3) %>% select(ID,Player,Team,vote_3_pred)
player_2_votes = anti_join(player_votes_split[[i]],BR_3)
player_2_votes$vote_2_pred = ifelse(max(player_2_votes$vote_2+player_2_votes$vote_3) == player_2_votes$vote_2+player_2_votes$vote_3,2,0)
BR_2 = player_2_votes %>% filter(vote_2_pred == 2) %>% select(ID,Player,Team,vote_2_pred)
player_1_votes = anti_join(player_votes_split[[i]],BR_2) %>% anti_join(BR_3)
# NOTE(review): suspected bug -- this compares max(vote_0) against vote_1.
# By analogy with the 3- and 2-vote picks above, max(vote_1) (or
# max(vote_1 + vote_2 + vote_3)) looks intended; verify before trusting
# the 1-vote allocations.
player_1_votes$vote_1_pred = ifelse(max(player_1_votes$vote_0) == player_1_votes$vote_1,1,0)
BR_1 = player_1_votes %>% filter(vote_1_pred == 1) %>% select(ID,Player,Team,vote_1_pred)
player_votes_split[[i]] = left_join(player_votes_split[[i]],BR_3) %>% left_join(BR_2) %>% left_join(BR_1)
player_votes_split[[i]][is.na(player_votes_split[[i]])] <- 0
player_votes_split[[i]]$BR_Pred = player_votes_split[[i]]$vote_3_pred+player_votes_split[[i]]$vote_2_pred+player_votes_split[[i]]$vote_1_pred
player_votes_split[[i]] = player_votes_split[[i]] %>% select(-vote_3_pred,-vote_2_pred,-vote_1_pred)
}
# Summaries and spot checks of predicted vs actual votes.
player_predict = bind_rows(player_votes_split)
player_predict %>% group_by(Player, Team) %>%
summarise(BR_sum = sum(BR),BRPred_sum = sum(BR_Pred)) %>% arrange(desc(BRPred_sum)) %>% head(20)
records_votes = records_t5 %>% left_join(player_predict) %>% data.frame() %>% mutate(vote_diff = BR - BR_Pred, vote_sum = vote_3+vote_2+vote_1)
records_votes %>% filter(Player == "Mitchell, Tom" & Team == "Hawthorn") %>% filter(vote_diff != 0)
records_votes %>% filter(ID == "2018_1_20180324_1925_MCG") %>% arrange(desc(vote_sum)) %>% head(10)
| /_4_2018_Predictions.R | no_license | czucchet/AFL-Prediction-task | R | false | false | 7,015 | r | setwd("C:/Users/Chris Zucchet/Documents/AFL-Prediction-task")
library(DMwR);library(tidyverse);library(DBI);library(xgboost);library(recipes);library(rsample);library(RSQLite);library(purrr);library(e1071);library(digest);library(mlr);library(parallelMap);options(dplyr.width = Inf)
range01 <- function(x){(x-min(x))/(max(x)-min(x))}
con = dbConnect(SQLite(), "PlayerRecords.sqlite")
num_cols = c("KI","MK","HB","GL","BH","HO","TK","RB","IF","CL","CG","FF","FA","BR","CP","UP","CM","MI","one_pc","BO","GA","game_played","Year","Round","Diff","Age_Years","Games","PercentWon")
scale_cols = c("KI","MK","HB","GL","BH","HO","TK","RB","IF","CL","CG","FF","FA","CP","UP","CM","MI","one_pc","BO","GA","game_played","Age_Years","Games","PercentWon","dt_score","error_rate","fairness","impact_plays")
afl_model = xgb.load('afl_model')
afl_model = xgb.load('afl_model_pre2017')
records = dbGetQuery(con, "SELECT * FROM Player_Detail WHERE year = 2018") %>%group_by(Player,KI,MK,CP,Team,ID) %>%
filter(row_number(HB) == 1) %>% ungroup() %>% rename(number = "#", one_pc = "1%", game_played = "%P")
records[records == "Â"] <- "0"
records2 = records
records_t2 = records2 %>% mutate_if(names(records) %in% num_cols, as.numeric)%>% na.omit() ;rm(records)
teams = unique(dbGetQuery(con, "SELECT Team, GL FROM Player_Detail"));teams = sort(unique(teams[,"Team"]));state = c("SA","QLD","VIC","VIC","VIC","WA","VIC","QLD","NSW","VIC","VIC","VIC","SA","VIC","VIC","NSW","WA","VIC")
home_team = data.frame(Team = teams,home_state = state) %>% mutate_if(is.factor, as.character);away_team = data.frame(Opponent = teams,away_state = state) %>% mutate_if(is.factor, as.character)
records_t3 = records_t2 %>%
mutate(is_winner = ifelse(Diff >0,1,0),
is_1_6_margin = ifelse(abs(Diff) <= 6,1,0),is_7_30_margin = ifelse(abs(Diff) > 6 & abs(Diff) <= 30,1,0),is_30_margin = ifelse(abs(Diff) >= 31,1,0),
is_6_margin = ifelse(abs(Diff) <= 6,1,0),is_30_margin = ifelse(abs(Diff) <= 30,1,0),is_50_margin = ifelse(abs(Diff) <= 50,1,0),is_100_margin = ifelse(abs(Diff) <= 100,1,0),
games_1_10 = ifelse(Games <= 10, 1,0), games_0_50 = ifelse(Games <= 50, 1,0), #games_51_more = ifelse(Games > 50, 1,0),
dt_score = (KI*2.5)+(HB*1.5)+(MK*3)+(GL*6)+(BH*2)+(TK*2)+(HO*1.5)+(FF*1)+(FA*-3),
error_rate = CG/(KI+HB),
fairness = FF-FA,
impact_plays = (IF*0.5)+(TK*0.25)+(MI*1)+(one_pc*3)+(BO*0.25)+(GA*1.5)+(RB*1)+(CG*-1)+(CL*1)+(CM*1),
# CP_score =(CP*1.5)+(UP*0.75),
gl_dt =(GL*6)/((KI*2.5)+(HB*1.5)+(MK*3)+(GL*6)+(BH*2)+(TK*4)+(HO*1)+(FF*1)+(FA*-3))) %>%
group_by(ID,Team) %>%
mutate(GL_Sum = sum(GL),CG_Sum = sum(CG),MI_Sum = sum(MI)) %>%
ungroup(ID) %>%
mutate(GL_prop = GL/GL_Sum,CG_prop = CG/CG_Sum,MI_prop = MI/MI_Sum) %>% select(-GL_Sum,-CG_Sum,-MI_Sum)
records_t4 = records_t3 %>% left_join(home_team) %>% left_join(away_team);rm(records_t3);rm(home_team);rm(away_team)
records_t5 = records_t4 %>% mutate(interstate = ifelse(home_state != away_state, 1,0))
rec_split = records_t5 %>% split(.$ID)
for(i in 1:length(rec_split)){
for(j in scale_cols){
rec_split[[i]][,j] = range01(rec_split[[i]][,j])
}
}
rec_all_temp = bind_rows(rec_split)
rec_all_temp$ID_Team = paste0(rec_all_temp$ID,"_",rec_all_temp$Team)
rec_split2 = rec_all_temp %>% split(.$ID_Team)
for(i in 1:length(rec_split2)){
for(j in scale_cols){
rec_split2[[i]][,j] = range01(rec_split2[[i]][,j])
}
}
rec_all2 = bind_rows(rec_split2)
rec_all2 = rec_all2 %>% select(scale_cols)
names(rec_all2) = paste0("Team","_", names(rec_all2))
rec_allt2 = cbind(rec_all_temp, rec_all2)
rec_all = rec_allt2
rec_train = recipe(BR ~., rec_all) %>%
step_dummy(Team,Home_Away,Opponent, home_state, away_state) %>%
step_other(Venue, threshold = .2) %>%
step_dummy(Venue) %>%
prep() %>%
bake(newdata = rec_all)
rec_lines_split = rec_train %>% split(.$ID)
for(i in 1:length(rec_lines_split)){
rec_temp = rec_lines_split[[i]] %>% select(-number,-Player,-DI,-ID,-BR,-ID_Team)%>% data.frame()
train_matrix <- xgb.DMatrix(data = as.matrix(rec_temp))
afl_pred = round(predict(afl_model,train_matrix),6)
vote_0_seq = seq(1,nrow(rec_lines_split[[i]])*4,4);vote_1_seq = seq(2,nrow(rec_lines_split[[i]])*4,4);vote_2_seq = seq(3,nrow(rec_lines_split[[i]])*4,4);vote_3_seq = seq(4,nrow(rec_lines_split[[i]])*4,4)
vote_0 = afl_pred[vote_0_seq];vote_1 = afl_pred[vote_1_seq];vote_2 = afl_pred[vote_2_seq];vote_3 = afl_pred[vote_3_seq]
rec_lines_split[[i]]$vote_3 = vote_3;rec_lines_split[[i]]$vote_2 = vote_2;rec_lines_split[[i]]$vote_1 = vote_1;rec_lines_split[[i]]$vote_0 = vote_0
}
Player_Team = records_t5 %>% select(Player,ID,Team) %>% unique()
afl_predict = bind_rows(rec_lines_split) %>% mutate(Player = as.character(Player),number = as.character(number),ID = as.character(ID)) %>% data.frame() %>% left_join(Player_Team)
player_votes = afl_predict %>% select(ID, Player,Team, BR,vote_3,vote_2,vote_1,vote_0)
player_votes$vote_3 = round(afl_predict$vote_3,6);player_votes$vote_2 = round(afl_predict$vote_2,6)
player_votes$vote_1 = round(afl_predict$vote_1,6);player_votes$vote_0 = round(afl_predict$vote_0,6)
player_votes_split = player_votes %>% split(.$ID)
for(i in 1:length(player_votes_split)){
player_votes_split[[i]]$vote_3_pred = ifelse(max(player_votes_split[[i]]$vote_3) == player_votes_split[[i]]$vote_3,3,0)
BR_3 = player_votes_split[[i]] %>% filter(vote_3_pred == 3) %>% select(ID,Player,Team,vote_3_pred)
player_2_votes = anti_join(player_votes_split[[i]],BR_3)
player_2_votes$vote_2_pred = ifelse(max(player_2_votes$vote_2+player_2_votes$vote_3) == player_2_votes$vote_2+player_2_votes$vote_3,2,0)
BR_2 = player_2_votes %>% filter(vote_2_pred == 2) %>% select(ID,Player,Team,vote_2_pred)
player_1_votes = anti_join(player_votes_split[[i]],BR_2) %>% anti_join(BR_3)
player_1_votes$vote_1_pred = ifelse(max(player_1_votes$vote_0) == player_1_votes$vote_1,1,0)
BR_1 = player_1_votes %>% filter(vote_1_pred == 1) %>% select(ID,Player,Team,vote_1_pred)
player_votes_split[[i]] = left_join(player_votes_split[[i]],BR_3) %>% left_join(BR_2) %>% left_join(BR_1)
player_votes_split[[i]][is.na(player_votes_split[[i]])] <- 0
player_votes_split[[i]]$BR_Pred = player_votes_split[[i]]$vote_3_pred+player_votes_split[[i]]$vote_2_pred+player_votes_split[[i]]$vote_1_pred
player_votes_split[[i]] = player_votes_split[[i]] %>% select(-vote_3_pred,-vote_2_pred,-vote_1_pred)
}
player_predict = bind_rows(player_votes_split)
player_predict %>% group_by(Player, Team) %>%
summarise(BR_sum = sum(BR),BRPred_sum = sum(BR_Pred)) %>% arrange(desc(BRPred_sum)) %>% head(20)
records_votes = records_t5 %>% left_join(player_predict) %>% data.frame() %>% mutate(vote_diff = BR - BR_Pred, vote_sum = vote_3+vote_2+vote_1)
records_votes %>% filter(Player == "Mitchell, Tom" & Team == "Hawthorn") %>% filter(vote_diff != 0)
records_votes %>% filter(ID == "2018_1_20180324_1925_MCG") %>% arrange(desc(vote_sum)) %>% head(10)
|
ReadGwHeadData <- function(path, gwConversion) {
  # Rachel Blakers v0-1 1/12/2013
  #
  # Read groundwater head timeseries data and split it into one series per
  # entry of `gwConversion`.
  #
  # INPUT:
  #   path         - data location, passed straight to ReadTimeseriesData().
  #   gwConversion - column selectors; one output element is built per entry
  #                  by column-subsetting the data (presumably bore/column
  #                  mappings -- TODO confirm against callers).
  #
  # OUTPUT:
  #   A list of timeseries objects, one per element of gwConversion.
  #   Stops with an error if any series is irregular (has missing dates).
  Gobs <- ReadTimeseriesData(path)
  Gobs <- lapply(gwConversion, function(x) Gobs[, x])
  # Check that timeseries data is regular i.e. there are no missing dates.
  # vapply() (rather than sapply()) guarantees a logical result even when
  # Gobs is empty.
  regBool <- vapply(Gobs, is.regular, logical(1), strict = TRUE)
  if (!all(regBool)) {
    stop("Input timeseries data is not regular.")
  }
  return(Gobs)
} | /Model/hydrological/Setup/ReadGwHeadData.R | no_license | HYDFKI7/integrated-mk | R | false | false | 464 | r |
ReadGwHeadData <- function(path, gwConversion) {
  # Rachel Blakers v0-1 1/12/2013
  #
  # Read groundwater head timeseries data and split it into one series per
  # entry of `gwConversion`.
  #
  # INPUT:
  #   path         - data location, passed straight to ReadTimeseriesData().
  #   gwConversion - column selectors; one output element is built per entry
  #                  by column-subsetting the data (presumably bore/column
  #                  mappings -- TODO confirm against callers).
  #
  # OUTPUT:
  #   A list of timeseries objects, one per element of gwConversion.
  #   Stops with an error if any series is irregular (has missing dates).
  Gobs <- ReadTimeseriesData(path)
  Gobs <- lapply(gwConversion, function(x) Gobs[, x])
  # Check that timeseries data is regular i.e. there are no missing dates.
  # vapply() (rather than sapply()) guarantees a logical result even when
  # Gobs is empty.
  regBool <- vapply(Gobs, is.regular, logical(1), strict = TRUE)
  if (!all(regBool)) {
    stop("Input timeseries data is not regular.")
  }
  return(Gobs)
} |
plot1<-function(){
  # Read the UCI "household power consumption" data, subset it to the two
  # days 2007-02-01 / 2007-02-02, and write a histogram of Global_active_power
  # for those days to plot1.png (480x480).
  hp1<-read.table("household_power_consumption.txt",header=TRUE,sep=";")
  hp1$Date=as.Date(hp1$Date,format="%d/%m/%Y")
  req_dates<-subset(hp1,hp1$Date=="2007-02-01"|hp1$Date=="2007-02-02")
  # BUG FIX: the histogram previously used the full dataset (hp1); the
  # two-day subset req_dates was computed but never used. Plot the subset.
  # as.numeric(as.character(...)) converts factor levels (e.g. "?" missing
  # markers) to numeric, producing NA for non-numeric entries.
  hp1global_actvpwr<-as.numeric(as.character(req_dates$Global_active_power))
  hist(hp1global_actvpwr,main="freq of global actv power",xlab="global active power",ylab="frequency",xlim=c(0,6),col="red")
  dev.copy(png,"plot1.png",width=480,height=480)
  dev.off()
} | /plot1.R | no_license | anuragswain63/exploratory_analysis_proj1 | R | false | false | 471 | r | plot1<-function(){
hp1<-read.table("household_power_consumption.txt",header=TRUE,sep=";")
hp1$Date=as.Date(hp1$Date,format="%d/%m/%Y")
req_dates<-subset(hp1,hp1$Date=="2007-02-01"|hp1$Date=="2007-02-02")
# BUG FIX: histogram the two-day subset (req_dates) rather than the full
# dataset; previously req_dates was computed but never used.
hp1global_actvpwr<-as.numeric(as.character(req_dates$Global_active_power))
hist(hp1global_actvpwr,main="freq of global actv power",xlab="global active power",ylab="frequency",xlim=c(0,6),col="red")
dev.copy(png,"plot1.png",width=480,height=480)
dev.off()
} |
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: constructor for a matrix wrapper that caches its inverse.
## makeCacheMatrix creates a List that should be saved as function of a valid square,
## invertible matrix. This list is created so that its companion function cacheSolve
## can be executed multiple times. During first cacheSolve execution for the saved list, the
## solve function is executed and its results saved into the inverse matrix minv in the
## environment. In subsequent executions against that list, the previously saved results are
## simply returned.
## use following to create a valid... testmatrix<-matrix(c(1,2,4,0,3,1,4,4,0),nrow=3,ncol=3)
makeCacheMatrix <- function(x = matrix()) {
  # Wrap an invertible matrix in a list of accessor closures so that its
  # inverse can be computed once and then served from cache (see cacheSolve).
  #
  # Returns a list with:
  #   set(y)    - replace the stored matrix and invalidate the cached inverse
  #   get()     - return the stored matrix
  #   setinv(v) - store a computed inverse in the cache
  #   getinv()  - return the cached inverse, or NULL if not yet computed
  # The original setinv2/getinv2 names are kept as backward-compatible
  # aliases: cacheSolve() calls x$setinv()/x$getinv(), which previously only
  # worked through R's fragile partial matching of `$` on lists.
  minv <- NULL
  set <- function(y) {
    x <<- y
    minv <<- NULL
  }
  get <- function() x
  setinv <- function(solve) minv <<- solve
  getinv <- function() minv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv,
       setinv2 = setinv,
       getinv2 = getinv)
}
## cacheSolve: returns the (possibly cached) inverse of a makeCacheMatrix object.
## cacheSolve processes a list created by its companion function makeCacheMatrix
## from an invertible square matrix. When cacheSolve is first executed against the list,
## the inverted matrix is computed through Solve and then saved for subsequent executions
## into environment's minv matrix variable. In subsequent executions, minv is simply returned.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by `x` (a list produced by
  ## makeCacheMatrix). A previously cached inverse is reused when available;
  ## otherwise the inverse is computed with solve(), stored via x$setinv(),
  ## and returned. Extra arguments in `...` are forwarded to solve().
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  }
  cached
}
| /cachematrix.R | no_license | BobJofElm/ProgrammingAssignment2 | R | false | false | 1,712 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: constructor for a matrix wrapper that caches its inverse.
## makeCacheMatrix creates a List that should be saved as function of a valid square,
## invertible matrix. This list is created so that its companion function cacheSolve
## can be executed multiple times. During first cacheSolve execution for the saved list, the
## solve function is executed and its results saved into the inverse matrix minv in the
## environment. In subsequent executions against that list, the previously saved results are
## simply returned.
## use following to create a valid... testmatrix<-matrix(c(1,2,4,0,3,1,4,4,0),nrow=3,ncol=3)
makeCacheMatrix <- function(x = matrix()) {
  # Wrap an invertible matrix in a list of accessor closures so that its
  # inverse can be computed once and then served from cache (see cacheSolve).
  #
  # Returns a list with:
  #   set(y)    - replace the stored matrix and invalidate the cached inverse
  #   get()     - return the stored matrix
  #   setinv(v) - store a computed inverse in the cache
  #   getinv()  - return the cached inverse, or NULL if not yet computed
  # The original setinv2/getinv2 names are kept as backward-compatible
  # aliases: cacheSolve() calls x$setinv()/x$getinv(), which previously only
  # worked through R's fragile partial matching of `$` on lists.
  minv <- NULL
  set <- function(y) {
    x <<- y
    minv <<- NULL
  }
  get <- function() x
  setinv <- function(solve) minv <<- solve
  getinv <- function() minv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv,
       setinv2 = setinv,
       getinv2 = getinv)
}
## cacheSolve: returns the (possibly cached) inverse of a makeCacheMatrix object.
## cacheSolve processes a list created by its companion function makeCacheMatrix
## from an invertible square matrix. When cacheSolve is first executed against the list,
## the inverted matrix is computed through Solve and then saved for subsequent executions
## into environment's minv matrix variable. In subsequent executions, minv is simply returned.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by `x` (a list produced by
  ## makeCacheMatrix). A previously cached inverse is reused when available;
  ## otherwise the inverse is computed with solve(), stored via x$setinv(),
  ## and returned. Extra arguments in `...` are forwarded to solve().
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  }
  cached
}
|
# Genomic-neighbourhood correlation plots for HG-U133 Plus 2 probesets.
# For each probeset in the "cart" set, load its neighbourhood data, average
# each neighbour's correlation against the cart probesets, and plot
# correlation and one-sided t-test significance against genomic offset
# (+/- 3 Mb). One pair of panels per gene symbol, all appended to cart.pdf.
source('/home/allenday/cvsroot/celsius/dump/lib/R/Celsius/IO/exe.R');
# Connection/handle to the platform expression store; get(conn.exe, name)
# below retrieves a probeset's platform-wide signal through it (Celsius
# accessor -- semantics not visible here).
conn.exe = Celsius.exe('/home/allenday/cvsroot/celsius/dump/data2/HG-U133_Plus_2.rma_trim2');
# Correlation table: rows indexed by probeset (column 2), remaining columns
# are correlations against the cart probesets -- TODO confirm layout.
cart = read.csv('./cart.xls',sep="\t");#http://genome.ucla.edu/u/~allenday/tmp/GeneCorrOutput/Corr_20080416_150528.xls',sep="\t");
# Platform-wide per-probeset summary stats (values stored x10000 on disk).
platform.names = scan('/home/allenday/cvsroot/celsius/dump/data2/HG-U133_Plus_2.rma_trim2.exe.col', what='character');
platform.summary = read.table('/home/allenday/cvsroot/celsius/dump/data2/HG-U133_Plus_2.rma_trim2.exe.summary');
platform.mean = platform.summary[,5]/10000;
platform.sd = platform.summary[,7]/10000;
# Per-cart-probeset genomic coordinates (cols 2-3 = start/end, col 4 = id)
# and the gene-symbol <-> probeset mapping.
cart.region = read.table('./cart.region.txt',header=TRUE);
cart.gene = read.table('./cart.gene.txt');
cart.colcount = dim(cart.gene)[1];
# NOTE(review): samplerep and cart.sample are computed but never used below.
samplerep = 1;
cart.samplesize = cart.colcount;
cart.sample = sample((1:cart.colcount)+2, cart.samplesize);
# Grand mean/sd over the numeric part of the cart correlation matrix,
# used for the z-score column of d below.
cart.mean = mean(as.numeric(t(cart[2:(dim(cart)[1]),3:(dim(cart)[2])])))
cart.sd = sd( as.numeric(t(cart[2:(dim(cart)[1]),3:(dim(cart)[2])])))
pdf('cart.pdf',width=20,height=10);
plotted = list();
for ( j in 1:cart.colcount ){
this.probeset = as.vector(cart.region[j,4]);
this.symbol = as.vector(cart.gene[cart.gene[,2]==this.probeset,1])
#plot each gene symbol only once
if ( is.null(plotted[[this.symbol]]) ) {
print(paste(j,this.probeset,this.symbol,sep='*'))
# Neighbourhood table for this probeset: col 2 = position, col 4 = probeset
# id of each neighbour. Skip regions with no data.
p1 = read.table(paste('./region/',this.probeset,'.dat',sep=''),header=TRUE);
if ( sum(p1[,1]!=0) == 0 ) { next }
p1.rowcount = dim(p1)[1];
# Offset of each neighbour from the midpoint of this probeset's region, Mb.
d.pos = (p1[,2] - ((cart.region[j,3]+cart.region[j,2])/2))/1000000;
#do not compare to probesets from the same gene symbol.
d.sig = as.numeric(as.vector(t(cart[match(as.vector(p1[,4]),as.vector(cart[,2])),
# ((1:cart.colcount)+2)[! as.vector(t(cart[1,(1:cart.colcount)+2])) == this.probeset] ### same probeset
# NOTE(review): magic constant 54676 -- presumably the platform's probeset
# row count; verify against nrow(cart).
((1:cart.colcount)+2)[! cart[ match( as.vector(t(cart[1,(1:cart.colcount)+2])), cart[1:54676,2] ), 1 ] == this.symbol] ### same gene symbol
])));
d.sig = matrix(d.sig,nrow=p1.rowcount,byrow=TRUE);
d.sig.summary = apply(d.sig,1,mean);
# Assemble plot matrix d: [,1] genomic offset (Mb), [,2] gene symbol,
# [,3] mean r vs cart set, [,4] z-score vs cart-wide mean/sd, [,5] z-score
# vs that probeset's platform mean/sd, [,6] -log10 p of a one-sided
# ("greater") t-test against the probeset's full platform signal.
# cbind() with a character column coerces d to a character matrix, hence
# the as.numeric() conversions further down.
d = cbind(
d.pos,
as.vector(cart[match(as.vector(p1[,4]),as.vector(cart[,2])),1]),
d.sig.summary,
(d.sig.summary - cart.mean) / cart.sd,
(d.sig.summary - platform.mean[ match(p1[,4], platform.names) ] ) / platform.sd[ match(p1[,4], platform.names) ],
-log10(apply(cbind(1:nrow(d.sig)),1,function(x)t.test(d.sig[x,], as.vector(get(conn.exe, as.character(p1[x,4]))), alternative="greater")$p.value))
# (d.sig.summary - apply(matrix(p1[,4],nrow=1), 2, function(x){mean(as.vector(get(conn.exe,x)))})) / apply(matrix(p1[,4],nrow=1), 2, function(x){sd(as.vector(get(conn.exe,x)))})
# apply(matrix(p1[,4],nrow=1), 2, function(x){mean(as.vector(get(conn.exe,x)))}),
# apply(matrix(p1[,4],nrow=1), 2, function(x){sd(as.vector(get(conn.exe,x)))}),
);
# postscript(paste('ps/',this.probeset,'.ps',sep=''),width=20,height=10);
# ymin=min(as.numeric(d[,2]));
# ymax=max(as.numeric(d[,2]));
# Panel 1: mean correlation (col 3) vs offset. Point shapes: open circle =
# symbol not in the cart set, triangle = other cart genes, x = unannotated
# ('---'), filled circle = this gene/probeset or zero offset.
ycol=3;
ylab='r';
ymin=min(as.numeric(d[,ycol]));
ymax=max(as.numeric(d[,ycol]));
plot(d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', 1 ],d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', ycol ],pch=1,xlim=c(-3,3),ylim=c(ymin,ymax),xlab='genomic offset, megabases', ylab=ylab, main=paste(as.vector(t(cart.gene[cart.gene[,2]==this.probeset,]))));
points(d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, 1 ],d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, ycol ],pch=2);
points(d[ d[,2] == '---', 1 ],d[ d[,2] == '---', ycol ],pch=4);
points(d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, 1 ],d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, ycol ],pch=16);
# Panel 2: t-test significance (col 6) vs offset, same point-shape scheme.
ycol=6;
ylab='-log10(t-test p-value)';
ymin=min(as.numeric(d[,ycol]));
ymax=max(as.numeric(d[,ycol]));
plot(d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', 1 ],d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', ycol ],pch=1,xlim=c(-3,3),ylim=c(ymin,ymax),xlab='genomic offset, megabases', ylab=ylab, main=paste(as.vector(t(cart.gene[cart.gene[,2]==this.probeset,]))));
points(d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, 1 ],d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, ycol ],pch=2);
points(d[ d[,2] == '---', 1 ],d[ d[,2] == '---', ycol ],pch=4);
points(d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, 1 ],d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, ycol ],pch=16);
# ycol=5;
# ylab='s.d. of r';
# ymin=min(as.numeric(d[,ycol]));
# ymax=max(as.numeric(d[,ycol]));
#
# plot(d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', 1 ],d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', ycol ],pch=1,xlim=c(-3,3),ylim=c(ymin,ymax),xlab='genomic offset, megabases', ylab=ylab, main=paste(as.vector(t(cart.gene[cart.gene[,2]==this.probeset,]))));
# points(d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, 1 ],d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, ycol ],pch=2);
# points(d[ d[,2] == '---', 1 ],d[ d[,2] == '---', ycol ],pch=4);
# points(d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, 1 ],d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, ycol ],pch=16);
# dev.off();
# Mark this symbol as plotted (single-bracket list assignment by name).
plotted[this.symbol] = TRUE;
}
}
dev.off();
| /cor/cart/binsearch.R | no_license | allenday/dissertation | R | false | false | 5,596 | r | source('/home/allenday/cvsroot/celsius/dump/lib/R/Celsius/IO/exe.R');
conn.exe = Celsius.exe('/home/allenday/cvsroot/celsius/dump/data2/HG-U133_Plus_2.rma_trim2');
cart = read.csv('./cart.xls',sep="\t");#http://genome.ucla.edu/u/~allenday/tmp/GeneCorrOutput/Corr_20080416_150528.xls',sep="\t");
platform.names = scan('/home/allenday/cvsroot/celsius/dump/data2/HG-U133_Plus_2.rma_trim2.exe.col', what='character');
platform.summary = read.table('/home/allenday/cvsroot/celsius/dump/data2/HG-U133_Plus_2.rma_trim2.exe.summary');
platform.mean = platform.summary[,5]/10000;
platform.sd = platform.summary[,7]/10000;
cart.region = read.table('./cart.region.txt',header=TRUE);
cart.gene = read.table('./cart.gene.txt');
cart.colcount = dim(cart.gene)[1];
samplerep = 1;
cart.samplesize = cart.colcount;
cart.sample = sample((1:cart.colcount)+2, cart.samplesize);
cart.mean = mean(as.numeric(t(cart[2:(dim(cart)[1]),3:(dim(cart)[2])])))
cart.sd = sd( as.numeric(t(cart[2:(dim(cart)[1]),3:(dim(cart)[2])])))
pdf('cart.pdf',width=20,height=10);
plotted = list();
# For every probeset in the cart: load its neighbourhood data, average the
# correlations of neighbouring probesets (excluding probesets of the same
# gene symbol), and plot two panels -- mean correlation and -log10 t-test
# p-value -- against the genomic offset from the region midpoint.
for ( j in 1:cart.colcount ){
  this.probeset = as.vector(cart.region[j,4]);
  this.symbol = as.vector(cart.gene[cart.gene[,2]==this.probeset,1])
  #plot each gene symbol only once
  if ( is.null(plotted[[this.symbol]]) ) {
    print(paste(j,this.probeset,this.symbol,sep='*'))
    # Region data: usage below implies column 1 = flag/position, column 2 =
    # genomic position, column 4 = probeset id (TODO confirm .dat format).
    p1 = read.table(paste('./region/',this.probeset,'.dat',sep=''),header=TRUE);
    # Skip regions with no usable positions.
    if ( sum(p1[,1]!=0) == 0 ) { next }
    p1.rowcount = dim(p1)[1];
    # Offset of each neighbouring probeset from the region midpoint, in Mb.
    d.pos = (p1[,2] - ((cart.region[j,3]+cart.region[j,2])/2))/1000000;
    #do not compare to probesets from the same gene symbol.
    d.sig = as.numeric(as.vector(t(cart[match(as.vector(p1[,4]),as.vector(cart[,2])),
# ((1:cart.colcount)+2)[! as.vector(t(cart[1,(1:cart.colcount)+2])) == this.probeset] ### same probeset
((1:cart.colcount)+2)[! cart[ match( as.vector(t(cart[1,(1:cart.colcount)+2])), cart[1:54676,2] ), 1 ] == this.symbol] ### same gene symbol
])));
    # Reshape to one row per neighbouring probeset.
    d.sig = matrix(d.sig,nrow=p1.rowcount,byrow=TRUE);
    d.sig.summary = apply(d.sig,1,mean);
    # d columns: 1 = offset (Mb), 2 = gene symbol, 3 = mean r,
    # 4 = z-score vs cart-wide mean/sd, 5 = z-score vs platform mean/sd,
    # 6 = -log10 p of one-sided (greater) t-test vs the probeset's
    #     expression values fetched through conn.exe.
    d = cbind(
      d.pos,
      as.vector(cart[match(as.vector(p1[,4]),as.vector(cart[,2])),1]),
      d.sig.summary,
      (d.sig.summary - cart.mean) / cart.sd,
      (d.sig.summary - platform.mean[ match(p1[,4], platform.names) ] ) / platform.sd[ match(p1[,4], platform.names) ],
      -log10(apply(cbind(1:nrow(d.sig)),1,function(x)t.test(d.sig[x,], as.vector(get(conn.exe, as.character(p1[x,4]))), alternative="greater")$p.value))
# (d.sig.summary - apply(matrix(p1[,4],nrow=1), 2, function(x){mean(as.vector(get(conn.exe,x)))})) / apply(matrix(p1[,4],nrow=1), 2, function(x){sd(as.vector(get(conn.exe,x)))})
# apply(matrix(p1[,4],nrow=1), 2, function(x){mean(as.vector(get(conn.exe,x)))}),
# apply(matrix(p1[,4],nrow=1), 2, function(x){sd(as.vector(get(conn.exe,x)))}),
    );
# postscript(paste('ps/',this.probeset,'.ps',sep=''),width=20,height=10);
# ymin=min(as.numeric(d[,2]));
# ymax=max(as.numeric(d[,2]));
    # --- Panel 1: mean correlation (column 3) vs genomic offset ---
    ycol=3;
    ylab='r';
    ymin=min(as.numeric(d[,ycol]));
    ymax=max(as.numeric(d[,ycol]));
    # pch 1: probesets without a cart gene annotation; pch 2: annotated
    # probesets of other symbols; pch 4: '---' (no symbol); pch 16: this
    # gene/probeset or zero offset.
    plot(d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', 1 ],d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', ycol ],pch=1,xlim=c(-3,3),ylim=c(ymin,ymax),xlab='genomic offset, megabases', ylab=ylab, main=paste(as.vector(t(cart.gene[cart.gene[,2]==this.probeset,]))));
    points(d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, 1 ],d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, ycol ],pch=2);
    points(d[ d[,2] == '---', 1 ],d[ d[,2] == '---', ycol ],pch=4);
    points(d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, 1 ],d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, ycol ],pch=16);
    # --- Panel 2: -log10 t-test p-value (column 6) vs genomic offset ---
    ycol=6;
    ylab='-log10(t-test p-value)';
    ymin=min(as.numeric(d[,ycol]));
    ymax=max(as.numeric(d[,ycol]));
    plot(d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', 1 ],d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', ycol ],pch=1,xlim=c(-3,3),ylim=c(ymin,ymax),xlab='genomic offset, megabases', ylab=ylab, main=paste(as.vector(t(cart.gene[cart.gene[,2]==this.probeset,]))));
    points(d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, 1 ],d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, ycol ],pch=2);
    points(d[ d[,2] == '---', 1 ],d[ d[,2] == '---', ycol ],pch=4);
    points(d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, 1 ],d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, ycol ],pch=16);
# ycol=5;
# ylab='s.d. of r';
# ymin=min(as.numeric(d[,ycol]));
# ymax=max(as.numeric(d[,ycol]));
#
# plot(d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', 1 ],d[ !match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2]!='---', ycol ],pch=1,xlim=c(-3,3),ylim=c(ymin,ymax),xlab='genomic offset, megabases', ylab=ylab, main=paste(as.vector(t(cart.gene[cart.gene[,2]==this.probeset,]))));
# points(d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, 1 ],d[ match(d[,2], cart.gene[,1], nomatch=FALSE) & d[,2] != '---' & d[,2] != this.symbol, ycol ],pch=2);
# points(d[ d[,2] == '---', 1 ],d[ d[,2] == '---', ycol ],pch=4);
# points(d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, 1 ],d[ d[,2] == this.symbol | d[,2] == this.probeset | d[,1]==0, ycol ],pch=16);
# dev.off();
    plotted[this.symbol] = TRUE;
  }
}
dev.off();
|
library(shiny)
library(xtable)
library(colorspace)
# Helper functions used below (makeProfiles, similarities, parseGeneList,
# findSimilar, plotSimilarFit, plotGenomewideSimilarity, and -- presumably --
# the `fc` group list referenced later; TODO confirm fc is defined there).
source("profilingFunctions.R",local=TRUE)
# List of filenames to open
flist=list(
Control="../FitnessReports/QFA0018_FitnessReport_30_MDRMDP.txt",
Cdc13="../FitnessReports/QFA0015_FitnessReport_28_MDRMDP.txt",
Pol_alpha="../FitnessReports/QFA0010_FitnessReport_33_MDRMDP.txt",
Pol_delta="../FitnessReports/QFA0012_FitnessReport_30_MDRMDP.txt",
Pol_epsilon="../FitnessReports/QFA0011_FitnessReport_36_MDRMDP.txt",
HU_30="../FitnessReports/FIT_G4QUADDRUGS_100mM HU 30_SCALED.txt"
)
# Give columns pretty names (unicode escapes: Pol alpha/delta/epsilon).
prettyNames=c("Control","cdc13-1","Pol \u03B1","Pol \u03B4","Pol \u03B5","HU 100mM","SD","ORF")
# Load fitness data consistent with default user input values
vals=list()
vals$prof=makeProfiles(flist,"MeanFit")
colnames(vals$prof)=prettyNames
# Last two columns (SD, ORF) are metadata, so the number of experiment
# columns is ncol - 2.
vals$nExp=dim(vals$prof)[2]-2
vals$sims=similarities(vals$prof,"mahalanobis")
# gdict maps ORF -> gene name (row names of the profile table).
gdict=rownames(vals$prof)
names(gdict)=vals$prof$ORF
shinyServer(function(input, output) {
  # Rebuild fitness profiles and the genome-wide distance matrix whenever the
  # selected screens (checkGroup), fitness measure (pType) or distance
  # measure (dType) change.  Returns a list: prof (profile table with pretty
  # column names), nExp (number of experiment columns), sims (pairwise
  # distance matrix), simtarg (placeholder).
  updateScreens <- reactive({
    vals <- list()
    vals$prof <- makeProfiles(flist[as.numeric(input$checkGroup)], input$pType)
    vals$nExp <- dim(vals$prof)[2] - 2
    vals$sims <- similarities(vals$prof, input$dType)
    colnames(vals$prof) <- prettyNames[as.numeric(input$checkGroup)]
    vals$simtarg <- c()
    return(vals)
  })
  # Resolve the target gene (free-text list when ggroup == 1, otherwise a
  # predefined group; `fc` is expected to come from profilingFunctions.R --
  # TODO confirm) and rank all genes by distance to it.  Returns an empty
  # vector unless exactly one known gene is selected.
  updateTarget <- reactive({
    vals <- updateScreens()
    if(input$ggroup == 1){
      gene <- parseGeneList(input$glist, gdict)
    }else{
      gene <- parseGeneList(fc$GroupORFs[as.numeric(input$ggroup)], gdict)
    }
    # Scalar && (not vectorised &): with an empty `gene`, the original `&`
    # produced a zero-length condition and crashed the `if`.
    if(length(gene) == 1 && gene %in% rownames(vals$sims)){
      simtarg <- findSimilar(gene, vals$sims)
    }else{
      simtarg <- c()
    }
    return(simtarg)
  })
  # PDF download of the current fitness-profile plot.
  output$downloadPlot <- downloadHandler(
    filename = function() { "FitnessProfile.pdf" },
    content = function(file) {
      vals <- updateScreens()
      if(input$ggroup == 1){
        gene <- parseGeneList(input$glist, gdict)
      }else{
        gene <- parseGeneList(fc$GroupORFs[as.numeric(input$ggroup)], gdict)
      }
      simtarg <- updateTarget()
      near <- head(simtarg, n = input$nearNum)
      far <- tail(simtarg, n = input$farNum)
      # cairo_pdf so the unicode column labels (Pol alpha/delta/epsilon) render.
      cairo_pdf(file, width = 14, height = 10, onefile = TRUE)
      print(plotSimilarFit(gene, vals$prof, nearest = near, farthest = far))
      dev.off()
    })
  # Main fitness-profile plot: target gene plus nearest/farthest neighbours.
  output$profiles <- renderPlot({
    vals <- updateScreens()
    if(input$ggroup == 1){
      gene <- parseGeneList(input$glist, gdict)
    }else{
      # Debug traces retained from the original implementation.
      print(fc$GroupORFs[input$ggroup])
      print(input$ggroup)
      gene <- parseGeneList(fc$GroupORFs[as.numeric(input$ggroup)], gdict)
    }
    simtarg <- updateTarget()
    near <- head(simtarg, n = input$nearNum)
    far <- tail(simtarg, n = input$farNum)
    plotSimilarFit(gene, vals$prof, nearest = near, farthest = far)
  })
  # Genome-wide similarity-rank plot for the selected gene(s).
  output$ranks <- renderPlot({
    vals <- updateScreens()
    if(input$ggroup == 1){
      gene <- parseGeneList(input$glist, gdict)
    }else{
      gene <- parseGeneList(fc$GroupORFs[as.numeric(input$ggroup)], gdict)
    }
    gene <- intersect(gene, rownames(vals$sims))
    plotGenomewideSimilarity(gene, vals$sims)
  })
  # HTML table of the nearest genes, linked to SGD.  renderText() replaces
  # the long-removed shiny reactiveText() used by the original code.
  output$nearest <- renderText({
    vals <- updateScreens()
    simtarg <- updateTarget()
    if(length(simtarg) > 0 && input$nearNum > 0){
      near <- head(simtarg, n = input$nearNum)
      dftab <- data.frame('Gene'=paste('<a href="http://www.yeastgenome.org/locus/',tolower(names(near)),'/overview" target="_blank">',tolower(names(near)),'</a>',sep=""),Distance=near,row.names=NULL)
      colnames(dftab) <- c("Gene Name","Distance")
      return(as.character(print(xtable(dftab,align=c("r","c","c")),type="html",sanitize.text.function = function(x){x})))
    }else{return("")}
  })
  # HTML table of the farthest genes (reversed so the most distant is first).
  output$farthest <- renderText({
    vals <- updateScreens()
    simtarg <- updateTarget()
    if(length(simtarg) > 0 && input$farNum > 0){
      far <- rev(tail(simtarg, n = input$farNum))
      dftab <- data.frame(Gene=paste('<a href="http://www.yeastgenome.org/cgi-bin/locus.fpl?locus=',tolower(names(far)),'" target="_blank">',tolower(names(far)),'</a>',sep=""),Distance=far,row.names=NULL)
      colnames(dftab) <- c("Gene Name","Distance")
      return(as.character(print(xtable(dftab,align=c("r","c","c")),type="html",sanitize.text.function = function(x){x})))
    }else{return("")}
  })
})
| /profilyzer/server.R | no_license | lwlss/Dubarry2015 | R | false | false | 4,108 | r | library(shiny)
library(xtable)
library(colorspace)
source("profilingFunctions.R",local=TRUE)
# List of filenames to open
flist=list(
Control="../FitnessReports/QFA0018_FitnessReport_30_MDRMDP.txt",
Cdc13="../FitnessReports/QFA0015_FitnessReport_28_MDRMDP.txt",
Pol_alpha="../FitnessReports/QFA0010_FitnessReport_33_MDRMDP.txt",
Pol_delta="../FitnessReports/QFA0012_FitnessReport_30_MDRMDP.txt",
Pol_epsilon="../FitnessReports/QFA0011_FitnessReport_36_MDRMDP.txt",
HU_30="../FitnessReports/FIT_G4QUADDRUGS_100mM HU 30_SCALED.txt"
)
# Give columns pretty names
prettyNames=c("Control","cdc13-1","Pol \u03B1","Pol \u03B4","Pol \u03B5","HU 100mM","SD","ORF")
# Load fitness data consistent with default user input values
vals=list()
vals$prof=makeProfiles(flist,"MeanFit")
colnames(vals$prof)=prettyNames
vals$nExp=dim(vals$prof)[2]-2
vals$sims=similarities(vals$prof,"mahalanobis")
gdict=rownames(vals$prof)
names(gdict)=vals$prof$ORF
shinyServer(function(input, output) {
  # Rebuild fitness profiles and the genome-wide distance matrix whenever the
  # selected screens (checkGroup), fitness measure (pType) or distance
  # measure (dType) change.  Returns a list: prof (profile table with pretty
  # column names), nExp (number of experiment columns), sims (pairwise
  # distance matrix), simtarg (placeholder).
  updateScreens <- reactive({
    vals <- list()
    vals$prof <- makeProfiles(flist[as.numeric(input$checkGroup)], input$pType)
    vals$nExp <- dim(vals$prof)[2] - 2
    vals$sims <- similarities(vals$prof, input$dType)
    colnames(vals$prof) <- prettyNames[as.numeric(input$checkGroup)]
    vals$simtarg <- c()
    return(vals)
  })
  # Resolve the target gene (free-text list when ggroup == 1, otherwise a
  # predefined group; `fc` is expected to come from profilingFunctions.R --
  # TODO confirm) and rank all genes by distance to it.  Returns an empty
  # vector unless exactly one known gene is selected.
  updateTarget <- reactive({
    vals <- updateScreens()
    if(input$ggroup == 1){
      gene <- parseGeneList(input$glist, gdict)
    }else{
      gene <- parseGeneList(fc$GroupORFs[as.numeric(input$ggroup)], gdict)
    }
    # Scalar && (not vectorised &): with an empty `gene`, the original `&`
    # produced a zero-length condition and crashed the `if`.
    if(length(gene) == 1 && gene %in% rownames(vals$sims)){
      simtarg <- findSimilar(gene, vals$sims)
    }else{
      simtarg <- c()
    }
    return(simtarg)
  })
  # PDF download of the current fitness-profile plot.
  output$downloadPlot <- downloadHandler(
    filename = function() { "FitnessProfile.pdf" },
    content = function(file) {
      vals <- updateScreens()
      if(input$ggroup == 1){
        gene <- parseGeneList(input$glist, gdict)
      }else{
        gene <- parseGeneList(fc$GroupORFs[as.numeric(input$ggroup)], gdict)
      }
      simtarg <- updateTarget()
      near <- head(simtarg, n = input$nearNum)
      far <- tail(simtarg, n = input$farNum)
      # cairo_pdf so the unicode column labels (Pol alpha/delta/epsilon) render.
      cairo_pdf(file, width = 14, height = 10, onefile = TRUE)
      print(plotSimilarFit(gene, vals$prof, nearest = near, farthest = far))
      dev.off()
    })
  # Main fitness-profile plot: target gene plus nearest/farthest neighbours.
  output$profiles <- renderPlot({
    vals <- updateScreens()
    if(input$ggroup == 1){
      gene <- parseGeneList(input$glist, gdict)
    }else{
      # Debug traces retained from the original implementation.
      print(fc$GroupORFs[input$ggroup])
      print(input$ggroup)
      gene <- parseGeneList(fc$GroupORFs[as.numeric(input$ggroup)], gdict)
    }
    simtarg <- updateTarget()
    near <- head(simtarg, n = input$nearNum)
    far <- tail(simtarg, n = input$farNum)
    plotSimilarFit(gene, vals$prof, nearest = near, farthest = far)
  })
  # Genome-wide similarity-rank plot for the selected gene(s).
  output$ranks <- renderPlot({
    vals <- updateScreens()
    if(input$ggroup == 1){
      gene <- parseGeneList(input$glist, gdict)
    }else{
      gene <- parseGeneList(fc$GroupORFs[as.numeric(input$ggroup)], gdict)
    }
    gene <- intersect(gene, rownames(vals$sims))
    plotGenomewideSimilarity(gene, vals$sims)
  })
  # HTML table of the nearest genes, linked to SGD.  renderText() replaces
  # the long-removed shiny reactiveText() used by the original code.
  output$nearest <- renderText({
    vals <- updateScreens()
    simtarg <- updateTarget()
    if(length(simtarg) > 0 && input$nearNum > 0){
      near <- head(simtarg, n = input$nearNum)
      dftab <- data.frame('Gene'=paste('<a href="http://www.yeastgenome.org/locus/',tolower(names(near)),'/overview" target="_blank">',tolower(names(near)),'</a>',sep=""),Distance=near,row.names=NULL)
      colnames(dftab) <- c("Gene Name","Distance")
      return(as.character(print(xtable(dftab,align=c("r","c","c")),type="html",sanitize.text.function = function(x){x})))
    }else{return("")}
  })
  # HTML table of the farthest genes (reversed so the most distant is first).
  output$farthest <- renderText({
    vals <- updateScreens()
    simtarg <- updateTarget()
    if(length(simtarg) > 0 && input$farNum > 0){
      far <- rev(tail(simtarg, n = input$farNum))
      dftab <- data.frame(Gene=paste('<a href="http://www.yeastgenome.org/cgi-bin/locus.fpl?locus=',tolower(names(far)),'" target="_blank">',tolower(names(far)),'</a>',sep=""),Distance=far,row.names=NULL)
      colnames(dftab) <- c("Gene Name","Distance")
      return(as.character(print(xtable(dftab,align=c("r","c","c")),type="html",sanitize.text.function = function(x){x})))
    }else{return("")}
  })
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single-result.R
\name{single_result}
\alias{single_result}
\alias{dispatch_result}
\alias{new_single_result}
\alias{print.single_result}
\title{Access a single artifact.}
\usage{
dispatch_result(q)
new_single_result(artifact)
\method{print}{single_result}(x, ...)
}
\arguments{
\item{q}{a \code{query} object.}
\item{artifact}{retrieved from a repository with \link[repository:read_artifacts]{repository::read_artifacts}}
\item{x}{an object used to select a method.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
A \link{single_result} object or wrapped query \code{q}.
}
\description{
\code{dispatch_result} makes the decision whether to return
a wrapped \code{query} object for further narrowing of the query, or
a \code{single_result} object which wraps a single artifact retrieved
from a \link[repository:repository]{repository::repository}.
\code{new_single_result} creates a simple wrapper around an
artifact. It then dispatches calls to \code{print} and exposes additional
operations via the dollar operator \code{$}.
}
| /man/single_result.Rd | permissive | lbartnik/ui | R | false | true | 1,138 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single-result.R
\name{single_result}
\alias{single_result}
\alias{dispatch_result}
\alias{new_single_result}
\alias{print.single_result}
\title{Access a single artifact.}
\usage{
dispatch_result(q)
new_single_result(artifact)
\method{print}{single_result}(x, ...)
}
\arguments{
\item{q}{a \code{query} object.}
\item{artifact}{retrieved from a repository with \link[repository:read_artifacts]{repository::read_artifacts}}
\item{x}{an object used to select a method.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
A \link{single_result} object or wrapped query \code{q}.
}
\description{
\code{dispatch_result} makes the decision whether to return
a wrapped \code{query} object for further narrowing of the query, or
a \code{single_result} object which wraps a single artifact retrieved
from a \link[repository:repository]{repository::repository}.
\code{new_single_result} creates a simple wrapper around an
artifact. It then dispatches calls to \code{print} and exposes additional
operations via the dollar operator \code{$}.
}
|
####### Redefine the start and/or end coordinates of a GRanges object.
# rangeObj  : a GRanges object.
# newValues : a vector of new coordinates, same length as rangeObj.
# startOrEnd: which boundary to replace: "start", "end", or "both".
# Returns the modified GRanges object.
redefineStartEnd <-
    function(
        rangeObj,
        newValues,
        startOrEnd){
        if(length(rangeObj) != length(newValues)){
            stop("The rangeObj and your new values don't have the same length!\n")
        }
        # Fail loudly on an invalid selector: the original silently returned
        # the object unchanged, masking caller typos.
        if(!(startOrEnd %in% c("start", "end", "both"))){
            stop("startOrEnd should be either start, end, or both!\n")
        }
        if(startOrEnd == "start" || startOrEnd == "both"){
            start(rangeObj) <- newValues
        }
        if(startOrEnd == "end" || startOrEnd == "both"){
            end(rangeObj) <- newValues
        }
        return(rangeObj)
    }
# Summarise read-start coverage for one alignment set over 1 bp feature bins.
# alnGRanges    : GRanges of read alignments.
# oneBinRanges  : GRanges of 1 bp bins tiling the features, carrying an
#                 `idSeq` feature identifier.
# fixedInterval : min/max of the normalised coordinate interval (see normRange).
# renameChr     : seqname assigned to the summarised profile.
# charPerc      : "perc" or "sum" (see getSummarizedCov).
# Returns a GRanges of summarised coverage per normalised position.
readStartCov1Aln <-
    function(
        alnGRanges,
        oneBinRanges,
        fixedInterval,
        renameChr,
        charPerc){
        oneBinReadStartCov <- oneBinRanges
        # Count overlapping alignments per 1 bp bin (as.table on the Hits).
        vecReadStartCov <-
            suppressWarnings(findOverlaps(oneBinRanges, alnGRanges))
        values(oneBinReadStartCov) =
            S4Vectors::DataFrame(
                values=as.table(vecReadStartCov),
                idSeq=oneBinRanges$idSeq
            )
        #regroup per gene name into one object:GRangesList
        reduceRSC <-
            reduce(oneBinReadStartCov, with.revmap=TRUE)
        revmap <-
            mcols(reduceRSC)$revmap
        listReadStartCov <-
            relist(oneBinReadStartCov[unlist(revmap)], revmap)
        #normalize the previous ranges to the specified interval: 0-100 or 0 center
        #for the reverse strand reverse the fixedInterval extremes
        ixReverse <- which(unlist(runValue(strand(listReadStartCov))) == "-")
        if(length(ixReverse) > 0){
            listReadStartCovNormPlus <-
                normRange(listReadStartCov[-ixReverse], fixedInterval)
            # Minus-strand features use the mirrored interval so positions
            # still run 5' -> 3'.
            listReadStartCovNormRev <-
                normRange(listReadStartCov[ixReverse], -1 * fixedInterval)
            listReadStartCovNorm <-
                c(listReadStartCovNormPlus, listReadStartCovNormRev)
        }
        else{
            listReadStartCovNorm <-
                normRange(listReadStartCov, fixedInterval)
        }
        #get the summarized coverage
        covSummarized <- getSummarizedCov(
            listReadStartCovNorm,
            charPerc,
            seqNameRedef=renameChr)
        return(covSummarized)
    }
####### normalizes the GRanges sizes to the fixed interval size
####### and report the associated counts values
# listTSSReadStartCov : a list of GRanges object.
# : it contains the nbr of counts in 1bin ranges
# : example:
# listTSSReadStartCov <- computeReadStartCov(posByChrom,flankPromoterUniq)
# fixedInterval : a vector containing the min and max of the interval
# : the range of the fixed interval
# : to which all the ranges should be normalized.
# : example: c(0,100) or c(-20,20)
# : Warning! a non-integer interval of a 0-1 interval
# : will be automatically transformed to an integer range.
# : Instead of 0-1, use 0-100.
# renameChr : a character object
# : it provides a name for the fixed interval.
# : The new unique seqnames.
# : example: CDSLength or aroundTSS
#it returns a list of GRanges objects
# NOTE: the two branches below differ only in whether sequences whose bin
# count disagrees with the rest (multiple gene annotations) are dropped
# first; both then rescale every per-sequence range onto fixedInterval.
normRange <-
    function(
        listTSSReadStartCov,
        fixedInterval){
        iStart <- fixedInterval[1]
        iEnd <- fixedInterval[2]
        #check that the length of ranges in the listTSSReadStartCov object
        #is the same for all objects
        testLength <-
            sapply(listTSSReadStartCov, length)
        testLengthList <- unique(testLength)
        if(length(testLengthList) != 1){
            # Inconsistent bin counts: tolerate up to 70% mismatching
            # sequences (drop them), otherwise abort.
            ixLengthPb <- which(testLength != length(iStart:iEnd))
            if(length(ixLengthPb)/length(testLength) >= 0.7){
                stop(paste("More than 70% of the coding sequences (idSeq) have",
                    "multiple gene annontations!\n", sep=""))
            }
            else{
                warning("idSeqs with multiple gene ids have been eliminated!\n")
                listTSSReadStartCov <- listTSSReadStartCov[-ixLengthPb]
                if(length(iStart:iEnd) <=1 ){
                    warning("The fixedInterval parameter is not correct. \n")
                }
                #the new ranges would start at iStart, end at iEnd and have a length
                #equal to length of the old range here I take advantage that my old
                #ranges is made up of 1bp bins
                newIntervalPos <-
                    seq(iStart, iEnd, length.out=length(listTSSReadStartCov[[1]]))
                listTSSReadStartCovRedefined <-
                    sapply(
                        listTSSReadStartCov,
                        redefineStartEnd,
                        newValues=newIntervalPos,
                        startOrEnd="both"
                    )
                return(listTSSReadStartCovRedefined)
            }
        }
        else{
            #and the same length as the fixed interval
            if(testLengthList != length(iStart:iEnd)){
                stop("The fixedInterval has different length from the GRanges objects!\n")
            }
            else{
                if(length(iStart:iEnd) <= 1){
                    warning("The fixedInterval parameter is not correct. \n")
                }
                #the new ranges would start at iStart, end at iEnd and have a length
                #equal to length of the old range here I take advantage that my old
                #ranges is made up of 1bp bins
                newIntervalPos <-
                    seq(
                        iStart,
                        iEnd,
                        length.out=length(listTSSReadStartCov[[1]])
                    )
                listTSSReadStartCovRedefined <-
                    sapply(
                        listTSSReadStartCov,
                        redefineStartEnd,
                        newValues=newIntervalPos,
                        startOrEnd="both"
                    )
                return(listTSSReadStartCovRedefined)
            }
        }
    }
####### apply the reference range to the summarized coverage of all GRanges
# listCovNorm : a list of GRanges object.
# : list of read counts in exactly the same 1bin ranges
# : example:
# listTSSReadStartCov <- computeReadStartCov(posByChrom,flankPromoterUniq)
# listCovNorm=normRange(listTSSReadStartCov,c((-1) * 20, 20), "TSS")
# charPerc : a string
# : "perc" (the default) or "sum"
# : example: charPerc <- "perc"
# seqNameRedef : character seqname for the returned object.
#it returns a GRanges object: the sum or the percentage of coverage per position
getSummarizedCov <-
    function(
        listCovNorm,
        charPerc="perc",
        seqNameRedef){
        #check that the length of ranges in all the objects of the list
        testLengthList <- unique(sapply(listCovNorm, length))
        if(length(testLengthList) != 1){
            stop("Objects in the listCovNorm list have the different ranges!\n")
        }
        if(charPerc != "perc" && charPerc != "sum"){
            warning("charPerc parameter invalid: default value = perc!\n")
            charPerc <- "perc"
        }
        #now get the sum and the % of read counts per each position in the range
        #check the number of objects found.
        #If less than 10 send a warning message
        nbrObj <- length(listCovNorm)
        if(nbrObj < 1){
            stop("No range has been provided for coverage plot!\n")
        }
        else{
            if(nbrObj >= 1 && nbrObj < 10){
                warning(paste("Only ", nbrObj, " ranges analyzed!\n", sep=""))
            }
            # BUGFIX: missing() must be tested BEFORE touching seqNameRedef;
            # the original evaluated `seqNameRedef == ""` first, which errors
            # out when the argument is genuinely missing.
            if(missing(seqNameRedef) || seqNameRedef == "" || is.na(seqNameRedef)){
                warning("Invalid seqNameRedef parameter -> No renaming.\n")
            }
            #sum the read Start counts over all the TSS
            listReadStartCov <- lapply(listCovNorm,
                function(TSSCounts){
                    TSSCounts = sort(TSSCounts);
                    TSSCounts$values
                })
            sumReadStartPerPos <- Reduce("+", listReadStartCov)
            if(charPerc == "perc"){
                # Express each position as a percentage of the total signal.
                percReadStartPerPos <-
                    sumReadStartPerPos/sum(sumReadStartPerPos)*100
                TSSFlankPercReadStart <-
                    GRanges(
                        seqnames=seqNameRedef,
                        ranges(sort(listCovNorm[[1]])),
                        values=percReadStartPerPos
                    )
            }
            else{
                TSSFlankPercReadStart <-
                    GRanges(
                        seqnames=seqNameRedef,
                        ranges(sort(listCovNorm[[1]])),
                        values=sumReadStartPerPos
                    )
            }
        }
        return(TSSFlankPercReadStart)
    }
####### prepare the feature object and shift it accordingly
# transcGRangesList : a GRangesList object.
# : it contains the GRanges of exons for each transcript
# : example:
# transcGRangesList=exonsBy(txdb,by="tx",use.names=T)
# shiftValue : numeric
# : offset of read starts on transcript.
# : output of plotSummarizedCov function
# : example: shift=-14
# Returns a named list (one element per retained transcript) of genomic
# positions covering the transcript minus the shifted-off end; transcripts
# shorter than |shiftValue| are dropped.
applyShiftFeature <-
    function(
        transcGRangesList,
        shiftValue){
        #check parameter validity
        if(missing(shiftValue) || !inherits(shiftValue, "numeric")){
            shiftValue <- 0
            warning("Incorrect shiftValue parameter! No shift is performed!\n")
        }
        if(!is(transcGRangesList, "GRangesList")){
            stop(
                paste(
                    "transcGRangesList parameter is of class ",
                    class(transcGRangesList),
                    " instead of GRangesList!\n",
                    sep=""
                )
            )
        }
        transcWidth <-
            GenomicFeatures::transcriptWidths(
                start(transcGRangesList),
                end(transcGRangesList)
            )
        absShiftVal <- abs(shiftValue)
        #if width of transcript is smaller than the absolute shiftValue
        #eliminate the transcript
        ixSmallTransc <- which(transcWidth <= absShiftVal)
        if(length(ixSmallTransc) > 0){
            transcGRangesList <- transcGRangesList[-ixSmallTransc]
            # Recompute widths after dropping the short transcripts.
            transcWidth <-
                GenomicFeatures::transcriptWidths(
                    start(transcGRangesList),
                    end(transcGRangesList)
                )
        }
        #now if the shiftValue is positive, the start of the transcript is shifted
        if(shiftValue > 0){
            usefulRangeOnTransc <-
                cbind(
                    startT=rep(absShiftVal+1, length(transcGRangesList)),
                    endT=transcWidth
                )
        }
        #else, it is the end of the transcript that we shift
        else{
            usefulRangeOnTransc <- cbind(startT=1, endT=transcWidth - absShiftVal)
        }
        # Per-transcript vector of retained transcript-local positions.
        listeUsefulRanges <-
            lapply(seq_len(length(transcGRangesList)),
                function(ixTransc){
                    usefulRangeOnTransc[ixTransc, 1]:usefulRangeOnTransc[ixTransc, 2]}
            )
        #for the remaining positions in the transcript, make 1bp bins of the genomic positions
        shiftedTransc <-
            GenomicFeatures::transcriptLocs2refLocs(
                listeUsefulRanges,
                start(transcGRangesList),
                end(transcGRangesList),
                as.character(S4Vectors::runValue(strand(transcGRangesList))),
                decreasing.rank.on.minus.strand=TRUE
            )
        names(shiftedTransc) <- names(transcGRangesList)
        return(shiftedTransc)
    }
# Zero out NA runs in an S4Vectors Rle object.
# rleObject : an Rle.
# NOTE(review): besides zeroing the run values, this also sets the matching
# run LENGTHS to 0, which removes those runs from the Rle rather than keeping
# zero-valued positions -- confirm with callers that shrinking is intended.
# That behaviour is preserved here.
naTozeroRle=function(rleObject){
  # Perform the class check the original comment promised but never did.
  if(!inherits(rleObject, "Rle")){
    stop("rleObject parameter must be an Rle object!\n")
  }
  ixNA=which(is.na(S4Vectors::runValue(rleObject)))
  if(length(ixNA)>0)
  {
    S4Vectors::runValue(rleObject)[ixNA]=0;
    S4Vectors::runLength(rleObject)[ixNA]=0
  }
  return(rleObject)
}
# Convert a (named) list of data.frames into the long three-column
# (sample, type, value) layout expected by ggplot2.
# listData    : list of data.frames; element names become the `sample`
#               labels (unnamed elements fall back to "data<i>").
# columnIndex : indices of the columns to stack into `value`.
# logBool     : when 1, values are log2(x + 1)-transformed.
# Returns a data.frame with `sample`/`type` labels and a numeric `value`.
funcDataFrameGGPlot2 <-
    function(
        listData,
        columnIndex,
        logBool){
        # Collect one piece per (dataset, column) pair and bind once at the
        # end: growing the result with rbind() inside the loop is O(n^2).
        pieces <- vector("list", length(listData) * length(columnIndex))
        for(ixData in seq_len(length(listData))){
            data <- listData[[ixData]]
            # Label by the list element's name when available ("data<i>"
            # otherwise); invariant over the inner loop, so hoisted out.
            if(is.null(names(listData)[ixData]) || is.na(names(listData)[ixData])){
                sampleName <- paste("data", ixData, sep="")
            }
            else{
                sampleName <- names(listData)[ixData]
            }
            for(i in seq_len(length(columnIndex))){
                if(logBool == 1){
                    value <-
                        log2(as.numeric(as.character(data[,columnIndex[i]])) + 1)
                }
                else{
                    value <- as.numeric(as.character(data[,columnIndex[i]]))
                }
                pieces[[(ixData - 1) * length(columnIndex) + i]] <-
                    cbind(
                        rep(sampleName, nrow(data)),
                        rep(colnames(data)[columnIndex[i]], nrow(data)),
                        value
                    )
            }
        }
        dataGgplot <- do.call(rbind, pieces)
        colnames(dataGgplot) <- c("sample", "type", "value")
        dataGgplot <- as.data.frame(dataGgplot)
        dataGgplot$value <- as.numeric(as.character(dataGgplot$value))
        return(dataGgplot)
    }
# Grouped boxplot with overlaid, dodged points.  Boxes are filled by the
# "sample" column when the input has exactly one column of that name,
# otherwise by "type".  Returns the ggplot object.
funcBoxplot <-
    function(
        dataGgplot,
        xText,
        yText,
        titleText){
        # Dummy bindings to quiet R CMD check notes about non-standard
        # evaluation inside aes().
        type=NULL
        value=NULL
        x=NULL
        # Mirrors the original length(which(colnames == "sample")) == 1 test.
        fillBySample <- sum(colnames(dataGgplot) == "sample") == 1
        p <- ggplot2::ggplot(data = dataGgplot, ggplot2::aes(x=type, y=value))
        if(fillBySample){
            p <- p +
                ggplot2::geom_boxplot(ggplot2::aes(fill = sample)) +
                ggplot2::geom_point(
                    ggplot2::aes(y = value, group = sample),
                    position=ggplot2::position_dodge(width=0.75)
                )
        }
        else{
            p <- p +
                ggplot2::geom_boxplot(ggplot2::aes(fill = type)) +
                ggplot2::geom_point(
                    ggplot2::aes(y = value, group = type),
                    position=ggplot2::position_dodge(width=0.75)
                )
        }
        p <- p +
            ggplot2::xlab(xText) +
            ggplot2::ylab(yText) +
            ggplot2::ggtitle(titleText) +
            ggplot2::guides(fill=ggplot2::guide_legend(title="sample")) +
            ggplot2::theme(axis.text.x = ggplot2::element_text(angle=90, hjust=1))
        return(p)
    }
#http://gastonsanchez.wordpress.com/2012/08/27/scatterplot-matrices-with-ggplot/
# Build the three data pieces needed for a ggplot2 scatterplot matrix:
#  - upper    : x/y point pairs for each above-diagonal facet,
#  - lower    : one row per below-diagonal facet holding the facet centre
#               and the rounded Pearson correlation used as a text label,
#  - densities: per-variable values for the diagonal density curves.
# data : a data.frame of numeric columns.
makePairs <-
    function(data){
        grid <- expand.grid(x=seq_len(ncol(data)), y=seq_len(ncol(data)))
        # Plain logical indexing instead of subset(): subset()'s
        # non-standard evaluation is fragile inside functions.
        gridUpper <- grid[grid$x < grid$y, ]
        upper <- do.call("rbind", lapply(seq_len(NROW(gridUpper)), function(i){
            xcol <- gridUpper[i, "x"]
            ycol <- gridUpper[i, "y"]
            data.frame(xvar=names(data)[ycol], yvar=names(data)[xcol],
                x=data[,xcol], y=data[,ycol], data)
        }))
        upper$xvar <- factor(upper$xvar, levels=names(data))
        upper$yvar <- factor(upper$yvar, levels=names(data))
        gridLower <- grid[grid$x > grid$y, ]
        lower <- do.call("rbind", lapply(seq_len(NROW(gridLower)), function(i){
            xcol <- gridLower[i, "x"]
            ycol <- gridLower[i, "y"]
            data.frame(
                xvar=names(data)[ycol],
                yvar=names(data)[xcol],
                # Facet centre: where the correlation label is drawn.
                x=(max(data[,xcol])+min(data[,xcol]))/2,
                y=(max(data[,ycol])+min(data[,ycol]))/2,
                labs=round(cor(data[,xcol],data[,ycol]),3)
            )
        }))
        densities <- do.call("rbind", lapply(seq_len(ncol(data)), function(i){
            data.frame(xvar=names(data)[i], yvar=names(data)[i], x=data[, i])
        }))
        list(upper=upper, lower=lower, densities=densities)
    }
#this function is an adaptation of the ggplot pairs plot
#it takes as input a matrix or data.frame and makes the pairs ggplot
# data  : data.frame/matrix of numeric columns (passed to makePairs).
# title : plot title.
# Layers: scatter points per facet, per-variable density curve (rescaled to
# the facet's x range), a y=x diagonal, a linear-regression smooth, and the
# correlation labels from makePairs in the lower facets.
funcPlotPairs <-
    function(
        data,
        title){
        # Dummy bindings to quiet R CMD check notes about non-standard
        # evaluation inside aes().
        ..scaled.. <- NULL
        x <- NULL
        y <- NULL
        labs <- NULL
        gridData <- makePairs(data)
        upperGrid <- data.frame(gridData$upper)
        pPairs <- ggplot2::ggplot(upperGrid, ggplot2::aes_string(x="x", y="y")) +
            ggplot2::facet_grid(xvar ~ yvar, scales="free") +
            ggplot2::geom_point(color="#6495ED") +
            # ..scaled.. density stretched onto each facet's own x range.
            ggplot2::stat_density(
                ggplot2::aes(x=x, y=..scaled.. * diff(range(x)) + min(x)),
                data=gridData$densities, position="identity",
                colour="grey20", geom="line", lwd=1) +
            ggplot2::geom_abline(
                ggplot2::aes(color="Diagonal", fill="Diagonal", intercept = 0, slope=1),
                lwd=1) +
            ggplot2::geom_smooth(
                ggplot2::aes(x=x, y=y, color = "Linear_regression", fill = "Linear_regression"),
                method = "lm", lwd=1) +
            ggplot2::theme_bw() +
            ggplot2::theme(
                axis.title.x=ggplot2::element_blank(),
                axis.title.y=ggplot2::element_blank()) +
            # Correlation value text in the below-diagonal facets.
            ggplot2::geom_text(
                data=gridData$lower,
                ggplot2::aes(x=x, y=y, label=labs, group=NULL),
                fontface=2,
                colour="steelblue4") +
            # Manual scales give the two reference lines a shared legend.
            ggplot2::scale_fill_manual(
                name="Lines",
                values=c('Diagonal'='white', 'Linear_regression'='white')) +
            ggplot2::scale_colour_manual(
                name="Lines",
                values=c('Diagonal'='brown2', 'Linear_regression'='black'),
                guide='legend') +
            ggplot2::guides(
                colour=ggplot2::guide_legend(override.aes=list(linetype=c(1, 1)))) +
            ggplot2::ggtitle(title)
        return(pPairs)
    }
# Split a nucleotide sequence into fixed-size motifs.
# seqChar   : a single character string (e.g. a CDS sequence).
# sizeMotif : motif length in characters (3 for codons).
# stepSize  : distance between consecutive motif starts (3 for an in-frame
#             codon walk, 1 for a sliding window).
# Returns a character matrix with columns "codonID" (1-based motif rank,
# stored as character) and "codon"; trailing fragments shorter than
# sizeMotif are dropped.
getCodons <-
    function(
        seqChar, sizeMotif, stepSize){
        # Candidate starts run to nchar-1; too-short tail motifs are
        # filtered out below.
        motifStart <- seq.int(1, nchar(seqChar)-1, stepSize)
        motifEnd <- motifStart + sizeMotif - 1L
        codonSeq <-
            substring(
                seqChar,
                motifStart,
                motifEnd
            )
        #motifs with a size different from sizeMotif are discarded
        ixMotifSizes <- which(nchar(codonSeq) != sizeMotif)
        if(length(ixMotifSizes) > 0){
            codonSeq <- codonSeq[-ixMotifSizes]
        }
        codonsInSeq <- cbind(seq_len(length(codonSeq)), codonSeq)
        colnames(codonsInSeq) <- c("codonID", "codon")
        # NOTE(review): cbind() above yields a character matrix, so the
        # original `codonsInSeq[,1] <- as.numeric(...)` round-trip was a
        # no-op (values were coerced straight back to character).  It is
        # dropped; callers still receive character "codonID"s as before.
        return(codonsInSeq)
    }
| /R/utils.R | no_license | alexMPopa/RiboProfiling | R | false | false | 18,489 | r |
####### Redefine the start and/or end coordinates of a GRanges object.
# rangeObj  : a GRanges object.
# newValues : a vector of new coordinates, same length as rangeObj.
# startOrEnd: which boundary to replace: "start", "end", or "both".
# Returns the modified GRanges object.
redefineStartEnd <-
    function(
        rangeObj,
        newValues,
        startOrEnd){
        if(length(rangeObj) != length(newValues)){
            stop("The rangeObj and your new values don't have the same length!\n")
        }
        # Fail loudly on an invalid selector: the original silently returned
        # the object unchanged, masking caller typos.
        if(!(startOrEnd %in% c("start", "end", "both"))){
            stop("startOrEnd should be either start, end, or both!\n")
        }
        if(startOrEnd == "start" || startOrEnd == "both"){
            start(rangeObj) <- newValues
        }
        if(startOrEnd == "end" || startOrEnd == "both"){
            end(rangeObj) <- newValues
        }
        return(rangeObj)
    }
# Summarise read-start coverage for one alignment set over 1 bp feature bins.
# alnGRanges    : GRanges of read alignments.
# oneBinRanges  : GRanges of 1 bp bins tiling the features, carrying an
#                 `idSeq` feature identifier.
# fixedInterval : min/max of the normalised coordinate interval (see normRange).
# renameChr     : seqname assigned to the summarised profile.
# charPerc      : "perc" or "sum" (see getSummarizedCov).
# Returns a GRanges of summarised coverage per normalised position.
readStartCov1Aln <-
    function(
        alnGRanges,
        oneBinRanges,
        fixedInterval,
        renameChr,
        charPerc){
        oneBinReadStartCov <- oneBinRanges
        # Count overlapping alignments per 1 bp bin (as.table on the Hits).
        vecReadStartCov <-
            suppressWarnings(findOverlaps(oneBinRanges, alnGRanges))
        values(oneBinReadStartCov) =
            S4Vectors::DataFrame(
                values=as.table(vecReadStartCov),
                idSeq=oneBinRanges$idSeq
            )
        #regroup per gene name into one object:GRangesList
        reduceRSC <-
            reduce(oneBinReadStartCov, with.revmap=TRUE)
        revmap <-
            mcols(reduceRSC)$revmap
        listReadStartCov <-
            relist(oneBinReadStartCov[unlist(revmap)], revmap)
        #normalize the previous ranges to the specified interval: 0-100 or 0 center
        #for the reverse strand reverse the fixedInterval extremes
        ixReverse <- which(unlist(runValue(strand(listReadStartCov))) == "-")
        if(length(ixReverse) > 0){
            listReadStartCovNormPlus <-
                normRange(listReadStartCov[-ixReverse], fixedInterval)
            # Minus-strand features use the mirrored interval so positions
            # still run 5' -> 3'.
            listReadStartCovNormRev <-
                normRange(listReadStartCov[ixReverse], -1 * fixedInterval)
            listReadStartCovNorm <-
                c(listReadStartCovNormPlus, listReadStartCovNormRev)
        }
        else{
            listReadStartCovNorm <-
                normRange(listReadStartCov, fixedInterval)
        }
        #get the summarized coverage
        covSummarized <- getSummarizedCov(
            listReadStartCovNorm,
            charPerc,
            seqNameRedef=renameChr)
        return(covSummarized)
    }
####### normalizes the GRanges sizes to the fixed interval size
####### and report the associated counts values
# listTSSReadStartCov : a list of GRanges object.
# : it contains the nbr of counts in 1bin ranges
# : example:
# listTSSReadStartCov <- computeReadStartCov(posByChrom,flankPromoterUniq)
# fixedInterval : a vector containing the min and max of the interval
# : the range of the fixed interval
# : to which all the ranges should be normalized.
# : example: c(0,100) or c(-20,20)
# : Warning! a non-integer interval of a 0-1 interval
# : will be automatically transformed to an integer range.
# : Instead of 0-1, use 0-100.
# renameChr : a character object
# : it provides a name for the fixed interval.
# : The new unique seqnames.
# : example: CDSLength or aroundTSS
#it returns a list of GRanges objects
# NOTE: the two branches below differ only in whether sequences whose bin
# count disagrees with the rest (multiple gene annotations) are dropped
# first; both then rescale every per-sequence range onto fixedInterval.
normRange <-
    function(
        listTSSReadStartCov,
        fixedInterval){
        iStart <- fixedInterval[1]
        iEnd <- fixedInterval[2]
        #check that the length of ranges in the listTSSReadStartCov object
        #is the same for all objects
        testLength <-
            sapply(listTSSReadStartCov, length)
        testLengthList <- unique(testLength)
        if(length(testLengthList) != 1){
            # Inconsistent bin counts: tolerate up to 70% mismatching
            # sequences (drop them), otherwise abort.
            ixLengthPb <- which(testLength != length(iStart:iEnd))
            if(length(ixLengthPb)/length(testLength) >= 0.7){
                stop(paste("More than 70% of the coding sequences (idSeq) have",
                    "multiple gene annontations!\n", sep=""))
            }
            else{
                warning("idSeqs with multiple gene ids have been eliminated!\n")
                listTSSReadStartCov <- listTSSReadStartCov[-ixLengthPb]
                if(length(iStart:iEnd) <=1 ){
                    warning("The fixedInterval parameter is not correct. \n")
                }
                #the new ranges would start at iStart, end at iEnd and have a length
                #equal to length of the old range here I take advantage that my old
                #ranges is made up of 1bp bins
                newIntervalPos <-
                    seq(iStart, iEnd, length.out=length(listTSSReadStartCov[[1]]))
                listTSSReadStartCovRedefined <-
                    sapply(
                        listTSSReadStartCov,
                        redefineStartEnd,
                        newValues=newIntervalPos,
                        startOrEnd="both"
                    )
                return(listTSSReadStartCovRedefined)
            }
        }
        else{
            #and the same length as the fixed interval
            if(testLengthList != length(iStart:iEnd)){
                stop("The fixedInterval has different length from the GRanges objects!\n")
            }
            else{
                if(length(iStart:iEnd) <= 1){
                    warning("The fixedInterval parameter is not correct. \n")
                }
                #the new ranges would start at iStart, end at iEnd and have a length
                #equal to length of the old range here I take advantage that my old
                #ranges is made up of 1bp bins
                newIntervalPos <-
                    seq(
                        iStart,
                        iEnd,
                        length.out=length(listTSSReadStartCov[[1]])
                    )
                listTSSReadStartCovRedefined <-
                    sapply(
                        listTSSReadStartCov,
                        redefineStartEnd,
                        newValues=newIntervalPos,
                        startOrEnd="both"
                    )
                return(listTSSReadStartCovRedefined)
            }
        }
    }
####### apply the reference range to the summarized coverage of all GRanges
# listCovNorm : a list of GRanges objects.
#             : read counts in exactly the same 1bp-bin ranges
#             : example:
#  listTSSReadStartCov <- computeReadStartCov(posByChrom,flankPromoterUniq)
#  listCovNorm=normRange(listTSSReadStartCov,c((-1) * 20, 20), "TSS")
# charPerc : a string, "perc" (the default) or "sum"
#          : example: charPerc <- "perc"
# seqNameRedef : a character scalar used as the seqnames of the output;
#              : falls back to "" (with a warning) when missing/NA/empty
# it returns a GRanges object: the sum or the percentage of coverage per position
getSummarizedCov <-
  function(
    listCovNorm,
    charPerc="perc",
    seqNameRedef){
    #all objects in the list must cover the same number of positions,
    #otherwise a position-wise sum is meaningless
    testLengthList <- unique(sapply(listCovNorm, length))
    if(length(testLengthList) != 1){
      stop("Objects in the listCovNorm list have the different ranges!\n")
    }
    if(charPerc != "perc" && charPerc != "sum"){
      warning("charPerc parameter invalid: default value = perc!\n")
      charPerc <- "perc"
    }
    #BUG FIX: missing() must be evaluated before the argument itself is
    #touched; the original tested `seqNameRedef == ""` first, which raised
    #"argument is missing, with no default" whenever the argument was absent.
    #An invalid value now falls back to "" so the GRanges call below works.
    if(missing(seqNameRedef) || is.na(seqNameRedef) || seqNameRedef == ""){
      warning("Invalid seqNameRedef parameter -> No renaming.\n")
      seqNameRedef <- ""
    }
    #check the number of objects found; if less than 10 warn the user
    nbrObj <- length(listCovNorm)
    if(nbrObj < 1){
      stop("No range has been provided for coverage plot!\n")
    }
    if(nbrObj < 10){
      warning(paste("Only ", nbrObj, " ranges analyzed!\n", sep=""))
    }
    #extract the per-position counts of every object, in a common sorted order
    listReadStartCov <- lapply(listCovNorm,
      function(TSSCounts){
        TSSCounts <- sort(TSSCounts)
        TSSCounts$values
      })
    #position-wise sum of the read start counts over all ranges
    sumReadStartPerPos <- Reduce("+", listReadStartCov)
    if(charPerc == "perc"){
      #express each position as a percentage of the total signal
      valuesOut <- sumReadStartPerPos/sum(sumReadStartPerPos)*100
    }
    else{
      valuesOut <- sumReadStartPerPos
    }
    TSSFlankPercReadStart <-
      GRanges(
        seqnames=seqNameRedef,
        ranges(sort(listCovNorm[[1]])),
        values=valuesOut
      )
    return(TSSFlankPercReadStart)
  }
####### prepare the feature object and shift it accordingly
# transcGRangesList : a GRangesList object.
#                   : it contains the GRanges of exons for each transcript
#                   : example:
#  transcGRangesList=exonsBy(txdb,by="tx",use.names=T)
# shiftValue : numeric scalar
#            : offset of read starts on transcript.
#            : output of plotSummarizedCov function
#            : example: shift=-14
# returns a named list of integer vectors (genomic positions), restricted to
# transcripts whose width is strictly greater than abs(shiftValue)
applyShiftFeature <-
  function(
    transcGRangesList,
    shiftValue){
    #BUG FIX: inherits(x, "numeric") is FALSE for integer vectors (their
    #class is "integer"), so a perfectly valid shift such as -14L was
    #silently reset to 0; is.numeric() accepts both integer and double.
    #A length check is added because the arithmetic below assumes a scalar.
    if(missing(shiftValue) || !is.numeric(shiftValue) || length(shiftValue) != 1){
      shiftValue <- 0
      warning("Incorrect shiftValue parameter! No shift is performed!\n")
    }
    if(!is(transcGRangesList, "GRangesList")){
      stop(
        paste(
          "transcGRangesList parameter is of class ",
          class(transcGRangesList),
          " instead of GRangesList!\n",
          sep=""
        )
      )
    }
    transcWidth <-
      GenomicFeatures::transcriptWidths(
        start(transcGRangesList),
        end(transcGRangesList)
      )
    absShiftVal <- abs(shiftValue)
    #transcripts shorter than (or equal to) the shift cannot be shifted:
    #eliminate them and recompute the widths
    ixSmallTransc <- which(transcWidth <= absShiftVal)
    if(length(ixSmallTransc) > 0){
      transcGRangesList <- transcGRangesList[-ixSmallTransc]
      transcWidth <-
        GenomicFeatures::transcriptWidths(
          start(transcGRangesList),
          end(transcGRangesList)
        )
    }
    #positive shift: trim the start of the transcript;
    #negative shift: trim the end of the transcript
    if(shiftValue > 0){
      usefulRangeOnTransc <-
        cbind(
          startT=rep(absShiftVal+1, length(transcGRangesList)),
          endT=transcWidth
        )
    }
    else{
      usefulRangeOnTransc <- cbind(startT=1, endT=transcWidth - absShiftVal)
    }
    #per transcript, the transcript-local positions that survive the shift
    listeUsefulRanges <-
      lapply(seq_along(transcGRangesList),
        function(ixTransc){
          usefulRangeOnTransc[ixTransc, 1]:usefulRangeOnTransc[ixTransc, 2]}
      )
    #map the remaining transcript-local positions back onto 1bp bins of
    #genomic coordinates
    shiftedTransc <-
      GenomicFeatures::transcriptLocs2refLocs(
        listeUsefulRanges,
        start(transcGRangesList),
        end(transcGRangesList),
        as.character(S4Vectors::runValue(strand(transcGRangesList))),
        decreasing.rank.on.minus.strand=TRUE
      )
    names(shiftedTransc) <- names(transcGRangesList)
    return(shiftedTransc)
  }
# Replace NA runs in a run-length-encoded vector with zero.
# rleObject : an S4Vectors::Rle object.
# Returns the modified Rle.
naTozeroRle=function(rleObject){
  #check rleObject class
  ixNA=which(is.na(S4Vectors::runValue(rleObject)))
  if(length(ixNA)>0)
  {
    S4Vectors::runValue(rleObject)[ixNA]=0;
    #NOTE(review): setting runLength to 0 does not merely zero the NA runs,
    #it removes those positions entirely, so the returned Rle can be shorter
    #than the input. Confirm the shortening is intended; if the goal is only
    #"NA -> 0", this line should probably be dropped.
    S4Vectors::runLength(rleObject)[ixNA]=0
  }
  return(rleObject)
}
#make the ggplot2 long format: stack the selected columns of every data set
#into one data.frame with columns (sample, type, value)
# listData    : a (possibly named) list of data.frames/matrices; unnamed
#               elements are labeled "data1", "data2", ...
# columnIndex : integer vector of column positions to extract from each element
# logBool     : if 1, values are transformed with log2(x + 1)
# returns a data.frame with character columns "sample" and "type" and a
# numeric column "value"
funcDataFrameGGPlot2 <-
  function(
    listData,
    columnIndex,
    logBool){
    #collect one small data.frame per (dataset, column) and bind once at the
    #end: avoids the O(n^2) rbind-in-loop growth of the original, and avoids
    #the numeric -> character -> numeric round-trip that lost float precision
    pieces <- list()
    for(ixData in seq_along(listData)){
      data <- listData[[ixData]]
      #fall back to "data<i>" when the list element is unnamed
      sampleName <- names(listData)[ixData]
      if(is.null(sampleName) || is.na(sampleName)){
        sampleName <- paste("data", ixData, sep="")
      }
      for(i in seq_along(columnIndex)){
        value <- as.numeric(as.character(data[, columnIndex[i]]))
        if(logBool == 1){
          value <- log2(value + 1)
        }
        pieces[[length(pieces) + 1]] <-
          data.frame(
            sample=rep(sampleName, nrow(data)),
            type=rep(colnames(data)[columnIndex[i]], nrow(data)),
            value=value,
            stringsAsFactors=FALSE
          )
      }
    }
    #robustness: an empty input list now yields an empty, well-formed frame
    #instead of erroring on the colnames assignment
    if(length(pieces) == 0){
      return(data.frame(sample=character(0), type=character(0),
                        value=numeric(0), stringsAsFactors=FALSE))
    }
    dataGgplot <- do.call(rbind, pieces)
    rownames(dataGgplot) <- NULL
    return(dataGgplot)
  }
#ggplot2 boxplot of value by type, optionally filled/dodged by sample
# dataGgplot : data.frame with columns "type" and "value", and optionally
#              "sample" (e.g. the output of funcDataFrameGGPlot2)
# xText, yText, titleText : axis labels and plot title
# returns the ggplot object (caller decides when to print it)
funcBoxplot <-
  function(
    dataGgplot,
    xText,
    yText,
    titleText){
    #dummy bindings to silence R CMD check notes about NSE column names
    #(the unused `x=NULL` of the original has been removed)
    type <- NULL
    value <- NULL
    pBoxplot <- ggplot2::ggplot(data = dataGgplot, ggplot2::aes(x=type, y=value))
    #fill/dodge by sample when that column exists, otherwise by type
    if(length(which(colnames(dataGgplot) == "sample")) == 1){
      pBoxplot <- pBoxplot +
        ggplot2::geom_boxplot(ggplot2::aes(fill = sample)) +
        ggplot2::geom_point(
          ggplot2::aes(y = value, group = sample),
          position=ggplot2::position_dodge(width=0.75)
        )
    }
    else{
      pBoxplot <- pBoxplot +
        ggplot2::geom_boxplot(ggplot2::aes(fill = type)) +
        ggplot2::geom_point(
          ggplot2::aes(y = value, group = type),
          position=ggplot2::position_dodge(width=0.75)
        )
    }
    pBoxplot <- pBoxplot +
      ggplot2::xlab(xText) + ggplot2::ylab(yText) +
      ggplot2::ggtitle(titleText) +
      ggplot2::guides(fill=ggplot2::guide_legend(title="sample")) +
      ggplot2::theme(axis.text.x = ggplot2::element_text(angle=90, hjust=1))
    #the original evaluated `pBoxplot` on its own line before return(); inside
    #a function that is a no-op (auto-printing only happens at top level), so
    #the dead statement has been removed
    return(pBoxplot)
  }
#build the three data.frames needed for a ggplot2 scatterplot matrix
#adapted from:
#http://gastonsanchez.wordpress.com/2012/08/27/scatterplot-matrices-with-ggplot/
# data : a data.frame of numeric columns
# returns list(upper=..., lower=..., densities=...) where
#   upper     : the point coordinates of every pairwise scatter panel
#   lower     : panel-centre coordinates plus the rounded correlation label
#   densities : per-variable values for the diagonal density panels
makePairs <-
  function(data){
    #dummy bindings to silence R CMD check notes (columns of `grid`)
    x <- NULL
    y <- NULL
    nc <- ncol(data)
    grid <- expand.grid(x=seq_len(nc), y=seq_len(nc))
    #upper triangle: raw scatter points for each (xcol, ycol) pair
    gridUpper <- subset(grid, x < y)
    upper <- do.call("rbind", lapply(seq_len(NROW(gridUpper)), function(i){
      xcol <- gridUpper[i, "x"]
      ycol <- gridUpper[i, "y"]
      data.frame(xvar=names(data)[ycol], yvar=names(data)[xcol],
                 x=data[, xcol], y=data[, ycol], data)
    }))
    #fix the facet order to the column order of the input
    upper$xvar <- factor(upper$xvar, levels=names(data))
    upper$yvar <- factor(upper$yvar, levels=names(data))
    #lower triangle: one correlation label per panel, placed at its centre
    gridLower <- subset(grid, x > y)
    lower <- do.call("rbind", lapply(seq_len(NROW(gridLower)), function(i){
      xcol <- gridLower[i, "x"]
      ycol <- gridLower[i, "y"]
      data.frame(
        xvar=names(data)[ycol],
        yvar=names(data)[xcol],
        x=(max(data[, xcol])+min(data[, xcol]))/2,
        y=(max(data[, ycol])+min(data[, ycol]))/2,
        labs=round(cor(data[, xcol], data[, ycol]), 3)
      )
    }))
    #diagonal: every variable against itself, for the density overlays
    densities <- do.call("rbind", lapply(seq_len(nc), function(i){
      data.frame(xvar=names(data)[i], yvar=names(data)[i], x=data[, i])
    }))
    list(upper=upper, lower=lower, densities=densities)
  }
#this function is an adaptation of the ggplot pairs plot
#it takes as input a matrix or data.frame and makes the pairs ggplot
# data  : numeric data.frame/matrix (forwarded to makePairs)
# title : plot title
# Returns a ggplot object: the upper-triangle facets show the scatter points
# with a per-variable density curve, the identity diagonal and a linear fit;
# the lower-triangle facets show the correlation value from makePairs.
# NOTE(review): ggplot2::aes_string() and the `..scaled..` stat variable are
# deprecated in current ggplot2 (aes() + after_stat() are the replacements);
# kept as-is because the targeted ggplot2 version is unknown here.
funcPlotPairs <-
  function(
    data,
    title){
    #dummy bindings so R CMD check does not flag the NSE column names
    ..scaled.. <- NULL
    x <- NULL
    y <- NULL
    labs <- NULL
    gridData <- makePairs(data)
    upperGrid <- data.frame(gridData$upper)
    pPairs <- ggplot2::ggplot(upperGrid, ggplot2::aes_string(x="x", y="y")) +
      #one facet per variable pair, free scales so panels match their data
      ggplot2::facet_grid(xvar ~ yvar, scales="free") +
      ggplot2::geom_point(color="#6495ED") +
      #density curve, rescaled from [0,1] to the data range of each panel
      ggplot2::stat_density(
        ggplot2::aes(x=x, y=..scaled.. * diff(range(x)) + min(x)),
        data=gridData$densities, position="identity",
        colour="grey20", geom="line", lwd=1) +
      #identity line (y = x)
      ggplot2::geom_abline(
        ggplot2::aes(color="Diagonal", fill="Diagonal", intercept = 0, slope=1),
        lwd=1) +
      #least-squares fit per panel
      ggplot2::geom_smooth(
        ggplot2::aes(x=x, y=y, color = "Linear_regression", fill = "Linear_regression"),
        method = "lm", lwd=1) +
      ggplot2::theme_bw() +
      ggplot2::theme(
        axis.title.x=ggplot2::element_blank(),
        axis.title.y=ggplot2::element_blank()) +
      #correlation labels at the centre of the lower-triangle panels
      ggplot2::geom_text(
        data=gridData$lower,
        ggplot2::aes(x=x, y=y, label=labs, group=NULL),
        fontface=2,
        colour="steelblue4") +
      #white fills keep the legend keys readable for both reference lines
      ggplot2::scale_fill_manual(
        name="Lines",
        values=c('Diagonal'='white', 'Linear_regression'='white')) +
      ggplot2::scale_colour_manual(
        name="Lines",
        values=c('Diagonal'='brown2', 'Linear_regression'='black'),
        guide='legend') +
      ggplot2::guides(
        colour=ggplot2::guide_legend(override.aes=list(linetype=c(1, 1)))) +
      ggplot2::ggtitle(title)
    return(pPairs)
  }
#split a sequence string into fixed-size motifs ("codons")
# seqChar   : a single character string (e.g. a CDS sequence)
# sizeMotif : motif length (3 for codons)
# stepSize  : distance between consecutive motif starts
#             (stepSize == sizeMotif gives non-overlapping motifs)
# returns a character matrix with columns "codonID" (1-based motif index;
# stored as character because a matrix cannot mix types -- the original's
# as.numeric() assignment into the matrix was a silent no-op) and "codon";
# sequences shorter than sizeMotif yield a 0-row matrix instead of an error
getCodons <-
  function(
    seqChar, sizeMotif, stepSize){
    seqLen <- nchar(seqChar)
    #only generate start positions whose full motif fits in the sequence:
    #this replaces the original "generate too far, then filter short motifs"
    #approach and also avoids seq.int() erroring on very short sequences
    #(the original seq.int(1, nchar - 1, stepSize) failed for nchar <= 1)
    if(seqLen < sizeMotif){
      motifStart <- integer(0)
    }
    else{
      motifStart <- seq.int(1L, seqLen - sizeMotif + 1L, stepSize)
    }
    motifEnd <- motifStart + sizeMotif - 1L
    codonSeq <-
      substring(
        seqChar,
        motifStart,
        motifEnd
      )
    codonsInSeq <- cbind(seq_along(codonSeq), codonSeq)
    colnames(codonsInSeq) <- c("codonID", "codon")
    return(codonsInSeq)
  }
|
###################
# early R code ---- ~ 2009 / (C) Scott Alexander Malec
###################
library(RMySQL)
library(tm)
# NOTE(review): setwd() with a machine-specific path makes the script
# non-portable; kept to preserve where the output files land.
setwd("/home/propp/wikileaks/")
# retrieve data from redacted_war_diary_irq and put each summary into its
# own text file named "<id>.txt"
drv <- dbDriver("MySQL")
con <- dbConnect(drv, dbname='wikileaks', user='root', password='mandarin',
                 host='127.0.0.1')
# count of distinct report ids; dbGetQuery returns a 1x1 data.frame
nrows <- dbGetQuery(con, 'SELECT COUNT(DISTINCT id) FROM redacted_war_diary_irq')
nr <- as.numeric(nrows[[1, 1]])
nr <- 20  # cap for testing; remove this line to process every report
#rewrite as *apply
for (i in seq_len(nr)) {
  # BUG FIX: the id must be interpolated into the query -- the original sent
  # the literal string "i" (and a later variant built the query with cat(),
  # which prints and returns NULL). sprintf() with %d keeps the id numeric.
  query <- sprintf('SELECT summary FROM redacted_war_diary_irq rwdi WHERE rwdi.id = %d', i)
  data <- dbGetQuery(con, query)
  # BUG FIX: cat() returns NULL, so the original's filename was NULL and
  # write(..., file="filename") wrote everything to a file literally named
  # "filename". Build the name with paste0() and pass the variable.
  filename <- paste0(i, ".txt")
  writeLines(as.character(data$summary), filename)
}
# release the database connection (the original leaked it)
dbDisconnect(con)
###########
###########
| /wikwekaSQL.R | no_license | kingfish777/wikiweka | R | false | false | 1,015 | r | ###################
# early R code ---- ~ 2009 / (C) Scott Alexander Malec
###################
library(RMySQL)
library(tm)
# NOTE(review): setwd() with a machine-specific path makes the script
# non-portable; kept to preserve where the output files land.
setwd("/home/propp/wikileaks/")
# retrieve data from redacted_war_diary_irq and put each summary into its
# own text file named "<id>.txt"
drv <- dbDriver("MySQL")
con <- dbConnect(drv, dbname='wikileaks', user='root', password='mandarin',
                 host='127.0.0.1')
# count of distinct report ids; dbGetQuery returns a 1x1 data.frame
nrows <- dbGetQuery(con, 'SELECT COUNT(DISTINCT id) FROM redacted_war_diary_irq')
nr <- as.numeric(nrows[[1, 1]])
nr <- 20  # cap for testing; remove this line to process every report
#rewrite as *apply
for (i in seq_len(nr)) {
  # BUG FIX: the id must be interpolated into the query -- the original sent
  # the literal string "i" (and a later variant built the query with cat(),
  # which prints and returns NULL). sprintf() with %d keeps the id numeric.
  query <- sprintf('SELECT summary FROM redacted_war_diary_irq rwdi WHERE rwdi.id = %d', i)
  data <- dbGetQuery(con, query)
  # BUG FIX: cat() returns NULL, so the original's filename was NULL and
  # write(..., file="filename") wrote everything to a file literally named
  # "filename". Build the name with paste0() and pass the variable.
  filename <- paste0(i, ".txt")
  writeLines(as.character(data$summary), filename)
}
# release the database connection (the original leaked it)
dbDisconnect(con)
###########
###########
### install packages (if needed) ##
#install.packages("tidyverse")
#install.packages("lubridate")
#install.packages("readr")
# load libraries
library(tidyverse)
library(readr)
library(lubridate)
#read in preliminary data
preliminary_data <- read_csv("_data/_raw/preliminary_data.csv")
View(preliminary_data)
##############################################################################################
## Explore data
str(preliminary_data)             # structure of the data set
names(preliminary_data)           # variables included in the dataset
summary(preliminary_data)         # summary of all variables
head(preliminary_data)            # first 6 rows
summary(is.na(preliminary_data))  # any missing values?
#rename columns (incl. the thermo-temperature column)
names(preliminary_data) <- c("date", "individual", "time", "pos_beh", "context",
                             "substrate", "hab_type", "sun", "therm_t", "t_lo",
                             "t_hi", "amb_t")
names(preliminary_data)
##############################################################################################
# Change "Os" (out of sight) codes to real missing values.
# BUG FIX: the original assigned the *string* "NA", which R does not treat as
# missing (it becomes a factor level and only turned into NA later via the
# as.numeric() coercion warning on the temperature columns).
os_cols <- c("pos_beh", "context", "substrate", "hab_type",
             "sun", "therm_t", "t_lo", "t_hi", "amb_t")
preliminary_data[os_cols] <- lapply(preliminary_data[os_cols],
  function(col) {
    col[!is.na(col) & col == "Os"] <- NA
    col
  })
#### Fix capitalization / typos with one lookup table per column ####
# recode_values: replace every value of x found in names(map) by map's value
recode_values <- function(x, map) {
  hit <- x %in% names(map)
  x[hit] <- map[x[hit]]
  x
}
## positional behavior
preliminary_data$pos_beh <- recode_values(preliminary_data$pos_beh, c(
  st = "St", ST = "St", ABp = "Abp", ABpW = "Abp",
  Bp = "BpS", SBp = "BpS", qw = "QW", Qw = "QW"))
## context
preliminary_data$context <- recode_values(preliminary_data$context, c(
  FO = "Fo", fo = "Fo", TV = "Tv", tv = "Tv",
  SO = "So", so = "So", re = "RE", Re = "RE"))
## substrate
preliminary_data$substrate <- recode_values(preliminary_data$substrate, c(
  t = "T", a = "A", `W/T` = "W", `T/W` = "W"))
## habitat type
preliminary_data$hab_type <- recode_values(preliminary_data$hab_type, c(
  wd = "WD", Wd = "WD", Gl = "GL", gl = "GL",
  Bm = "BM", bm = "BM", ga = "GA", Ga = "GA"))
## individual names
preliminary_data$individual <- recode_values(
  as.character(preliminary_data$individual), c(
    Lp = "LP", Bi = "BI", Si = "SI", Kl = "KL", Jm = "JM", Dw = "DW",
    Bo = "BO", Lx = "LX", Lu = "LU", Mi = "MI", Df = "DF"))
###################################################################################
## Change to factor and numeric
factor_cols <- c("pos_beh", "context", "substrate", "hab_type", "individual")
numeric_cols <- c("sun", "therm_t", "t_lo", "t_hi", "amb_t")
preliminary_data[factor_cols] <- lapply(preliminary_data[factor_cols], as.factor)
preliminary_data[numeric_cols] <- lapply(preliminary_data[numeric_cols], as.numeric)
str(preliminary_data)
#Re-name
preliminary_data2 <- preliminary_data
View(preliminary_data2)
###################################################################################
# Add time-of-day column (time is in seconds since midnight):
#   <= 09:00 e_morning, (09:00,12:00] l_morning, (12:00,14:00] e_afternoon,
#   (14:00,17:00) l_afternoon, >= 17:00 evening
# case_when evaluates conditions in order and, unlike the original chain of
# subscript assignments, is safe when `time` contains NA.
preliminary_data2$time_od <- dplyr::case_when(
  preliminary_data2$time <= 32400 ~ "e_morning",
  preliminary_data2$time <= 43200 ~ "l_morning",
  preliminary_data2$time <= 50400 ~ "e_afternoon",
  preliminary_data2$time <  61200 ~ "l_afternoon",
  preliminary_data2$time >= 61200 ~ "evening")
preliminary_data2$time_od <- factor(preliminary_data2$time_od,
  levels = c("e_morning", "l_morning", "e_afternoon", "l_afternoon", "evening"))
levels(preliminary_data2$time_od)
#create tidy database for analysis
# NOTE(review): absolute, machine-specific path -- prefer a relative path such
# as "_data/_tidy/prelim_data_tidycols.csv" (kept as-is to preserve behavior).
write.csv(preliminary_data2,
  "C:/Users/nw185_000/Documents/Iowa/Dissertation/Data/individual-project-nwackerly/_data/_tidy/prelim_data_tidycols.csv",
  row.names = FALSE)
| /_data_wrangling/preliminary_data_wrangling.R | no_license | EEB698-F2018/individual-project-nwackerly | R | false | false | 6,231 | r |
### install packages (if needed) ##
#install.packages("tidyverse")
#install.packages("lubridate")
#install.packages("readr")
# load libraries
library(tidyverse)
library(readr)
library(lubridate)
#read in preliminary data
preliminary_data <- read_csv("_data/_raw/preliminary_data.csv")
View(preliminary_data)
##############################################################################################
## Explore data
str(preliminary_data)             # structure of the data set
names(preliminary_data)           # variables included in the dataset
summary(preliminary_data)         # summary of all variables
head(preliminary_data)            # first 6 rows
summary(is.na(preliminary_data))  # any missing values?
#rename columns (incl. the thermo-temperature column)
names(preliminary_data) <- c("date", "individual", "time", "pos_beh", "context",
                             "substrate", "hab_type", "sun", "therm_t", "t_lo",
                             "t_hi", "amb_t")
names(preliminary_data)
##############################################################################################
# Change "Os" (out of sight) codes to real missing values.
# BUG FIX: the original assigned the *string* "NA", which R does not treat as
# missing (it becomes a factor level and only turned into NA later via the
# as.numeric() coercion warning on the temperature columns).
os_cols <- c("pos_beh", "context", "substrate", "hab_type",
             "sun", "therm_t", "t_lo", "t_hi", "amb_t")
preliminary_data[os_cols] <- lapply(preliminary_data[os_cols],
  function(col) {
    col[!is.na(col) & col == "Os"] <- NA
    col
  })
#### Fix capitalization / typos with one lookup table per column ####
# recode_values: replace every value of x found in names(map) by map's value
recode_values <- function(x, map) {
  hit <- x %in% names(map)
  x[hit] <- map[x[hit]]
  x
}
## positional behavior
preliminary_data$pos_beh <- recode_values(preliminary_data$pos_beh, c(
  st = "St", ST = "St", ABp = "Abp", ABpW = "Abp",
  Bp = "BpS", SBp = "BpS", qw = "QW", Qw = "QW"))
## context
preliminary_data$context <- recode_values(preliminary_data$context, c(
  FO = "Fo", fo = "Fo", TV = "Tv", tv = "Tv",
  SO = "So", so = "So", re = "RE", Re = "RE"))
## substrate
preliminary_data$substrate <- recode_values(preliminary_data$substrate, c(
  t = "T", a = "A", `W/T` = "W", `T/W` = "W"))
## habitat type
preliminary_data$hab_type <- recode_values(preliminary_data$hab_type, c(
  wd = "WD", Wd = "WD", Gl = "GL", gl = "GL",
  Bm = "BM", bm = "BM", ga = "GA", Ga = "GA"))
## individual names
preliminary_data$individual <- recode_values(
  as.character(preliminary_data$individual), c(
    Lp = "LP", Bi = "BI", Si = "SI", Kl = "KL", Jm = "JM", Dw = "DW",
    Bo = "BO", Lx = "LX", Lu = "LU", Mi = "MI", Df = "DF"))
###################################################################################
## Change to factor and numeric
factor_cols <- c("pos_beh", "context", "substrate", "hab_type", "individual")
numeric_cols <- c("sun", "therm_t", "t_lo", "t_hi", "amb_t")
preliminary_data[factor_cols] <- lapply(preliminary_data[factor_cols], as.factor)
preliminary_data[numeric_cols] <- lapply(preliminary_data[numeric_cols], as.numeric)
str(preliminary_data)
#Re-name
preliminary_data2 <- preliminary_data
View(preliminary_data2)
###################################################################################
# Add time-of-day column (time is in seconds since midnight):
#   <= 09:00 e_morning, (09:00,12:00] l_morning, (12:00,14:00] e_afternoon,
#   (14:00,17:00) l_afternoon, >= 17:00 evening
# case_when evaluates conditions in order and, unlike the original chain of
# subscript assignments, is safe when `time` contains NA.
preliminary_data2$time_od <- dplyr::case_when(
  preliminary_data2$time <= 32400 ~ "e_morning",
  preliminary_data2$time <= 43200 ~ "l_morning",
  preliminary_data2$time <= 50400 ~ "e_afternoon",
  preliminary_data2$time <  61200 ~ "l_afternoon",
  preliminary_data2$time >= 61200 ~ "evening")
preliminary_data2$time_od <- factor(preliminary_data2$time_od,
  levels = c("e_morning", "l_morning", "e_afternoon", "l_afternoon", "evening"))
levels(preliminary_data2$time_od)
#create tidy database for analysis
# NOTE(review): absolute, machine-specific path -- prefer a relative path such
# as "_data/_tidy/prelim_data_tidycols.csv" (kept as-is to preserve behavior).
write.csv(preliminary_data2,
  "C:/Users/nw185_000/Documents/Iowa/Dissertation/Data/individual-project-nwackerly/_data/_tidy/prelim_data_tidycols.csv",
  row.names = FALSE)
#======================================================================================================
# LSUserRs Data Cleaning Template (Title Your Script)
# Say a couple of the things that it does here. Who wrote it?
# When was the last edit? What does it do? Does it work with any data type?
# Rubber duck this as much as possible because you won't remember
# what you did in 6 months. Especially if you come up with something clever.
#======================================================================================================
# TRY YOUR BEST TO NOT JUST COPY AND PASTE CODE, GOOGLE WHAT YOU WANT, GET FAMILIAR WITH A FUNCTION'S
# ARGUMENTS AND EMBRACE YOUR INNER NERD AND READ THE DOCUMENTATION!!
#======================================================================================================
# Import Libraries
#--------------------------------------------------
# Load all libraries at the start of your script, remember they have to be installed first!
#======================================================================================================
# Set up your working directory
#--------------------------------------------------
# Use the space below to set your wd as you please.
# Try to pick a path that makes sense
#======================================================================================================
# Load in your data
#--------------------------------------------------
# After telling R where to look in your computer/dropbox/google drive/R Project grab what you need!
# Make sure to load in both datasets as we will want them both in our analysis.
# We also want to make sure to clean both datasets as we are going.
#======================================================================================================
# Inspect the structure of your data
#--------------------------------------------------
# R is going to do its best to guess what kind of data each of the columns of your spreadsheet are.
# Sometimes it thinks things are lists (especially if importing from SPSS)
# Go through each variable as you would with other programs and make sure you set it to what
# you think will be most useful later. The big thing to check here is categorical variables, and
# if you see any sort of string/character variables.
#======================================================================================================
# Check for Import Errors
#--------------------------------------------------
# Use a combination of the names(), View(), table(), is.na(), and complete.cases() to get a brief summary of what is
# going on in your data set to be sure there were minimal import errors and your data looks like
# you want it to. It might also be worth plotting some variables and using some common sense to find mistakes.
# Are there any participants with 999 as their subject number? Negative values where there shouldn't be?
# If there are, note them and fix these before starting any sort of statistical screening!
#======================================================================================================
# Cleaning Free Text Response Data
#--------------------------------------------------
# In your data cleaning before you might have noticed that participants were able to freely respond
# with whatever gender they wanted. Most data look to fall within the normal binary, but the computer
# needs things to be exactly the same before making an easy split?
# What would be the laziest, most efficient way to fix the gender column? What format does the variable
# have to be in order to make the changes that you need?
# When you have it figured out, make sure to run the code from top to bottom to make sure things go in
# the right order!!! As we are not dealing with huge amounts of data, the table() function will help out.
#======================================================================================================
# Merging Data
#--------------------------------------------------
# Often we will have data from other spreadsheets we want to attach such as demographic data
# to behavioral responses. Using the data.table functionality, let's merge our two csv
# files together so that we have every variable accessible to us for this analysis.
#======================================================================================================
# Checking for Univariate Outliers
#--------------------------------------------------
#======================================================================================================
# Checking for Multivariate Outliers
#--------------------------------------------------
#======================================================================================================
# Checking for Skew and Kurtosis
#--------------------------------------------------
#======================================================================================================
# Exporting Data
#--------------------------------------------------
# It's best practice to separate your cleaning and your analysis into separate scripts.
# Export the dataset you have into a new csv file into a directory that would make sense to
# someone who has never seen your project before.
#====================================================================================================== | /BasicCleaningAndAnalysis/scripts/dataCleaningTemplate.R | no_license | davidjohnbaker1/LSUseRs | R | false | false | 5,394 | r | #======================================================================================================
# LSUserRs Data Cleaning Template (Title Your Script)
# Say a couple of the things that it does here. Who wrote it?
# When was the last edit? What does it do? Does it work with any data type?
# Rubber duck this as much as possible because you won't remember
# what you did in 6 months. Especially if you come up with something clever.
#======================================================================================================
# TRY YOUR BEST TO NOT JUST COPY AND PASTE CODE, GOOGLE WHAT YOU WANT, GET FAMILIAR WITH A FUNCTION'S
# ARGUMENTS AND EMBRACE YOUR INNER NERD AND READ THE DOCUMENTATION!!
#======================================================================================================
# Import Libraries
#--------------------------------------------------
# Load all libraries at the start of your script, remember they have to be installed first!
#======================================================================================================
# Set up your working directory
#--------------------------------------------------
# Use the space below to set your wd as you please.
# Try to pick a path that makes sense
#======================================================================================================
# Load in your data
#--------------------------------------------------
# After telling R where to look in your computer/dropbox/google drive/R Project grab what you need!
# Make sure to load in both datasets as we will want them both in our analysis.
# We also want to make sure to clean both datasets as we are going.
#======================================================================================================
# Inspect the structure of your data
#--------------------------------------------------
# R is going to do its best to guess what kind of data each of the columns of your spreadsheet are.
# Sometimes it thinks things are lists (especially if importing from SPSS)
# Go through each variable as you would with other programs and make sure you set it to what
# you think will be most useful later. The big thing to check here is categorical variables, and
# if you see any sort of string/character variables.
#======================================================================================================
# Check for Import Errors
#--------------------------------------------------
# Use a combination of the names(), View(), table(), is.na(), and complete.cases() to get a brief summary of what is
# going on in your data set to be sure there were minimal import errors and your data looks like
# you want it to. It might also be worth plotting some variables and using some common sense to find mistakes.
# Are there any participants with 999 as their subject number? Negative values where there shouldn't be?
# If there are, note them and fix these before starting any sort of statistical screening!
#======================================================================================================
# Cleaning Free Text Response Data
#--------------------------------------------------
# In your data cleaning before you might have noticed that participants were able to freely respond
# with whatever gender they wanted. Most data look to fall within the normal binary, but the computer
# needs things to be exactly the same before making an easy split?
# What would be the laziest, most efficient way to fix the gender column? What format does the variable
# have to be in order to make the changes that you need?
# When you have it figured out, make sure to run the code from top to bottom to make sure things go in
# the right order!!! As we are not dealing with huge amounts of data, the table() function will help out.
#======================================================================================================
# Merging Data
#--------------------------------------------------
# Often we will have data from other spreadsheets we want to attach such as demographic data
# to behavioral responses. Using the data.table functionality, let's merge our two csv
# files together so that we have every variable accessible to us for this analysis.
#======================================================================================================
# Checking for Univariate Outliers
#--------------------------------------------------
#======================================================================================================
# Checking for Multivariate Outliers
#--------------------------------------------------
#======================================================================================================
# Checking for Skew and Kurtosis
#--------------------------------------------------
#======================================================================================================
# Exporting Data
#--------------------------------------------------
# It's best practice to separate your cleaning and your analysis into separate scripts.
# Export the dataset you have into a new csv file into a directory that would make sense to
# someone who has never seen your project before.
#====================================================================================================== |
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Mean of one pollutant across a set of monitor CSV files.
  #
  # Args:
  #   directory: path to the folder holding the monitor files
  #              ("001.csv" ... "332.csv").
  #   pollutant: "sulfate" or "nitrate" -- the column to average.
  #   id:        integer vector of monitor IDs to read.
  #
  # Returns: the mean of the pollutant over all rows of all selected
  # monitors, NAs removed, rounded to 3 digits.
  #
  # Validate first so we fail before touching the file system (the original
  # read every file and then only print()-ed a message for an unknown
  # pollutant; that is a caller error, so stop() instead).
  if (!pollutant %in% c("sulfate", "nitrate")) {
    stop("pollutant '", pollutant, "' not supported; use \"sulfate\" or \"nitrate\"",
         call. = FALSE)
  }
  # Monitor file names are zero-padded to three digits; sprintf() replaces
  # the original three-branch manual padding.
  paths <- file.path(directory, sprintf("%03d.csv", id))
  # Read every requested monitor and stack once: building a list and doing a
  # single do.call(rbind, ...) avoids the O(n^2) copying of rbind in a loop
  # (and spells out `header` instead of partially matching `head`).
  monitors <- do.call(rbind, lapply(paths, read.csv, header = TRUE))
  # Extract the requested column by name (generalises the two hard-coded
  # sulfate/nitrate branches of the original).
  round(mean(monitors[[pollutant]], na.rm = TRUE), digits = 3)
}
| /pollutantmean.R | no_license | mwgravitt/datasciencecoursera | R | false | false | 1,232 | r |
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Mean of one pollutant over the monitor files listed in `id`.
  # Equivalent to the original loop-and-rbind version: every requested CSV
  # under `directory` is read and stacked, then the requested column is
  # averaged with NAs dropped and rounded to three digits. An unrecognised
  # pollutant falls through to the original informational print().
  file_names <- sprintf("%03d.csv", id)  # "001.csv", ..., "332.csv"
  frames <- lapply(file.path(directory, file_names),
                   function(p) read.csv(file = p, header = TRUE, sep = ","))
  monitors <- do.call(rbind, frames)
  switch(pollutant,
    nitrate = round(mean(monitors$nitrate, na.rm = TRUE), digits = 3),
    sulfate = round(mean(monitors$sulfate, na.rm = TRUE), digits = 3),
    print(paste0("pollutant of ", pollutant, " no supported"))
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.varImpact.R
\name{print.varImpact}
\alias{print.varImpact}
\title{Custom printing of the varImpact results.}
\usage{
\method{print}{varImpact}(x, ...)
}
\arguments{
\item{x}{Results object from varImpact.}
\item{...}{Further arguments passed to or from other methods.}
}
\description{
Shows the consistent results by default. If there are no consistent results
it shows all results.
}
| /man/print.varImpact.Rd | no_license | guhjy/varImpact | R | false | true | 471 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.varImpact.R
\name{print.varImpact}
\alias{print.varImpact}
\title{Custom printing of the varImpact results.}
\usage{
\method{print}{varImpact}(x, ...)
}
\arguments{
\item{x}{Results object from varImpact.}
\item{...}{Further arguments passed to or from other methods.}
}
\description{
Shows the consistent results by default. If there are no consistent results
it shows all results.
}
|
\name{ConstructTG}
\alias{ConstructTG}
\title{
Construct target genes for a TF using TF-bound genes and differentially expressed genes from ChIP-chip or ChIP-seq and TF perturbation gene expression data.
}
\description{
This function requires users to first analyze their own ChIP-chip and ChIP-seq data to detect significant peaks and then annotate the peaks with their corresponding regulated target genes using the annotatePeaks function in the GSCA package. Users must also use the limma package to detect differentially expressed genes in their gene expression data (preprocessing and normalization can be done with any algorithm the user desires), then the resulting output needs to be annotated into Entrez GeneIDs. Finally, with both inputs ConstructTG will identify the activated and repressed TF target genes.
}
\usage{
ConstructTG(annonPeaksOut, limmaOut)
}
\arguments{
\item{annonPeaksOut}{
Output from the annotatePeaks function in the GSCA package. Contains the genes that correspond to the significant peaks detected from TF ChIP-chip or ChIP-seq data.
}
\item{limmaOut}{
Differential expression output from the limma package, and requires the first column of the data.frame to contain the EntrezGeneIDs that match the microarray probeset IDs.
}
}
\details{
This function is designed as one method to allow users to construct target genes after obtaining a list of significant peaks from ChIP-chip or ChIP-seq data and differential expression results from using limma to analyze their microarray data. It is not designed to be flexible to account for all methods to obtain TF-bound and/or differentially expressed genes. Users can choose to manually intersect their own TF-bound and differentially expressed genes by classifying activated genes as genes whose expression increases when the TF expression increases, and repressed genes as genes whose expression decreases when the TF expression increases. Note that significance cutoffs for peaks and differentially expressed genes need to be already applied prior to input.
}
\value{
Returns a list with two items:
\item{PosTG }{Activated TF target genes}
\item{NegTG }{Repressed TF target genes}
}
\references{
George Wu, et al. ChIP-PED enhances the analysis of ChIP-seq and ChIP-chip data. Bioinformatics 2013 Apr 23;29(9):1182-1189.
}
\author{
Zhicheng Ji, Hongkai Ji
}
\examples{
### Read in example ChIP-seq analyzed data output from GSE11431
### for Oct4 in ESCs directly downloaded from NCBI GEO
path <- system.file("extdata",package="GSCA")
chipxfile <- read.delim(paste(path,"GSM288346_ES_Oct4.txt",sep="/"),
header=FALSE,stringsAsFactors=FALSE)
### annotate each peak with the corresponding gene target
annon.out <- annotatePeaks(chipxfile,"mm8",10000,5000)
### Read in example limma output from gene expression data obtained
### by analyzing Oct4 RNAi knockdown gene with RMA then limma
### from the raw CEL files in GSE4189
### The first column contains the Entrez GeneID for each probeset ID
### annotated using the mouse4302.db package in Bioconductor.
gp.out <- read.delim(paste(path,"Pou5f1_E14TG2a_GSE4189_Limma.txt",sep="/"),
stringsAsFactors=FALSE)
ConstructTG(annon.out,gp.out)
}
\keyword{ target genes }
| /man/ConstructTG.Rd | no_license | zji90/GSCA | R | false | false | 3,270 | rd | \name{ConstructTG}
\alias{ConstructTG}
\title{
Construct target genes for a TF using TF-bound genes and differentially expressed genes from ChIP-chip or ChIP-seq and TF perturbation gene expression data.
}
\description{
This function requires users to first analyze their own ChIP-chip and ChIP-seq data to detect significant peaks and then annotate the peaks with their corresponding regulated target genes using the annotatePeaks function in the GSCA package. Users must also use the limma package to detect differentially expressed genes in their gene expression data (preprocessing and normalization can be done with any algorithm the user desires), then the resulting output needs to be annotated into Entrez GeneIDs. Finally, with both inputs ConstructTG will identify the activated and repressed TF target genes.
}
\usage{
ConstructTG(annonPeaksOut, limmaOut)
}
\arguments{
\item{annonPeaksOut}{
Output from the annotatePeaks function in the GSCA package. Contains the genes that correspond to the significant peaks detected from TF ChIP-chip or ChIP-seq data.
}
\item{limmaOut}{
Differential expression output from the limma package, and requires the first column of the data.frame to contain the EntrezGeneIDs that match the microarray probeset IDs.
}
}
\details{
This function is designed as one method to allow users to construct target genes after obtaining a list of significant peaks from ChIP-chip or ChIP-seq data and differential expression results from using limma to analyze their microarray data. It is not designed to be flexible to account for all methods to obtain TF-bound and/or differentially expressed genes. Users can choose to manually intersect their own TF-bound and differentially expressed genes by classifying activated genes as genes whose expression increases when the TF expression increases, and repressed genes as genes whose expression decreases when the TF expression increases. Note that significance cutoffs for peaks and differentially expressed genes need to be already applied prior to input.
}
\value{
Returns a list with two items:
\item{PosTG }{Activated TF target genes}
\item{NegTG }{Repressed TF target genes}
}
\references{
George Wu, et al. ChIP-PED enhances the analysis of ChIP-seq and ChIP-chip data. Bioinformatics 2013 Apr 23;29(9):1182-1189.
}
\author{
Zhicheng Ji, Hongkai Ji
}
\examples{
### Read in example ChIP-seq analyzed data output from GSE11431
### for Oct4 in ESCs directly downloaded from NCBI GEO
path <- system.file("extdata",package="GSCA")
chipxfile <- read.delim(paste(path,"GSM288346_ES_Oct4.txt",sep="/"),
header=FALSE,stringsAsFactors=FALSE)
### annotate each peak with the corresponding gene target
annon.out <- annotatePeaks(chipxfile,"mm8",10000,5000)
### Read in example limma output from gene expression data obtained
### by analyzing Oct4 RNAi knockdown gene with RMA then limma
### from the raw CEL files in GSE4189
### The first column contains the Entrez GeneID for each probeset ID
### annotated using the mouse4302.db package in Bioconductor.
gp.out <- read.delim(paste(path,"Pou5f1_E14TG2a_GSE4189_Limma.txt",sep="/"),
stringsAsFactors=FALSE)
ConstructTG(annon.out,gp.out)
}
\keyword{ target genes }
|
## Dependencies (note: tidyverse already attaches dplyr; both calls kept).
library("dplyr")
library("tidyverse")
## Impute missing embarkation port with "S" (the value the original chose).
titanic_original$embarked[is.na(titanic_original$embarked)] <- "S"
## Impute missing ages with the mean of the observed ages.
age_mean <- mean(titanic_original$age, na.rm = TRUE)
titanic_original$age[is.na(titanic_original$age)] <- age_mean
## Passengers with no lifeboat recorded get an explicit "N/A" marker.
titanic_original$boat[is.na(titanic_original$boat)] <- "N/A"
## Add a 0/1 indicator for having a recorded cabin number and call the result
## titanic_clean. Computed directly with mutate() instead of building an
## intermediate one-column frame and re-binding it with data.frame() --
## same columns, one step.
titanic_clean <- titanic_original %>%
  mutate(has_cabin_number = as.numeric(!is.na(cabin)))
# Write a new CSV file
write.csv(titanic_clean,"titanic_clean.csv") | /Handling_Missing_Values/titanic.R | no_license | curtishiga/Springboard_Intro_to_DS | R | false | false | 856 | r | library("dplyr")
library("tidyverse")
# Impute missing embarkation port with "S".
titanic_original$embarked[is.na(titanic_original$embarked)] <- "S"
# Substitute missing values in age with the mean of the observed ages.
age_mean <- mean(titanic_original$age,na.rm=TRUE)
titanic_original$age[is.na(titanic_original$age)] <- age_mean
# Input dummy text for passengers who did not make it onto lifeboats.
titanic_original$boat[is.na(titanic_original$boat)] <- "N/A"
# Build a 0/1 indicator column: 1 when a cabin number is recorded, 0 when missing.
dummy_cabin <- titanic_original %>% select(cabin) %>% mutate(has_cabin_number = as.numeric(!is.na(cabin)))
# Attach the indicator to the full data set as titanic_clean.
titanic_clean <- data.frame(titanic_original,
                            "has_cabin_number" = dummy_cabin$has_cabin_number)
# Write the cleaned data out. NOTE(review): row.names is left at its default
# (TRUE) here, so an extra row-name column is written to the CSV -- confirm
# that is intended.
write.csv(titanic_clean,"titanic_clean.csv")
library(matrixcalc)
ginv <- function(X, tol = sqrt(.Machine$double.eps)) {
  # Moore-Penrose pseudoinverse of X via SVD (same algorithm as MASS::ginv).
  # Singular values at or below tol * (largest singular value) are treated
  # as zero; dimnames of X are transposed onto the result.
  dn <- dimnames(X)
  if (is.null(dn)) {
    dn <- vector("list", 2)
  }
  decomp <- svd(X)
  keep <- decomp$d > tol * decomp$d[1]
  result <- if (any(keep)) {
    decomp$v[, keep] %*% (t(decomp$u[, keep]) / decomp$d[keep])
  } else {
    X
  }
  structure(result, dimnames = dn[2:1])
}
#One iteration of the Newton-Raphson weight update is implemented by the D_weight/DD_weight/c_weight functions below; Inv() is their singularity-guarded matrix inverse.
Inv <- function(M, I) {
  # Invert matrix M. I is a small diagonal "ridge" matrix; when M is
  # reported singular by matrixcalc::is.singular.matrix, solve(M + I) is
  # used instead of solve(M).
  # If the solve errors anyway, the same procedure is retried on 1e-05 * M
  # and the result scaled back by 1e-05 (solve(c*M) = (1/c)*solve(M), so on
  # the non-ridge path this reproduces solve(M) while often avoiding the
  # near-singularity failure).
  inv <- if (is.singular.matrix(M)) {
    try(solve(M + I), silent = TRUE)
  } else {
    try(solve(M), silent = TRUE)
  }
  # BUG FIX: the original detected a failed solve() with length(inv) == 1,
  # which also misclassified every successful 1x1 inverse as a failure
  # (harmless only because the rescaled retry cancels out numerically).
  # Test the try-error class explicitly instead.
  if (inherits(inv, "try-error")) {
    M_scaled <- 1e-05 * M
    inv <- if (is.singular.matrix(M_scaled)) solve(M_scaled + I) else solve(M_scaled)
    inv <- 1e-05 * inv
  }
  inv
}
D_weight<-function(W,T,X,d,q) {
# One damped Newton-Raphson update of the design weights W for a compound
# criterion over the 3-, 4- and 5-parameter models whose parameter vectors
# sit in rows 1-3 of T. X = support points, d = step size (damping factor),
# q = compound-criterion weighting constants.
# upinfor(), D_weight_1() and D_weight_2() are defined elsewhere in the
# package (by usage here: information matrix, and first/second derivative of
# the criterion w.r.t. W -- presumed from naming; confirm against the
# package sources).
#
# Information matrix of each model at the current design (W, X).
M3<-upinfor(W,T[1,],X,3)
M4<-upinfor(W,T[2,],X,4)
M5<-upinfor(W,T[3,],X,5)
# Tiny ridge matrices handed to Inv() in case an information matrix is singular.
I3<-10^-10*diag(3)
I4<-10^-10*diag(4)
I5<-10^-10*diag(5)
inv3<-Inv(M3,I3)
inv4<-Inv(M4,I4)
inv5<-Inv(M5,I5)
f1<-D_weight_1(q,W,T[1,],T[2,],T[3,],X,inv3,inv4,inv5)
f2<-D_weight_2(q,W,T[1,],T[2,],T[3,],X,inv3,inv4,inv5)
# Damped Newton step: move W against f1, preconditioned by the pseudoinverse
# of f2; ginv() tolerates a singular f2.
newweight<-W-d*(f1%*%ginv(f2))
newweight
}
DD_weight<-function(W,T,X,d,I4,I5,order) {
# One damped Newton-Raphson update of the design weights W for a criterion
# comparing the (order-1)- and order-parameter models with parameter vector
# T; I4 and I5 are small ridge matrices passed to Inv() for the two
# information matrices, d is the step size.
# upinfor(), DD_weight_1() and DD_weight_2() are defined elsewhere in the
# package (presumed: information matrix and first/second derivative of the
# criterion w.r.t. W -- confirm against the package sources).
M4<-upinfor(W,T,X,order-1)
M5<-upinfor(W,T,X,order)
inv1<-Inv(M4,I4)
inv<-Inv(M5,I5)
f1<-DD_weight_1(W,T,X,inv,inv1,order)
f2<-DD_weight_2(W,T,X,inv,inv1,order)
# Damped Newton step; ginv() guards against a singular f2.
newweight<-W-d*(f1%*%ginv(f2))
newweight
}
c_weight<-function(W,T,X,d,p,order,UB,I) {
# One damped Newton-Raphson update of the design weights W for a
# c-type criterion on the order-parameter model with parameters T;
# p parameterises the quantity of interest and d is the step size.
# NOTE(review): exp(UB) < 999 switches between a plain pseudoinverse and the
# ridge-guarded Inv(); UB is apparently a log-scale upper bound of the
# design space -- the intent of the 999 threshold is not visible from this
# file and should be confirmed.
M<-upinfor(W,T,X,order)
if(exp(UB)<999) {
inv<-ginv(M)
} else {
inv<-Inv(M,I)
}
# f1/f2: first and second derivative of the criterion w.r.t. W (computed by
# helpers defined elsewhere in the package -- presumed from naming).
f1<-c_weight_1(W,T,X,inv,p,order)
f2<-c_weight_2(W,T,X,inv,p,order)
newweight<-W-d*(f1%*%ginv(f2))
newweight
}
#Newton-Raphson search for the optimal design weights on the support points "X".
#`f` is the single-iteration weight-update function (e.g. D_weight, DD_weight
#or c_weight, as the commented-out calls of earlier versions suggest); extra
#arguments for `f` are forwarded through `...`. Iterates until the largest
#change in any weight is below the tolerance `e1`, dropping support points
#whose weight collapses to (numerically) zero. Returns a 2-row matrix:
#row 1 = surviving support points, row 2 = their weights (summing to 1).
S_weight<-function(X,T,e1,f,...){
diff<-10
#W carries all but the last weight; the last one is implicitly 1 - sum(W).
W<-rep(1/length(X),length(X)-1)
while(diff>e1) {
d<-1
NW<-f(W,T,X,d,...)
minW<-min(min(NW),1-sum(NW))
#Halve the Newton step until all weights (including the implicit last one)
#are non-negative, or the step becomes very small.
while(minW<0 & d>.0001) {
d<-d/2
NW<-f(W,T,X,d,...)
minW<-min(min(NW),1-sum(NW))
}
NW<-c(NW,1-sum(NW))
n<-length(NW)
minW<-min(NW)
#NOTE(review): 1:n-1 parses as (1:n)-1 = 0:(n-1); the 0 index is silently
#dropped, so NW[1:n-1] happens to equal NW[1:(n-1)]. It works, but only by
#accident of R's indexing rules.
diff<-max(abs(W-NW[1:n-1]))
#Zero out the smallest weight when it is negative or essentially zero.
if (abs(minW)<.0000001||minW<0) {
for(i in 1:n)
if (NW[i]==minW) NW[i]<-0
}
#Drop support points whose weight was zeroed, then re-derive W without the
#implicit last weight.
D<-rbind(X,NW)
for (i in 1:n)
if (D[2,i]==0) D[,i]<-NA
X<-D[1,]
W<-D[2,]
X<-na.omit(X)
W<-na.omit(W)
W<-W[1:(length(X)-1)]
}
W<-c(W,1-sum(W))
D<-rbind(X,W)
D
}
| /Opt5PL/R/ff.R | no_license | akhikolla/InformationHouse | R | false | false | 3,067 | r | library(matrixcalc)
ginv <- function(X, tol = sqrt(.Machine$double.eps)) {
  # SVD-based Moore-Penrose generalised inverse (the MASS::ginv algorithm).
  # Singular values not exceeding tol times the largest one are dropped;
  # X's dimnames are carried over transposed.
  dims <- dimnames(X)
  if (is.null(dims)) dims <- list(NULL, NULL)
  sv <- svd(X)
  nonzero <- sv$d > tol * sv$d[1]
  pinv <- X
  if (any(nonzero)) {
    pinv <- sv$v[, nonzero, drop = FALSE] %*%
      (t(sv$u[, nonzero, drop = FALSE]) / sv$d[nonzero])
  }
  dimnames(pinv) <- dims[2:1]
  pinv
}
#One iteration of the Newton-Raphson weight update is implemented by the D_weight/DD_weight/c_weight functions below; Inv() is their singularity-guarded matrix inverse.
Inv <- function(M, I) {
  # Invert matrix M. I is a small diagonal "ridge" matrix; when M is
  # reported singular by matrixcalc::is.singular.matrix, solve(M + I) is
  # used instead of solve(M).
  # If the solve errors anyway, the same procedure is retried on 1e-05 * M
  # and the result scaled back by 1e-05 (solve(c*M) = (1/c)*solve(M), so on
  # the non-ridge path this reproduces solve(M) while often avoiding the
  # near-singularity failure).
  inv <- if (is.singular.matrix(M)) {
    try(solve(M + I), silent = TRUE)
  } else {
    try(solve(M), silent = TRUE)
  }
  # BUG FIX: the original detected a failed solve() with length(inv) == 1,
  # which also misclassified every successful 1x1 inverse as a failure
  # (harmless only because the rescaled retry cancels out numerically).
  # Test the try-error class explicitly instead.
  if (inherits(inv, "try-error")) {
    M_scaled <- 1e-05 * M
    inv <- if (is.singular.matrix(M_scaled)) solve(M_scaled + I) else solve(M_scaled)
    inv <- 1e-05 * inv
  }
  inv
}
D_weight<-function(W,T,X,d,q) {
# One damped Newton-Raphson update of the design weights W for a compound
# criterion over the 3-, 4- and 5-parameter models whose parameter vectors
# sit in rows 1-3 of T. X = support points, d = step size (damping factor),
# q = compound-criterion weighting constants.
# upinfor(), D_weight_1() and D_weight_2() are defined elsewhere in the
# package (by usage here: information matrix, and first/second derivative of
# the criterion w.r.t. W -- presumed from naming; confirm against the
# package sources).
#
# Information matrix of each model at the current design (W, X).
M3<-upinfor(W,T[1,],X,3)
M4<-upinfor(W,T[2,],X,4)
M5<-upinfor(W,T[3,],X,5)
# Tiny ridge matrices handed to Inv() in case an information matrix is singular.
I3<-10^-10*diag(3)
I4<-10^-10*diag(4)
I5<-10^-10*diag(5)
inv3<-Inv(M3,I3)
inv4<-Inv(M4,I4)
inv5<-Inv(M5,I5)
f1<-D_weight_1(q,W,T[1,],T[2,],T[3,],X,inv3,inv4,inv5)
f2<-D_weight_2(q,W,T[1,],T[2,],T[3,],X,inv3,inv4,inv5)
# Damped Newton step: move W against f1, preconditioned by the pseudoinverse
# of f2; ginv() tolerates a singular f2.
newweight<-W-d*(f1%*%ginv(f2))
newweight
}
DD_weight<-function(W,T,X,d,I4,I5,order) {
# One damped Newton-Raphson update of the design weights W for a criterion
# comparing the (order-1)- and order-parameter models with parameter vector
# T; I4 and I5 are small ridge matrices passed to Inv() for the two
# information matrices, d is the step size.
# upinfor(), DD_weight_1() and DD_weight_2() are defined elsewhere in the
# package (presumed: information matrix and first/second derivative of the
# criterion w.r.t. W -- confirm against the package sources).
M4<-upinfor(W,T,X,order-1)
M5<-upinfor(W,T,X,order)
inv1<-Inv(M4,I4)
inv<-Inv(M5,I5)
f1<-DD_weight_1(W,T,X,inv,inv1,order)
f2<-DD_weight_2(W,T,X,inv,inv1,order)
# Damped Newton step; ginv() guards against a singular f2.
newweight<-W-d*(f1%*%ginv(f2))
newweight
}
c_weight<-function(W,T,X,d,p,order,UB,I) {
# One damped Newton-Raphson update of the design weights W for a
# c-type criterion on the order-parameter model with parameters T;
# p parameterises the quantity of interest and d is the step size.
# NOTE(review): exp(UB) < 999 switches between a plain pseudoinverse and the
# ridge-guarded Inv(); UB is apparently a log-scale upper bound of the
# design space -- the intent of the 999 threshold is not visible from this
# file and should be confirmed.
M<-upinfor(W,T,X,order)
if(exp(UB)<999) {
inv<-ginv(M)
} else {
inv<-Inv(M,I)
}
# f1/f2: first and second derivative of the criterion w.r.t. W (computed by
# helpers defined elsewhere in the package -- presumed from naming).
f1<-c_weight_1(W,T,X,inv,p,order)
f2<-c_weight_2(W,T,X,inv,p,order)
newweight<-W-d*(f1%*%ginv(f2))
newweight
}
#Newton-Raphson search for the optimal design weights on the support points "X".
#`f` is the single-iteration weight-update function (e.g. D_weight, DD_weight
#or c_weight, as the commented-out calls of earlier versions suggest); extra
#arguments for `f` are forwarded through `...`. Iterates until the largest
#change in any weight is below the tolerance `e1`, dropping support points
#whose weight collapses to (numerically) zero. Returns a 2-row matrix:
#row 1 = surviving support points, row 2 = their weights (summing to 1).
S_weight<-function(X,T,e1,f,...){
diff<-10
#W carries all but the last weight; the last one is implicitly 1 - sum(W).
W<-rep(1/length(X),length(X)-1)
while(diff>e1) {
d<-1
NW<-f(W,T,X,d,...)
minW<-min(min(NW),1-sum(NW))
#Halve the Newton step until all weights (including the implicit last one)
#are non-negative, or the step becomes very small.
while(minW<0 & d>.0001) {
d<-d/2
NW<-f(W,T,X,d,...)
minW<-min(min(NW),1-sum(NW))
}
NW<-c(NW,1-sum(NW))
n<-length(NW)
minW<-min(NW)
#NOTE(review): 1:n-1 parses as (1:n)-1 = 0:(n-1); the 0 index is silently
#dropped, so NW[1:n-1] happens to equal NW[1:(n-1)]. It works, but only by
#accident of R's indexing rules.
diff<-max(abs(W-NW[1:n-1]))
#Zero out the smallest weight when it is negative or essentially zero.
if (abs(minW)<.0000001||minW<0) {
for(i in 1:n)
if (NW[i]==minW) NW[i]<-0
}
#Drop support points whose weight was zeroed, then re-derive W without the
#implicit last weight.
D<-rbind(X,NW)
for (i in 1:n)
if (D[2,i]==0) D[,i]<-NA
X<-D[1,]
W<-D[2,]
X<-na.omit(X)
W<-na.omit(W)
W<-W[1:(length(X)-1)]
}
W<-c(W,1-sum(W))
D<-rbind(X,W)
D
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 7797
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7796
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7796
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/wmiforward/stmt16_68_69.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2350
c no.of clauses 7797
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 7796
c
c QBFLIB/Basler/wmiforward/stmt16_68_69.qdimacs 2350 7797 E1 [1] 0 269 2080 7796 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/wmiforward/stmt16_68_69/stmt16_68_69.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 709 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 7797
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7796
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7796
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/wmiforward/stmt16_68_69.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2350
c no.of clauses 7797
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 7796
c
c QBFLIB/Basler/wmiforward/stmt16_68_69.qdimacs 2350 7797 E1 [1] 0 269 2080 7796 RED
|
## continuous-continuous model postprocessing for copula calibration - postprocess
rm(list=ls())
# FIX: "cowplot" added to libs -- mkplot() below calls ggdraw(), draw_label()
# and plot_grid(), which are cowplot functions and were never loaded before,
# so the plotting loop failed at runtime.
libs <- c("copula", "cowplot", "magrittr", "rstan", "sfsmisc", "tidyverse")
invisible(lapply(libs, library, character.only = TRUE))
# set working directory (machine-specific path; adjust when run elsewhere)
wdir<-file.path("/home/nathan/Dropbox/njames/school/PhD/misc/conferences/Biopharm2019/bayes_cop_calib_code")
# plot directory
pdir<-file.path(wdir,"plots")
# load continuous-continuous simulation data
# (presumably provides cc_sim_dat and cc_parm_div_dat used below -- confirm)
load(file.path(wdir,"sims", "cc_simulations.RData"))
# load different simulations scenarios
# use cc_calib_simarray (from ACCRE) not cc_calib_simarray_local
cc_calib_simarray <- readRDS(file.path(wdir,"cc_calib_simarray.rds"))
# keep only the design columns needed to annotate the simulation results
cc_calib_simarray_trim<-cc_calib_simarray %>% select(n, theta_2, mu_e2, mu_s2, rep_id, scn_id, sim_id)
# attach the true design values to each simulation result row
cc_sim_merged<-merge(cc_sim_dat, cc_calib_simarray_trim, by=c("scn_id","rep_id","sim_id"))
# sims
# (3 true p_e2) x (3 true p_s2) x (3 trt group corr) x (50 reps)
# (3 samp size) x (3 models)
## make all 9 (3 samp size, 3 mod) 3x3 plots for each of the 4 params
# make calibration plot of drawn parameter vs. posterior mean
#
# Builds a 2x2 grid of calibration panels (mu_e_diff, mu_s_diff, theta_1,
# theta_2) for simulations with sample size `nn` and model `mm`.  Each panel
# facets by true mu_e2 (rows) and mu_s2 (cols), colours by true theta_2, and
# overlays a loess fit on the identity line (perfect calibration).
# Relies on `cc_sim_merged` from the prologue and on ggplot2/cowplot.
mkplot <- function(nn, mm){
  dat<-subset(cc_sim_merged, n==nn & mod_num==mm)
  # Small helpers so the four panels share identical layers.
  add_abline <- function() {geom_abline(slope=1,intercept=0, alpha=0.3)}
  add_smooth <- function() {stat_smooth(method="loess", size=0.7, se=FALSE)}
  add_facet <- function() {facet_grid(rows=vars(mu_e2), cols=vars(mu_s2),
                                      labeller = "label_both")}
  no_legend <- function() {theme(legend.position = "none")}
  # One data subset per parameter of interest.
  d1 <-dat %>% filter(param=="mu_e_diff")
  d2 <-dat %>% filter(param=="mu_s_diff")
  d3 <-dat %>% filter(param=="theta_1")
  d4 <-dat %>% filter(param=="theta_2")
  # Correlation panels (theta_*) are clipped to [-1, 1], their valid range.
  p1<-ggplot(d1,aes(x=draw_par, y=mean, col=factor(theta_2))) +
    add_abline() + add_smooth() + add_facet() + no_legend() + ggtitle("mu_e_diff")
  p2<-ggplot(d2,aes(x=draw_par, y=mean, col=factor(theta_2))) +
    add_abline() + add_smooth() + add_facet() + ggtitle("mu_s_diff")
  p3<-ggplot(d3,aes(x=draw_par, y=mean, col=factor(theta_2))) +
    add_abline() + add_smooth() + add_facet() + no_legend() + ylim(-1,1) + ggtitle("theta_1")
  p4<-ggplot(d4,aes(x=draw_par, y=mean, col=factor(theta_2))) +
    add_abline() + add_smooth() + add_facet() + ylim(-1,1) + ggtitle("theta_2")
  # Bold overall title, left-aligned with the first panel (cowplot).
  title <- ggdraw() +
    draw_label(paste0("continuous-continuous model ",mm," (n=",nn,")"),
               fontface = 'bold', x = 0, hjust = 0) +
    theme(
      # add margin on the left of the drawing canvas,
      # so title is aligned with left edge of first plot
      plot.margin = margin(0, 0, 0, 7))
  pg<-plot_grid(p1,p2,p3,p4, rel_widths = c(1,1.5))
  plot_grid(title, pg, ncol = 1,
            # rel_heights values control vertical title margins
            rel_heights = c(0.1, 1))
}
# make histogram of divergences
#
# Histogram of the number of divergent transitions (n_div) for simulations
# with sample size `nn` and model `mm`, faceted by true mu_e2/mu_s2 and
# filled by true theta_2.  Relies on `cc_sim_merged` and `cc_parm_div_dat`
# (presumably loaded from cc_simulations.RData -- confirm) being in scope.
mkplot2 <- function(nn, mm){
  dat<-subset(cc_sim_merged, n==nn & mod_num==mm)
  # Simulation ids belonging to this (sample size, model) cell.
  s1<-dat %>% pull(sim_id) %>% unique()
  cc_parm_div_dat %>% filter(sim_id %in% s1) %>%
    ggplot(aes(x=n_div,fill=factor(theta_2))) +
    geom_histogram(bins=40)+
    facet_grid(rows=vars(mu_e2),cols=vars(mu_s2),labeller="label_both")+
    ggtitle(paste0("divergences continuous-continuous model ",mm," (n=",nn,")"))
}
# Quick interactive check of a single cell before the full export loop.
mkplot(50,1)
mkplot2(50,1)
# Export one PDF (calibration grid + divergence histogram) per
# sample-size / model combination.
ns<-c(50,100,200)
mods<-1:3
# NOTE(review): `all` shadows base::all for the remainder of this script.
all<-expand.grid(ns,mods)
for (i in 1:nrow(all)){
  title <- paste0("cc_mod",all[i,2],"_n",all[i,1],".pdf")
  p <- mkplot(all[i,1],all[i,2])
  p2 <- mkplot2(all[i,1],all[i,2])
  pdf(file.path(pdir,title),width=11,height=8.5)
  print(p)
  print(p2)
  dev.off()
}
# Disabled exploratory code (if (0)): the per-parameter calibration plots
# below were superseded by mkplot() above and are kept for reference only.
if (0){
# for n=200, model 1, 3*3 table of mu_e_diff, mu_s_diff, theta_1, theta_2
# with rows faceted by true mu_e2, cols by true mu_s2
cc_sim_merged %>% filter(n==200, mod_num==1, param=="mu_e_diff") %>%
  ggplot(aes(x=draw_par,y=mean,col=factor(theta_2))) +
  geom_abline(slope=1,intercept=0, alpha=0.3) +
  stat_smooth(method="loess", size=0.7, se=FALSE)+
  facet_grid(rows=vars(mu_e2),cols=vars(mu_s2))
cc_sim_merged %>% filter(n==200, mod_num==1, param=="mu_s_diff") %>%
  ggplot(aes(x=draw_par,y=mean,col=factor(theta_2))) +
  geom_abline(slope=1,intercept=0, alpha=0.3) +
  stat_smooth(method="loess", size=0.7, se=FALSE)+
  facet_grid(rows=vars(mu_e2),cols=vars(mu_s2))
cc_sim_merged %>% filter(n==200, mod_num==1, param=="theta_1") %>%
  ggplot(aes(x=draw_par,y=mean,col=factor(theta_2))) +
  geom_abline(slope=1,intercept=0, alpha=0.3) +
  stat_smooth(method="loess", size=0.7, se=FALSE)+
  facet_grid(rows=vars(mu_e2),cols=vars(mu_s2))
cc_sim_merged %>% filter(n==200, mod_num==1, param=="theta_2") %>%
  ggplot(aes(x=draw_par,y=mean,col=factor(theta_2))) +
  geom_abline(slope=1,intercept=0, alpha=0.3) +
  stat_smooth(method="loess", size=0.7, se=FALSE)+
  facet_grid(rows=vars(mu_e2),cols=vars(mu_s2))
}
| /24_cc_calib_plot.R | no_license | ntjames/asa_biopharm_2019 | R | false | false | 4,796 | r | ## continuous-continuous model postprocessing for copula calibration - postprocess
rm(list=ls())
libs <- c("copula", "magrittr", "rstan", "sfsmisc", "tidyverse")
invisible(lapply(libs, library, character.only = TRUE))
# set working directory
wdir<-file.path("/home/nathan/Dropbox/njames/school/PhD/misc/conferences/Biopharm2019/bayes_cop_calib_code")
# plot directory
pdir<-file.path(wdir,"plots")
# load continuous-continuous simulation data
load(file.path(wdir,"sims", "cc_simulations.RData"))
# load different simulations scenarios
# use cc_calib_simarray (from ACCRE) not cc_calib_simarray_local
cc_calib_simarray <- readRDS(file.path(wdir,"cc_calib_simarray.rds"))
cc_calib_simarray_trim<-cc_calib_simarray %>% select(n, theta_2, mu_e2, mu_s2, rep_id, scn_id, sim_id)
cc_sim_merged<-merge(cc_sim_dat, cc_calib_simarray_trim, by=c("scn_id","rep_id","sim_id"))
# sims
# (3 true p_e2) x (3 true p_s2) x (3 trt group corr) x (50 reps)
# (3 samp size) x (3 models)
## make all 9 (3 samp size, 3 mod) 3x3 plots for each of the 4 params
# make calibration plot of drawn parameter vs. posterior mean
mkplot <- function(nn, mm){
dat<-subset(cc_sim_merged, n==nn & mod_num==mm)
add_abline <- function() {geom_abline(slope=1,intercept=0, alpha=0.3)}
add_smooth <- function() {stat_smooth(method="loess", size=0.7, se=FALSE)}
add_facet <- function() {facet_grid(rows=vars(mu_e2), cols=vars(mu_s2),
labeller = "label_both")}
no_legend <- function() {theme(legend.position = "none")}
d1 <-dat %>% filter(param=="mu_e_diff")
d2 <-dat %>% filter(param=="mu_s_diff")
d3 <-dat %>% filter(param=="theta_1")
d4 <-dat %>% filter(param=="theta_2")
p1<-ggplot(d1,aes(x=draw_par, y=mean, col=factor(theta_2))) +
add_abline() + add_smooth() + add_facet() + no_legend() + ggtitle("mu_e_diff")
p2<-ggplot(d2,aes(x=draw_par, y=mean, col=factor(theta_2))) +
add_abline() + add_smooth() + add_facet() + ggtitle("mu_s_diff")
p3<-ggplot(d3,aes(x=draw_par, y=mean, col=factor(theta_2))) +
add_abline() + add_smooth() + add_facet() + no_legend() + ylim(-1,1) + ggtitle("theta_1")
p4<-ggplot(d4,aes(x=draw_par, y=mean, col=factor(theta_2))) +
add_abline() + add_smooth() + add_facet() + ylim(-1,1) + ggtitle("theta_2")
title <- ggdraw() +
draw_label(paste0("continuous-continuous model ",mm," (n=",nn,")"),
fontface = 'bold', x = 0, hjust = 0) +
theme(
# add margin on the left of the drawing canvas,
# so title is aligned with left edge of first plot
plot.margin = margin(0, 0, 0, 7))
pg<-plot_grid(p1,p2,p3,p4, rel_widths = c(1,1.5))
plot_grid(title, pg, ncol = 1,
# rel_heights values control vertical title margins
rel_heights = c(0.1, 1))
}
# make histogram of divergences
mkplot2 <- function(nn, mm){
dat<-subset(cc_sim_merged, n==nn & mod_num==mm)
s1<-dat %>% pull(sim_id) %>% unique()
cc_parm_div_dat %>% filter(sim_id %in% s1) %>%
ggplot(aes(x=n_div,fill=factor(theta_2))) +
geom_histogram(bins=40)+
facet_grid(rows=vars(mu_e2),cols=vars(mu_s2),labeller="label_both")+
ggtitle(paste0("divergences continuous-continuous model ",mm," (n=",nn,")"))
}
mkplot(50,1)
mkplot2(50,1)
ns<-c(50,100,200)
mods<-1:3
all<-expand.grid(ns,mods)
for (i in 1:nrow(all)){
title <- paste0("cc_mod",all[i,2],"_n",all[i,1],".pdf")
p <- mkplot(all[i,1],all[i,2])
p2 <- mkplot2(all[i,1],all[i,2])
pdf(file.path(pdir,title),width=11,height=8.5)
print(p)
print(p2)
dev.off()
}
if (0){
# for n=200, model 1, 3*3 table of mu_e_diff, mu_s_diff, theta_1, theta_2
# with rows faceted by true mu_e2, cols by true mu_s2
cc_sim_merged %>% filter(n==200, mod_num==1, param=="mu_e_diff") %>%
ggplot(aes(x=draw_par,y=mean,col=factor(theta_2))) +
geom_abline(slope=1,intercept=0, alpha=0.3) +
stat_smooth(method="loess", size=0.7, se=FALSE)+
facet_grid(rows=vars(mu_e2),cols=vars(mu_s2))
cc_sim_merged %>% filter(n==200, mod_num==1, param=="mu_s_diff") %>%
ggplot(aes(x=draw_par,y=mean,col=factor(theta_2))) +
geom_abline(slope=1,intercept=0, alpha=0.3) +
stat_smooth(method="loess", size=0.7, se=FALSE)+
facet_grid(rows=vars(mu_e2),cols=vars(mu_s2))
cc_sim_merged %>% filter(n==200, mod_num==1, param=="theta_1") %>%
ggplot(aes(x=draw_par,y=mean,col=factor(theta_2))) +
geom_abline(slope=1,intercept=0, alpha=0.3) +
stat_smooth(method="loess", size=0.7, se=FALSE)+
facet_grid(rows=vars(mu_e2),cols=vars(mu_s2))
cc_sim_merged %>% filter(n==200, mod_num==1, param=="theta_2") %>%
ggplot(aes(x=draw_par,y=mean,col=factor(theta_2))) +
geom_abline(slope=1,intercept=0, alpha=0.3) +
stat_smooth(method="loess", size=0.7, se=FALSE)+
facet_grid(rows=vars(mu_e2),cols=vars(mu_s2))
}
|
context("discrete_entropy")
nn <- 10
# FIX: seed the RNG so the random fixture -- and therefore the strict
# inequality tests below -- are reproducible across runs.
set.seed(123)
# Random positive weights that do not sum to 1, normalised into probabilities.
kVec <- rnorm(nn)^2 + 1
kProbs <- kVec / sum(kVec)
# Discrete uniform distribution over nn outcomes (maximum-entropy reference).
kUniformDistr <- rep(1 / nn, nn)
# Uniform over n outcomes maximizes Shannon entropy (H = log2(n) bits), so
# the random, non-uniform kProbs must fall strictly below it.
test_that("discrete_entropy computes right entropy for uniform", {
  # entropy of uniform is always larger than any other vector
  expect_lt(discrete_entropy(kProbs), discrete_entropy(kUniformDistr))
  # 0 times 0 is zero (remove attribute)
  expect_equal(log2(length(kUniformDistr)), c(discrete_entropy(kUniformDistr)))
  # check that base works correctly: H_e == H_2 / log2(e)
  expect_equal(log2(length(kUniformDistr))/log2(exp(1)),
               c(discrete_entropy(kUniformDistr, base = exp(1))))
  # 'base' attribute records the logarithm base used, as a string
  expect_equal(attr(discrete_entropy(kProbs), "base"), "2")
})
# A degenerate (certain) distribution carries no information.
test_that("entropy of certain event is 0", {
  # entropy of sure event is 0
  expect_equal(0, c(discrete_entropy(c(1, 0, 0))))
})
test_that("probabilities must be non-negative for entropy", {
  # negative values are not allowed
  expect_error(discrete_entropy(c(-0.5, 0)))
})
# Smoothing pulls the distribution toward uniform, which can only increase
# its entropy; prior.weight outside [0, 1] must be rejected.
# FIX: corrected the typo "smooting" in the test description.
test_that("smoothing increases entropy", {
  # prior.weight outside [0, 1] is rejected
  expect_error(c(discrete_entropy(kProbs, prior.weight = 2)))
  expect_error(c(discrete_entropy(kProbs, prior.weight = -1)))
  # smoothing toward uniform increases entropy
  expect_gt(discrete_entropy(kProbs, prior.weight = 0.1),
            discrete_entropy(kProbs, prior.weight = 0))
})
| /tests/testthat/test_discrete_entropy.R | no_license | cran/ForeCA | R | false | false | 1,339 | r | context("discrete_entropy")
nn <- 10
# does not add up to 1
kVec <- rnorm(nn)^2 + 1
kProbs <- kVec / sum(kVec)
kUniformDistr <- rep(1 / nn, nn)
test_that("discrete_entropy computes right entropy for uniform", {
# entropy of uniform is always larger than any other vector
expect_lt(discrete_entropy(kProbs), discrete_entropy(kUniformDistr))
# 0 times 0 is zero (remove attribute)
expect_equal(log2(length(kUniformDistr)), c(discrete_entropy(kUniformDistr)))
# check that base works correctly
expect_equal(log2(length(kUniformDistr))/log2(exp(1)),
c(discrete_entropy(kUniformDistr, base = exp(1))))
# 'base' attribute
expect_equal(attr(discrete_entropy(kProbs), "base"), "2")
})
test_that("entropy of certain event is 0", {
# entropy of sure event is 0
expect_equal(0, c(discrete_entropy(c(1, 0, 0))))
})
test_that("probabilities must be non-negative for entropy", {
# negative values are not allowed
expect_error(discrete_entropy(c(-0.5, 0)))
})
test_that("smooting increases entropy", {
# test smoothing stuff
expect_error(c(discrete_entropy(kProbs, prior.weight = 2)))
expect_error(c(discrete_entropy(kProbs, prior.weight = -1)))
# smoothing increases entropy
expect_gt(discrete_entropy(kProbs, prior.weight = 0.1),
discrete_entropy(kProbs, prior.weight = 0))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/momentum_routines.R
\name{read.gene.mapping.info}
\alias{read.gene.mapping.info}
\title{read in detailed molecular mapping info from hdf5 file as written out by "-d" option of velocyto.py}
\usage{
read.gene.mapping.info(fname, cell.clusters = NULL,
internal.priming.info = NULL, min.exon.count = 10,
n.cores = defaultNCores())
}
\arguments{
\item{fname}{name of the hdf5 detailed molecular mapping (debug) file, written out by velocyto.py}
\item{cell.clusters}{optional cell cluster factor}
\item{internal.priming.info}{optionall internal priming info, as produced by find.ip.sites() function}
\item{min.exon.count}{minimal total (dataset-wide) number of molecules for an exon to be considered expressed}
\item{n.cores}{number of cores to use}
}
\value{
a list containing gene structural information data structure ($gene.df, with el,il, nex,nipconc,nipdisc columns corresponding to the log10 exonic length, intronic length, number of exons, number of internal concordant and discordant priming sites, respectively), and $info tables from the hdf5 file with an additional per-cluster entry $cluster.feature.counts table showing per-feature (rows) per-cluster (column) molecule counts (if cell.clusters are not supplied $info$cluster.feature.counts will contain one column, 'all' giving dataset-wide counts)
}
\description{
read in detailed molecular mapping info from hdf5 file as written out by "-d" option of velocyto.py
}
| /man/read.gene.mapping.info.Rd | no_license | ilyakorsunsky/velocyto.R | R | false | true | 1,512 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/momentum_routines.R
\name{read.gene.mapping.info}
\alias{read.gene.mapping.info}
\title{read in detailed molecular mapping info from hdf5 file as written out by "-d" option of velocyto.py}
\usage{
read.gene.mapping.info(fname, cell.clusters = NULL,
internal.priming.info = NULL, min.exon.count = 10,
n.cores = defaultNCores())
}
\arguments{
\item{fname}{name of the hdf5 detailed molecular mapping (debug) file, written out by velocyto.py}
\item{cell.clusters}{optional cell cluster factor}
\item{internal.priming.info}{optionall internal priming info, as produced by find.ip.sites() function}
\item{min.exon.count}{minimal total (dataset-wide) number of molecules for an exon to be considered expressed}
\item{n.cores}{number of cores to use}
}
\value{
a list containing gene structural information data structure ($gene.df, with el,il, nex,nipconc,nipdisc columns corresponding to the log10 exonic length, intronic length, number of exons, numebr of internal concordant and discordant priming sites, respectively), and $info tables from the hdf5 file with an additional per-cluster entry $cluster.feature.counts table showing per-feature (rows) per-cluster (column) molecule counts (if cell.clusters are not supplied $info$cluster.feauture.counts will contain one column, 'all' giving dataset-wide counts)
}
\description{
read in detailed molecular mapping info from hdf5 file as written out by "-d" option of velocyto.py
}
|
# Abstract base class for JSON-RPC 2.0 messages.
#
# Subclasses override `to_json()`; `format()` wraps the serialized payload in
# a Content-Length framing header (Language Server Protocol style).  The
# length is measured in bytes, not characters, so multi-byte UTF-8 payloads
# are framed correctly.
Message <- R6::R6Class("Message",
  public = list(
    jsonrpc = 2,
    # Serialize this message to JSON; overridden by subclasses.
    to_json = function() {
    },
    # Render the message with its Content-Length framing header.
    format = function() {
      body <- self$to_json()
      size <- nchar(body, type = "bytes")
      paste0("Content-Length: ", size, "\r\n\r\n", body)
    }
  )
)
# JSON-RPC request: a method call identified by `id`, expecting a Response
# with the same id.
Request <- R6::R6Class("Request",
  inherit = Message,
  public = list(
    id = NULL,      # request id, echoed back in the matching response
    method = NULL,  # RPC method name
    params = NULL,  # optional parameters; omitted from the JSON when NULL
    initialize = function(id, method, params=NULL) {
      self$id <- id
      self$method <- method
      self$params <- params
    },
    # Serialize to JSON.  auto_unbox = TRUE turns length-1 vectors (jsonrpc,
    # id) into JSON scalars; method is additionally unboxed explicitly.
    # The payload is built by assignment so that NULL fields are simply not
    # added to the list (assigning NULL to a list element drops it).
    to_json = function() {
      payload <- list()
      payload$jsonrpc <- self$jsonrpc
      payload$id <- self$id
      payload$method <- jsonlite::unbox(self$method)
      if (!is.null(self$params)) {
        payload$params <- self$params
      }
      jsonlite::toJSON(payload, auto_unbox = TRUE)
    }
  )
)
# JSON-RPC notification: a method call that expects no response (no id).
Notification <- R6::R6Class("Notification",
  inherit = Message,
  public = list(
    method = NULL,
    params = NULL,
    # method: RPC method name; params: optional payload, omitted when NULL.
    initialize = function(method, params=NULL) {
      self$method <- method
      self$params <- params
    },
    # Serialize to JSON with explicit unboxing of the scalar fields.
    to_json = function() {
      payload <- list(
        jsonrpc = jsonlite::unbox(self$jsonrpc),
        method = jsonlite::unbox(self$method)
      )
      if (!is.null(self$params)) {
        payload$params <- self$params
      }
      jsonlite::toJSON(payload, auto_unbox = TRUE)
    }
  )
)
# JSON-RPC response message, paired to its request by `id`.
Response <- R6::R6Class("Response",
  inherit = Message,
  public = list(
    id = NULL,      # id of the request being answered
    result = NULL,  # result payload on success
    error = NULL,   # error object on failure
    initialize = function(id, result=NULL, error=NULL) {
      self$id <- id
      self$result <- result
      self$error <- error
    },
    # Serialize to JSON.  Only non-NULL result/error fields are emitted
    # (assigning NULL to a list element drops it).
    # NOTE(review): JSON-RPC 2.0 requires exactly one of result/error in a
    # response; when both are NULL this emits neither -- confirm callers
    # never construct such a Response.
    to_json = function() {
      payload <- list()
      payload$jsonrpc <- self$jsonrpc
      payload$id <- self$id
      if (!is.null(self$result)) {
        payload$result <- self$result
      }
      if (!is.null(self$error)) {
        payload$error <- self$error
      }
      jsonlite::toJSON(payload, auto_unbox = TRUE)
    }
  )
)
| /R/jsonrpc.R | no_license | Ikuyadeu/languageserver | R | false | false | 2,308 | r | Message <- R6::R6Class("Message",
public = list(
jsonrpc = 2,
to_json = function() {
},
format = function() {
json <- self$to_json()
paste0("Content-Length: ", nchar(json, type = "bytes"), "\r\n\r\n", json)
}
)
)
Request <- R6::R6Class("Request",
inherit = Message,
public = list(
id = NULL,
method = NULL,
params = NULL,
initialize = function(id, method, params=NULL) {
self$id <- id
self$method <- method
self$params <- params
},
to_json = function() {
payload <- list()
payload$jsonrpc <- self$jsonrpc
payload$id <- self$id
payload$method <- jsonlite::unbox(self$method)
if (!is.null(self$params)) {
payload$params <- self$params
}
jsonlite::toJSON(payload, auto_unbox = TRUE)
}
)
)
Notification <- R6::R6Class("Notification",
inherit = Message,
public = list(
method = NULL,
params = NULL,
initialize = function(method, params=NULL) {
self$method <- method
self$params <- params
},
to_json = function() {
payload <- list()
payload$jsonrpc <- jsonlite::unbox(self$jsonrpc)
payload$method <- jsonlite::unbox(self$method)
if (!is.null(self$params)) {
payload$params <- self$params
}
jsonlite::toJSON(payload, auto_unbox = TRUE)
}
)
)
Response <- R6::R6Class("Response",
inherit = Message,
public = list(
id = NULL,
result = NULL,
error = NULL,
initialize = function(id, result=NULL, error=NULL) {
self$id <- id
self$result <- result
self$error <- error
},
to_json = function() {
payload <- list()
payload$jsonrpc <- self$jsonrpc
payload$id <- self$id
if (!is.null(self$result)) {
payload$result <- self$result
}
if (!is.null(self$error)) {
payload$error <- self$error
}
jsonlite::toJSON(payload, auto_unbox = TRUE)
}
)
)
|
library(TAM)
### Name: tam.wle
### Title: Weighted Likelihood Estimation and Maximum Likelihood Estimation
### of Person Parameters
### Aliases: tam.wle tam.mml.wle tam.mml.wle2 tam_jml_wle print.tam.wle
### summary.tam.wle
### Keywords: WLE MLE Person parameter estimation print
### ** Examples
#############################################################################
# EXAMPLE 1: 1PL model, data.sim.rasch
#############################################################################
# Example 1 workflow: fit a Rasch (1PL) model, then derive person-parameter
# (WLE) estimates from the fitted object in three different ways.
data(data.sim.rasch)
# estimate Rasch model
mod1 <- TAM::tam.mml(resp=data.sim.rasch)
# WLE estimation
wle1 <- TAM::tam.wle( mod1 )
## WLE Reliability=0.894
print(wle1)
summary(wle1)
# scoring for a different dataset containing same items (first 10 persons in sim.rasch)
wle2 <- TAM::tam.wle( mod1, score.resp=data.sim.rasch[1:10,])
#--- WLE estimation without using a TAM object
#* create an input list (responses plus item parameters AXsi / loadings B)
input <- list( resp=data.sim.rasch, AXsi=mod1$AXsi, B=mod1$B )
#* estimation
wle2b <- TAM::tam.mml.wle2( input )
## Not run:
##D #############################################################################
##D # EXAMPLE 2: 3-dimensional Rasch model | data.read from sirt package
##D #############################################################################
##D
##D data(data.read, package="sirt")
##D # define Q-matrix
##D Q <- matrix(0,12,3)
##D Q[ cbind( 1:12, rep(1:3,each=4) ) ] <- 1
##D # redefine data: create some missings for first three cases
##D resp <- data.read
##D resp[1:2, 5:12] <- NA
##D resp[3,1:4] <- NA
##D ## > head(resp)
##D ## A1 A2 A3 A4 B1 B2 B3 B4 C1 C2 C3 C4
##D ## 2 1 1 1 1 NA NA NA NA NA NA NA NA
##D ## 22 1 1 0 0 NA NA NA NA NA NA NA NA
##D ## 23 NA NA NA NA 1 0 1 1 1 1 1 1
##D ## 41 1 1 1 1 1 1 1 1 1 1 1 1
##D ## 43 1 0 0 1 0 0 1 1 1 0 1 0
##D ## 63 1 1 0 0 1 0 1 1 1 1 1 1
##D
##D # estimate 3-dimensional Rasch model
##D mod <- TAM::tam.mml( resp=resp, Q=Q, control=list(snodes=1000,maxiter=50) )
##D summary(mod)
##D
##D # WLE estimates
##D wmod <- TAM::tam.wle(mod, Msteps=3)
##D summary(wmod)
##D ## head(round(wmod,2))
##D ## pid N.items PersonScores.Dim01 PersonScores.Dim02 PersonScores.Dim03
##D ## 2 1 4 3.7 0.3 0.3
##D ## 22 2 4 2.0 0.3 0.3
##D ## 23 3 8 0.3 3.0 3.7
##D ## 41 4 12 3.7 3.7 3.7
##D ## 43 5 12 2.0 2.0 2.0
##D ## 63 6 12 2.0 3.0 3.7
##D ## PersonMax.Dim01 PersonMax.Dim02 PersonMax.Dim03 theta.Dim01 theta.Dim02
##D ## 2 4.0 0.6 0.6 1.06 NA
##D ## 22 4.0 0.6 0.6 -0.96 NA
##D ## 23 0.6 4.0 4.0 NA -0.07
##D ## 41 4.0 4.0 4.0 1.06 0.82
##D ## 43 4.0 4.0 4.0 -0.96 -1.11
##D ## 63 4.0 4.0 4.0 -0.96 -0.07
##D ## theta.Dim03 error.Dim01 error.Dim02 error.Dim03 WLE.rel.Dim01
##D ## 2 NA 1.50 NA NA -0.1
##D ## 22 NA 1.11 NA NA -0.1
##D ## 23 0.25 NA 1.17 1.92 -0.1
##D ## 41 0.25 1.50 1.48 1.92 -0.1
##D ## 43 -1.93 1.11 1.10 1.14 -0.1
##D
##D # (1) Note that estimated WLE reliabilities are not trustworthy in this example.
##D # (2) If cases do not possess any observations on dimensions, then WLEs
##D # and their corresponding standard errors are set to NA.
##D
##D #############################################################################
##D # EXAMPLE 3: Partial credit model | Comparison WLEs with PP package
##D #############################################################################
##D
##D library(PP)
##D data(data.gpcm)
##D dat <- data.gpcm
##D I <- ncol(dat)
##D
##D #****************************************
##D #*** Model 1: Partial Credit Model
##D
##D # estimation in TAM
##D mod1 <- TAM::tam.mml( dat )
##D summary(mod1)
##D
##D #-- WLE estimation in TAM
##D tamw1 <- TAM::tam.wle( mod1 )
##D
##D #-- WLE estimation with PP package
##D # convert AXsi parameters into thres parameters for PP
##D AXsi0 <- - mod1$AXsi[,-1]
##D b <- AXsi0
##D K <- ncol(AXsi0)
##D for (cc in 2:K){
##D b[,cc] <- AXsi0[,cc] - AXsi0[,cc-1]
##D }
##D # WLE estimation in PP
##D ppw1 <- PP::PP_gpcm( respm=as.matrix(dat), thres=t(b), slopes=rep(1,I) )
##D
##D #-- compare results
##D dfr <- cbind( tamw1[, c("theta","error") ], ppw1$resPP)
##D head( round(dfr,3))
##D ## theta error resPP.estimate resPP.SE nsteps
##D ## 1 -1.006 0.973 -1.006 0.973 8
##D ## 2 -0.122 0.904 -0.122 0.904 8
##D ## 3 0.640 0.836 0.640 0.836 8
##D ## 4 0.640 0.836 0.640 0.836 8
##D ## 5 0.640 0.836 0.640 0.836 8
##D ## 6 -1.941 1.106 -1.941 1.106 8
##D plot( dfr$resPP.estimate, dfr$theta, pch=16, xlab="PP", ylab="TAM")
##D lines( c(-10,10), c(-10,10) )
##D
##D #****************************************
##D #*** Model 2: Generalized partial Credit Model
##D
##D # estimation in TAM
##D mod2 <- TAM::tam.mml.2pl( dat, irtmodel="GPCM" )
##D summary(mod2)
##D
##D #-- WLE estimation in TAM
##D tamw2 <- TAM::tam.wle( mod2 )
##D
##D #-- WLE estimation in PP
##D # convert AXsi parameters into thres and slopes parameters for PP
##D AXsi0 <- - mod2$AXsi[,-1]
##D slopes <- mod2$B[,2,1]
##D K <- ncol(AXsi0)
##D slopesM <- matrix( slopes, I, ncol=K )
##D AXsi0 <- AXsi0 / slopesM
##D b <- AXsi0
##D for (cc in 2:K){
##D b[,cc] <- AXsi0[,cc] - AXsi0[,cc-1]
##D }
##D # estimation in PP
##D ppw2 <- PP::PP_gpcm( respm=as.matrix(dat), thres=t(b), slopes=slopes )
##D
##D #-- compare results
##D dfr <- cbind( tamw2[, c("theta","error") ], ppw2$resPP)
##D head( round(dfr,3))
##D ## theta error resPP.estimate resPP.SE nsteps
##D ## 1 -0.476 0.971 -0.476 0.971 13
##D ## 2 -0.090 0.973 -0.090 0.973 13
##D ## 3 0.311 0.960 0.311 0.960 13
##D ## 4 0.311 0.960 0.311 0.960 13
##D ## 5 1.749 0.813 1.749 0.813 13
##D ## 6 -1.513 1.032 -1.513 1.032 13
## End(Not run)
| /data/genthat_extracted_code/TAM/examples/tam.wle.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 6,805 | r | library(TAM)
### Name: tam.wle
### Title: Weighted Likelihood Estimation and Maximum Likelihood Estimation
### of Person Parameters
### Aliases: tam.wle tam.mml.wle tam.mml.wle2 tam_jml_wle print.tam.wle
### summary.tam.wle
### Keywords: WLE MLE Person parameter estimation print
### ** Examples
#############################################################################
# EXAMPLE 1: 1PL model, data.sim.rasch
#############################################################################
data(data.sim.rasch)
# estimate Rasch model
mod1 <- TAM::tam.mml(resp=data.sim.rasch)
# WLE estimation
wle1 <- TAM::tam.wle( mod1 )
## WLE Reliability=0.894
print(wle1)
summary(wle1)
# scoring for a different dataset containing same items (first 10 persons in sim.rasch)
wle2 <- TAM::tam.wle( mod1, score.resp=data.sim.rasch[1:10,])
#--- WLE estimation without using a TAM object
#* create an input list
input <- list( resp=data.sim.rasch, AXsi=mod1$AXsi, B=mod1$B )
#* estimation
wle2b <- TAM::tam.mml.wle2( input )
## Not run:
##D #############################################################################
##D # EXAMPLE 2: 3-dimensional Rasch model | data.read from sirt package
##D #############################################################################
##D
##D data(data.read, package="sirt")
##D # define Q-matrix
##D Q <- matrix(0,12,3)
##D Q[ cbind( 1:12, rep(1:3,each=4) ) ] <- 1
##D # redefine data: create some missings for first three cases
##D resp <- data.read
##D resp[1:2, 5:12] <- NA
##D resp[3,1:4] <- NA
##D ## > head(resp)
##D ## A1 A2 A3 A4 B1 B2 B3 B4 C1 C2 C3 C4
##D ## 2 1 1 1 1 NA NA NA NA NA NA NA NA
##D ## 22 1 1 0 0 NA NA NA NA NA NA NA NA
##D ## 23 NA NA NA NA 1 0 1 1 1 1 1 1
##D ## 41 1 1 1 1 1 1 1 1 1 1 1 1
##D ## 43 1 0 0 1 0 0 1 1 1 0 1 0
##D ## 63 1 1 0 0 1 0 1 1 1 1 1 1
##D
##D # estimate 3-dimensional Rasch model
##D mod <- TAM::tam.mml( resp=resp, Q=Q, control=list(snodes=1000,maxiter=50) )
##D summary(mod)
##D
##D # WLE estimates
##D wmod <- TAM::tam.wle(mod, Msteps=3)
##D summary(wmod)
##D ## head(round(wmod,2))
##D ## pid N.items PersonScores.Dim01 PersonScores.Dim02 PersonScores.Dim03
##D ## 2 1 4 3.7 0.3 0.3
##D ## 22 2 4 2.0 0.3 0.3
##D ## 23 3 8 0.3 3.0 3.7
##D ## 41 4 12 3.7 3.7 3.7
##D ## 43 5 12 2.0 2.0 2.0
##D ## 63 6 12 2.0 3.0 3.7
##D ## PersonMax.Dim01 PersonMax.Dim02 PersonMax.Dim03 theta.Dim01 theta.Dim02
##D ## 2 4.0 0.6 0.6 1.06 NA
##D ## 22 4.0 0.6 0.6 -0.96 NA
##D ## 23 0.6 4.0 4.0 NA -0.07
##D ## 41 4.0 4.0 4.0 1.06 0.82
##D ## 43 4.0 4.0 4.0 -0.96 -1.11
##D ## 63 4.0 4.0 4.0 -0.96 -0.07
##D ## theta.Dim03 error.Dim01 error.Dim02 error.Dim03 WLE.rel.Dim01
##D ## 2 NA 1.50 NA NA -0.1
##D ## 22 NA 1.11 NA NA -0.1
##D ## 23 0.25 NA 1.17 1.92 -0.1
##D ## 41 0.25 1.50 1.48 1.92 -0.1
##D ## 43 -1.93 1.11 1.10 1.14 -0.1
##D
##D # (1) Note that estimated WLE reliabilities are not trustworthy in this example.
##D # (2) If cases do not possess any observations on dimensions, then WLEs
##D # and their corresponding standard errors are set to NA.
##D
##D #############################################################################
##D # EXAMPLE 3: Partial credit model | Comparison WLEs with PP package
##D #############################################################################
##D
##D library(PP)
##D data(data.gpcm)
##D dat <- data.gpcm
##D I <- ncol(dat)
##D
##D #****************************************
##D #*** Model 1: Partial Credit Model
##D
##D # estimation in TAM
##D mod1 <- TAM::tam.mml( dat )
##D summary(mod1)
##D
##D #-- WLE estimation in TAM
##D tamw1 <- TAM::tam.wle( mod1 )
##D
##D #-- WLE estimation with PP package
##D # convert AXsi parameters into thres parameters for PP
##D AXsi0 <- - mod1$AXsi[,-1]
##D b <- AXsi0
##D K <- ncol(AXsi0)
##D for (cc in 2:K){
##D b[,cc] <- AXsi0[,cc] - AXsi0[,cc-1]
##D }
##D # WLE estimation in PP
##D ppw1 <- PP::PP_gpcm( respm=as.matrix(dat), thres=t(b), slopes=rep(1,I) )
##D
##D #-- compare results
##D dfr <- cbind( tamw1[, c("theta","error") ], ppw1$resPP)
##D head( round(dfr,3))
##D ## theta error resPP.estimate resPP.SE nsteps
##D ## 1 -1.006 0.973 -1.006 0.973 8
##D ## 2 -0.122 0.904 -0.122 0.904 8
##D ## 3 0.640 0.836 0.640 0.836 8
##D ## 4 0.640 0.836 0.640 0.836 8
##D ## 5 0.640 0.836 0.640 0.836 8
##D ## 6 -1.941 1.106 -1.941 1.106 8
##D plot( dfr$resPP.estimate, dfr$theta, pch=16, xlab="PP", ylab="TAM")
##D lines( c(-10,10), c(-10,10) )
##D
##D #****************************************
##D #*** Model 2: Generalized partial Credit Model
##D
##D # estimation in TAM
##D mod2 <- TAM::tam.mml.2pl( dat, irtmodel="GPCM" )
##D summary(mod2)
##D
##D #-- WLE estimation in TAM
##D tamw2 <- TAM::tam.wle( mod2 )
##D
##D #-- WLE estimation in PP
##D # convert AXsi parameters into thres and slopes parameters for PP
##D AXsi0 <- - mod2$AXsi[,-1]
##D slopes <- mod2$B[,2,1]
##D K <- ncol(AXsi0)
##D slopesM <- matrix( slopes, I, ncol=K )
##D AXsi0 <- AXsi0 / slopesM
##D b <- AXsi0
##D for (cc in 2:K){
##D b[,cc] <- AXsi0[,cc] - AXsi0[,cc-1]
##D }
##D # estimation in PP
##D ppw2 <- PP::PP_gpcm( respm=as.matrix(dat), thres=t(b), slopes=slopes )
##D
##D #-- compare results
##D dfr <- cbind( tamw2[, c("theta","error") ], ppw2$resPP)
##D head( round(dfr,3))
##D ## theta error resPP.estimate resPP.SE nsteps
##D ## 1 -0.476 0.971 -0.476 0.971 13
##D ## 2 -0.090 0.973 -0.090 0.973 13
##D ## 3 0.311 0.960 0.311 0.960 13
##D ## 4 0.311 0.960 0.311 0.960 13
##D ## 5 1.749 0.813 1.749 0.813 13
##D ## 6 -1.513 1.032 -1.513 1.032 13
## End(Not run)
|
#thirty <- function() { ##The name of the function
# Script prologue: load and join the hospital outcome and metadata files.
# NOTE(review): setwd() with a machine-specific path makes this script
# non-portable; consider parameterising the data directory instead.
setwd("C:/Week3/ProgAssignment3-data")
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
hospital <- read.csv("hospital-data.csv", colClasses = "character")
# Join outcomes to hospital metadata on the shared provider key.
outcome.hospital <- merge(outcome, hospital, by = "Provider.Number")
death <- as.numeric(outcome.hospital[, 11]) ## Heart attack outcome
# Column 15 coerced to numeric -- presumably the number of patients seen
# (see the commented xyplot axis labels below); confirm against the data.
npatient <- as.numeric(outcome.hospital[, 15])
owner <- factor(outcome.hospital$Hospital.Ownership)
# Find the hospital in `state` with the lowest 30-day death rate for the
# given `outcome`.
# NOTE(review): unimplemented stub -- the body is empty, so the function
# currently returns NULL regardless of its arguments.
best <- function(state, outcome) {
        ## Read outcome data
        ## Check that state and outcome are valid
        ## Return hospital name in that state with lowest 30-day death rate
}
# library(lattice) #needed for xyplot
#xyplot(death~npatient|owner,type = c("p","r"),
# main="Heart Attack 30-day Death Rate by Ownership",
# ylab="30-day Death Rate", xlab="Number of Patients Seen")
#outcome[outcome$State] <- as.numeric(outcome$State)
#outcome[, 11] <- as.numeric(outcome[, 11])
#outcome[, 17] <- as.numeric(outcome[, 17])
#outcome[, 23] <- as.numeric(outcome[, 23])
##subsetting a data frame:
#outcome2 <- outcome[outcome$State > 19,] ## keep states having more than 19 hospitals
#median11 <- median(outcome[,11], na.rm=TRUE) # 30-day death rate from heart attack
#print(median11)
#median17 <- median(outcome[,17], na.rm=TRUE) # 30-day death rate from heart failure
#print(median17)
#median23 <- median(outcome[, 23], na.rm=TRUE) # 30-day death rate from pneumonia
#print(median23)
# death <- outcome2[, 11]
# state <- outcome2$State
# boxplot(death ~ state, main="Heart Attack 30-day Death Rate by State",
# ylab="30-day Death Rate")
#par(mfrow = c(3, 1)) #show three rows of graphs, one column wide
#hist(outcome[, 11], main = "Heart Attack", xlim= c(10,20), xlab="30-day Death Rate") #histogram
#hist(outcome[, 17], main = "Heart Failure",xlim=c(10,20), xlab="30-day Death Rate") #histogram
#hist(outcome[, 23], main = "Pneumonia", xlim=c(10,20), xlab="30-day Death Rate", prob=TRUE) #histogram
#mytable <- table(outcome$State) summary table of number of hospitals in each state
#print(mytable)
#names <- names(outcome) # the variable names in the data frame
#names
}
| /thirty.R | no_license | mandydog/Rthirdweek | R | false | false | 2,383 | r | #thirty <- function() { ##The name of the function
## NOTE: the `#thirty <- function() {` header of this script is commented out,
## so the closing brace at the bottom is commented out as well; previously the
## stray live `}` made the entire file fail to parse when sourced.
setwd("C:/Week3/ProgAssignment3-data") ## WARNING: hard-coded path; only valid on the author's machine
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
hospital <- read.csv("hospital-data.csv", colClasses = "character")
## Join outcomes to hospital characteristics on the provider number.
outcome.hospital <- merge(outcome, hospital, by = "Provider.Number")
death <- as.numeric(outcome.hospital[, 11]) ## Heart attack outcome
npatient <- as.numeric(outcome.hospital[, 15])
owner <- factor(outcome.hospital$Hospital.Ownership)
## Stub: return the hospital in `state` with the lowest 30-day death rate
## for the given `outcome` (not yet implemented; currently returns NULL).
best <- function(state, outcome) {
## Read outcome data
## Check that state and outcome are valid
## Return hospital name in that state with lowest 30-day death rate
}
# library(lattice) #needed for xyplot
#xyplot(death~npatient|owner,type = c("p","r"),
# main="Heart Attack 30-day Death Rate by Ownership",
# ylab="30-day Death Rate", xlab="Number of Patients Seen")
#outcome[outcome$State] <- as.numeric(outcome$State)
#outcome[, 11] <- as.numeric(outcome[, 11])
#outcome[, 17] <- as.numeric(outcome[, 17])
#outcome[, 23] <- as.numeric(outcome[, 23])
##subsetting a data frame:
#outcome2 <- outcome[outcome$State > 19,] ## keep states having more than 19 hospitals
#median11 <- median(outcome[,11], na.rm=TRUE) # 30-day death rate from heart attack
#print(median11)
#median17 <- median(outcome[,17], na.rm=TRUE) # 30-day death rate from heart failure
#print(median17)
#median23 <- median(outcome[, 23], na.rm=TRUE) # 30-day death rate from pneumonia
#print(median23)
# death <- outcome2[, 11]
# state <- outcome2$State
# boxplot(death ~ state, main="Heart Attack 30-day Death Rate by State",
# ylab="30-day Death Rate")
#par(mfrow = c(3, 1)) #show three rows of graphs, one column wide
#hist(outcome[, 11], main = "Heart Attack", xlim= c(10,20), xlab="30-day Death Rate") #histogram
#hist(outcome[, 17], main = "Heart Failure",xlim=c(10,20), xlab="30-day Death Rate") #histogram
#hist(outcome[, 23], main = "Pneumonia", xlim=c(10,20), xlab="30-day Death Rate", prob=TRUE) #histogram
#mytable <- table(outcome$State) summary table of number of hospitals in each state
#print(mytable)
#names <- names(outcome) # the variable names in the data frame
#names
#}
|
library(pbapply)
library(dplyr)
# Draw k samples from a Normal(m, sd) distribution.
generate_data <- function(k, m, sd) {
  rnorm(k, m, sd)
}
# 95% normal-approximation confidence interval for the mean of `data`.
# Returns c(lower, upper). Note: the half-width divides by sqrt(n - 1),
# matching the original implementation.
estimate_ci <- function(data) {
  half_width <- 1.96 * sd(data) / sqrt(length(data) - 1)
  mean(data) + c(-half_width, half_width)
}
# Did the interval est = c(lower, upper) cover the true mean m?
# Returns c(coverage_indicator, interval_width).
evaluate <- function(est, m) {
  covered <- est[1] < m && m < est[2]
  c(covered, est[2] - est[1])
}
# One Monte Carlo iteration: simulate k draws from Normal(m, sd), build
# the confidence interval, and score its coverage and width.
iteration <- function(k, m, sd) {
  evaluate(estimate_ci(generate_data(k, m, sd)), m)
}
# Smoke test: a single iteration with k = 10, m = 100, sd = 30.
iteration(10, 100, 30)
# Run n Monte Carlo iterations of the CI experiment and return the mean
# coverage rate and mean interval width (rowMeans over the 2 x n result).
# Uses seq_len(n) rather than 1:n so that n = 0 yields zero iterations
# instead of the backwards sequence c(1, 0).
simulation <- function(n, k, m, sd) {
results <- pbsapply(seq_len(n), function(x) {iteration(k, m, sd)})
rowMeans(results)
}
# Single check at k = 1000, then sweep sample sizes n = 100, ..., 1000 and
# bind the (coverage, width) summaries as columns onto sim_data.
simulation(1000, 1000, 100, 5)
sim_data <- data.frame(n=(1:10) * 100)
stats <- t(sapply(sim_data$n,
function(n) {
simulation(1000, n, 100, 5)
}
))
sim_data <- cbind(sim_data, stats)
# RD: regression-discontinuity simulation (redefines the helper names above)
# Simulate regression-discontinuity data: running variable `share`,
# treatment `winner` (share > 0.5), and an outcome with a true
# discontinuity of +3 at the cutoff. The RNG call order (runif, runif,
# rnorm) is preserved so seeded results match the original.
generate_data <- function(n) {
  baseline <- runif(n, 0, 5)
  vote_share <- (runif(n, 0, 10) + baseline) / 15
  treated <- vote_share > 0.5
  outcome <- baseline + treated * 3 + rnorm(n)
  data.frame(share = vote_share,
             winner = treated,
             ec_results = outcome)
}
# One large RD dataset for interactive inspection.
data <- generate_data(10000)
# Naive RD estimate: difference in mean outcomes between winners and
# losers whose `share` lies within bandwidth h of the 0.5 cutoff.
estimate <- function(data, h) {
  window <- data[abs(data$share - 0.5) < h, ]
  mean(window$ec_results[window$winner]) -
    mean(window$ec_results[!window$winner])
}
# Bias of an RD estimate relative to the true effect size of 3.
evaluate <- function(est) {
  est - 3
}
# One RD Monte Carlo iteration: simulate k units, estimate the effect
# with bandwidth h, and return the bias.
iteration <- function(k, h) {
  evaluate(estimate(generate_data(k), h))
}
# Run n RD iterations and return c(mean bias, variance of the bias).
# Uses seq_len(n) rather than 1:n so that n = 0 yields zero iterations
# instead of the backwards sequence c(1, 0).
simulation <- function(n, k, h) {
results <- pbsapply(seq_len(n), function(x) {iteration(k, h)})
c(mean(results), var(results))
}
# Bias/variance trade-off of the RD estimator across shrinking bandwidths.
simulation(1000, 10000, 0.6)
simulation(1000, 10000, 0.5)
simulation(1000, 10000, 0.4)
simulation(1000, 10000, 0.3)
simulation(1000, 10000, 0.2)
simulation(1000, 10000, 0.1)
| /week7.R | permissive | Daria631/Dasha | R | false | false | 1,863 | r | library(pbapply)
library(dplyr)
# Draw k samples from a Normal(m, sd) distribution.
generate_data <- function(k, m, sd) {
  rnorm(k, m, sd)
}
# 95% normal-approximation confidence interval for the mean of `data`.
# Returns c(lower, upper). Note: the half-width divides by sqrt(n - 1),
# matching the original implementation.
estimate_ci <- function(data) {
  half_width <- 1.96 * sd(data) / sqrt(length(data) - 1)
  mean(data) + c(-half_width, half_width)
}
# Did the interval est = c(lower, upper) cover the true mean m?
# Returns c(coverage_indicator, interval_width).
evaluate <- function(est, m) {
  covered <- est[1] < m && m < est[2]
  c(covered, est[2] - est[1])
}
# One Monte Carlo iteration: simulate k draws from Normal(m, sd), build
# the confidence interval, and score its coverage and width.
iteration <- function(k, m, sd) {
  evaluate(estimate_ci(generate_data(k, m, sd)), m)
}
# Smoke test: a single iteration with k = 10, m = 100, sd = 30.
iteration(10, 100, 30)
# Run n Monte Carlo iterations of the CI experiment and return the mean
# coverage rate and mean interval width (rowMeans over the 2 x n result).
# Uses seq_len(n) rather than 1:n so that n = 0 yields zero iterations
# instead of the backwards sequence c(1, 0).
simulation <- function(n, k, m, sd) {
results <- pbsapply(seq_len(n), function(x) {iteration(k, m, sd)})
rowMeans(results)
}
# Single check at k = 1000, then sweep sample sizes n = 100, ..., 1000 and
# bind the (coverage, width) summaries as columns onto sim_data.
simulation(1000, 1000, 100, 5)
sim_data <- data.frame(n=(1:10) * 100)
stats <- t(sapply(sim_data$n,
function(n) {
simulation(1000, n, 100, 5)
}
))
sim_data <- cbind(sim_data, stats)
# RD: regression-discontinuity simulation (redefines the helper names above)
# Simulate regression-discontinuity data: running variable `share`,
# treatment `winner` (share > 0.5), and an outcome with a true
# discontinuity of +3 at the cutoff. The RNG call order (runif, runif,
# rnorm) is preserved so seeded results match the original.
generate_data <- function(n) {
  baseline <- runif(n, 0, 5)
  vote_share <- (runif(n, 0, 10) + baseline) / 15
  treated <- vote_share > 0.5
  outcome <- baseline + treated * 3 + rnorm(n)
  data.frame(share = vote_share,
             winner = treated,
             ec_results = outcome)
}
# One large RD dataset for interactive inspection.
data <- generate_data(10000)
# Naive RD estimate: difference in mean outcomes between winners and
# losers whose `share` lies within bandwidth h of the 0.5 cutoff.
estimate <- function(data, h) {
  window <- data[abs(data$share - 0.5) < h, ]
  mean(window$ec_results[window$winner]) -
    mean(window$ec_results[!window$winner])
}
# Bias of an RD estimate relative to the true effect size of 3.
evaluate <- function(est) {
  est - 3
}
# One RD Monte Carlo iteration: simulate k units, estimate the effect
# with bandwidth h, and return the bias.
iteration <- function(k, h) {
  evaluate(estimate(generate_data(k), h))
}
# Run n RD iterations and return c(mean bias, variance of the bias).
# Uses seq_len(n) rather than 1:n so that n = 0 yields zero iterations
# instead of the backwards sequence c(1, 0).
simulation <- function(n, k, h) {
results <- pbsapply(seq_len(n), function(x) {iteration(k, h)})
c(mean(results), var(results))
}
# Bias/variance trade-off of the RD estimator across shrinking bandwidths.
simulation(1000, 10000, 0.6)
simulation(1000, 10000, 0.5)
simulation(1000, 10000, 0.4)
simulation(1000, 10000, 0.3)
simulation(1000, 10000, 0.2)
simulation(1000, 10000, 0.1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/updateRendered.r
\name{setPlot}
\alias{setPlot}
\title{Update a plotOutput object. Can be used instead of renderPlot.}
\usage{
setPlot(id, expr, app = getApp(), update.env = parent.frame(),
quoted = FALSE, ...)
}
\description{
Similar to updatePlot but no need to provide session object
}
| /man/setPlot.Rd | no_license | skranz/shinyEvents | R | false | true | 371 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/updateRendered.r
\name{setPlot}
\alias{setPlot}
\title{Update a plotOutput object. Can be used instead of renderPlot.}
\usage{
setPlot(id, expr, app = getApp(), update.env = parent.frame(),
quoted = FALSE, ...)
}
\description{
Similar to updatePlot but no need to provide session object
}
|
### demonstration of the map area unit problem example:
require(aegis.bathymetry)
maup = map_area_unit_problem( just_return_results=TRUE ) #default is bathymetry data
x = maup$resolution
# x = log( maup$n / maup$resolution^2 ) # data density
yrange = range( maup$min, maup$max )
plot(mean ~ x, maup, pch=20, ylim=yrange)
lines( median ~ x, maup, col="green", lwd=1)
lines(min ~ x, maup, col="red", lwd=4)
lines(max ~ x, maup, col="blue", lwd=4)
lines( I(mean+sd) ~ x, maup, col="gray", lwd=2)
lines( I(mean-sd) ~ x, maup, col="gray", lwd=2)
abline( v=attr(maup, "variogram")$fft$localrange ) # 380 km
plot( sd~ x, maup)
abline( v=attr(maup, "variogram")$fft$localrange ) # 380 km
| /inst/scripts/92_maup.R | permissive | jae0/aegis.bathymetry | R | false | false | 738 | r |
### demonstration of the map area unit problem example:
require(aegis.bathymetry)
maup = map_area_unit_problem( just_return_results=TRUE ) #default is bathymetry data
x = maup$resolution
# x = log( maup$n / maup$resolution^2 ) # data density
yrange = range( maup$min, maup$max )
plot(mean ~ x, maup, pch=20, ylim=yrange)
lines( median ~ x, maup, col="green", lwd=1)
lines(min ~ x, maup, col="red", lwd=4)
lines(max ~ x, maup, col="blue", lwd=4)
lines( I(mean+sd) ~ x, maup, col="gray", lwd=2)
lines( I(mean-sd) ~ x, maup, col="gray", lwd=2)
abline( v=attr(maup, "variogram")$fft$localrange ) # 380 km
plot( sd~ x, maup)
abline( v=attr(maup, "variogram")$fft$localrange ) # 380 km
|
#' Read in cleaned Black Knight data for Fairfax County
#'
#' Reads the cleaned CSV produced by the clean/preclean phase from the shared
#' L: drive, using an explicit column specification so that column types are
#' stable across reads.
#'
#' @return dataframe of black knight data for Fairfax county, with all variables that were created in the clean and preclean phase.
#' @export
#'
#' @examples
#' \dontrun{
#' jur <- read_fairfax()
#' }
read_fairfax <- function() {
  jurisdiction <- "fairfax"
  # file.path() builds the same "L:/.../fairfax/fairfax-cleaned-data.csv"
  # string as the previous paste0() call, but more idiomatically.
  filename <- file.path("L:/Libraries/RegHsg/Data", jurisdiction,
                        paste0(jurisdiction, "-cleaned-data.csv"))
  if (!file.exists(filename)) {
    stop("cleaned data not found in Data directory")
  }
  read_csv(filename,
           col_types = cols(
             county_fips = col_character(),
             county_name = col_character(),
             assessorsparcelnumberapnpin = col_character(),
             propaddress = col_character(),
             propcity = col_character(),
             propstate = col_character(),
             propzip = col_double(),
             propunitno = col_character(),
             prophouseno = col_character(),
             propstreetname = col_character(),
             propstreetsuffix = col_character(),
             lat = col_double(),
             long = col_double(),
             tract = col_character(),
             owneroccupiedresidential = col_character(),
             countylandusedescription = col_character(),
             zoning = col_character(),
             buildingarea = col_double(),
             noofbuildings = col_double(),
             noofstories = col_character(),
             numberofunits = col_double(),
             yearbuilt = col_double(),
             lotsize_acres = col_double(),
             lotsize_sf = col_double(),
             address_type = col_character(),
             category = col_character(),
             category_detail = col_character(),
             residential = col_double(),
             building_type = col_character(),
             area_tax = col_double(),
             zone_tax = col_character(),
             possibleSF_tax = col_double(),
             stories_tax = col_character(),
             category_tax = col_character(),
             parcel_id = col_character(),
             num_livunit = col_double(),
             dwellings_per_parcel = col_double(),
             land_per_parcel = col_double(),
             assessedlandvalue = col_double(),
             assessedimprovementvalue = col_double(),
             totalassessedvalue = col_double(),
             numberofunits_tax = col_double(),
             parcel_address = col_character(),
             area_tax_sum = col_character(),
             parcel_area = col_double(),
             sf_area = col_double(),
             units_area = col_character(),
             containsLarge = col_logical(),
             maxsize = col_double(),
             vacant_flag = col_logical()
           ))
}
| /Macros/read-fairfax.R | no_license | NeighborhoodInfoDC/RegHsg | R | false | false | 2,945 | r |
#' Read in cleaned Black Knight data for Fairfax County
#'
#' Reads the cleaned CSV produced by the clean/preclean phase from the shared
#' L: drive, using an explicit column specification so that column types are
#' stable across reads.
#'
#' @return dataframe of black knight data for Fairfax county, with all variables that were created in the clean and preclean phase.
#' @export
#'
#' @examples
#' \dontrun{
#' jur <- read_fairfax()
#' }
read_fairfax <- function() {
  jurisdiction <- "fairfax"
  # file.path() builds the same "L:/.../fairfax/fairfax-cleaned-data.csv"
  # string as the previous paste0() call, but more idiomatically.
  filename <- file.path("L:/Libraries/RegHsg/Data", jurisdiction,
                        paste0(jurisdiction, "-cleaned-data.csv"))
  if (!file.exists(filename)) {
    stop("cleaned data not found in Data directory")
  }
  read_csv(filename,
           col_types = cols(
             county_fips = col_character(),
             county_name = col_character(),
             assessorsparcelnumberapnpin = col_character(),
             propaddress = col_character(),
             propcity = col_character(),
             propstate = col_character(),
             propzip = col_double(),
             propunitno = col_character(),
             prophouseno = col_character(),
             propstreetname = col_character(),
             propstreetsuffix = col_character(),
             lat = col_double(),
             long = col_double(),
             tract = col_character(),
             owneroccupiedresidential = col_character(),
             countylandusedescription = col_character(),
             zoning = col_character(),
             buildingarea = col_double(),
             noofbuildings = col_double(),
             noofstories = col_character(),
             numberofunits = col_double(),
             yearbuilt = col_double(),
             lotsize_acres = col_double(),
             lotsize_sf = col_double(),
             address_type = col_character(),
             category = col_character(),
             category_detail = col_character(),
             residential = col_double(),
             building_type = col_character(),
             area_tax = col_double(),
             zone_tax = col_character(),
             possibleSF_tax = col_double(),
             stories_tax = col_character(),
             category_tax = col_character(),
             parcel_id = col_character(),
             num_livunit = col_double(),
             dwellings_per_parcel = col_double(),
             land_per_parcel = col_double(),
             assessedlandvalue = col_double(),
             assessedimprovementvalue = col_double(),
             totalassessedvalue = col_double(),
             numberofunits_tax = col_double(),
             parcel_address = col_character(),
             area_tax_sum = col_character(),
             parcel_area = col_double(),
             sf_area = col_double(),
             units_area = col_character(),
             containsLarge = col_logical(),
             maxsize = col_double(),
             vacant_flag = col_logical()
           ))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_example_apps.R
\name{runAppExample}
\alias{runAppExample}
\title{Run demo shiny apps to test out shinyinputtables functionality}
\usage{
runAppExample(demoName)
}
\arguments{
\item{demoName}{Single length character vector denoting the name of the desired demo app to run.}
}
\description{
Run a selected shiny app demo that showcases particular aspects
of the functionality provided by the \code{shinyinputtables} package extension for Shiny.
}
\details{
Currently the only supported demos are called "demo_01" and "demo_02".
Will describe each available demo in a separate section here
}
| /man/runAppExample.Rd | permissive | AnthonyEbert/shinyinputtables | R | false | true | 682 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_example_apps.R
\name{runAppExample}
\alias{runAppExample}
\title{Run demo shiny apps to test out shinyinputtables functionality}
\usage{
runAppExample(demoName)
}
\arguments{
\item{demoName}{Single length character vector denoting the name of the desired demo app to run.}
}
\description{
Run a selected shiny app demo that showcases particular aspects
of the functionality provided by the \code{shinyinputtables} package extension for Shiny.
}
\details{
Currently the only supported demos are called "demo_01" and "demo_02".
Will describe each available demo in a separate section here
}
|
#' Automated parameter blocking procedure for efficient MCMC sampling
#'
#' Runs NIMBLE's automated blocking procedure for a given model object, to dynamically determine a blocking scheme of the continuous-valued model nodes. This blocking scheme is designed to produce efficient MCMC sampling (defined as number of effective samples generated per second of algorithm runtime). See Turek, et al (2015) for details of this algorithm. This also (optionally) compares this blocked MCMC against several static MCMC algorithms, including all univariate sampling, blocking of all continuous-valued nodes, NIMBLE's default MCMC configuration, and custom-specified blockings of parameters.
#'
#' This method allows for fine-tuned usage of the automated blocking procedure. However, the main entry point to the automatic blocking procedure is intended to be through either buildMCMC(..., autoBlock = TRUE), or configureMCMC(..., autoBlock = TRUE).
#'
#' @author Daniel Turek
#'
#' @seealso configureMCMC buildMCMC
#'
#' @param Rmodel A NIMBLE model object, created from \code{\link{nimbleModel}}.
#'
#' @param autoIt The number of MCMC iterations to run intermediate MCMC algorithms, through the course of the procedure. Default 20,000.
#'
#' @param run List of additional MCMC algorithms to compare against the automated blocking MCMC. These may be specified as: the character string 'all' to denote blocking all continuous-valued nodes; the character string 'default' to denote NIMBLE's default MCMC configuration; a named list element consisting of a quoted code block, which when executed returns an MCMC configuration object for comparison; a custom-specificed blocking scheme, specified as a named list element which itself is a list of character vectors, where each character vector specifies the nodes in a particular block. Default is c('all', 'default').
#'
#' @param verbose Logical specifying whether to output considerable details of the automated block procedure, through the course of execution. Default FALSE.
#'
#' @param setSeed Logical specifying whether to call set.seed(0) prior to beginning the blocking procedure. Default TRUE.
#'
#' @param makePlots Logical specifying whether to plot the hierarchical clustering dendrograms, through the course of execution. Default FALSE.
#'
#' @param round Logical specifying whether to round the final output results to two decimal places. Default TRUE.
#'
#' @return Returns a named list containing elements:
#' \itemize{
#' \item \code{summary}: A data frame containing a numerical summary of the performance of all MCMC algorithms (including that from automated blocking)
#' \item \code{autoGroups}: A list specifying the parameter blockings converged on by the automated blocking procedure
#' \item \code{conf}: A NIMBLE MCMC configuration object corresponding to the results of the automated blocking procedure
#' }
#'
#' @references
#'
#' Turek, D., de Valpine, P., Paciorek, C., and Anderson-Bergman, C. (2015). Automated Parameter Blocking for Efficient Markov-Chain Monte Carlo Sampling. <arXiv:1503.05621>.
#'
#' @export
## Driver for the automated parameter-blocking procedure. Runs the requested
## comparison algorithms plus the iterative 'auto' routine, prints an
## efficiency summary, and returns the converged blocking and an MCMC conf.
autoBlock <- function(Rmodel,
                      autoIt = 20000,
                      run = list('all', 'default'),
                      setSeed = TRUE,
                      verbose = FALSE,
                      makePlots = FALSE,
                      round = TRUE ) {
    ## Guard: shorter chains give unreliable correlation estimates for clustering.
    if(autoIt < 10000) stop('Minimum auto-blocking iterations is 10,000')
    control <- list(niter=autoIt, setSeed=setSeed, verbose=verbose, makePlots=makePlots)
    ## Reference-class object that builds, runs, and times the candidate MCMCs.
    ab <- autoBlockClass(Rmodel, control)
    if(!'auto' %in% run) run <- c(run, 'auto') ## always use 'autoBlock' routine
    ab$run(run)
    abList <- list(ab)
    names(abList)[1] <- 'model'
    ## Tabulate efficiency results across all algorithms that were run.
    df <- createDFfromABlist(abList, autoIt)
    dfmin <- reduceDF(df, round = round)
    cat('\nAuto-Blocking summary:\n')
    print(dfmin)
    lastAutoInd <- max(grep('^auto', ab$naming)) ## index of final 'auto' iteration
    lastAutoGrouping <- ab$grouping[[lastAutoInd]] ## grouping of final 'auto' iteration
    ## Report only groups containing two or more nodes; singletons are trivial.
    nonTrivialGroups <- lastAutoGrouping[unlist(lapply(lastAutoGrouping, function(x) length(x)>1))]
    if(length(nonTrivialGroups) > 0) {
        cat('\nAuto-Blocking converged on the node groupings:\n')
        for(i in seq_along(nonTrivialGroups)) {
            group <- nonTrivialGroups[[i]]
            cat(paste0('[', i, '] '))
            cat(paste0(group, collapse = ', '))
            cat('\n')
        }
    } else cat('\nAuto-Blocking converged on all scalar (univariate) sampling\n')
    cat('\n')
    ## create a new MCMC conf with the autoBlock groupings:
    conf <- configureMCMC(Rmodel, nodes = NULL, print = FALSE)
    for(nodeGroup in lastAutoGrouping) addSamplerToConf(Rmodel, conf, nodeGroup)
    retList <- list(summary=dfmin, autoGroups=nonTrivialGroups, conf=conf)
    return(invisible(retList))
}
## Reference class wrapping the model for auto-blocking: holds a replicated
## copy of the user's model, partitions its scalar stochastic nodes into
## continuous vs. whole-node (discrete / Wishart / Dirichlet) groups, and
## manages the initial MCMC configuration that is reused across candidates.
autoBlockModel <- setRefClass(
    Class = 'autoBlockModel',
    fields = list(
        Rmodel_orig = 'ANY',
        Rmodel = 'ANY',
        Cmodel = 'ANY',
        md = 'ANY',
        scalarNodeVector = 'character',
        scalarNodeVectorCont = 'character',
        scalarNodeVectorDisc = 'character',
        nodeGroupScalars = 'list',
        nodeGroupAllBlocked = 'list',
        monitorsVector = 'character',
        initialMCMCconf = 'ANY'
    ),
    methods = list(
        ## Replicate the model and classify its scalar stochastic nodes.
        initialize = function(Rmodel_orig) {
            Rmodel_orig <<- Rmodel_orig
            md <<- Rmodel_orig$modelDef
            Rmodel <<- Rmodel_orig$newModel(replicate = TRUE, check = FALSE)
            ##nimCopy(from = Rmodel_orig, to = Rmodel, logProb = TRUE)
            ##for(var in ls(Rmodel_orig$isDataEnv)) Rmodel$isDataEnv[[var]] <<- Rmodel_orig$isDataEnv[[var]] ## copies data flags to the new model
            scalarNodeVector <<- Rmodel$getNodeNames(stochOnly=TRUE, includeData=FALSE, returnScalarComponents=TRUE)
            ## Nodes that must be sampled whole: discrete-valued, or following
            ## constrained multivariate distributions.
            discreteInd <- as.logical(sapply(scalarNodeVector, function(n) Rmodel$isDiscrete(n), USE.NAMES=FALSE))
            constrainedInd <- sapply(scalarNodeVector, function(n) Rmodel$getDistribution(n) %in% c('dwish', 'dinvwish', 'ddirch'), USE.NAMES=FALSE)
            wholeNodeInd <- discreteInd | constrainedInd
            scalarNodeVectorCont <<- scalarNodeVector[!wholeNodeInd] ## making work with discrete nodes
            scalarNodeVectorDisc <<- scalarNodeVector[ wholeNodeInd] ## making work with discrete nodes, wishart inverse-wishart, and dirichlet
            if(length(scalarNodeVectorCont) == 0) stop('autoBlocking only works with one or more continuous-valued model nodes') ## making work with discrete nodes
            nodeGroupScalars <<- c(unique(lapply(scalarNodeVectorDisc, Rmodel$expandNodeNames)), scalarNodeVectorCont) ## making work with discrete nodes, and also with dmulti distributions
            ##nodeGroupAllBlocked <<- list(scalarNodeVector) ## making work with discrete nodes
            ##nodeGroupAllBlocked <<- c(lapply(scalarNodeVectorDisc, function(x) x), list(scalarNodeVectorCont)) ## making work with discrete nodes
            nodeGroupAllBlocked <<- c(unique(lapply(scalarNodeVectorDisc, Rmodel$expandNodeNames)), list(scalarNodeVectorCont)) ## making work with discrete nodes, and also with dmulti distributions
            monitorsVector <<- Rmodel$getNodeNames(stochOnly=TRUE, includeData=FALSE)
        },
        ## here is where the initial MCMC conf is created, for re-use -- for new version
        ## Builds and compiles one MCMC containing every sampler type that later
        ## candidates may need, so subsequent compilations are fast; the extra
        ## samplers are removed again at the end.
        createInitialMCMCconf = function(runList) {
            initialMCMCconf <<- configureMCMC(Rmodel, print = FALSE)
            nInitialSamplers <- length(initialMCMCconf$samplerConfs)
            initialMCMCconf$addSampler(target = scalarNodeVectorCont[1], type = 'slice', print=FALSE) ## add one slice sampler
            initialMCMCconf$addSampler(target = scalarNodeVectorCont[1], type = 'RW', print=FALSE) ## add one RW sampler
            initialMCMCconf$addSampler(target = scalarNodeVectorCont[1], type = 'RW_block', print=FALSE, silent = TRUE) ## add one RW_block sampler
            addCustomizedSamplersToInitialMCMCconf(runList)
            initialMCMCconf$addMonitors(monitorsVector, print=FALSE)
            RinitialMCMC <- buildMCMC(initialMCMCconf)
            Cmodel <<- compileNimble(Rmodel)
            CinitialMCMC <- compileNimble(RinitialMCMC, project = Rmodel) ## (new version) yes, we need this compileNimble call -- this is the whole point!
            initialMCMCconf$setSamplers(1:nInitialSamplers, print=FALSE) ## important for new version: removes all news samplers added to initial MCMC conf
        },
        ## Recursively walks user-supplied quoted run-list code, rewrites any
        ## `<conf>$addSampler(...)` call to target initialMCMCconf, and
        ## evaluates it so custom samplers get compiled into the initial MCMC.
        addCustomizedSamplersToInitialMCMCconf = function(runListCode) {
            if(is.list(runListCode)) { lapply(runListCode, function(el) addCustomizedSamplersToInitialMCMCconf(el)); return() }
            if(is.call(runListCode)) {
                if(is.call(runListCode[[1]]) && length(runListCode[[1]])==3 && runListCode[[1]][[3]]=='addSampler') {
                    runListCode[[1]][[2]] <- as.name('initialMCMCconf')
                    eval(substitute(RUNLISTCODE, list(RUNLISTCODE=runListCode)))
                    return()
                }
                lapply(runListCode, function(el) addCustomizedSamplersToInitialMCMCconf(el))
                return()
            }
        },
        ## Expands user-requested blocks to scalar components, then returns a
        ## node grouping: each unblocked node alone, plus the requested blocks.
        createGroups = function(listOfBlocks = list()) {
            listOfBlocks <- lapply(listOfBlocks, function(blk) Rmodel$expandNodeNames(blk, returnScalarComponents=TRUE))
            if(any(unlist(listOfBlocks) %in% scalarNodeVectorDisc)) stop('cannot put block sampler on discrete-valued model nodes')
            nodes <- scalarNodeVector
            nodes <- setdiff(nodes, unlist(listOfBlocks))
            nodeList <- lapply(nodes, function(x) x)
            for(ng in listOfBlocks) nodeList[[length(nodeList)+1]] <- ng
            return(nodeList)
        },
        ## Restores the compiled model to the original model's values so every
        ## candidate MCMC starts from identical initial conditions.
        resetCmodelInitialValues = function() {
            nimCopy(from = Rmodel_orig, to = Cmodel, logProb = TRUE)
            calculate(Cmodel)
        }
    )
)
## Default control settings for the automated blocking procedure, returned as
## a named list; callers fill in any of these that their control list omits.
autoBlockParamDefaults <- function() {
    defaults <- list()
    defaults$makePlots <- FALSE
    defaults$niter     <- 20000
    defaults$setSeed   <- TRUE
    defaults$verbose   <- FALSE
    defaults
}
autoBlockClass <- setRefClass(
Class = 'autoBlockClass',
fields = list(
## special
abModel = 'ANY',
it = 'numeric',
## overall control
makePlots = 'logical',
niter = 'numeric',
setSeed = 'logical',
verbose = 'logical',
## persistant lists of historical data
naming = 'list',
candidateGroups = 'list',
grouping = 'list',
groupSizes = 'list',
groupIDs = 'list',
samplers = 'list',
timing = 'list',
ess = 'list',
essPT = 'list',
empCov = 'list',
empCor = 'list',
distMatrix = 'list',
hTree = 'list'
),
methods = list(
initialize = function(Rmodel, control=list()) {
abModel <<- autoBlockModel(Rmodel)
defaultsList <- autoBlockParamDefaults()
for(i in seq_along(defaultsList)) if(is.null(control[[names(defaultsList)[i]]])) control[[names(defaultsList)[i]]] <- defaultsList[[i]]
for(i in seq_along(control)) eval(substitute(verbose <<- VALUE, list(verbose=as.name(names(control)[i]), VALUE=control[[i]])))
it <<- 0
},
## Execute a list of MCMC comparison runs.  Each element of 'runList' selects
## one comparison, dispatched via switch() below:
##   'none'    : univariate sampling of every (continuous) scalar node
##   'all'     : one block containing all continuous nodes
##   'default' : NIMBLE's default sampler assignment (re-standardized)
##   'auto'    : the iterative auto-blocking procedure
##   a list    : a custom blocking (list of node-name vectors)
##   a quoted {} block : code evaluated here to produce a custom MCMC conf
## Results accumulate in the class fields via runConfListAndSaveBest(),
## and are labelled from the 'naming' field at the end.
run = function(runList) {
if(!is.list(runList)) stop('runList argument should be a list')
if(is.null(names(runList))) names(runList) <- rep('', length(runList))
abModel$createInitialMCMCconf(runList) ## here is where the initial MCMC conf is created, for re-use -- for new version
for(i in seq_along(runList)) {
runListElement <- runList[[i]]
runListName <- names(runList)[i]
## classify the element: character keyword, custom block list, or quoted conf code
if(is.character(runListElement)) {
type <- runListElement
} else if(is.list(runListElement)) {
type <- 'blocks'
} else if(inherits(runListElement, '{')) {
type <- 'conf'
} else stop('don\'t understand element in run list')
switch(type,
none = { confList <- list(createConfFromGroups(abModel$nodeGroupScalars))
runConfListAndSaveBest(confList, 'none') },
all = { confList <- list(createConfFromGroups(abModel$nodeGroupAllBlocked))
runConfListAndSaveBest(confList, 'all') },
default = { ##confList <- list(configureMCMC(oldConf = abModel$initialMCMCconf))
## forcing this processing through createConfFromGroups()
## in order to always standardize the ordering of samplers;
## even though this might result in a different sampler ordering
## than the true NIMBLE 'default' MCMC conf
##groups <- determineGroupsFromConf(abModel$initialMCMCconf)
groups <- lapply(determineGroupsFromConf(abModel$initialMCMCconf), function(nodes) unique(abModel$Rmodel$expandNodeNames(nodes))) ## making work with dmulti distribution
confList <- list(createConfFromGroups(groups))
runConfListAndSaveBest(confList, 'default') },
blocks = { confList <- list(createConfFromGroups(abModel$createGroups(runListElement)))
name <- if(runListName == '') 'customBlocks' else runListName
runConfListAndSaveBest(confList, name) },
conf = { Rmodel <- abModel$Rmodel ## just hoping that the customConf will find this
confList <- list(eval(runListElement, envir=environment()))
name <- if(runListName == '') 'customConf' else runListName
runConfListAndSaveBest(confList, name) },
## iterate: cut the clustering tree into candidate groupings, run each, keep
## the best; repeat (at least twice) while the grouping keeps changing AND
## the worst-case ESS/time keeps improving over the previous iteration
auto = { autoIt <- 0
while((autoIt < 2) || ((!groupingsEquiv(grouping[[it]], grouping[[it-1]])) && (min(essPT[[it]]) > min(essPT[[it-1]])))) {
candidateGroupsList <- if(autoIt==0) list(abModel$nodeGroupScalars) else determineCandidateGroupsFromCurrentSample()
confList <- lapply(candidateGroupsList, function(groups) createConfFromGroups(groups))
runConfListAndSaveBest(confList, paste0('auto',autoIt), auto=TRUE)
autoIt <- autoIt + 1
}
},
stop('don\'t understand element in run list'))
}
## label every accumulated per-iteration record with its run name
names(candidateGroups) <<- naming
names(grouping) <<- naming
names(groupSizes) <<- naming
names(groupIDs) <<- naming
names(samplers) <<- naming
names(timing) <<- naming
names(ess) <<- naming
names(essPT) <<- naming
},
## Propose candidate blocking schemes by cutting the current hierarchical
## clustering tree (hTree[[it]]) at heights 0, 0.1, ..., 1, de-duplicating
## cuts that produce identical memberships.  Returns a named list of
## groupings (names identify the first height producing each cut).
determineCandidateGroupsFromCurrentSample = function() {
    heights <- seq(0, 1, by = 0.1)
    cuts <- lapply(heights, function(h) cutree(hTree[[it]], h = h))
    names(cuts) <- paste0('cut', heights)
    distinctCuts <- unique(cuts)
    ## re-attach names: each distinct cut gets the name of its first occurrence
    for(i in seq_along(distinctCuts)) {
        for(j in seq_along(cuts)) {
            if(all(distinctCuts[[i]] == cuts[[j]])) {
                names(distinctCuts)[i] <- names(cuts)[j]
                break
            }
        }
    }
    lapply(distinctCuts, determineGroupsFromCutree)
},
## Convert a cutree membership vector (named by node) into a list of node
## groups, prepending the whole-node groups for discrete/constrained nodes,
## which are excluded from clustering.
determineGroupsFromCutree = function(ct) {
    contGroups <- lapply(unique(ct), function(clusterID) names(ct)[ct == clusterID])
    discGroups <- unique(lapply(abModel$scalarNodeVectorDisc, abModel$Rmodel$expandNodeNames))
    c(discGroups, contGroups)
},
## Build, compile, and run an MCMC for each candidate configuration in
## 'confList'; score each by its minimum per-node ESS per second; record the
## best candidate's grouping, samplers, timing, and efficiency under a new
## iteration index 'it'.  When auto=TRUE, also compute the empirical
## correlation of the continuous-node samples and the hierarchical
## clustering tree used to propose the next round of candidates.
runConfListAndSaveBest = function(confList, name, auto=FALSE) {
lapply(confList, function(conf) checkOverMCMCconf(conf))
RmcmcList <- lapply(confList, function(conf) buildMCMC(conf))
CmcmcList <- compileNimble(RmcmcList, project = abModel$Rmodel)
if(!is.list(CmcmcList)) CmcmcList <- list(CmcmcList) ## make sure compileNimble() returns a list...
## per-candidate: runtime, per-node ESS, ESS/time, and worst-case ESS/time
timingList <- essList <- essPTList <- essPTminList <- list()
for(i in seq_along(CmcmcList)) {
if(setSeed) set.seed(0)
abModel$resetCmodelInitialValues()
## elapsed (wall-clock) time of the MCMC run
timingList[[i]] <- as.numeric(system.time(CmcmcList[[i]]$run(niter, progressBar = FALSE))[3])
burnedSamples <- extractAndBurnSamples(CmcmcList[[i]])
essList[[i]] <- apply(burnedSamples, 2, effectiveSize)
essList[[i]] <- essList[[i]][essList[[i]] > 0] ## exclude nodes with ESS=0 -- for discrete nodes which are fixed to a certain value; making work with discrete nodes
essPTList[[i]] <- essList[[i]] / timingList[[i]]
essPTminList[[i]] <- sort(essPTList[[i]])[1]
}
## best candidate = largest worst-case (minimum) ESS/time
bestInd <- as.numeric(which(unlist(essPTminList) == max(unlist(essPTminList))))
if(length(bestInd) > 1) stop('there should never be an exact tie for the best...')
if(!is.null(names(confList))) name <- paste0(name, '-', names(confList)[bestInd])
## record this iteration's results in the persistent class fields
it <<- it + 1
naming[[it]] <<- name
candidateGroups[[it]] <<- lapply(confList, function(conf) determineGroupsFromConf(conf))
grouping[[it]] <<- candidateGroups[[it]][[bestInd]]
groupSizes[[it]] <<- determineNodeGroupSizesFromGroups(grouping[[it]])
groupIDs[[it]] <<- determineNodeGroupIDsFromGroups(grouping[[it]])
samplers[[it]] <<- determineSamplersFromGroupsAndConf(grouping[[it]], confList[[bestInd]])
timing[[it]] <<- timingList[[bestInd]]
ess[[it]] <<- essList[[bestInd]]
essPT[[it]] <<- sort(essPTList[[bestInd]])
if(auto) {
## derive the clustering inputs for the next auto-blocking iteration
burnedSamples <- extractAndBurnSamples(CmcmcList[[bestInd]])
burnedSamples <- burnedSamples[, abModel$scalarNodeVectorCont] ## making work with discrete nodes
##empCov[[it]] <<- cov(burnedSamples)
e <- try(cov(burnedSamples))
if(inherits(e, 'try-error')) {
## NOTE(review): debugging leftovers -- browser() opens an interactive
## debugger; the trailing '1; 2' literals are no-ops.  On failure here,
## empCov[[it]] is never assigned and later steps will error.
message('try-error, going into browser'); browser(); 1; 2
} else empCov[[it]] <<- e
##empCor[[it]] <<- cov2cor(empCov[[it]])
e <- try(cov2cor(empCov[[it]]))
if(inherits(e, 'try-error')) {
## NOTE(review): debugging leftovers, as above
message('try-error, going into browser'); browser(); 3; 4
} else empCor[[it]] <<- e
## distance = 1 - |correlation|: highly correlated nodes cluster together
distMatrix[[it]] <<- as.dist(1 - abs(empCor[[it]]))
##hTree[[it]] <<- hclust(distMatrix[[it]])
e <- try(hclust(distMatrix[[it]]))
if(inherits(e, 'try-error')) {
## NOTE(review): debugging leftovers, as above
message('try-error, going into browser'); browser(); 5; 6
} else hTree[[it]] <<- e
}
if(verbose) printCurrent(name, confList[[bestInd]])
if(makePlots && auto) makeCurrentPlots(name)
},
## Extract the sample matrix from a compiled MCMC, drop columns belonging
## to deterministic nodes, and discard the first half of the chain as
## burn-in.  Returns the post-burn-in matrix.
extractAndBurnSamples = function(Cmcmc) {
    sampleMatrix <- as.matrix(Cmcmc$mvSamples)
    determNames <- abModel$Rmodel$getNodeNames(determOnly=TRUE, returnScalarComponents=TRUE)
    keepNames <- setdiff(dimnames(sampleMatrix)[[2]], determNames)
    postBurnin <- (floor(niter/2)+1):niter
    sampleMatrix[postBurnin, keepNames]
},
## Recover the node groupings implied by an MCMC configuration: one group
## per sampler, expanded to scalar components.  A crossLevel sampler's
## group comprises its target top-level nodes plus all their non-data
## stochastic dependencies.
determineGroupsFromConf = function(conf) {
    groups <- unname(lapply(conf$samplerConfs, function(samplerConf) {
        if(samplerConf$name == 'crossLevel') {
            topNodes <- samplerConf$target
            lowNodes <- conf$model$getDependencies(topNodes, self=FALSE, stochOnly=TRUE, includeData=FALSE)
            nodes <- c(topNodes, lowNodes)
        } else {
            nodes <- samplerConf$target
        }
        conf$model$expandNodeNames(nodes, returnScalarComponents=TRUE)
    }))
    return(groups)
},
## Map each node name to the size of the group containing it.
## Returns a named numeric vector (node name -> group size).
determineNodeGroupSizesFromGroups = function(groups) {
    sizes <- numeric(0)
    for(grp in groups) {
        sizes[grp] <- length(grp)   ## vectorized assignment over the group's node names
    }
    return(sizes)
},
## Map each node name to the index of its group within 'groups'.
## Returns a named numeric vector (node name -> group index).
determineNodeGroupIDsFromGroups = function(groups) {
    ids <- numeric(0)
    for(i in seq_along(groups)) {
        ids[groups[[i]]] <- i   ## vectorized assignment over the group's node names
    }
    return(ids)
},
## Map each node name to the name of the sampler assigned to its group.
## Relies on groups[[i]] corresponding positionally to conf$samplerConfs[[i]].
determineSamplersFromGroupsAndConf = function(groups, conf) {
    samplerConfs <- conf$samplerConfs
    if(length(groups) != length(samplerConfs)) stop('something wrong')
    samplerVec <- character(0)
    for(i in seq_along(groups)) {
        samplerVec[groups[[i]]] <- samplerConfs[[i]]$name
    }
    return(samplerVec)
},
## Build an MCMC configuration with one sampler per node group.  Starts
## from a copy of the initial configuration (so previously compiled sampler
## functions can be re-used), strips all of its samplers, then adds one
## sampler per group in a standardized (sorted) order.
createConfFromGroups = function(groups) {
    orderedGroups <- sortGroups(groups)
    conf <- configureMCMC(oldConf = abModel$initialMCMCconf, print = FALSE)
    conf$setSamplers()   ## removes every sampler inherited from initialMCMCconf
    for(grp in orderedGroups) {
        addSamplerToConf(abModel$Rmodel, conf, grp)
    }
    return(conf)
},
## Standardize group ordering: sort node names within each group, then
## order the groups lexicographically by their concatenated node names.
sortGroups = function(groups) {
    sortedWithin <- lapply(groups, sort)
    keys <- unlist(lapply(sortedWithin, function(grp) paste0(grp, collapse = '_')))
    perm <- sort(keys, index.return = TRUE)$ix
    sortedWithin[perm]
},
## Inspect an MCMC configuration and issue a single warning if any
## conjugate sampler will run slowly because NIMBLE is configured to
## verify conjugate posterior distributions.
## Improvements over the original: the loop-invariant package-option
## lookup is hoisted out of the sampler loop, and scanning stops at the
## first conjugate sampler found (the warning was only ever issued once).
checkOverMCMCconf = function(conf) {
    ## nothing to warn about unless posterior verification is enabled
    if(!getNimbleOption('verifyConjugatePosteriors')) return(invisible(NULL))
    for(ss in conf$samplerConfs) {
        if(grepl('^conjugate_', ss$name)) {
            msg <- 'Conjugate sampler functions in \'default\' conf are running slow due to verifying the posterior;\nThis behaviour can be changed using a NIMBLE package option.'
            warning(msg, call. = FALSE)
            return(invisible(NULL))   ## warn at most once, then stop scanning
        }
    }
    invisible(NULL)
},
## Print a human-readable report for iteration 'it': the candidate groups
## (when more than one was tried), the chosen grouping, sampler assignment,
## MCMC runtime, and per-node ESS and ESS/time.
printCurrent = function(name, conf) {
cat(paste0('\n################################\nbegin iteration ', it, ': ', name, '\n################################\n'))
if(length(candidateGroups[[it]]) > 1) { cat('\ncandidate groups:\n'); cg<-candidateGroups[[it]]; for(i in seq_along(cg)) { cat(paste0('\n',names(cg)[i],':\n')); printGrouping(cg[[i]]) } }
cat('\ngroups:\n'); printGrouping(grouping[[it]])
cat('\nsamplers:\n'); conf$getSamplers()  ## NOTE(review): return value discarded -- presumably this prints the samplers itself; confirm printSamplers() was not intended
cat(paste0('\nMCMC runtime: ', round(timing[[it]], 1), ' seconds\n'))
cat('\nESS:\n'); print(round(ess[[it]], 0))
cat('\nESS/time:\n'); print(round(essPT[[it]], 1))
cat(paste0('\n################################\nend iteration ', it, ': ', name, '\n################################\n\n'))
},
## Open a new graphics device and plot the current iteration's clustering
## dendrogram; if plotting fails (e.g. a degenerate tree), close the
## freshly opened device so devices don't accumulate.
makeCurrentPlots = function(name) {
dev.new()
if(inherits(try(plot(as.dendrogram(hTree[[it]]), ylim=c(0,1), main=name), silent=TRUE), 'try-error')) dev.off()
},
## Print one line per group, formatted as "[i] node1, node2, ...".
printGrouping = function(g) {
    for(groupIndex in seq_along(g)) {
        cat(paste0('[', groupIndex, '] ', paste0(g[[groupIndex]], collapse=', '), '\n'))
    }
},
## Test whether two blocking schemes are equivalent: the same multiset of
## groups (each group compared as a sorted vector of node names),
## irrespective of group order.  Returns TRUE/FALSE.
groupingsEquiv = function(grouping1, grouping2) {
    g1 <- lapply(grouping1, sort)
    g2 <- lapply(grouping2, sort)
    ## destructively match each group of g1 against an unused group of g2
    while(length(g1) > 0) {
        matchInd <- 0
        for(i in seq_along(g2)) {
            if(identical(g1[[1]], g2[[i]])) {
                matchInd <- i
                break
            }
        }
        if(matchInd == 0) return(FALSE)   ## unmatched group: not equivalent
        g1[1] <- NULL
        g2[matchInd] <- NULL
    }
    ## equivalent only if g2 was exhausted as well
    return(length(g2) == 0)
}
)
)
## Add an appropriate sampler to an MCMC configuration for one node group,
## chosen by the following precedence (first match wins):
##   (1) multi-node group                           -> RW_block
##   (2) name not itself a model node, continuous   -> RW
##   (3) end node (listed in 'nodeNamesEnd' map)    -> posterior_predictive
##   (4) binary-valued node                         -> binary
##   (5) discrete node: dmulti -> RW_multinomial, otherwise slice
##   (6) multivariate node (>1 scalar component)    -> RW_block
##   (7) anything else                              -> RW
addSamplerToConf <- function(Rmodel, conf, nodeGroup) {
    if(length(nodeGroup) > 1) {
        conf$addSampler(target = nodeGroup, type = 'RW_block', print = FALSE, silent = TRUE)
        return()
    }
    ## a scalar component (e.g. one element of a vector node), continuous-valued
    if(!(nodeGroup %in% Rmodel$getNodeNames()) && !Rmodel$isDiscrete(nodeGroup)) {
        conf$addSampler(target = nodeGroup, type = 'RW', print = FALSE)
        return()
    }
    if(nodeGroup %in% Rmodel$getMaps('nodeNamesEnd')) {
        conf$addSampler(target = nodeGroup, type = 'posterior_predictive', print = FALSE)
        return()
    }
    if(Rmodel$isBinary(nodeGroup)) {
        conf$addSampler(target = nodeGroup, type = 'binary', print = FALSE)
        return()
    }
    if(Rmodel$isDiscrete(nodeGroup)) {
        samplerType <- if(Rmodel$getDistribution(nodeGroup) == 'dmulti') 'RW_multinomial' else 'slice'
        conf$addSampler(target = nodeGroup, type = samplerType, print = FALSE)
        return()
    }
    if(length(Rmodel$expandNodeNames(nodeGroup, returnScalarComponents = TRUE)) > 1) {
        conf$addSampler(target = nodeGroup, type = 'RW_block', print = FALSE, silent = TRUE)
        return()
    }
    conf$addSampler(target = nodeGroup, type = 'RW', print = FALSE)
    return()
}
## Flatten a named list of autoBlock result objects into one long data frame
## with one row per (model, blocking, node), plus derived columns:
##   timePer10k: runtime scaled to 10,000 MCMC iterations
##   essPer10k:  ESS scaled to 10,000 iterations (x2 because ESS was
##               computed from the post-burn-in half of the chain)
##   Efficiency: essPer10k / timePer10k
##   mcmc:       blocking name with any '-suffix' stripped
## Improvement: rows are built as a list of small data frames and combined
## with a single rbind, avoiding the quadratic cost of extending the data
## frame one row-range at a time.
createDFfromABlist <- function(lst, niter) {
    rowBlocks <- list()
    for(iAB in seq_along(lst)) {
        ab <- lst[[iAB]]
        abName <- names(lst)[iAB]
        for(iBlock in seq_along(ab$naming)) {
            ess <- ab$ess[[iBlock]]
            nodes <- names(ess)
            ## per-node vectors are re-aligned to the ordering of 'nodes'
            rowBlocks[[length(rowBlocks) + 1]] <- data.frame(
                model     = abName,
                blocking  = ab$naming[[iBlock]],
                timing    = ab$timing[[iBlock]],
                node      = nodes,
                groupSize = unname(ab$groupSizes[[iBlock]][nodes]),
                groupID   = unname(ab$groupIDs[[iBlock]][nodes]),
                sampler   = unname(ab$samplers[[iBlock]][nodes]),
                ess       = unname(ess),
                essPT     = unname(ab$essPT[[iBlock]][nodes]),
                stringsAsFactors = FALSE)
        }
    }
    if(length(rowBlocks) > 0) {
        df <- do.call(rbind, rowBlocks)
    } else {
        ## empty skeleton with the same column names and types
        df <- data.frame(model=character(), blocking=character(), timing=numeric(),
                         node=character(), groupSize=numeric(), groupID=numeric(),
                         sampler=character(), ess=numeric(), essPT=numeric(),
                         stringsAsFactors=FALSE)
    }
    rownames(df) <- NULL
    df$timePer10k <- df$timing * 10000/niter
    df$essPer10k <- df$ess * 10000/niter * 2   ## x2: ESS came from half the chain
    df$Efficiency <- df$essPer10k / df$timePer10k
    df$mcmc <- gsub('-.+', '', df$blocking)
    return(df)
}
## Plot ESS and ESS/time summaries for every model/blocking combination.
## One panel per model and x-variable: each blocking occupies one horizontal
## line, node-wise values are drawn as text labels showing the node's group
## size, and a grey bar along the line shows relative runtime.  The best
## blocking (largest worst-case ESS/time) is highlighted in green.
##   df:        long-format results, as produced by createDFfromABlist()
##   xlimToMin: if TRUE, x-axis extends only to the largest per-blocking
##              minimum (zooms in on the bottleneck values)
##   together:  all panels on one device (default TRUE when <= 5 models)
plotABS <- function(df, xlimToMin=FALSE, together) {
models <- unique(df$model)
nModels <- length(models)
if(missing(together)) together <- if(nModels <= 5) TRUE else FALSE
nVertPlots <- if(together) nModels*2 else nModels
xVarNames <- c('ess', 'essPT')
## par() setup is stored quoted so it can be (re)applied per device
parCmd <- quote(par(mfrow=c(nVertPlots,1),mar=c(1,0,1,0),tcl=-.1,mgp=c(3,0,0),cex.axis=.7))
if(together) { eval(parCmd) }
for(xVarName in xVarNames) {
if(!together) { eval(parCmd) }
## largest, over models and blockings, of the per-blocking minimum value
maxMinXVar<-0; for(mod in models) {dfMod<-df[df$model==mod,]; blks<-unique(dfMod$blocking); for(blk in blks) {maxMinXVar<-max(maxMinXVar,min(dfMod[dfMod$blocking==blk,xVarName]))}}
maxXVar <- if(xlimToMin) maxMinXVar else max(df[, xVarName])
xlim <- c(maxXVar*-0.05, maxXVar)  ## small left margin to hold blocking labels
maxTiming <- max(df[, 'timing'])
for(mod in models) {
dfMod <- df[df$model==mod,]
blockings <- unique(dfMod$blocking)
nBlockings <- length(blockings)
## blocking whose worst-case (minimum) ESS/time is largest
bestBlk<-''; bestEssPT<-0; for(blk in blockings) { if(min(dfMod[dfMod$blocking==blk,'essPT'])>bestEssPT) {bestEssPT<-min(dfMod[dfMod$blocking==blk,'essPT']); bestBlk<-blk} }
plot(-100,-100,xlim=xlim,ylim=c(0,nBlockings+1),xlab='',ylab='',main=paste0(xVarName, ' for model ', mod))  ## empty canvas
for(iBlocking in 1:nBlockings) {
blocking <- blockings[iBlocking]
dfModBlock <- dfMod[dfMod$blocking==blocking,]
xVarValues <- dfModBlock[,xVarName]
groupSizes <- dfModBlock[,'groupSize']
timing <- dfModBlock[,'timing'][1] # only first element
timingOnXaxis <- timing/maxTiming * xlim[2]  ## runtime rescaled to the x-range
yCoord <- nBlockings+1-iBlocking  ## blockings listed top-to-bottom
lines(x=c(0,timingOnXaxis), y=rep(yCoord,2), lty=1, lwd=2, col='lightgrey')
col <- if(blocking == bestBlk) 'green' else 'black'
text(x=xVarValues, y=yCoord, labels=groupSizes, cex=0.7, col=col)
col <- if(blocking == bestBlk) 'green' else 'blue'
text(x=xlim[1], y=yCoord, labels=blocking, col=col)
if(timing==maxTiming) text(xlim[2], yCoord+1, paste0('t = ',round(timing,1)))  ## annotate the slowest run
}
}
}
}
## Print, per model, one summary row per blocking scheme: the row of the
## node with minimum ESS/time (the efficiency bottleneck).  Optionally
## appends an 'autoMax' row (the most efficient of the 'auto#' iterations),
## rounds numeric columns, and sorts rows by essPT.  Returns the combined
## data frame invisibly.
## Bug fix: the maximizer index for the 'autoMax' row now takes the FIRST
## match; a tie previously produced a multi-row right-hand side for a
## single-row assignment.  Row selection is also built with a single rbind
## instead of growing the data frame one row at a time.
printMinTimeABS <- function(df, round=TRUE, addAutoMax=TRUE, sortOutput=FALSE) {
    ## drop per-node bookkeeping columns not meaningful in this summary
    namesToRemove <- intersect(c('groupID', 'sampler'), names(df))
    for(name in namesToRemove) { ind <- which(names(df)==name); df <- df[, -ind] }
    models <- unique(df$model)
    cat('\n')
    dfReturn <- data.frame()
    for(mod in models) {
        dfMod <- df[df$model == mod, ]
        blockings <- unique(dfMod$blocking)
        ## one row per blocking: the (first) row attaining the minimum essPT
        rowList <- lapply(blockings, function(blk) {
            dfModBlk <- dfMod[dfMod$blocking == blk, ]
            dfModBlk[which(dfModBlk$essPT == min(dfModBlk$essPT))[1], ]
        })
        dfOut <- do.call(rbind, rowList)
        if(sortOutput) dfOut <- dfOut[sort(dfOut$essPT, index.return=TRUE)$ix, ]
        dimnames(dfOut)[[1]] <- 1:(dim(dfOut)[1])
        if(round) {
            dfOut$timing <- round(dfOut$timing, 2)
            dfOut$timePer10k <- round(dfOut$timePer10k, 2)
            dfOut$ess <- round(dfOut$ess, 1)
            dfOut$essPer10k <- round(dfOut$essPer10k, 1)
            dfOut$essPT <- round(dfOut$essPT, 1)
            dfOut$Efficiency <- round(dfOut$Efficiency, 1)
        }
        if(addAutoMax && ('auto0' %in% blockings)) {
            autoBlockings <- blockings[grepl('^auto', blockings)]
            dfAuto <- dfOut[dfOut$blocking %in% autoBlockings, ]
            ## [1]: guard against ties producing a multi-row assignment
            maxEffInd <- which(dfAuto$Efficiency == max(dfAuto$Efficiency))[1]
            nextInd <- dim(dfOut)[1] + 1
            dfOut[nextInd, ] <- dfAuto[maxEffInd, ]
            dfOut[nextInd, 'blocking'] <- dfOut[nextInd, 'mcmc'] <- 'autoMax'
        }
        print(dfOut)
        cat('\n')
        dfReturn <- rbind(dfReturn, dfOut)
    }
    return(invisible(dfReturn))
}
## Reduce a long per-node results data frame to one worst-case row per MCMC
## scheme, with human-readable scheme names.  Columns: S = ESS per 10k
## iterations, C = runtime per 10k iterations, Efficiency = S/C.  The row
## kept per scheme is the (first) one with minimum Efficiency -- the
## bottleneck node.
## Improvements: '<-' replaces '=' for assignment; per-scheme rows are
## combined with a single rbind instead of growing the data frame in a loop.
reduceDF <- function(df, addAutoMax=TRUE, sortOutput=TRUE, round=TRUE) {
    df <- data.frame(mcmc=df$mcmc, node=df$node, S=df$essPer10k,
                     C=df$timePer10k, Efficiency=df$Efficiency,
                     stringsAsFactors=FALSE)
    mcmcs <- unique(df$mcmc)
    ## one row per scheme: the first row attaining the minimum Efficiency
    rowList <- lapply(mcmcs, function(m) {
        dfBlk <- df[df$mcmc == m, ]
        dfBlk[which(dfBlk$Efficiency == min(dfBlk$Efficiency))[1], ]
    })
    dfOut <- do.call(rbind, rowList)
    ## human-readable scheme names
    dfOut[dfOut$mcmc=='auto0',   'mcmc'] <- 'All Scalar'
    dfOut[dfOut$mcmc=='all',     'mcmc'] <- 'All Blocked'
    dfOut[dfOut$mcmc=='default', 'mcmc'] <- 'Default'
    if(addAutoMax) {
        ## the last remaining 'auto#' iteration is the converged result
        autoBlockings <- dfOut$mcmc[grepl('^auto', dfOut$mcmc)]
        autoLast <- autoBlockings[length(autoBlockings)]
        dfOut[dfOut$mcmc==autoLast, 'mcmc'] <- 'Auto-Blocking'
        ## remove any remaining intermediate 'auto#' entries
        dfOut <- dfOut[!dfOut$mcmc %in% autoBlockings, ]
    }
    if(sortOutput) dfOut <- dfOut[sort(dfOut$Efficiency, index.return=TRUE)$ix, ]
    dimnames(dfOut)[[1]] <- 1:(dim(dfOut)[1])
    if(round) {
        dfOut$S <- round(dfOut$S, 2)
        dfOut$C <- round(dfOut$C, 2)
        dfOut$Efficiency <- round(dfOut$Efficiency, 2)
    }
    return(dfOut)
}
| /packages/nimble/R/MCMC_autoBlock.R | permissive | DRJP/nimble | R | false | false | 34,217 | r | #' Automated parameter blocking procedure for efficient MCMC sampling
#'
#' Runs NIMBLE's automated blocking procedure for a given model object, to dynamically determine a blocking scheme of the continuous-valued model nodes. This blocking scheme is designed to produce efficient MCMC sampling (defined as number of effective samples generated per second of algorithm runtime). See Turek, et al (2015) for details of this algorithm. This also (optionally) compares this blocked MCMC against several static MCMC algorithms, including all univariate sampling, blocking of all continuous-valued nodes, NIMBLE's default MCMC configuration, and custom-specified blockings of parameters.
#'
#' This method allows for fine-tuned usage of the automated blocking procedure. However, the main entry point to the automatic blocking procedure is intended to be through either buildMCMC(..., autoBlock = TRUE), or configureMCMC(..., autoBlock = TRUE).
#'
#' @author Daniel Turek
#'
#' @seealso configureMCMC buildMCMC
#'
#' @param Rmodel A NIMBLE model object, created from \code{\link{nimbleModel}}.
#'
#' @param autoIt The number of MCMC iterations to run intermediate MCMC algorithms, through the course of the procedure. Default 20,000.
#'
#' @param run List of additional MCMC algorithms to compare against the automated blocking MCMC. These may be specified as: the character string 'all' to denote blocking all continuous-valued nodes; the character string 'default' to denote NIMBLE's default MCMC configuration; a named list element consisting of a quoted code block, which when executed returns an MCMC configuration object for comparison; a custom-specified blocking scheme, specified as a named list element which itself is a list of character vectors, where each character vector specifies the nodes in a particular block. Default is c('all', 'default').
#'
#' @param verbose Logical specifying whether to output considerable details of the automated block procedure, through the course of execution. Default FALSE.
#'
#' @param setSeed Logical specifying whether to call set.seed(0) prior to beginning the blocking procedure. Default TRUE.
#'
#' @param makePlots Logical specifying whether to plot the hierarchical clustering dendrograms, through the course of execution. Default FALSE.
#'
#' @param round Logical specifying whether to round the final output results to two decimal places. Default TRUE.
#'
#' @return Returns a named list containing elements:
#' \itemize{
#' \item \code{summary}: A data frame containing a numerical summary of the performance of all MCMC algorithms (including that from automated blocking)
#' \item \code{autoGroups}: A list specifying the parameter blockings converged on by the automated blocking procedure
#' \item \code{conf}: A NIMBLE MCMC configuration object corresponding to the results of the automated blocking procedure
#' }
#'
#' @references
#'
#' Turek, D., de Valpine, P., Paciorek, C., and Anderson-Bergman, C. (2015). Automated Parameter Blocking for Efficient Markov-Chain Monte Carlo Sampling. <arXiv:1503.05621>.
#'
#' @export
autoBlock <- function(Rmodel,
autoIt = 20000,
run = list('all', 'default'),
setSeed = TRUE,
verbose = FALSE,
makePlots = FALSE,
round = TRUE ) {
if(autoIt < 10000) stop('Minimum auto-blocking iterations is 10,000')
control <- list(niter=autoIt, setSeed=setSeed, verbose=verbose, makePlots=makePlots)
ab <- autoBlockClass(Rmodel, control)
## the 'auto' routine is always included, in addition to any comparisons requested
if(!'auto' %in% run) run <- c(run, 'auto') ## always use 'autoBlock' routine
ab$run(run)
abList <- list(ab)
names(abList)[1] <- 'model'
## flatten results into a long data frame, then reduce to a per-scheme summary
df <- createDFfromABlist(abList, autoIt)
dfmin <- reduceDF(df, round = round)
cat('\nAuto-Blocking summary:\n')
print(dfmin)
lastAutoInd <- max(grep('^auto', ab$naming)) ## index of final 'auto' iteration
lastAutoGrouping <- ab$grouping[[lastAutoInd]] ## grouping of final 'auto' iteration
## report only groups with more than one node; singletons mean scalar sampling
nonTrivialGroups <- lastAutoGrouping[unlist(lapply(lastAutoGrouping, function(x) length(x)>1))]
if(length(nonTrivialGroups) > 0) {
cat('\nAuto-Blocking converged on the node groupings:\n')
for(i in seq_along(nonTrivialGroups)) {
group <- nonTrivialGroups[[i]]
cat(paste0('[', i, '] '))
cat(paste0(group, collapse = ', '))
cat('\n')
}
} else cat('\nAuto-Blocking converged on all scalar (univariate) sampling\n')
cat('\n')
## create a new MCMC conf with the autoBlock groupings:
conf <- configureMCMC(Rmodel, nodes = NULL, print = FALSE)
for(nodeGroup in lastAutoGrouping) addSamplerToConf(Rmodel, conf, nodeGroup)
retList <- list(summary=dfmin, autoGroups=nonTrivialGroups, conf=conf)
return(invisible(retList))
}
## Reference class wrapping the model being auto-blocked.  Holds the
## original model, a working replica, its compiled counterpart, and the
## precomputed node partitions (continuous vs. discrete/constrained) used
## to construct candidate blockings.
autoBlockModel <- setRefClass(
Class = 'autoBlockModel',
fields = list(
Rmodel_orig = 'ANY',        ## the user's original (uncompiled) model
Rmodel = 'ANY',             ## working replica used for all MCMC runs
Cmodel = 'ANY',             ## compiled counterpart of Rmodel
md = 'ANY',                 ## model definition of the original model
scalarNodeVector = 'character',      ## all stochastic non-data scalar components
scalarNodeVectorCont = 'character',  ## the continuous subset (clusterable)
scalarNodeVectorDisc = 'character',  ## discrete/constrained subset (kept whole)
nodeGroupScalars = 'list',           ## grouping: every continuous node alone
nodeGroupAllBlocked = 'list',        ## grouping: all continuous nodes in one block
monitorsVector = 'character',        ## stochastic non-data nodes to monitor
initialMCMCconf = 'ANY'              ## base conf re-used to build candidate confs
),
methods = list(
## Replicate the model and partition its scalar stochastic components into
## continuous nodes (eligible for clustering/blocking) and discrete or
## constrained nodes (dwish/dinvwish/ddirch), which are always sampled as
## whole nodes.
initialize = function(Rmodel_orig) {
Rmodel_orig <<- Rmodel_orig
md <<- Rmodel_orig$modelDef
Rmodel <<- Rmodel_orig$newModel(replicate = TRUE, check = FALSE)
##nimCopy(from = Rmodel_orig, to = Rmodel, logProb = TRUE)
##for(var in ls(Rmodel_orig$isDataEnv)) Rmodel$isDataEnv[[var]] <<- Rmodel_orig$isDataEnv[[var]] ## copies data flags to the new model
scalarNodeVector <<- Rmodel$getNodeNames(stochOnly=TRUE, includeData=FALSE, returnScalarComponents=TRUE)
discreteInd <- as.logical(sapply(scalarNodeVector, function(n) Rmodel$isDiscrete(n), USE.NAMES=FALSE))
constrainedInd <- sapply(scalarNodeVector, function(n) Rmodel$getDistribution(n) %in% c('dwish', 'dinvwish', 'ddirch'), USE.NAMES=FALSE)
wholeNodeInd <- discreteInd | constrainedInd
scalarNodeVectorCont <<- scalarNodeVector[!wholeNodeInd] ## making work with discrete nodes
scalarNodeVectorDisc <<- scalarNodeVector[ wholeNodeInd] ## making work with discrete nodes, wishart inverse-wishart, and dirichlet
if(length(scalarNodeVectorCont) == 0) stop('autoBlocking only works with one or more continuous-valued model nodes') ## making work with discrete nodes
nodeGroupScalars <<- c(unique(lapply(scalarNodeVectorDisc, Rmodel$expandNodeNames)), scalarNodeVectorCont) ## making work with discrete nodes, and also with dmulti distributions
##nodeGroupAllBlocked <<- list(scalarNodeVector) ## making work with discrete nodes
##nodeGroupAllBlocked <<- c(lapply(scalarNodeVectorDisc, function(x) x), list(scalarNodeVectorCont)) ## making work with discrete nodes
nodeGroupAllBlocked <<- c(unique(lapply(scalarNodeVectorDisc, Rmodel$expandNodeNames)), list(scalarNodeVectorCont)) ## making work with discrete nodes, and also with dmulti distributions
monitorsVector <<- Rmodel$getNodeNames(stochOnly=TRUE, includeData=FALSE)
},
## Build and compile the base MCMC configuration once, with one sampler of
## each type that candidates may later use (slice, RW, RW_block, plus any
## samplers added inside custom run-list code), so that subsequent
## candidate configurations can re-use the compiled sampler functions.
## The extra samplers are removed again before the conf is kept.
## here is where the initial MCMC conf is created, for re-use -- for new version
createInitialMCMCconf = function(runList) {
initialMCMCconf <<- configureMCMC(Rmodel, print = FALSE)
nInitialSamplers <- length(initialMCMCconf$samplerConfs)
initialMCMCconf$addSampler(target = scalarNodeVectorCont[1], type = 'slice', print=FALSE) ## add one slice sampler
initialMCMCconf$addSampler(target = scalarNodeVectorCont[1], type = 'RW', print=FALSE) ## add one RW sampler
initialMCMCconf$addSampler(target = scalarNodeVectorCont[1], type = 'RW_block', print=FALSE, silent = TRUE) ## add one RW_block sampler
addCustomizedSamplersToInitialMCMCconf(runList)
initialMCMCconf$addMonitors(monitorsVector, print=FALSE)
RinitialMCMC <- buildMCMC(initialMCMCconf)
Cmodel <<- compileNimble(Rmodel)
CinitialMCMC <- compileNimble(RinitialMCMC, project = Rmodel) ## (new version) yes, we need this compileNimble call -- this is the whole point!
initialMCMCconf$setSamplers(1:nInitialSamplers, print=FALSE) ## important for new version: removes all news samplers added to initial MCMC conf
},
## Recursively walk the (possibly quoted) run-list code looking for calls
## of the form <conf>$addSampler(...); re-target each such call at
## initialMCMCconf and evaluate it, so custom samplers get compiled too.
addCustomizedSamplersToInitialMCMCconf = function(runListCode) {
if(is.list(runListCode)) { lapply(runListCode, function(el) addCustomizedSamplersToInitialMCMCconf(el)); return() }
if(is.call(runListCode)) {
if(is.call(runListCode[[1]]) && length(runListCode[[1]])==3 && runListCode[[1]][[3]]=='addSampler') {
runListCode[[1]][[2]] <- as.name('initialMCMCconf')
eval(substitute(RUNLISTCODE, list(RUNLISTCODE=runListCode)))
return()
}
lapply(runListCode, function(el) addCustomizedSamplersToInitialMCMCconf(el))
return()
}
},
## Build a full grouping from a user-specified list of blocks: the given
## blocks (expanded to scalar components), plus a singleton group for
## every remaining scalar node.  Discrete nodes may not be blocked.
createGroups = function(listOfBlocks = list()) {
listOfBlocks <- lapply(listOfBlocks, function(blk) Rmodel$expandNodeNames(blk, returnScalarComponents=TRUE))
if(any(unlist(listOfBlocks) %in% scalarNodeVectorDisc)) stop('cannot put block sampler on discrete-valued model nodes')
nodes <- scalarNodeVector
nodes <- setdiff(nodes, unlist(listOfBlocks))
nodeList <- lapply(nodes, function(x) x)
for(ng in listOfBlocks) nodeList[[length(nodeList)+1]] <- ng
return(nodeList)
},
## Restore the compiled model to the original model's values (including
## log-probabilities), so each trial MCMC starts from identical state.
resetCmodelInitialValues = function() {
nimCopy(from = Rmodel_orig, to = Cmodel, logProb = TRUE)
calculate(Cmodel)
}
)
)
## Default control parameters for the auto-blocking procedure, returned as
## a named list; callers overlay any user-supplied values on top of these.
autoBlockParamDefaults <- function() {
    list(makePlots = FALSE,  ## plot clustering dendrograms each iteration
         niter     = 20000,  ## MCMC iterations per trial run
         setSeed   = TRUE,   ## call set.seed(0) before each trial run
         verbose   = FALSE)  ## print detailed per-iteration output
}
autoBlockClass <- setRefClass(
Class = 'autoBlockClass',
fields = list(
## special
abModel = 'ANY',
it = 'numeric',
## overall control
makePlots = 'logical',
niter = 'numeric',
setSeed = 'logical',
verbose = 'logical',
## persistant lists of historical data
naming = 'list',
candidateGroups = 'list',
grouping = 'list',
groupSizes = 'list',
groupIDs = 'list',
samplers = 'list',
timing = 'list',
ess = 'list',
essPT = 'list',
empCov = 'list',
empCor = 'list',
distMatrix = 'list',
hTree = 'list'
),
methods = list(
initialize = function(Rmodel, control=list()) {
abModel <<- autoBlockModel(Rmodel)
defaultsList <- autoBlockParamDefaults()
for(i in seq_along(defaultsList)) if(is.null(control[[names(defaultsList)[i]]])) control[[names(defaultsList)[i]]] <- defaultsList[[i]]
for(i in seq_along(control)) eval(substitute(verbose <<- VALUE, list(verbose=as.name(names(control)[i]), VALUE=control[[i]])))
it <<- 0
},
run = function(runList) {
## Execute a sequence of blocking strategies.  Each element of runList may be:
##   - a character keyword: 'none', 'all', 'default', or 'auto'
##   - a list of node groups ('blocks')
##   - a quoted '{' expression evaluating to a custom MCMC conf ('conf')
## Each strategy's results are accumulated via runConfListAndSaveBest(),
## and the per-iteration result lists are named at the end.
if(!is.list(runList)) stop('runList argument should be a list')
if(is.null(names(runList))) names(runList) <- rep('', length(runList))
abModel$createInitialMCMCconf(runList) ## here is where the initial MCMC conf is created, for re-use -- for new version
for(i in seq_along(runList)) {
runListElement <- runList[[i]]
runListName <- names(runList)[i]
## Classify this element to select the switch branch below.
if(is.character(runListElement)) {
type <- runListElement
} else if(is.list(runListElement)) {
type <- 'blocks'
} else if(inherits(runListElement, '{')) {
type <- 'conf'
} else stop('don\'t understand element in run list')
switch(type,
## 'none': all-scalar sampling (every scalar node is its own group).
none = { confList <- list(createConfFromGroups(abModel$nodeGroupScalars))
runConfListAndSaveBest(confList, 'none') },
## 'all': one single block containing every node.
all = { confList <- list(createConfFromGroups(abModel$nodeGroupAllBlocked))
runConfListAndSaveBest(confList, 'all') },
default = { ##confList <- list(configureMCMC(oldConf = abModel$initialMCMCconf))
## forcing this processing through createConfFromGroups()
## in order to always standardize the ordering of samplers;
## even though this might result in a different sampler ordering
## than the true NIMBLE 'default' MCMC conf
##groups <- determineGroupsFromConf(abModel$initialMCMCconf)
groups <- lapply(determineGroupsFromConf(abModel$initialMCMCconf), function(nodes) unique(abModel$Rmodel$expandNodeNames(nodes))) ## making work with dmulti distribution
confList <- list(createConfFromGroups(groups))
runConfListAndSaveBest(confList, 'default') },
## 'blocks': user-supplied node groupings.
blocks = { confList <- list(createConfFromGroups(abModel$createGroups(runListElement)))
name <- if(runListName == '') 'customBlocks' else runListName
runConfListAndSaveBest(confList, name) },
## 'conf': evaluate the user's quoted expression; Rmodel is bound in this
## local environment so the expression can reference it.
conf = { Rmodel <- abModel$Rmodel ## just hoping that the customConf will find this
confList <- list(eval(runListElement, envir=environment()))
name <- if(runListName == '') 'customConf' else runListName
runConfListAndSaveBest(confList, name) },
## 'auto': iterate clustering-based blocking; always run at least two
## iterations, then continue while the grouping keeps changing AND the
## worst-node ESS/time keeps improving.
auto = { autoIt <- 0
while((autoIt < 2) || ((!groupingsEquiv(grouping[[it]], grouping[[it-1]])) && (min(essPT[[it]]) > min(essPT[[it-1]])))) {
candidateGroupsList <- if(autoIt==0) list(abModel$nodeGroupScalars) else determineCandidateGroupsFromCurrentSample()
confList <- lapply(candidateGroupsList, function(groups) createConfFromGroups(groups))
runConfListAndSaveBest(confList, paste0('auto',autoIt), auto=TRUE)
autoIt <- autoIt + 1
}
},
stop('don\'t understand element in run list'))
}
## Label every accumulated per-iteration result list by its iteration name.
names(candidateGroups) <<- naming
names(grouping) <<- naming
names(groupSizes) <<- naming
names(groupIDs) <<- naming
names(samplers) <<- naming
names(timing) <<- naming
names(ess) <<- naming
names(essPT) <<- naming
},
determineCandidateGroupsFromCurrentSample = function() {
    ## Cut the current hierarchical clustering tree at heights 0, 0.1, ..., 1,
    ## keep one representative per distinct partition, and convert each
    ## partition into a candidate node grouping.
    heights <- seq(0, 1, by = 0.1)
    cuts <- lapply(heights, function(h) cutree(hTree[[it]], h = h))
    names(cuts) <- paste0('cut', heights)
    distinctCuts <- unique(cuts)
    ## unique() drops names; restore the name of the first matching cut.
    for(k in seq_along(distinctCuts)) {
        for(m in seq_along(cuts)) {
            if(all(distinctCuts[[k]] == cuts[[m]])) {
                names(distinctCuts)[k] <- names(cuts)[m]
                break
            }
        }
    }
    return(lapply(distinctCuts, determineGroupsFromCutree))
},
determineGroupsFromCutree = function(ct) {
    ## Translate a cutree() membership vector into a list of node-name groups.
    ## Continuous nodes: one group per cluster ID, in cluster order.
    contGroups <- lapply(unique(ct), function(id) names(ct)[ct == id])
    ## Discrete nodes always stay in their own (expanded) groups, prepended;
    ## unique() guards against duplicate groups after node-name expansion.
    discGroups <- unique(lapply(abModel$scalarNodeVectorDisc, abModel$Rmodel$expandNodeNames))
    return(c(discGroups, contGroups))
},
runConfListAndSaveBest = function(confList, name, auto=FALSE) {
## Build, compile and run each candidate MCMC configuration, score each by
## its minimum (worst-node) ESS per second, and record the winner under
## iteration index 'it'.  When auto=TRUE, also compute the empirical
## correlation of the winner's continuous-node samples and the derived
## hierarchical clustering tree, which seeds the next auto iteration.
lapply(confList, function(conf) checkOverMCMCconf(conf))
RmcmcList <- lapply(confList, function(conf) buildMCMC(conf))
CmcmcList <- compileNimble(RmcmcList, project = abModel$Rmodel)
if(!is.list(CmcmcList)) CmcmcList <- list(CmcmcList) ## make sure compileNimble() returns a list...
timingList <- essList <- essPTList <- essPTminList <- list()
for(i in seq_along(CmcmcList)) {
## Reset RNG and model state so every candidate runs under identical conditions.
if(setSeed) set.seed(0)
abModel$resetCmodelInitialValues()
timingList[[i]] <- as.numeric(system.time(CmcmcList[[i]]$run(niter, progressBar = FALSE))[3])
burnedSamples <- extractAndBurnSamples(CmcmcList[[i]])
essList[[i]] <- apply(burnedSamples, 2, effectiveSize)
essList[[i]] <- essList[[i]][essList[[i]] > 0] ## exclude nodes with ESS=0 -- for discrete nodes which are fixed to a certain value; making work with discrete nodes
essPTList[[i]] <- essList[[i]] / timingList[[i]]
essPTminList[[i]] <- sort(essPTList[[i]])[1]
}
## The best configuration maximizes the worst-node ESS per unit runtime.
bestInd <- as.numeric(which(unlist(essPTminList) == max(unlist(essPTminList))))
if(length(bestInd) > 1) stop('there should never be an exact tie for the best...')
if(!is.null(names(confList))) name <- paste0(name, '-', names(confList)[bestInd])
## Record everything about the winning configuration for this iteration.
it <<- it + 1
naming[[it]] <<- name
candidateGroups[[it]] <<- lapply(confList, function(conf) determineGroupsFromConf(conf))
grouping[[it]] <<- candidateGroups[[it]][[bestInd]]
groupSizes[[it]] <<- determineNodeGroupSizesFromGroups(grouping[[it]])
groupIDs[[it]] <<- determineNodeGroupIDsFromGroups(grouping[[it]])
samplers[[it]] <<- determineSamplersFromGroupsAndConf(grouping[[it]], confList[[bestInd]])
timing[[it]] <<- timingList[[bestInd]]
ess[[it]] <<- essList[[bestInd]]
essPT[[it]] <<- sort(essPTList[[bestInd]])
if(auto) {
burnedSamples <- extractAndBurnSamples(CmcmcList[[bestInd]])
burnedSamples <- burnedSamples[, abModel$scalarNodeVectorCont] ## making work with discrete nodes
##empCov[[it]] <<- cov(burnedSamples)
## The try()/browser() blocks below are interactive debugging aids kept for
## numerically degenerate sample matrices (e.g. constant columns).
e <- try(cov(burnedSamples))
if(inherits(e, 'try-error')) {
message('try-error, going into browser'); browser(); 1; 2
} else empCov[[it]] <<- e
##empCor[[it]] <<- cov2cor(empCov[[it]])
e <- try(cov2cor(empCov[[it]]))
if(inherits(e, 'try-error')) {
message('try-error, going into browser'); browser(); 3; 4
} else empCor[[it]] <<- e
## Distance = 1 - |correlation|: highly correlated nodes cluster together.
distMatrix[[it]] <<- as.dist(1 - abs(empCor[[it]]))
##hTree[[it]] <<- hclust(distMatrix[[it]])
e <- try(hclust(distMatrix[[it]]))
if(inherits(e, 'try-error')) {
message('try-error, going into browser'); browser(); 5; 6
} else hTree[[it]] <<- e
}
if(verbose) printCurrent(name, confList[[bestInd]])
if(makePlots && auto) makeCurrentPlots(name)
},
extractAndBurnSamples = function(Cmcmc) {
    ## Pull the sample matrix out of a compiled MCMC, drop columns belonging
    ## to deterministic nodes, and discard the first half of the chain as
    ## burn-in.
    sampleMatrix <- as.matrix(Cmcmc$mvSamples)
    determNodes <- abModel$Rmodel$getNodeNames(determOnly=TRUE, returnScalarComponents=TRUE)
    keep <- setdiff(colnames(sampleMatrix), determNodes)
    sampleMatrix[(floor(niter/2)+1):niter, keep]
},
determineGroupsFromConf = function(conf) {
    ## Recover the node groupings implied by an MCMC configuration: one group
    ## per sampler, expanded to scalar components.  For a crossLevel sampler
    ## the group is its target top-level nodes plus their (non-data)
    ## stochastic dependents.
    groups <- unname(lapply(conf$samplerConfs, function(sc) {
        if(sc$name == 'crossLevel') {
            topNodes <- sc$target
            lowNodes <- conf$model$getDependencies(topNodes, self=FALSE, stochOnly=TRUE, includeData=FALSE)
            nodes <- c(topNodes, lowNodes)
        } else {
            nodes <- sc$target
        }
        conf$model$expandNodeNames(nodes, returnScalarComponents=TRUE)
    }))
    return(groups)
},
determineNodeGroupSizesFromGroups = function(groups) {
    ## Build a named numeric vector mapping each node name to the size of
    ## the group containing it.
    sizes <- numeric(0)
    for(gp in groups) {
        sizes[gp] <- length(gp)
    }
    return(sizes)
},
determineNodeGroupIDsFromGroups = function(groups) {
    ## Build a named numeric vector mapping each node name to the index of
    ## its group within 'groups'.
    ids <- numeric(0)
    for(gi in seq_along(groups)) {
        ids[groups[[gi]]] <- gi
    }
    return(ids)
},
determineSamplersFromGroupsAndConf = function(groups, conf) {
    ## Map each node name to the name of the sampler assigned to its group.
    ## Relies on groups[[i]] corresponding positionally to samplerConfs[[i]].
    scs <- conf$samplerConfs
    if(length(groups) != length(scs)) stop('something wrong')
    samplerByNode <- character(0)
    for(i in seq_along(groups)) {
        samplerByNode[groups[[i]]] <- scs[[i]]$name
    }
    return(samplerByNode)
},
createConfFromGroups = function(groups) {
## Build an MCMC configuration with exactly one sampler per node group.
## Groups are canonicalized first so that equivalent groupings always
## yield the same sampler ordering (and hence comparable timings).
groups <- sortGroups(groups)
##conf <- configureMCMC(Rmodel, nodes=NULL, monitors=character(0)) ## original version
## Start from the initial conf (preserving its other settings), then strip
## all of its samplers before adding one per group.
conf <- configureMCMC(oldConf = abModel$initialMCMCconf, print = FALSE) ## new version
conf$setSamplers() ## new version -- removes all the samplers from initalMCMCconf
for(nodeGroup in groups) addSamplerToConf(abModel$Rmodel, conf, nodeGroup)
return(conf)
},
sortGroups = function(groups) {
    ## Canonicalize a grouping: sort the node names within each group, then
    ## order the groups by their underscore-joined name strings, so that
    ## equivalent groupings always produce identical configurations.
    sortedEach <- lapply(groups, sort)
    keys <- unlist(lapply(sortedEach, function(grp) paste0(grp, collapse = '_')))
    return(sortedEach[sort(keys, index.return = TRUE)$ix])
},
checkOverMCMCconf = function(conf) {
    ## Warn (once per call) if conjugate samplers are present while NIMBLE is
    ## set to verify conjugate posteriors: that verification slows sampling
    ## and would bias the timing comparisons made by this algorithm.
    verifying <- getNimbleOption('verifyConjugatePosteriors')
    conjugateFound <- FALSE
    for(sc in conf$samplerConfs) {
        if(verifying && grepl('^conjugate_', sc$name)) conjugateFound <- TRUE
    }
    if(conjugateFound) {
        msg <- 'Conjugate sampler functions in \'default\' conf are running slow due to verifying the posterior;\nThis behaviour can be changed using a NIMBLE package option.'
        warning(msg, call. = FALSE)
    }
},
printCurrent = function(name, conf) {
## Verbose per-iteration report: candidate groupings (when several were
## tried), the chosen grouping, sampler assignments, runtime, ESS and
## ESS/time.  Output formatting is relied upon by users reading the log.
cat(paste0('\n################################\nbegin iteration ', it, ': ', name, '\n################################\n'))
if(length(candidateGroups[[it]]) > 1) { cat('\ncandidate groups:\n'); cg<-candidateGroups[[it]]; for(i in seq_along(cg)) { cat(paste0('\n',names(cg)[i],':\n')); printGrouping(cg[[i]]) } }
cat('\ngroups:\n'); printGrouping(grouping[[it]])
## getSamplers() prints the sampler list as a side effect.
cat('\nsamplers:\n'); conf$getSamplers()
cat(paste0('\nMCMC runtime: ', round(timing[[it]], 1), ' seconds\n'))
cat('\nESS:\n'); print(round(ess[[it]], 0))
cat('\nESS/time:\n'); print(round(essPT[[it]], 1))
cat(paste0('\n################################\nend iteration ', it, ': ', name, '\n################################\n\n'))
},
makeCurrentPlots = function(name) {
    ## Open a new graphics device and draw the current clustering dendrogram;
    ## if plotting fails, close the (empty) device again.
    dev.new()
    plotAttempt <- try(plot(as.dendrogram(hTree[[it]]), ylim=c(0,1), main=name), silent=TRUE)
    if(inherits(plotAttempt, 'try-error')) dev.off()
},
printGrouping = function(g) {
    ## Display a grouping, one line per group: "[i] node1, node2, ...".
    for(i in seq_along(g)) {
        cat(sprintf('[%d] %s\n', i, paste(g[[i]], collapse = ', ')))
    }
},
groupingsEquiv = function(grouping1, grouping2) {
    ## TRUE iff the two groupings contain exactly the same groups, comparing
    ## each group as a sorted vector of node names and ignoring the order of
    ## the groups.  Duplicate groups are handled by one-to-one matching:
    ## every group removed from one side removes exactly one from the other.
    g1 <- lapply(grouping1, sort)
    g2 <- lapply(grouping2, sort)
    while(length(g1) > 0) {
        matchIdx <- 0
        for(j in seq_along(g2)) {
            if(identical(g1[[1]], g2[[j]])) {
                matchIdx <- j
                break
            }
        }
        if(matchIdx == 0) return(FALSE)
        g1[1] <- NULL
        g2[matchIdx] <- NULL
    }
    return(length(g2) == 0)
}
)
)
addSamplerToConf <- function(Rmodel, conf, nodeGroup) {
    ## Assign one sampler to one node group, testing conditions in priority
    ## order:
    ##   multi-node group                      -> RW_block
    ##   continuous scalar not a model node    -> RW
    ##   end node                              -> posterior_predictive
    ##   binary node                           -> binary
    ##   discrete node                         -> RW_multinomial (dmulti) or slice
    ##   multivariate single node              -> RW_block
    ##   otherwise                             -> RW
    if(length(nodeGroup) > 1) {
        conf$addSampler(target = nodeGroup, type = 'RW_block', print = FALSE, silent = TRUE)
    } else if(!(nodeGroup %in% Rmodel$getNodeNames()) && !Rmodel$isDiscrete(nodeGroup)) {
        conf$addSampler(target = nodeGroup, type = 'RW', print = FALSE)
    } else if(nodeGroup %in% Rmodel$getMaps('nodeNamesEnd')) {
        conf$addSampler(target = nodeGroup, type = 'posterior_predictive', print = FALSE)
    } else if(Rmodel$isBinary(nodeGroup)) {
        conf$addSampler(target = nodeGroup, type = 'binary', print = FALSE)
    } else if(Rmodel$isDiscrete(nodeGroup)) {
        if(Rmodel$getDistribution(nodeGroup) == 'dmulti') {
            conf$addSampler(target = nodeGroup, type = 'RW_multinomial', print = FALSE)
        } else {
            conf$addSampler(target = nodeGroup, type = 'slice', print = FALSE)
        }
    } else if(length(Rmodel$expandNodeNames(nodeGroup, returnScalarComponents = TRUE)) > 1) {
        conf$addSampler(target = nodeGroup, type = 'RW_block', print = FALSE, silent = TRUE)
    } else {
        conf$addSampler(target = nodeGroup, type = 'RW', print = FALSE)
    }
    return(NULL)
}
createDFfromABlist <- function(lst, niter) {
    ## Flatten a list of autoBlock results into one long data frame with one
    ## row per (model, blocking, node) combination, plus derived efficiency
    ## columns scaled to a common 10,000-iteration basis.
    ##
    ## lst:   named list of autoBlock result objects (names = model names);
    ##        each carries parallel per-blocking lists: naming, timing, ess,
    ##        essPT, groupSizes, groupIDs, samplers.
    ## niter: number of MCMC iterations actually run (used for scaling).
    ##
    ## The original implementation grew the data frame row-block by row-block
    ## (df[newInd, ] <- NA followed by column assignments), copying the whole
    ## frame on every append; here we accumulate one small data frame per
    ## blocking and bind them once at the end.
    pieces <- list()
    for(iAB in seq_along(lst)) {
        ab <- lst[[iAB]]
        abName <- names(lst)[iAB]
        for(iBlock in seq_along(ab$naming)) {
            ess <- ab$ess[[iBlock]]
            nodes <- names(ess)
            ## Index the per-node vectors by node name so rows stay aligned
            ## even if the stored vectors are ordered differently (essPT is
            ## stored sorted, for example).
            pieces[[length(pieces) + 1]] <- data.frame(
                model     = abName,
                blocking  = ab$naming[[iBlock]],
                timing    = ab$timing[[iBlock]],
                node      = nodes,
                groupSize = unname(ab$groupSizes[[iBlock]][nodes]),
                groupID   = unname(ab$groupIDs[[iBlock]][nodes]),
                sampler   = unname(ab$samplers[[iBlock]][nodes]),
                ess       = unname(ess),
                essPT     = unname(ab$essPT[[iBlock]][nodes]),
                row.names = NULL,
                stringsAsFactors = FALSE)
        }
    }
    if(length(pieces) == 0) {
        ## Preserve the empty-input shape: zero rows, all columns present.
        df <- data.frame(model=character(), blocking=character(), timing=numeric(),
                         node=character(), groupSize=numeric(), groupID=numeric(),
                         sampler=character(), ess=numeric(), essPT=numeric(),
                         stringsAsFactors=FALSE)
    } else {
        df <- do.call(rbind, pieces)
    }
    ## Scale timing and ESS to 10,000 iterations; the factor of 2 on ESS
    ## accounts for the first half of each chain being discarded as burn-in
    ## before ESS was computed.
    df$timePer10k <- df$timing * 10000/niter
    df$essPer10k <- df$ess * 10000/niter * 2
    df$Efficiency <- df$essPer10k / df$timePer10k
    ## 'mcmc' strips any '-suffix' from the blocking label
    ## (e.g. 'auto2-cut0.4' -> 'auto2').
    df$mcmc <- gsub('-.+', '', df$blocking)
    return(df)
}
plotABS <- function(df, xlimToMin=FALSE, together) {
## For each model, draw one horizontal strip per blocking scheme showing the
## per-node 'ess' and 'essPT' values (plotted as group-size labels), with a
## grey bar for relative runtime.  The best scheme (largest worst-node essPT)
## is highlighted in green.
## together: draw all models on one device layout (default TRUE for <= 5 models).
models <- unique(df$model)
nModels <- length(models)
if(missing(together)) together <- if(nModels <= 5) TRUE else FALSE
nVertPlots <- if(together) nModels*2 else nModels
xVarNames <- c('ess', 'essPT')
## Quoted so the same par() setup can be evaluated before either plot set.
parCmd <- quote(par(mfrow=c(nVertPlots,1),mar=c(1,0,1,0),tcl=-.1,mgp=c(3,0,0),cex.axis=.7))
if(together) { eval(parCmd) }
for(xVarName in xVarNames) {
if(!together) { eval(parCmd) }
## Largest (over models and blockings) of the per-blocking minimum x-value;
## used as the upper x-limit when xlimToMin=TRUE.
maxMinXVar<-0; for(mod in models) {dfMod<-df[df$model==mod,]; blks<-unique(dfMod$blocking); for(blk in blks) {maxMinXVar<-max(maxMinXVar,min(dfMod[dfMod$blocking==blk,xVarName]))}}
maxXVar <- if(xlimToMin) maxMinXVar else max(df[, xVarName])
xlim <- c(maxXVar*-0.05, maxXVar)
maxTiming <- max(df[, 'timing'])
for(mod in models) {
dfMod <- df[df$model==mod,]
blockings <- unique(dfMod$blocking)
nBlockings <- length(blockings)
## The 'best' blocking maximizes the minimum per-node essPT.
bestBlk<-''; bestEssPT<-0; for(blk in blockings) { if(min(dfMod[dfMod$blocking==blk,'essPT'])>bestEssPT) {bestEssPT<-min(dfMod[dfMod$blocking==blk,'essPT']); bestBlk<-blk} }
## Empty plot region; all content is added manually below.
plot(-100,-100,xlim=xlim,ylim=c(0,nBlockings+1),xlab='',ylab='',main=paste0(xVarName, ' for model ', mod))
for(iBlocking in 1:nBlockings) {
blocking <- blockings[iBlocking]
dfModBlock <- dfMod[dfMod$blocking==blocking,]
xVarValues <- dfModBlock[,xVarName]
groupSizes <- dfModBlock[,'groupSize']
timing <- dfModBlock[,'timing'][1] # only first element
timingOnXaxis <- timing/maxTiming * xlim[2]
yCoord <- nBlockings+1-iBlocking
## Grey bar proportional to runtime, then group-size labels at the x-values.
lines(x=c(0,timingOnXaxis), y=rep(yCoord,2), lty=1, lwd=2, col='lightgrey')
col <- if(blocking == bestBlk) 'green' else 'black'
text(x=xVarValues, y=yCoord, labels=groupSizes, cex=0.7, col=col)
col <- if(blocking == bestBlk) 'green' else 'blue'
text(x=xlim[1], y=yCoord, labels=blocking, col=col)
if(timing==maxTiming) text(xlim[2], yCoord+1, paste0('t = ',round(timing,1)))
}
}
}
}
printMinTimeABS <- function(df, round=TRUE, addAutoMax=TRUE, sortOutput=FALSE) {
## For each model and blocking scheme, print the single row corresponding to
## the worst (minimum essPT) node -- the mixing bottleneck.  Optionally
## append an 'autoMax' row: the most efficient of the auto* iterations.
## Returns the combined table invisibly.
namesToRemove <- intersect(c('groupID', 'sampler'), names(df))
for(name in namesToRemove) { ind <- which(names(df)==name); df <- df[, -ind] }
models <- unique(df$model)
cat('\n')
dfReturn <- data.frame()
for(mod in models) {
dfMod <- df[df$model == mod, ]
blockings <- unique(dfMod$blocking)
dfOut <- dfMod[numeric(0), ]
for(blk in blockings) {
dfModBlk <- dfMod[dfMod$blocking == blk, ]
## First row attaining the minimum essPT for this blocking.
ind <- which(dfModBlk$essPT == min(dfModBlk$essPT))[1]
dfOut[dim(dfOut)[1] + 1, ] <- dfModBlk[ind, ]
}
if(sortOutput) dfOut <- dfOut[sort(dfOut$essPT,index.return=TRUE)$ix, ]
dimnames(dfOut)[[1]] <- 1:(dim(dfOut)[1])
if(round) {
dfOut$timing <- round(dfOut$timing, 2)
dfOut$timePer10k <- round(dfOut$timePer10k, 2)
dfOut$ess <- round(dfOut$ess, 1)
dfOut$essPer10k <- round(dfOut$essPer10k, 1)
dfOut$essPT <- round(dfOut$essPT, 1)
dfOut$Efficiency <- round(dfOut$Efficiency, 1)
}
## Only add 'autoMax' when auto-blocking iterations are actually present.
if(addAutoMax && ('auto0' %in% blockings)) {
autoBlockings <- blockings[grepl('^auto', blockings)]
dfAuto <- dfOut[dfOut$blocking %in% autoBlockings,]
maxEffInd <- which(dfAuto$Efficiency == max(dfAuto$Efficiency))
nextInd <- dim(dfOut)[1] + 1
dfOut[nextInd,] <- dfAuto[maxEffInd,]
dfOut[nextInd, 'blocking'] <- dfOut[nextInd, 'mcmc'] <- 'autoMax'
}
print(dfOut)
cat('\n')
dfReturn <- rbind(dfReturn, dfOut)
}
return(invisible(dfReturn))
}
reduceDF <- function(df, addAutoMax=TRUE, sortOutput=TRUE, round=TRUE) {
    ## Reduce a long per-node results table to one summary row per MCMC
    ## scheme: the row of the least efficient node (the bottleneck node).
    ## Columns are renamed to S (effective samples per 10k iterations),
    ## C (cost: time per 10k iterations), and Efficiency = S / C.
    slim <- data.frame(mcmc = df$mcmc, node = df$node, S = df$essPer10k,
                       C = df$timePer10k, Efficiency = df$Efficiency,
                       stringsAsFactors = FALSE)
    rows <- list()
    for(scheme in unique(slim$mcmc)) {
        sub <- slim[slim$mcmc == scheme, ]
        worst <- which(sub$Efficiency == min(sub$Efficiency))[1]
        rows[[length(rows) + 1]] <- sub[worst, ]
    }
    dfOut <- do.call(rbind, c(list(slim[numeric(), ]), rows))
    ## Human-readable labels for the standard schemes.
    dfOut$mcmc[dfOut$mcmc == 'auto0'] <- 'All Scalar'
    dfOut$mcmc[dfOut$mcmc == 'all'] <- 'All Blocked'
    dfOut$mcmc[dfOut$mcmc == 'default'] <- 'Default'
    if(addAutoMax) {
        ## Keep only the final auto-blocking iteration, relabelled
        ## 'Auto-Blocking'; drop any remaining intermediate 'auto#' rows.
        autoLabels <- dfOut$mcmc[grepl('^auto', dfOut$mcmc)]
        lastAuto <- autoLabels[length(autoLabels)]
        dfOut$mcmc[dfOut$mcmc == lastAuto] <- 'Auto-Blocking'
        dfOut <- dfOut[!dfOut$mcmc %in% autoLabels, ]
    }
    if(sortOutput) dfOut <- dfOut[order(dfOut$Efficiency), ]
    rownames(dfOut) <- 1:(dim(dfOut)[1])
    if(round) {
        dfOut$S <- round(dfOut$S, 2)
        dfOut$C <- round(dfOut$C, 2)
        dfOut$Efficiency <- round(dfOut$Efficiency, 2)
    }
    return(dfOut)
}
|
## NOTE(review): 'sda' is assumed to be created by a testthat setup/helper
## file before these tests run -- confirm against tests/testthat/setup*.R.
test_that("outputs a data.frame", {
expect_s3_class(sda, "data.frame")
})
test_that("outputs the expected snapshot", {
## Snapshot comparison is only performed on R >= 4.
skip_if(r_version_is_older_than(4))
expect_snapshot(sda)
})
test_that("outputs like r2dii.analysis::target_sda()", {
# This integration (not unit) test checks if this function keeps its promise.
# It may overlap with other unit tests, but here we focus on dependencies
# Such a strong dependency on upstream packages makes this test fragile
skip_on_cran()
## Sort columns by name so the comparison ignores column order.
sort_df <- function(data) data[sort(names(data))]
# Pick small data for speed. "package::f()" is to emphasize integration
loanbook <- r2dii.data::loanbook_demo[125:150, ]
ald <- filter(r2dii.data::ald_demo[1:100, ], !is.na(.data$emission_factor))
scenario <- r2dii.data::co2_intensity_scenario_demo
expected <- r2dii.match::match_name(loanbook, ald) %>%
r2dii.match::prioritize() %>%
r2dii.analysis::target_sda(ald, co2_intensity_scenario = scenario) %>%
sort_df() %>%
vapply(typeof, character(1))
## Compare only the column types, not the values.
actual <- sda %>%
sort_df() %>%
vapply(typeof, character(1))
expect_equal(actual, expected)
})
| /tests/testthat/test-sda.R | permissive | jdhoffa/r2dii.plot | R | false | false | 1,128 | r | test_that("outputs a data.frame", {
expect_s3_class(sda, "data.frame")
})
test_that("outputs the expected snapshot", {
## Snapshot comparison is only performed on R >= 4.
skip_if(r_version_is_older_than(4))
expect_snapshot(sda)
})
test_that("outputs like r2dii.analysis::target_sda()", {
# This integration (not unit) test checks if this function keeps its promise.
# It may overlap with other unit tests, but here we focus on dependencies
# Such a strong dependency on upstream packages makes this test fragile
skip_on_cran()
## Sort columns by name so the comparison ignores column order.
sort_df <- function(data) data[sort(names(data))]
# Pick small data for speed. "package::f()" is to emphasize integration
loanbook <- r2dii.data::loanbook_demo[125:150, ]
ald <- filter(r2dii.data::ald_demo[1:100, ], !is.na(.data$emission_factor))
scenario <- r2dii.data::co2_intensity_scenario_demo
expected <- r2dii.match::match_name(loanbook, ald) %>%
r2dii.match::prioritize() %>%
r2dii.analysis::target_sda(ald, co2_intensity_scenario = scenario) %>%
sort_df() %>%
vapply(typeof, character(1))
## Compare only the column types, not the values.
actual <- sda %>%
sort_df() %>%
vapply(typeof, character(1))
expect_equal(actual, expected)
})
|
## vim:textwidth=80:expandtab:shiftwidth=2:softtabstop=2
library(oce)
context("Accessors")
## 'ctd' is a sample CTD dataset shipped with the oce package.
test_that("retrieve units", {
data("ctd")
## pre 20160430 expect_equal(ctd[["temperatureUnit"]], list(unit=expression(degree*C), scale="ITS-90"))
## pre 20160430 expect_equal(ctd[["temperature unit"]], expression(degree*C))
## pre 20160430 expect_equal(ctd[["temperature scale"]], "ITS-90")
## Units are retrievable as a full unit/scale list, or individually via
## the " unit" and " scale" accessor suffixes.
expect_equal(ctd[["temperatureUnit"]], list(unit=expression(degree*C), scale="IPTS-68"))
expect_equal(ctd[["temperature unit"]], expression(degree*C))
expect_equal(ctd[["temperature scale"]], "IPTS-68")
expect_equal(ctd[["pressureUnit"]], list(unit=expression(dbar), scale=""))
expect_equal(ctd[["pressure unit"]], expression(dbar))
expect_equal(ctd[["pressure scale"]], "")
})
test_that("alter units", {
data("ctd")
## Units written directly into the metadata slot are reflected by the
## [[ accessor.
ctd[["metadata"]]$units$salinity <- list(unit=expression(foo), scale="bar")
expect_equal(ctd[["salinityUnit"]], list(unit=expression(foo), scale="bar"))
})
| /tests/testthat/test_accessors.R | no_license | AnneMTreasure/oce | R | false | false | 1,089 | r | ## vim:textwidth=80:expandtab:shiftwidth=2:softtabstop=2
library(oce)
context("Accessors")
## 'ctd' is a sample CTD dataset shipped with the oce package.
test_that("retrieve units", {
data("ctd")
## pre 20160430 expect_equal(ctd[["temperatureUnit"]], list(unit=expression(degree*C), scale="ITS-90"))
## pre 20160430 expect_equal(ctd[["temperature unit"]], expression(degree*C))
## pre 20160430 expect_equal(ctd[["temperature scale"]], "ITS-90")
## Units are retrievable as a full unit/scale list, or individually via
## the " unit" and " scale" accessor suffixes.
expect_equal(ctd[["temperatureUnit"]], list(unit=expression(degree*C), scale="IPTS-68"))
expect_equal(ctd[["temperature unit"]], expression(degree*C))
expect_equal(ctd[["temperature scale"]], "IPTS-68")
expect_equal(ctd[["pressureUnit"]], list(unit=expression(dbar), scale=""))
expect_equal(ctd[["pressure unit"]], expression(dbar))
expect_equal(ctd[["pressure scale"]], "")
})
test_that("alter units", {
data("ctd")
## Units written directly into the metadata slot are reflected by the
## [[ accessor.
ctd[["metadata"]]$units$salinity <- list(unit=expression(foo), scale="bar")
expect_equal(ctd[["salinityUnit"]], list(unit=expression(foo), scale="bar"))
})
|
#' Retrieve historic weather data for the Netherlands
#'
#' This function retrieves historic weather data collected by the official KNMI weather stations. See spatialrisk::knmi_stations for a list of the official KNMI weather stations.
#'
#' @param startyear start year for historic weather data.
#' @param endyear end year for historic weather data.
#'
#' @return Data frame containing weather data and meta data for weather station locations.
#'
#' @format The returned data frame contains the following columns:
#' \itemize{
#' \item station = ID of measurement station;
#' \item date = Date;
#' \item FH = Hourly mean wind speed (in 0.1 m/s)
#' \item FX = Maximum wind gust (in 0.1 m/s) during the hourly division;
#' \item T = Temperature (in 0.1 degrees Celsius) at 1.50 m at the time of observation;
#' \item DR = Precipitation duration (in 0.1 hour) during the hourly division;
#' \item RH = Hourly precipitation amount (in 0.1 mm) (-1 for <0.05 mm);
#' \item city = City where the measurement station is located;
#' \item lon = Longitude of station (crs = 4326);
#' \item lat = Latitude of station (crs = 4326).
#' }
#'
#' @import dplyr
#' @import fs
#' @importFrom lubridate year
#' @importFrom lubridate today
#' @importFrom lubridate ymd
#' @importFrom utils setTxtProgressBar
#' @importFrom utils txtProgressBar
#' @importFrom utils data
#' @importFrom utils download.file
#' @import vroom
#'
#' @author Martin Haringa
#'
#' @examples
#' \dontrun{
#' knmi_historic_data(2015, 2019)
#' }
#'
#' @export
knmi_historic_data <- function(startyear, endyear){
  ## Download hourly KNMI weather-station archives for startyear..endyear,
  ## read them into one data frame, and join station metadata (city, lon,
  ## lat) from spatialrisk::knmi_stations.
  ##
  ## startyear, endyear: first and last calendar year to retrieve (the KNMI
  ##                     archives start in 1951).
  ## Returns a data frame with one row per station-hour plus station metadata.
  ##
  ## Changes vs. the previous version: 1:length() loops replaced by
  ## seq_along() (safe for zero-length input), an on.exit() handler so the
  ## temporary download directory is removed even when a download or parse
  ## fails (previously it leaked on error), and startyear/endyear ordering
  ## is validated up front.

  # get reference data
  utils::data("knmi_stations", envir = environment())
  id_stations <- knmi_stations$station

  if ( startyear < 1951 ) { stop("Historic weather data before the year 1951 is not available.") }
  if( endyear > lubridate::year(lubridate::today()) ) { stop("Year end should not be greater than the current year.") }
  if ( startyear > endyear ) { stop("startyear should not be greater than endyear.") }

  ## KNMI publishes the hourly archives in decade-sized zip files
  ## (1951-1960, 1961-1970, ...); work out which decades are needed.
  historic_levels <- cut(startyear:endyear,
                         breaks = c(1951, seq(1960, 2200, by = 10)),
                         labels = paste0(seq(1951, 2191, by = 10), "-", seq(1960, 2200, by = 10)),
                         include.lowest = TRUE, dig.lab = 5)
  periods <- unique(as.character(historic_levels))

  # Create a new directory for the downloads; remove it again on exit,
  # even if something below fails.
  tmp <- fs::dir_create(fs::file_temp())
  on.exit(try(fs::dir_delete(tmp), silent = TRUE), add = TRUE)

  # Set progress bar
  pb <- utils::txtProgressBar(max = length(id_stations), style = 3)

  # Download one zip per station per decade into the temporary directory
  for (i in seq_along(id_stations)){
    utils::setTxtProgressBar(pb, i)
    for (j in seq_along(periods)){
      new_file <- fs::file_create(fs::path(tmp, paste0("knmi_", id_stations[i], "_", periods[j], ".zip")))
      knmi_url <- paste0("https://cdn.knmi.nl/knmi/map/page/klimatologie/gegevens/uurgegevens/uurgeg_", id_stations[i], "_", periods[j], ".zip")
      utils::download.file(knmi_url, new_file, quiet = TRUE)
    }
  }
  files <- fs::dir_ls(tmp, glob = "*zip")

  # Select existing files: downloads smaller than 1 kB are treated as
  # placeholders for station/decade combinations without data.
  files_exist <- files[as.logical(fs::file_size(files) > "1KB")]

  # Read files into R, skipping each file's 31-line header block; the first
  # parsed row is a units row and is dropped with [-1,].
  suppressMessages(
    df <- vroom::vroom(files_exist, skip = 31, delim = ",",
                       col_select = list(station = 1, date = YYYYMMDD, HH, DD, FH, FF, FX, T, T10, TD, DR, RH))[-1,]
  )

  df$date <- lubridate::ymd(df$date)
  ## Decade files can contain years outside the requested window; trim them.
  df_selection <- dplyr::filter(df, lubridate::year(date) >= startyear, lubridate::year(date) <= endyear)

  # Add metadata
  df_meta <- dplyr::left_join(df_selection, knmi_stations[, c("station", "city", "lon", "lat")], by = "station")
  return(df_meta)
}
| /R/knmi_historic.R | no_license | emailhy/spatialrisk | R | false | false | 3,582 | r | #' Retrieve historic weather data for the Netherlands
#'
#' This function retrieves historic weather data collected by the official KNMI weather stations. See spatialrisk::knmi_stations for a list of the official KNMI weather stations.
#'
#' @param startyear start year for historic weather data.
#' @param endyear end year for historic weather data.
#'
#' @return Data frame containing weather data and meta data for weather station locations.
#'
#' @format The returned data frame contains the following columns:
#' \itemize{
#' \item station = ID of measurement station;
#' \item date = Date;
#' \item FH = Hourly mean wind speed (in 0.1 m/s)
#' \item FX = Maximum wind gust (in 0.1 m/s) during the hourly division;
#' \item T = Temperature (in 0.1 degrees Celsius) at 1.50 m at the time of observation;
#' \item DR = Precipitation duration (in 0.1 hour) during the hourly division;
#' \item RH = Hourly precipitation amount (in 0.1 mm) (-1 for <0.05 mm);
#' \item city = City where the measurement station is located;
#' \item lon = Longitude of station (crs = 4326);
#' \item lat = Latitude of station (crs = 4326).
#' }
#'
#' @import dplyr
#' @import fs
#' @importFrom lubridate year
#' @importFrom lubridate today
#' @importFrom lubridate ymd
#' @importFrom utils setTxtProgressBar
#' @importFrom utils txtProgressBar
#' @importFrom utils data
#' @importFrom utils download.file
#' @import vroom
#'
#' @author Martin Haringa
#'
#' @examples
#' \dontrun{
#' knmi_historic_data(2015, 2019)
#' }
#'
#' @export
knmi_historic_data <- function(startyear, endyear){
  ## Download hourly KNMI weather-station archives for startyear..endyear,
  ## read them into one data frame, and join station metadata (city, lon,
  ## lat) from spatialrisk::knmi_stations.
  ##
  ## startyear, endyear: first and last calendar year to retrieve (the KNMI
  ##                     archives start in 1951).
  ## Returns a data frame with one row per station-hour plus station metadata.
  ##
  ## Changes vs. the previous version: 1:length() loops replaced by
  ## seq_along() (safe for zero-length input), an on.exit() handler so the
  ## temporary download directory is removed even when a download or parse
  ## fails (previously it leaked on error), and startyear/endyear ordering
  ## is validated up front.

  # get reference data
  utils::data("knmi_stations", envir = environment())
  id_stations <- knmi_stations$station

  if ( startyear < 1951 ) { stop("Historic weather data before the year 1951 is not available.") }
  if( endyear > lubridate::year(lubridate::today()) ) { stop("Year end should not be greater than the current year.") }
  if ( startyear > endyear ) { stop("startyear should not be greater than endyear.") }

  ## KNMI publishes the hourly archives in decade-sized zip files
  ## (1951-1960, 1961-1970, ...); work out which decades are needed.
  historic_levels <- cut(startyear:endyear,
                         breaks = c(1951, seq(1960, 2200, by = 10)),
                         labels = paste0(seq(1951, 2191, by = 10), "-", seq(1960, 2200, by = 10)),
                         include.lowest = TRUE, dig.lab = 5)
  periods <- unique(as.character(historic_levels))

  # Create a new directory for the downloads; remove it again on exit,
  # even if something below fails.
  tmp <- fs::dir_create(fs::file_temp())
  on.exit(try(fs::dir_delete(tmp), silent = TRUE), add = TRUE)

  # Set progress bar
  pb <- utils::txtProgressBar(max = length(id_stations), style = 3)

  # Download one zip per station per decade into the temporary directory
  for (i in seq_along(id_stations)){
    utils::setTxtProgressBar(pb, i)
    for (j in seq_along(periods)){
      new_file <- fs::file_create(fs::path(tmp, paste0("knmi_", id_stations[i], "_", periods[j], ".zip")))
      knmi_url <- paste0("https://cdn.knmi.nl/knmi/map/page/klimatologie/gegevens/uurgegevens/uurgeg_", id_stations[i], "_", periods[j], ".zip")
      utils::download.file(knmi_url, new_file, quiet = TRUE)
    }
  }
  files <- fs::dir_ls(tmp, glob = "*zip")

  # Select existing files: downloads smaller than 1 kB are treated as
  # placeholders for station/decade combinations without data.
  files_exist <- files[as.logical(fs::file_size(files) > "1KB")]

  # Read files into R, skipping each file's 31-line header block; the first
  # parsed row is a units row and is dropped with [-1,].
  suppressMessages(
    df <- vroom::vroom(files_exist, skip = 31, delim = ",",
                       col_select = list(station = 1, date = YYYYMMDD, HH, DD, FH, FF, FX, T, T10, TD, DR, RH))[-1,]
  )

  df$date <- lubridate::ymd(df$date)
  ## Decade files can contain years outside the requested window; trim them.
  df_selection <- dplyr::filter(df, lubridate::year(date) >= startyear, lubridate::year(date) <= endyear)

  # Add metadata
  df_meta <- dplyr::left_join(df_selection, knmi_stations[, c("station", "city", "lon", "lat")], by = "station")
  return(df_meta)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elementwise.R
\docType{class}
\name{Logistic-class}
\alias{Logistic-class}
\alias{.Logistic}
\alias{Logistic}
\alias{to_numeric,Logistic-method}
\alias{sign_from_args,Logistic-method}
\alias{is_atom_convex,Logistic-method}
\alias{is_atom_concave,Logistic-method}
\alias{is_incr,Logistic-method}
\alias{is_decr,Logistic-method}
\alias{graph_implementation,Logistic-method}
\title{The Logistic class.}
\usage{
Logistic(x)
\S4method{to_numeric}{Logistic}(object, values)
\S4method{sign_from_args}{Logistic}(object)
\S4method{is_atom_convex}{Logistic}(object)
\S4method{is_atom_concave}{Logistic}(object)
\S4method{is_incr}{Logistic}(object, idx)
\S4method{is_decr}{Logistic}(object, idx)
\S4method{graph_implementation}{Logistic}(object, arg_objs, size,
data = NA_real_)
}
\arguments{
\item{x}{An \linkS4class{Expression} or numeric constant.}
\item{object}{A \linkS4class{Logistic} object.}
\item{values}{A list of arguments to the atom.}
\item{idx}{An index into the atom.}
\item{arg_objs}{A list of linear expressions for each argument.}
\item{size}{A vector with two elements representing the size of the resulting expression.}
\item{data}{A list of additional data required by the atom.}
}
\description{
This class represents the elementwise operation \eqn{\log(1 + e^x)}.
This is a special case of log(sum(exp)) that evaluates to a vector rather than to a scalar,
which is useful for logistic regression.
}
\section{Methods (by generic)}{
\itemize{
\item \code{to_numeric}: Evaluates \code{e^x} elementwise, adds one, and takes the natural logarithm.
\item \code{sign_from_args}: The atom is positive.
\item \code{is_atom_convex}: The atom is convex.
\item \code{is_atom_concave}: The atom is not concave.
\item \code{is_incr}: The atom is weakly increasing.
\item \code{is_decr}: The atom is not weakly decreasing.
\item \code{graph_implementation}: The graph implementation of the atom.
}}
\section{Slots}{
\describe{
\item{\code{x}}{An \linkS4class{Expression} or numeric constant.}
}}
| /man/Logistic-class.Rd | permissive | vishalbelsare/CVXR | R | false | true | 2,111 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elementwise.R
\docType{class}
\name{Logistic-class}
\alias{Logistic-class}
\alias{.Logistic}
\alias{Logistic}
\alias{to_numeric,Logistic-method}
\alias{sign_from_args,Logistic-method}
\alias{is_atom_convex,Logistic-method}
\alias{is_atom_concave,Logistic-method}
\alias{is_incr,Logistic-method}
\alias{is_decr,Logistic-method}
\alias{graph_implementation,Logistic-method}
\title{The Logistic class.}
\usage{
Logistic(x)
\S4method{to_numeric}{Logistic}(object, values)
\S4method{sign_from_args}{Logistic}(object)
\S4method{is_atom_convex}{Logistic}(object)
\S4method{is_atom_concave}{Logistic}(object)
\S4method{is_incr}{Logistic}(object, idx)
\S4method{is_decr}{Logistic}(object, idx)
\S4method{graph_implementation}{Logistic}(object, arg_objs, size,
data = NA_real_)
}
\arguments{
\item{x}{An \linkS4class{Expression} or numeric constant.}
\item{object}{A \linkS4class{Logistic} object.}
\item{values}{A list of arguments to the atom.}
\item{idx}{An index into the atom.}
\item{arg_objs}{A list of linear expressions for each argument.}
\item{size}{A vector with two elements representing the size of the resulting expression.}
\item{data}{A list of additional data required by the atom.}
}
\description{
This class represents the elementwise operation \eqn{\log(1 + e^x)}.
This is a special case of log(sum(exp)) that evaluates to a vector rather than to a scalar,
which is useful for logistic regression.
}
\section{Methods (by generic)}{
\itemize{
\item \code{to_numeric}: Evaluates \code{e^x} elementwise, adds one, and takes the natural logarithm.
\item \code{sign_from_args}: The atom is positive.
\item \code{is_atom_convex}: The atom is convex.
\item \code{is_atom_concave}: The atom is not concave.
\item \code{is_incr}: The atom is weakly increasing.
\item \code{is_decr}: The atom is not weakly decreasing.
\item \code{graph_implementation}: The graph implementation of the atom.
}}
\section{Slots}{
\describe{
\item{\code{x}}{An \linkS4class{Expression} or numeric constant.}
}}
|
context("List data package identifiers")
## Integration test: the EDI API call is replayed through a recorded vcr
## cassette, so the test runs offline against the stored HTTP fixture.
testthat::test_that("list_data_package_identifiers() works", {
vcr::use_cassette("list_data_package_identifiers", {
res <- list_data_package_identifiers("knb-lter-ble")
})
## Expect a non-empty numeric vector of package identifiers.
expect_equal(class(res), "numeric")
expect_true(length(res) > 0)
})
| /tests/testthat/test_list_data_package_identifiers.R | permissive | ropensci/EDIutils | R | false | false | 294 | r | context("List data package identifiers")
## Integration test: the EDI API call is replayed through a recorded vcr
## cassette, so the test runs offline against the stored HTTP fixture.
testthat::test_that("list_data_package_identifiers() works", {
vcr::use_cassette("list_data_package_identifiers", {
res <- list_data_package_identifiers("knb-lter-ble")
})
## Expect a non-empty numeric vector of package identifiers.
expect_equal(class(res), "numeric")
expect_true(length(res) > 0)
})
|
# CART scratch script: fit and inspect an rpart classification tree for 'play'.
# method = one of "anova", "poisson", "class" or "exp".
# If method is missing then the routine tries to make an intelligent guess:
# survival y -> "exp"; 2-column y -> "poisson"; factor y -> "class";
# otherwise -> "anova".
# Tuning parameters (rpart.control): cp, minsplit, minbucket, maxdepth,
# maxcompete, xval, ...
# NOTE(review): the first control_1 below was immediately overwritten by the
# second, so it is kept only as a commented-out reference.
#control_1 = rpart.control(cp = 0.005, minsplit=3, minbucket = 2)
control_1 = rpart.control(cp = 0.005, minsplit=10, minbucket = 5)
# x=TRUE / model=TRUE keep the predictor matrix and model frame in the fit
# (TRUE spelled out: T is a reassignable variable, not a reserved word);
# 'game' is assumed to be loaded by an earlier script in this project.
m1 = rpart(play ~ . , data=game, x=TRUE, model=TRUE, control=control_1)
m1
m1$frame                  # per-node split/count/fit details
rpart.plot(m1, nn=TRUE)   # tree diagram with node numbers
m1$model                  # the retained model frame
class(m1$model)
summary(m1)
print(m1)
# Fancier tree rendering via rattle
library(rattle)
library(RColorBrewer)
fancyRpartPlot(m1)
| /52-CART/zz-test.R | no_license | DUanalytics/rAnalytics | R | false | false | 755 | r |
#method = one of "anova", "poisson", "class" or "exp".
#If method is missing then the routine tries to make an intelligent guess. If y is a survival object, then method = "exp" is assumed, if y has 2 columns then method = "poisson" is assumed, if y is a factor then method = "class" is assumed, otherwise method = "anova" is
#x
#Different Control Parameters
#cp, minsplit, minbucket, maxdepth, maxcompete, xval,
control_1 = rpart.control(cp = 0.005, minsplit=3, minbucket = 2)
control_1 = rpart.control(cp = 0.005, minsplit=10, minbucket = 5)
m1 = rpart(play ~ . , data=game, x=T, model=T, control=control_1)
m1
m1$frame
rpart.plot(m1, nn=T)
m1$model
class(m1$model)
summary(m1)
print(m1)
#
library(rattle)
library(RColorBrewer)
fancyRpartPlot(m1)
|
## Caching the Inverse of a Matrix
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" object that caches its inverse.
## Returns a list of accessor functions that close over `x` and the
## cached inverse; the cache is reset whenever a new matrix is stored.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL   # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setinverse = function(inverse) cached <<- inverse,
    getinverse = function() cached
  )
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then the cachesolve should retrieve the inverse
## from the cache.
## Compute (and cache) the inverse of the special "matrix" produced by
## makeCacheMatrix. On a cache hit the stored inverse is returned after
## a message; otherwise the inverse is computed with solve(), stored
## back into the object, and returned. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | wangjiezhe/ProgrammingAssignment2 | R | false | false | 1,083 | r | ## Caching the Inverse of a Matrix
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then the cachesolve should retrieve the inverse
## from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
## The three functions below are designed to cache and retrieve the inverse of a matrix.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve function retrieves the inverse from the cache.
## The function makeCacheMatrix can only be executed if the matrix supplied is square.
## The function checkSquare is being called to test if the matrix supplied is a square matrix.
## If not, this will result in an error meassage and the function makeCacheMatrix will not be executed
## We assume that the matrix supplied is invertible (i.e. determinant <>0)
## Stop with an error unless M is square (same number of rows and
## columns); returns NULL invisibly on success. Used as a guard before
## building/updating a cached-inverse matrix object.
checkSquare <- function(M) {
  # Inverting the test removes the original empty "then" branch.
  if (nrow(M) != ncol(M)) {
    stop("error, your matrix should be square!")
  }
  invisible(NULL)
}
## This function creates a special "matrix" object that can cache its inverse
## The object created contains 4 functions: set, get, setInverse, getInverse
## Build a caching "matrix" wrapper around a *square* matrix x.
## checkSquare() aborts construction (and later set() calls) for
## non-square input. The returned object is a list with 4 accessors:
## set, get, setInverse, getInverse.
makeCacheMatrix <- function(x = matrix()) {
  checkSquare(x)
  Inv <- NULL
  list(
    set = function(M) {
      checkSquare(M)   # refuse to store a non-square matrix
      x <<- M
      Inv <<- NULL     # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setInverse = function(solve) Inv <<- solve,
    getInverse = function() Inv
  )
}
## This function computes the inverse of the special "matrix" returned by the function makeCacheMatrix
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve function retrieves the inverse from the cache.
## Return the inverse of the caching "matrix" object x, computing it at
## most once. A cache hit emits a message and short-circuits; a miss
## computes the inverse via solve() (extra args forwarded), stores it in
## the object, and returns it.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  hit <- x$getInverse()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  fresh <- solve(x$get(), ...)
  x$setInverse(fresh)
  fresh
}
| /cachematrix.R | no_license | elineheemskerk/ProgrammingAssignment2 | R | false | false | 1,771 | r | ## The three functions below are designed to cache and retrieve the inverse of a matrix.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve function retrieves the inverse from the cache.
## The function makeCacheMatrix can only be executed if the matrix supplied is square.
## The function checkSquare is being called to test if the matrix supplied is a square matrix.
## If not, this will result in an error meassage and the function makeCacheMatrix will not be executed
## We assume that the matrix supplied is invertible (i.e. determinant <>0)
checkSquare <- function(M) {
if (nrow(M) == ncol(M)) {}
else {stop ("error, your matrix should be square!")}
}
## This function creates a special "matrix" object that can cache its inverse
## The object created contains 4 functions: set, get, setInverse, getInverse
makeCacheMatrix <- function(x = matrix()) {
checkSquare(x)
Inv <- NULL
set <- function(M){
checkSquare(M)
x <<- M
Inv <<- NULL
}
get <- function() x
setInverse <- function(solve) Inv <<- solve
getInverse <- function() Inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by the function makeCacheMatrix
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve function retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
Inv <- x$getInverse()
if (!is.null(Inv)) {
message("getting cached data")
return(Inv)
}
data <- x$get()
Inv <- solve(data, ...)
x$setInverse(Inv)
Inv
}
|
# Treatment assignment helper lives in a sibling script.
source(here("R/analyze/assign_treatment.R"), local = TRUE)
# Propensity-score matching estimate of the average treatment effect
# (ATE) on citation counts.
#
# crimes:    tract-level panel with (at least) census_tract, group_date
#            and citations columns.
# citations: tract/month citations passed to assign_treatment().
#
# Returns a one-row tibble with a single column `ate`.
newey_ate <- function(crimes, citations) {
  # One propensity score per census tract, built by the sourced helpers.
  propensity_scores <-
    assign_treatment(citations_tract_month = citations) %>%
    calc_propensity_score(crime_tract_month = crimes)
  # Only post-cutoff (after 2018-06-28) observations enter the outcome data.
  cutoff <- ymd(20180628)
  outcome_data <- crimes %>%
    dplyr::filter(group_date > cutoff) %>%
    inner_join(propensity_scores, c("census_tract")) %>%
    rename(p = propensity,
           y = citations,
           d = group_date)
  # D is presumably the treatment indicator attached by
  # assign_treatment() -- TODO confirm against assign_treatment.R.
  treated_outcomes <- outcome_data %>% dplyr::filter(D) %>% select(p, d ,y)
  control_outcomes <- outcome_data %>% dplyr::filter(!D) %>% select(p, d, y)
  # For each treated row, build a synthetic control outcome: the mean of
  # same-date control outcomes weighted by inverse squared propensity
  # distance (closer controls get more weight). In the pmap formula,
  # .x is the first column (p) and .y the second (d).
  treated_comparisons <- pmap_dbl(treated_outcomes, ~{
    control_outcomes %>%
      dplyr::filter(d == .y) %>%
      mutate(w = (p - .x)^-2) %$%
      weighted.mean(y, w)
  })
  # ATE = mean gap between treated outcomes and their matched controls.
  tibble(
    ate = mean(treated_outcomes$y - treated_comparisons)
  )
}
## Difference-in-differences regression: crime reports on the treatment
## dummy D, a post-cutoff period dummy t, their interaction (the DiD
## term) and census-tract fixed effects. Returns the fitted lm object.
did <- function(crime, citations) {
  panel <- crime %>%
    inner_join(assign_treatment(citations), c("census_tract")) %>%
    mutate(
      D = as.numeric(D),
      t = as.numeric(group_date > GLOBALS$cutoff)
    )
  lm(reports ~ D*t + census_tract, data = panel)
}
| /R/analyze/model_fitting.R | no_license | nateybear/austin-homelessness-map | R | false | false | 1,201 | r | source(here("R/analyze/assign_treatment.R"), local = TRUE)
newey_ate <- function(crimes, citations) {
propensity_scores <-
assign_treatment(citations_tract_month = citations) %>%
calc_propensity_score(crime_tract_month = crimes)
cutoff <- ymd(20180628)
outcome_data <- crimes %>%
dplyr::filter(group_date > cutoff) %>%
inner_join(propensity_scores, c("census_tract")) %>%
rename(p = propensity,
y = citations,
d = group_date)
treated_outcomes <- outcome_data %>% dplyr::filter(D) %>% select(p, d ,y)
control_outcomes <- outcome_data %>% dplyr::filter(!D) %>% select(p, d, y)
treated_comparisons <- pmap_dbl(treated_outcomes, ~{
control_outcomes %>%
dplyr::filter(d == .y) %>%
mutate(w = (p - .x)^-2) %$%
weighted.mean(y, w)
})
tibble(
ate = mean(treated_outcomes$y - treated_comparisons)
)
}
did <- function(crime, citations) {
treatments <- assign_treatment(citations)
model_data <- crime %>%
inner_join(treatments, c("census_tract")) %>%
mutate(D = as.numeric(D),
t = as.numeric(group_date > GLOBALS$cutoff))
lm(reports ~ D*t + census_tract, data = model_data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_anal.R
\name{FeatureCorrelation}
\alias{FeatureCorrelation}
\title{Main function to calculate correlation of all other feature to a given feature name}
\usage{
FeatureCorrelation(mbSetObj, dist.name, taxrank, taxa, variable, datatype,
shotfeat, shotgunid)
}
\arguments{
\item{mbSetObj}{Input the name of the mbSetObj.}
}
\description{
This function calculates the correlation of all other features with a given feature.
}
\author{
Jeff Xia \email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
| /MicrobiomeAnalystR/man/FeatureCorrelation.Rd | no_license | FADHLyemen/MicrobiomeAnalystR | R | false | true | 605 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_anal.R
\name{FeatureCorrelation}
\alias{FeatureCorrelation}
\title{Main function to calculate correlation of all other feature to a given feature name}
\usage{
FeatureCorrelation(mbSetObj, dist.name, taxrank, taxa, variable, datatype,
shotfeat, shotgunid)
}
\arguments{
\item{mbSetObj}{Input the name of the mbSetObj.}
}
\description{
This function calculates the correlation of all other features with a given feature.
}
\author{
Jeff Xia \email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
|
# --- Loops ------------------------------------------------------------
vec <- 1:5
vec <- seq(1, 5, 1)   # equivalent to 1:5; overwrites the line above
# A for loop iterates directly over the elements of a vector.
for (i in vec) {
  cat(i, "\n")
}
# Iteration works over character vectors too (kraje = "countries").
kraje <- c("Polska", "Grecja", "Polska", "Polska")
for (i in kraje) {
  cat(i, "\n")
}
# Looping over a data frame iterates over its *columns*; the results of
# the loop body are discarded here.
for (i in iris) {
  i[1]+i[2]
}
# lapply() does the same but collects each iteration's result in a list.
lapply(iris, function(i){
  i[1]+i[2]
})
# Preallocated result vector filled by an explicit loop ...
wynik <- numeric(length(vec))   # wynik = "result"
for (i in vec) {
  wynik[i] <- i
}
# ... versus the functional equivalents: lapply returns a list,
wynik <- lapply(vec, function(i){
  i
})
# sapply simplifies the result to a vector when it can.
wynik <- sapply(vec, function(i){
  i
})
# repeat loops forever until break(); this prints at most 10 draws.
x <- 0
repeat({
  i <- runif(1)
  x <- x + 1
  if (x > 10) break()
  cat(i, "\n")
})
# replicate() re-evaluates an expression a fixed number of times.
wynik <- replicate(100, {
  runif(1)
})
# -----------
# conditional statements
i <- 0.2
# Scalar if/else (prints "mniej" = "less" or "wiecej" = "more").
if (i %in% c(0.5, 0.7))
  cat("mniej") else
    cat("wiecej")
# Vectorised conditions: first a logical vector ...
czy_wieksze_niz_5_8 <- iris$Sepal.Length > 5.8
# ... then ifelse(), which picks elementwise from two vectors
# (the shorter `no` argument is recycled).
ifelse(iris$Sepal.Length > 5.8,
       iris$Sepal.Length,
       c(5.8, 1))
# Classic significance-star example.
p <- c(0.000,0.1)
ifelse(p < 0.05,
       "*",
       "")
# switch() dispatches on a single string; the final unnamed argument is
# the default branch ("Cos innego" = "something else").
x <- "Polska4"
switch (x,
  Polska = cat("Hura"),
  Polska2 = cat("Nie hura"),
  Polska3 = cat("Nie nie hura"),
  cat("Cos innego")
)
## A minimal re-implementation of switch(): collect the ... arguments
## into a list and extract the element whose name (or position) matches
## expr.
switch2 <- function(expr, ...) {
  list(...)[[expr]]
}
switch2("jablko", jablko = 3)   # demo: selects the "jablko" element
####################
# functions
# An anonymous function, defined and immediately discarded ...
function() 1
# ... and an anonymous function applied immediately to an argument.
(function(x) {
  x^2
})(2)
# kwadrat = "square": a named function.
kwadrat <- function(x) {
  x^2
}
kwadrat(2)
kwadrat(x = 2)
# potega = "power": k has a default, delta does not. Because the body
# uses delta, calls that omit it fail when the body is evaluated.
potega <- function(x, k = 2, delta) {
  x^k - delta
}
potega(x=2, 3)        # 3 matches k by position; errors: delta is missing
potega(x=2, delta=3)  # k keeps its default of 2
potega(2, 3)          # errors: delta is missing
potega(2, 3)
# ... forwards any extra arguments to an inner call.
potega2 <- function(czesc, ...) {   # czesc = "hello/greeting"
  cat(czesc)
  potega(2, ...)
}
potega2(3, delta=0, czesc="Hello")
# Forwarding ... to plot() while fixing one argument.
moj_plot <- function(title="Czesc", ...) {
  plot(main = title, ...)
}
iris[1,2,drop=FALSE]   # drop = FALSE keeps the data-frame structure
# Capture ... as a list to inspect what was passed in.
co_w_srodku <- function(...) {   # co_w_srodku = "what's inside"
  list(...)
}
co_w_srodku(1, 2, 3, Polska = 4)
# An anonymous function in lapply ...
lapply(1:100, function(i) {
  i^2
})
# ... is equivalent to passing a named function.
tmp <- function(i) {
  i^2
}
lapply(1:100, tmp)
class(kwadrat)   # functions are objects of class "function"
# ecdf() returns a *function* (the empirical CDF), which can be called.
x <- iris$Sepal.Length
dystrbuanta <- ecdf(x)   # dystrbuanta = "CDF"
dystrbuanta(5)
ecdf(x)(5)        # the same, without the intermediate variable
plot(dystrbuanta)
# Functions are ordinary values: they can be stored in lists.
funkcje <- list(sin, cos, tan)
lapply(funkcje, function(x)x(1))
# Even `[` is a function that can be passed to lapply.
wektor <- c("A", "B")
lapply(1:2, `[`, wektor)   # NOTE(review): this calls `[`(1, wektor), i.e. indexes the numbers 1 and 2 by "A","B"; wektor[i] was probably intended
wektor[2]
`[`(wektor, 2)   # functional form equivalent to wektor[2]
`for`()          # `for` is a language construct; calling it like this errors
| /materialy/Wyklad03.R | no_license | ultramargarine/ProgramowanieWizualizacja2017 | R | false | false | 1,939 | r | vec <- 1:5
vec <- seq(1, 5, 1)
for (i in vec) {
cat(i, "\n")
}
kraje <- c("Polska", "Grecja", "Polska", "Polska")
for (i in kraje) {
cat(i, "\n")
}
for (i in iris) {
i[1]+i[2]
}
lapply(iris, function(i){
i[1]+i[2]
})
wynik <- numeric(length(vec))
for (i in vec) {
wynik[i] <- i
}
wynik <- lapply(vec, function(i){
i
})
wynik <- sapply(vec, function(i){
i
})
x <- 0
repeat({
i <- runif(1)
x <- x + 1
if (x > 10) break()
cat(i, "\n")
})
wynik <- replicate(100, {
runif(1)
})
# -----------
# instrukcje warunkowe
i <- 0.2
if (i %in% c(0.5, 0.7))
cat("mniej") else
cat("wiecej")
czy_wieksze_niz_5_8 <- iris$Sepal.Length > 5.8
ifelse(iris$Sepal.Length > 5.8,
iris$Sepal.Length,
c(5.8, 1))
p <- c(0.000,0.1)
ifelse(p < 0.05,
"*",
"")
x <- "Polska4"
switch (x,
Polska = cat("Hura"),
Polska2 = cat("Nie hura"),
Polska3 = cat("Nie nie hura"),
cat("Cos innego")
)
switch2 <- function(expr, ...){
lista <- list(...)
lista[[expr]]
}
switch2("jablko", jablko = 3)
####################
# funkcje
function() 1
(function(x) {
x^2
})(2)
kwadrat <- function(x) {
x^2
}
kwadrat(2)
kwadrat(x = 2)
potega <- function(x, k = 2, delta) {
x^k - delta
}
potega(x=2, 3)
potega(x=2, delta=3)
potega(2, 3)
potega(2, 3)
potega2 <- function(czesc, ...) {
cat(czesc)
potega(2, ...)
}
potega2(3, delta=0, czesc="Hello")
moj_plot <- function(title="Czesc", ...) {
plot(main = title, ...)
}
iris[1,2,drop=FALSE]
co_w_srodku <- function(...) {
list(...)
}
co_w_srodku(1, 2, 3, Polska = 4)
lapply(1:100, function(i) {
i^2
})
tmp <- function(i) {
i^2
}
lapply(1:100, tmp)
class(kwadrat)
x <- iris$Sepal.Length
dystrbuanta <- ecdf(x)
dystrbuanta(5)
ecdf(x)(5)
plot(dystrbuanta)
funkcje <- list(sin, cos, tan)
lapply(funkcje, function(x)x(1))
wektor <- c("A", "B")
lapply(1:2, `[`, wektor)
wektor[2]
`[`(wektor, 2)
`for`()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/support_operations.R
\name{support_add_communication_to_case}
\alias{support_add_communication_to_case}
\title{Adds additional customer communication to an AWS Support case}
\usage{
support_add_communication_to_case(caseId, communicationBody,
ccEmailAddresses, attachmentSetId)
}
\arguments{
\item{caseId}{The AWS Support case ID requested or returned in the call. The case ID
is an alphanumeric string formatted as shown in this example:
case-\emph{12345678910-2013-c4c1d2bf33c5cf47}}
\item{communicationBody}{[required] The body of an email communication to add to the support case.}
\item{ccEmailAddresses}{The email addresses in the CC line of an email to be added to the
support case.}
\item{attachmentSetId}{The ID of a set of one or more attachments for the communication to add
to the case. Create the set by calling AddAttachmentsToSet}
}
\description{
Adds additional customer communication to an AWS Support case. Use the
\code{caseId} parameter to identify the case to which to add communication.
You can list a set of email addresses to copy on the communication by
using the \code{ccEmailAddresses} parameter. The \code{communicationBody} value
contains the text of the communication.
\itemize{
\item You must have a Business or Enterprise support plan to use the AWS
Support API.
\item If you call the AWS Support API from an account that does not have a
Business or Enterprise support plan, the
\code{SubscriptionRequiredException} error message appears. For
information about changing your support plan, see \href{https://aws.amazon.com/premiumsupport/}{AWS Support}.
}
}
\section{Request syntax}{
\preformatted{svc$add_communication_to_case(
caseId = "string",
communicationBody = "string",
ccEmailAddresses = list(
"string"
),
attachmentSetId = "string"
)
}
}
\keyword{internal}
| /paws/man/support_add_communication_to_case.Rd | permissive | sanchezvivi/paws | R | false | true | 1,896 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/support_operations.R
\name{support_add_communication_to_case}
\alias{support_add_communication_to_case}
\title{Adds additional customer communication to an AWS Support case}
\usage{
support_add_communication_to_case(caseId, communicationBody,
ccEmailAddresses, attachmentSetId)
}
\arguments{
\item{caseId}{The AWS Support case ID requested or returned in the call. The case ID
is an alphanumeric string formatted as shown in this example:
case-\emph{12345678910-2013-c4c1d2bf33c5cf47}}
\item{communicationBody}{[required] The body of an email communication to add to the support case.}
\item{ccEmailAddresses}{The email addresses in the CC line of an email to be added to the
support case.}
\item{attachmentSetId}{The ID of a set of one or more attachments for the communication to add
to the case. Create the set by calling AddAttachmentsToSet}
}
\description{
Adds additional customer communication to an AWS Support case. Use the
\code{caseId} parameter to identify the case to which to add communication.
You can list a set of email addresses to copy on the communication by
using the \code{ccEmailAddresses} parameter. The \code{communicationBody} value
contains the text of the communication.
\itemize{
\item You must have a Business or Enterprise support plan to use the AWS
Support API.
\item If you call the AWS Support API from an account that does not have a
Business or Enterprise support plan, the
\code{SubscriptionRequiredException} error message appears. For
information about changing your support plan, see \href{https://aws.amazon.com/premiumsupport/}{AWS Support}.
}
}
\section{Request syntax}{
\preformatted{svc$add_communication_to_case(
caseId = "string",
communicationBody = "string",
ccEmailAddresses = list(
"string"
),
attachmentSetId = "string"
)
}
}
\keyword{internal}
|
#---- This file documents datasets inside the package pjpv2020.01
#- Included datasets:
#- 0) df_prueba : es solo un df para probar mis funciones. Tiene columnas de diversas clases (integer, bool etc...) es para probar las funciones. Quizas faltaria ponerle una columna con vectores o dfs
#- 1) pob_mun_1996_2019: cifras de poblacion municipal del INE. Tiene datos de 1996, (1998-2019) y para 3 categorias de la v. población : {Total, Hombres y Mujeres}
#- 2) cod_mun_INE: códigos municipales del INE (sacados del fichero anterior). Tiene 2 variables que indican cuando se "creo" el municipio (year_first) y si el municipio aun existe (year_last).
#- 3) cod_prov_INE: códigos provinciales del INE (sacados del fichero anterior)
library(tidyverse)
#- 1) INE municipal population figures. Here the file is only loaded; it
#-    was originally built by "./INE_pob-muni/01_arreglar_datos_pob-muni_v1.R",
#-    a copy of which is kept at the end of this script.
ruta_al_fichero <- here::here("data-raw", "pob_mun_1996_2019.csv")
df <- readr::read_csv(ruta_al_fichero)
pjp_data_pob_mun_1996_2019 <- df
#usethis::use_data(pjp_data_pob_mun_1996_2019, overwrite = TRUE) #- the data are saved under /data/
#- 2) build a table with the municipal codes
df <- pjp_data_pob_mun_1996_2019 %>% select(-c(poblacion, pob_values))
#- year_first / year_last mark the first and last year each municipality
#- appears in the panel (i.e. when it was "created" and whether it still exists).
df <- df %>% group_by(INECodMuni) %>%
  mutate(year_first = min(year, na.rm = TRUE)) %>%
  mutate(year_last = max(year, na.rm = TRUE)) %>% ungroup()
#- keep only the most recent row per municipality
df <- df %>% group_by(INECodMuni) %>% filter(year == max(year, na.rm = TRUE)) %>% distinct() %>% ungroup() %>% select (-year)
pjp_data_cod_mun_INE <- df
#usethis::use_data(pjp_data_cod_mun_INE, overwrite = TRUE) #- the data are saved under /data/
#- E.g. the municipality "Darrical" only appears in 1996, so its
#- year_first = 1996 and year_last = 1996.
#- E.g. municipality 41904 "Palmar de Troya, El" only appears in 2019
#- because it was created that year, so its year_first = 2019.
#- 3) build the table of province codes
df <- pjp_data_cod_mun_INE %>% select(INECodProv, INECodProv.n, INECodCCAA, INECodCCAA.n) %>% distinct()
pjp_data_cod_prov_INE <- df
#usethis::use_data(pjp_data_cod_prov_INE, overwrite = TRUE) #- the data are saved under /data/
#- 4) crosswalk between Eurostat codes (for Spain) and INE codes
library(eurostat)
aa <- eurostat::regional_changes_2016 %>% filter(str_detect(code13, "^ES")) #- Eurostat codes for Spanish territorial units
#- They had to be linked by hand: NUTS-1 are groupings of CCAA (regions),
#- NUTS-2 are the CCAA and NUTS-3 the provinces -- except in the Balearic
#- and Canary Islands, where the NUTS-3 units are islands!!!
df <- rio::import(here::here("data-raw", "eurostat-codes-for-spain_a.xlsx"))
pjp_data_cod_eurostat_INE <- df
#usethis::use_data(pjp_data_cod_eurostat_INE, overwrite = TRUE) #- the data are saved under /data/
################# --------------------------------- ##############################
#- Below is the original script once used to create "pob_mun_1996_2019.csv".
#- It does not run from here because of path problems.
#----- The original script lives in: "./INE_pob-muni/01_arreglar_datos_pob-muni_v1.R" --------------------------------
#- INE data: population by municipality.
#- Revision of the Municipal Register as of 1 January 2019 (approved by RD 743/2019) http://www.ine.es/dyngs/INEbase/es/operacion.htm?c=Estadistica_C&cid=1254736177011&menu=resultados&idp=1254734710990
#- From that page, "Detalle municipal" links to a zip file (https://www.ine.es/pob_xls/pobmun.zip) with one Excel file per year from 1996 to 2018.
#- Plan: unpack the archive into the `tmp` directory.
library(tidyverse)
library(fs)
# create a temporary directory and unzip pobmun.zip ------------------------------------
# fs::dir_delete("tmp")
path_tmp_dir <- "tmp/pobmun"
fs::dir_create(path_tmp_dir)
unzip(zipfile = here::here("datos_in", "pobmun.zip"),
      exdir = path_tmp_dir)
# check how many files there are and their names -----------------------
files <- dir(path_tmp_dir) #- 23 files: years 1996 to 2019 (1997 is missing from the INE site)
#- Problem: the files share the same name pattern but the year has only 2 digits
# rename the files -------------------------
files_new <- str_remove(files, "^pobmun")
# Map a 2-digit year string to 4 digits: "9x" -> "199x" (the panel starts
# in 1996), anything else -> "20xx". Base startsWith() replaces the
# stringr dependency, and ifelse() makes the helper work on whole
# character vectors as well as the scalars map_chr() feeds it.
ff_empieza_por <- function(xx) {
  ifelse(startsWith(xx, "9"), paste0("19", xx), paste0("20", xx))
}
files_new <- map_chr(files_new, ff_empieza_por)
files_rr <- dir(path_tmp_dir, full.names = TRUE) #- full paths are needed to rename the files
files_rr_new <- paste0(path_tmp_dir, "/", files_new) #- the new path + name
# rename each file so its name is just the year
file_move(files_rr, files_rr_new) #- ok, the files are now called 1996.xlsx .... 2005.xls, ... 2019.xlsx
rm(files, files_new, files_rr, files_rr_new)
#- load ALL the files; BUT their structure (thanks, INE!!!!) is not homogeneous:
rutas_a_ficheros <- fs::dir_ls(here::here(path_tmp_dir))
df_list <- map(rutas_a_ficheros, readxl::read_excel, skip = 1) #-
#- INE quirks that have to be fixed:
#- quirk_1) the 1998 file has no introductory header line
df_list[[2]] <- readxl::read_excel(rutas_a_ficheros[2])
#- quirk_2) the 2012, 2013, 2014, 2015 and 2017 files had 2 introductory header lines
zz_mal <- c(16, 17, 18, 19, 21)
for (ii in zz_mal) {
  df_list[[ii]] <- readxl::read_excel(rutas_a_ficheros[ii], skip = 2)
}
#- you can check with map(df, names) that they are almost right now
#- quirk_3) the 3 years starting with 19.. (1996, 1998 and 1999) have one variable less: the province name is missing
zz_mal <- c(1,2,3)
for (ii in zz_mal) {
  df_list[[ii]] <- df_list[[ii]] %>% mutate(PROVINCIAx = NA_character_) %>% select(1, PROVINCIAx, everything())
}
rm(zz_mal)
#- map(df, names)
#- quirk_4) the 2002, 2007 and 2009 files contain rows with province totals;
#- moreover, the 2016 file has a row with the total population of Spain
#- df_list[[6]] <- df_list[[6]] %>% filter(!is.na(CMUN)) #- fixed further below, where all files can be handled at once
#- add a column with the year
#- NOTE(review): 1:length(df_list) would misbehave on an empty list; seq_along() is the safer idiom.
zz_anyos <- c(1996, 1998:2019)
for (ii in 1:length(df_list)) {
  df_list[[ii]] <- df_list[[ii]] %>% mutate(year = zz_anyos[ii])
}
#- unify the column names
names_ok <- c("CPRO", "PROVINCIA", "CMUN", "MUNICIPIO", "pob_Total", "pob_Hombres", "pob_Mujeres", "year")
for (ii in 1:length(df_list)) {
  names(df_list[[ii]]) <- names_ok
}
#- merge the 23 files
#- NOTE(review): growing df_ok with bind_rows in a loop is O(n^2); bind_rows(df_list) does it in one call.
df_ok <- df_list[[1]]
for (ii in 2:length(df_list)) {
  df_ok <- bind_rows(df_ok, df_list[[ii]])
}
#- indeed, the merge can be done much faster:
df_ok_2 <- map2_df(df_list, zz_anyos, ~ mutate(.x, year2 = .y)) #- https://stackoverflow.com/questions/42028710/add-new-variable-to-list-of-data-frames-with-purrr-and-mutate-from-dplyr
#- fix quirk no. 4): total rows have no municipal code, so drop NA CMUN
df_ok <- df_ok %>% filter(!is.na(CMUN))
df_ok_2 <- df_ok_2 %>% filter(!is.na(CMUN))
#- clean up the environment
fs::dir_delete(path_tmp_dir) #- delete the temporary directory
zz <- c("df_list", "df_ok")
rm(list= ls()[!(ls() %in% zz)]) #- removes everything except zz
#---------------------------------------------------------------------------------------
#- build the LONG-format file: one row per municipality/year/population category
df <- pivot_longer(df_ok,
                   cols = starts_with("pob_"),
                   names_to = "poblacion",
                   names_prefix = "pob_",
                   values_to = "pob_values")
df <- df %>% select(- PROVINCIA) %>% rename(MUNICIPIO.orig = MUNICIPIO)
#- yet another quirk: sometimes (in the 1998 file) CMUN has 4 digits (the last one has to be dropped)
df <- df %>% mutate(CMUN = str_extract(CMUN, "^.{3}")) #- keep the first 3 digits
#- 5-digit municipal code: province code + municipal code
df <- df %>% mutate(INECodMuni = paste0(CPRO, CMUN))
#- attach the province names (taken from the latest year's file)
df_ultimo <- df_list[[length(df_list)]]
nombre_prov <- df_ultimo %>% select(CPRO, PROVINCIA) %>% distinct()
df <- left_join(df, nombre_prov, by = c("CPRO" = "CPRO"))
zz <- df %>% filter(is.na(PROVINCIA)) #- this is how quirk no. 4 was spotted (already fixed)
#- now attach the municipality names
nombre_muni_2019 <- df_ultimo %>% select(CPRO, CMUN, MUNICIPIO) %>% distinct()
df <- left_join(df, nombre_muni_2019, by = c("CPRO" = "CPRO", "CMUN" = "CMUN"))
#- but some CMUN will still lack a name (created/merged municipalities etc.)
zz <- df %>% filter(is.na(MUNICIPIO)) #- this is how quirk no. 4 was spotted
zz <- df %>% filter(MUNICIPIO.orig != MUNICIPIO) #- many rows, because many municipalities changed their name
zz_1 <- df %>% filter(is.na(MUNICIPIO)) %>% distinct(MUNICIPIO.orig, MUNICIPIO)#- these are the 5 municipalities that disappeared
#- 1) Darrical: disappeared in 1997 (absorbed into Alcolea, province of Almeria)
#- 2) Cesuras: disappeared in 2014 (merged into Oza-Cesuras, province of A Coruña)
#- 3) Oza dos Ríos: disappeared in 2014 (merged into Oza-Cesuras, province of A Coruña)
#- 4) Cerdedo: disappeared in 2017 (merged into Cerdedo-Cotobade, province of Pontevedra)
#- 5) Cotobade: disappeared in 2017 (merged into Cerdedo-Cotobade, province of Pontevedra)
#- for these 5 municipalities we use the name they had in the year they
#- disappeared; since they never changed names, MUNICIPIO.orig suffices.
zz_1 <- df %>% filter(is.na(MUNICIPIO)) #- these are the 5 municipalities that disappeared
df <- df %>% mutate(MUNICIPIO = ifelse(is.na(MUNICIPIO), MUNICIPIO.orig, MUNICIPIO))
#- attach the CCAA (region) code/name and a province-capital flag
library(spanishRpoblacion)
INE_ccaa <- INE_padron_muni_96_17 %>% select(INECodProv, INECodCCAA, NombreCCAA) %>% distinct()
df <- left_join(df, INE_ccaa, by = c("CPRO"= "INECodProv"))
names(df)
df <- df %>% rename(INECodProv = CPRO) %>%
  rename(NombreProv = PROVINCIA) %>%
  rename(NombreMuni = MUNICIPIO) %>%
  select(-c(CMUN, MUNICIPIO.orig))
INE_capitales_prov <- INE_padron_muni_96_17 %>% select(INECodMuni, capital_prov) %>% distinct() %>% filter(capital_prov == 1) %>% pull(INECodMuni)
df <- df %>% mutate(capital_prov = ifelse(INECodMuni %in% INE_capitales_prov, 1, 0))
df <- df %>% select(year, INECodMuni, NombreMuni, capital_prov,INECodProv, NombreProv, INECodCCAA, NombreCCAA, poblacion, pob_values)
#- SO it seems it is DONE!!!!
#- remember: INECodMuni carries the 2019 name (except for the 5 vanished
#- municipalities, which keep the name they had when they disappeared)
#- now add the CCAA (regional) capitals
#zz <- df %>% filter(capital_prov == 1) %>% select(-c(year, poblacion, pob_values)) %>% distinct() %>% select(INECodMuni, NombreMuni)
#zz <- df %>% filter(NombreMuni %in% c("Vitoria-Gasteiz", "Santiago de Compostela", "Mérida"))
#- in the Canary Islands, Tenerife and Las Palmas share the capital status
capitales_CCAA <- c(Sevilla = "41091", Zaragoza = "50297", Oviedo = "33044", Palma = "07040", Tenerife = "38038", Canarias = "35016", Santander = "39075", Toledo = "45168", Valladolid = "47186", Barcelona = "08019", Madrid = "28079", Murcia = "30030", Valencia = "46250", Merida = "06083", Santiago = "15078", Logroño = "26089", Pamplona = "31201", Vitoria = "01059")
capitales_CCAA <- as.data.frame(capitales_CCAA) %>% mutate(capital_CCAA = 1) %>% mutate(capitales_CCAA = as.character(capitales_CCAA))
str(capitales_CCAA)
df <- left_join(df,capitales_CCAA, by = c("INECodMuni" = "capitales_CCAA") )
df <- df %>% mutate(capital_CCAA = ifelse(is.na(capital_CCAA), 0 , 1))
#- rename the variables
df <- df %>% rename(INECodMuni.n = NombreMuni) %>%
  rename(INECodProv.n = NombreProv) %>%
  rename(INECodCCAA.n = NombreCCAA)
#- reorder the variables
df <- df %>% select(year, INECodMuni, INECodMuni.n, capital_prov, capital_CCAA, INECodProv, INECodProv.n, INECodCCAA, INECodCCAA.n, everything())
#- SAVE the municipal population data file ---------------------------------------------
#- SAVE the municipal population data file ---------------------------------------------
#readr::write_csv(df, here::here("datos_out", "pob_mun_1996_2019.csv"))
#readr::write_rds(df, here::here("datos_out", "pob_mun_1996_2019.rds"))
#- clean up tmp
# fs::dir_delete("tmp")
| /data-raw/crear_dfs_a_exportar.R | permissive | perezp44/pjpv2020.01 | R | false | false | 12,503 | r | #---- This file documents datasets inside the package pjpv2020.01
#- Included datasets:
#- 0) df_prueba : es solo un df para probar mis funciones. Tiene columnas de diversas clases (integer, bool etc...) es para probar las funciones. Quizas faltaria ponerle una columna con vectores o dfs
#- 1) pob_mun_1996_2019: cifras de poblacion municipal del INE. Tiene datos de 1996, (1998-2019) y para 3 categorias de la v. población : {Total, Hombres y Mujeres}
#- 2) cod_mun_INE: códigos municipales del INE (sacados del fichero anterior). Tiene 2 variables que indican cuando se "creo" el municipio (year_first) y si el municipio aun existe (year_last).
#- 3) cod_prov_INE: códigos provinciales del INE (sacados del fichero anterior)
library(tidyverse)
#- 1) cifras de población municipal del INE. Aquí solo cargo el fichero. En su día creé el fichero con el script "./INE_pob-muni/01_arreglar_datos_pob-muni_v1.R". Está al final de este script
ruta_al_fichero <- here::here("data-raw", "pob_mun_1996_2019.csv")
df <- readr::read_csv(ruta_al_fichero)
pjp_data_pob_mun_1996_2019 <- df
#usethis::use_data(pjp_data_pob_mun_1996_2019, overwrite = TRUE) #- los datos se graban en /data/
#- 2) voy a crear un fichero con los códigos municipales
df <- pjp_data_pob_mun_1996_2019 %>% select(-c(poblacion, pob_values))
df <- df %>% group_by(INECodMuni) %>%
mutate(year_first = min(year, na.rm = TRUE)) %>%
mutate(year_last = max(year, na.rm = TRUE)) %>% ungroup()
df <- df %>% group_by(INECodMuni) %>% filter(year == max(year, na.rm = TRUE)) %>% distinct() %>% ungroup() %>% select (-year)
pjp_data_cod_mun_INE <- df
#usethis::use_data(pjp_data_cod_mun_INE, overwrite = TRUE) #- los datos se graban en /data/
#- Por ejemplo, el municipio "Darrical" solo aparece el año 1996, asíque su first_year = 1996 y su year_last = 1996
#- Por ejemplo, el municipio 41904 "Palmar de Troya, El" solo aparece en 2019 xq se creó ese año, asi que su year_first = 2019
#- 3) Voy a crear dodigos provinciales
df <- pjp_data_cod_mun_INE %>% select(INECodProv, INECodProv.n, INECodCCAA, INECodCCAA.n) %>% distinct()
pjp_data_cod_prov_INE <- df
#usethis::use_data(pjp_data_cod_prov_INE, overwrite = TRUE) #- los datos se graban en /data/
#- 4) en lace de códigos eurostat (para ESP) con INE
library(eurostat)
aa <- eurostat::regional_changes_2016 %>% filter(str_detect(code13, "^ES")) #-codigos eurostat para entidades territoriales españoals
#- las tuve que enlazar a mano: NUTS-1 son agrupaciones de CCAA; NUTS-2 son las CCAA y NUTS-3 las provincias, excepto en Baleares y canarias que NUTS-3 son islas!!!
df <- rio::import(here::here("data-raw", "eurostat-codes-for-spain_a.xlsx"))
pjp_data_cod_eurostat_INE <- df
#usethis::use_data(pjp_data_cod_eurostat_INE, overwrite = TRUE) #- los datos se graban en /data/
################# --------------------------------- ##############################
#- Hacia abajo está el script original que utilice en su dia para crear el fichero "pob_mun_1996_2019.csv"
#- Aquí no funciona x pbs con las rutas,
#----- Script original está en: "./INE_pob-muni/01_arreglar_datos_pob-muni_v1.R" --------------------------------
#- Datos INE: poblacion por municipios
#- Revisión del Padrón Municipal a 1 de enero de 2019 (aprobadas por RD 743/2019) http://www.ine.es/dyngs/INEbase/es/operacion.htm?c=Estadistica_C&cid=1254736177011&menu=resultados&idp=1254734710990
#- De está dirección me fui a "Detalle municipal" y me baje un fichero comprimido: https://www.ine.es/pob_xls/pobmun.zip donde hay un fichero Excel para cada año, desde 1996 hasta 2018
#- Plan: voy a descomprimir el archivo en el directorio `tmp`
library(tidyverse)
library(fs)
# creo directorio temporal y descomprimo el archivo pobmun.zip ------------------------------------
# fs::dir_delete("tmp")
path_tmp_dir <- "tmp/pobmun"
fs::dir_create(path_tmp_dir)
unzip(zipfile = here::here("datos_in", "pobmun.zip"),
exdir = path_tmp_dir)
# ver cuantos ficheros hay y su nombre -----------------------
files <- dir(path_tmp_dir) #- 23 ficheros: años 1996 a 2019 (falta 1997, no esta en el INE)
#- Pb: Resulta que los ficheros se llaman igual pero el año tiene solo 2 digitos
# cambiar el nombre de los ficheros -------------------------
files_new <- str_remove(files, "^pobmun")
# Prefix a 2-digit year (stripped from a file name like "96.xlsx") with its
# century: names starting with "9" become 19xx, everything else 20xx.
#
# xx : a single character string such as "96.xlsx" or "05.xls".
# Returns the same string with a 4-digit year, e.g. "1996.xlsx".
#
# Uses base grepl() instead of stringr::str_detect() so the helper has no
# extra dependency, and returns the value explicitly rather than relying on
# the invisible value of the final assignment.
ff_empieza_por <- function(xx) {
  if (grepl("^9", xx)) {
    paste0("19", xx)
  } else {
    paste0("20", xx)
  }
}
files_new <- map_chr(files_new, ff_empieza_por)
files_rr <- dir(path_tmp_dir, full.names = TRUE) #- para poder cambiarles el nombre me hace falta la ruta
files_rr_new <- paste0(path_tmp_dir, "/", files_new) #- la nueva ruta-nombre
# cambio nombre de fichero para ponerle como nombre solo el año
file_move(files_rr, files_rr_new) #- ok las filas ya se llamas 1996.xlsx .... 2005.xls, ... 2019.xlsx
rm(files, files_new, files_rr, files_rr_new)
#- cargo TODOS los ficheros, PERO resulta que su estructura (gracias INE!!!!) no es homogenea:
rutas_a_ficheros <- fs::dir_ls(here::here(path_tmp_dir))
df_list <- map(rutas_a_ficheros, readxl::read_excel, skip = 1) #-
#- He de arreglar las pifias del INE:
#- pifia_1) el año 1998 no tiene linea introductoria en la cabecera
df_list[[2]] <- readxl::read_excel(rutas_a_ficheros[2])
#- pifia_2) los años 2012, 2013, 2014, 2015 y 2017 tenian 2 lineas introductorias en la cabecera,
zz_mal <- c(16, 17, 18, 19, 21)
for (ii in zz_mal) {
df_list[[ii]] <- readxl::read_excel(rutas_a_ficheros[ii], skip = 2)
}
#- puedes verlo con: map(df, names) que ya estan casi bien
#- pifia_3) los 3 años q empiezan x 19..: 1996, 1998 y 1999 tienen una variable menos, les falta el nombre de la Provincia
zz_mal <- c(1,2,3)
for (ii in zz_mal) {
df_list[[ii]] <- df_list[[ii]] %>% mutate(PROVINCIAx = NA_character_) %>% select(1, PROVINCIAx, everything())
}
rm(zz_mal)
#- map(df, names)
#- pifia_4) los ficheros de 2002, 2007, 2009 tienen filas con los totales de las provincias
#- ademas, el fichero de 2016 tiene una fila para el total de la poblacion española
#- df_list[[6]] <- df_list[[6]] %>% filter(!is.na(CMUN)) #- lo arreglare luego mas abajo xq lo puedo hacer todos a la vez
#- Añadir una columna con el año
zz_anyos <- c(1996, 1998:2019)
# Add the file's year to every annual data frame.
# seq_along() is the safe idiom: 1:length(df_list) would yield c(1, 0)
# if the list were ever empty.
for (ii in seq_along(df_list)) {
  df_list[[ii]] <- df_list[[ii]] %>% mutate(year = zz_anyos[ii])
}
#- unificar los nombres de las columnas
names_ok <- c("CPRO", "PROVINCIA", "CMUN", "MUNICIPIO", "pob_Total", "pob_Hombres", "pob_Mujeres", "year")
# Give every annual data frame the same, unified column names.
# seq_along() instead of 1:length() guards against an empty list.
for (ii in seq_along(df_list)) {
  names(df_list[[ii]]) <- names_ok
}
#- Merge the 23 annual files into a single data frame.
#- bind_rows() accepts a list of data frames directly, which is equivalent to
#- (but much faster than) growing df_ok with one bind_rows() call per file.
df_ok <- bind_rows(df_list)
#- la verdad es que se puede fusionar bastante mas rápido
df_ok_2 <- map2_df(df_list, zz_anyos, ~ mutate(.x, year2 = .y)) #- https://stackoverflow.com/questions/42028710/add-new-variable-to-list-of-data-frames-with-purrr-and-mutate-from-dplyr
#- arreglo la pifia nº 4)
df_ok <- df_ok %>% filter(!is.na(CMUN))
df_ok_2 <- df_ok_2 %>% filter(!is.na(CMUN))
#- limpiar el entorno
fs::dir_delete(path_tmp_dir) #- borrar el directorio temporal
zz <- c("df_list", "df_ok")
rm(list= ls()[!(ls() %in% zz)]) #- remueve todo excepto zz
#---------------------------------------------------------------------------------------
#- voy a hacer el fichero LARGO
df <- pivot_longer(df_ok,
cols = starts_with("pob_"),
names_to = "poblacion",
names_prefix = "pob_",
values_to = "pob_values")
df <- df %>% select(- PROVINCIA) %>% rename(MUNICIPIO.orig = MUNICIPIO)
#- aun me di cuenta de otra pifia: alghuna veces (en el fichero de 1998) el CMUN tiene 4 codigos (hay que quitar el ultimo)
df <- df %>% mutate(CMUN = str_extract(CMUN, "^.{3}")) #- me quedo con los 3 primeros digitos
#- codigo municipal de 5 digitos
df <- df %>% mutate(INECodMuni = paste0(CPRO, CMUN))
#- pongo nombre de las provincias (uso el fichero del ultimo año)
df_ultimo <- df_list[[length(df_list)]]
nombre_prov <- df_ultimo %>% select(CPRO, PROVINCIA) %>% distinct()
df <- left_join(df, nombre_prov, by = c("CPRO" = "CPRO"))
zz <- df %>% filter(is.na(PROVINCIA)) #- asi es como me di cuenta de la pifia nº 4 (que ya esta arreglada)
#- ahora vamos a ir poniendo el nombre de los municipios
nombre_muni_2019 <- df_ultimo %>% select(CPRO, CMUN, MUNICIPIO) %>% distinct()
df <- left_join(df, nombre_muni_2019, by = c("CPRO" = "CPRO", "CMUN" = "CMUN"))
#- pero ahora tb habran CMUN sin su nombre (x creacion, separacionn etc...)
zz <- df %>% filter(is.na(MUNICIPIO)) #- asi es como me di cuenta de la pifia nº 4
zz <- df %>% filter(MUNICIPIO.orig != MUNICIPIO) #- hay muchos xq hay muchos municipios que han cambiado de nombre
zz_1 <- df %>% filter(is.na(MUNICIPIO)) %>% distinct(MUNICIPIO.orig, MUNICIPIO)#- estos son los 5 municipios que han desaparecido
#- 1) Darrical: desaparecio en 1997 (se incorporo a Alcolea, provincia de Almeria)
#- 2) Cesuras: desaparecio en 2014 (se incorporo a Oza-Cesuras, provincia de A Coruña)
#- 3) Oza dos Ríos: desaparecio en 2014 (se incorporo a Oza-Cesuras, provincia de A Coruña)
#- 4) Cerdedo: desaparecio en 2017 (se incorporo a Cerdedo-Cotobade, provincia de Pontevedra)
#- 5) Cotobade: desaparecio en 2017 (se incorporo a Cerdedo-Cotobade, provincia de Pontevedra)
#- a estos 5 municipios les voy a poner su nombre en el año que desaparecieron, pero como no cambiaron de nombre no hay que calentarse el cap, con poner NOMBRe.orig suficiente
zz_1 <- df %>% filter(is.na(MUNICIPIO)) #- estos son los 5 municipios que han desaparecido
df <- df %>% mutate(MUNICIPIO = ifelse(is.na(MUNICIPIO), MUNICIPIO.orig, MUNICIPIO))
#- quiero poner el código y nombre provincial, y si es capital de provincia
library(spanishRpoblacion)
INE_ccaa <- INE_padron_muni_96_17 %>% select(INECodProv, INECodCCAA, NombreCCAA) %>% distinct()
df <- left_join(df, INE_ccaa, by = c("CPRO"= "INECodProv"))
names(df)
df <- df %>% rename(INECodProv = CPRO) %>%
rename(NombreProv = PROVINCIA) %>%
rename(NombreMuni = MUNICIPIO) %>%
select(-c(CMUN, MUNICIPIO.orig))
INE_capitales_prov <- INE_padron_muni_96_17 %>% select(INECodMuni, capital_prov) %>% distinct() %>% filter(capital_prov == 1) %>% pull(INECodMuni)
df <- df %>% mutate(capital_prov = ifelse(INECodMuni %in% INE_capitales_prov, 1, 0))
df <- df %>% select(year, INECodMuni, NombreMuni, capital_prov,INECodProv, NombreProv, INECodCCAA, NombreCCAA, poblacion, pob_values)
#- PUES parece que YA LO TENGO!!!!
#- recuerda que INECodMuni es el nombre de 2019 (salvo para los 5 pueblos que han desaparecido, que es el nombre que tenian cuando desaparecieron)
#- voy a añadir las capitales de CCAA
#zz <- df %>% filter(capital_prov == 1) %>% select(-c(year, poblacion, pob_values)) %>% distinct() %>% select(INECodMuni, NombreMuni)
#zz <- df %>% filter(NombreMuni %in% c("Vitoria-Gasteiz", "Santiago de Compostela", "Mérida"))
#- en Canarias tienen capitalidad compartida Tenerife y Las palmas
capitales_CCAA <- c(Sevilla = "41091", Zaragoza = "50297", Oviedo = "33044", Palma = "07040", Tenerife = "38038", Canarias = "35016", Santander = "39075", Toledo = "45168", Valladolid = "47186", Barcelona = "08019", Madrid = "28079", Murcia = "30030", Valencia = "46250", Merida = "06083", Santiago = "15078", Logroño = "26089", Pamplona = "31201", Vitoria = "01059")
capitales_CCAA <- as.data.frame(capitales_CCAA) %>% mutate(capital_CCAA = 1) %>% mutate(capitales_CCAA = as.character(capitales_CCAA))
str(capitales_CCAA)
df <- left_join(df,capitales_CCAA, by = c("INECodMuni" = "capitales_CCAA") )
df <- df %>% mutate(capital_CCAA = ifelse(is.na(capital_CCAA), 0 , 1))
#- voy a renombrar las variables
df <- df %>% rename(INECodMuni.n = NombreMuni) %>%
rename(INECodProv.n = NombreProv) %>%
rename(INECodCCAA.n = NombreCCAA)
#- voy a reordenar las variables
df <- df %>% select(year, INECodMuni, INECodMuni.n, capital_prov, capital_CCAA, INECodProv, INECodProv.n, INECodCCAA, INECodCCAA.n, everything())
#- GUARDO el fichero de datos de pob_munin ---------------------------------------------
#- GUARDO el fichero de datos de pob_munin ---------------------------------------------
#readr::write_csv(df, here::here("datos_out", "pob_mun_1996_2019.csv"))
#readr::write_rds(df, here::here("datos_out", "pob_mun_1996_2019.rds"))
#- limpio tmp
# fs::dir_delete("tmp")
|
# Compute "direct minus implied" differences for triads of linked forms.
#
# df    : long data frame; from the usage below it must contain columns
#         s1/s2 (scores), t1/t2 (form titles), and nm (pair key "A-B",
#         with A and B in sorted order) -- TODO confirm against emp_prep.R.
# N     : number of pairs (nodes) to analyze.
# eqfun : summary function applied to each form's scores (mean, quantile, ...).
# Nsort : if TRUE keep the N most frequent pairs; otherwise sample N pairs
#         from those with more than 10 rows (uses the RNG, so seed-dependent).
# ...   : extra arguments forwarded to eqfun (e.g. probs= for quantile).
#
# Returns a data.frame with one row per (pair, third-title) triad:
# pair key, third title, direct difference, implied (triad) difference,
# and the two sample sizes.
triad<-function(df,
N=20,
eqfun=mean,
Nsort=TRUE,
...) {
# standardize all scores by their pooled standard deviation
sc<-c(df$s1,df$s2)
sig<-sd(sc,na.rm=TRUE)
df$s1<-df$s1/sig
df$s2<-df$s2/sig
##
# tabulate pair frequencies; keep the N most frequent pairs, or (when
# Nsort=FALSE) a random permutation of the pairs with more than 10 rows
tab<-table(df$nm)
tab<-sort(tab,decreasing=TRUE)
if (!Nsort) {
tab<-tab[tab>10]
tab<-sample(tab)
}
tab<-tab[1:N]
##
# the distinct form titles appearing across the selected pairs
nodes<-names(tab)
titles<-strsplit(nodes,'-')
titles<-unique(unlist(titles))
##
# f: direct difference of summary scores between the two titles of one pair
f<-function(x,eqfun,...) {
tt<-c(x$t1,x$t2)
ss<-c(x$s1,x$s2)
nms<-strsplit(unique(x$nm),'-')[[1]]
m1<-eqfun(ss[tt==nms[1]],...)
m2<-eqfun(ss[tt==nms[2]],...)
m1-m2
}
out<-list()
for (i in 1:length(nodes)) {
print(rep(i,100)) # progress marker (debug output)
x1<-df[df$nm==nodes[i],]
base.titles<-unique(c(x1$t1,x1$t2))
# try every third title j that forms a triad with this pair
for (j in titles) {
print(j) # debug output
if (!(j %in% base.titles)) {
# the two linking pairs (base1, j) and (base2, j); nm keys are sorted
nm2<-paste(sort(c(base.titles[1],j)),collapse="-")
x2<-df[df$nm==nm2,]
nm3<-paste(sort(c(base.titles[2],j)),collapse="-")
x3<-df[df$nm==nm3,]
if (nrow(x2)>0 & nrow(x3)>0) {
del.base<-f(x1,eqfun=eqfun,...)
del2<-f(x2,eqfun=eqfun,...)
del3<-f(x3,eqfun=eqfun,...)
# orient the indirect differences so their sum implies base1-base2;
# NOTE(review): the comparisons are asymmetric (> vs <) -- presumably
# intentional given the sorted nm keys, but worth confirming
sign2<-ifelse(base.titles[1]>j,-1,+1)
sign3<-ifelse(base.titles[2]<j,-1,+1)
del.triad<-sign2*del2+sign3*del3
out[[paste(i,j)]]<-c(nodes[i],j,del.base,del.triad,nrow(x1),(nrow(x2)+nrow(x3)))
}
}
}
}
##
# assemble one row per triad; c() made everything character, so columns
# 2:6 are re-coerced to numeric. NOTE(review): column 2 is the title j,
# so this assumes titles are numeric codes -- TODO confirm.
x<-data.frame(do.call("rbind",out))
for (i in 2:ncol(x)) x[,i]<-as.numeric(x[,i])
x
}
# Compare empirical vs simulated triad results using mean-based summaries.
# Both data sources leave their data frame in an object named 'df',
# so each is copied to its own name right after loading.
source("emp_prep.R")
emp<-df
load("net_sim.Rdata")
sim<-df
##########################################3
##mean
##empirical
x1<-triad(emp,N=25)
##simulated
load("net_sim.Rdata")
x2<-triad(sim,N=25)
# Plot direct-minus-implied differences (col 3 - col 4) against the
# sample-size ratio (col 6 / col 5): empirical in red, simulated in gray.
par(mgp=c(2,1,0))
plot(x1[,6]/x1[,5],x1[,3]-x1[,4],pch=19,xlab="sample size ratio",ylab="direct minus implied",col='red',cex=.8)
abline(h=0,col='gray')
points(x2[,6]/x2[,5],x2[,3]-x2[,4],pch=19,cex=.4,col='gray')
##mean
##empirical
# Same comparison with 100 randomly sampled pairs (Nsort=FALSE).
x1<-triad(emp,N=100,Nsort=FALSE)
##simulated
load("net_sim.Rdata")
x2<-triad(sim,N=100,Nsort=FALSE)
# Split the pairs into four groups by quartiles of the direct sample size
# (column 5) and draw one panel per group.
par(mfrow=c(4,1),mgp=c(2,1,0),mar=c(3,3,1,1),oma=rep(.5,4))
qu<-quantile(x1[,5],c(.25,.5,.75))
g1<-cut(x1[,5],c(-Inf,qu,Inf))
g2<-cut(x2[,5],c(-Inf,qu,Inf))
for (i in levels(g1)) {
tmp<-x1[g1==i,]
plot(tmp[,6]/tmp[,5],tmp[,3]-tmp[,4],pch=19,xlab="sample size ratio",ylab="direct minus implied",col='red',cex=.8)
abline(h=0,col='gray')
tmp<-x2[g2==i,]
# semi-transparent blue for the simulated points
cc<-col2rgb("blue")
c1<-rgb(cc[1],cc[2],cc[3],max=255,alpha=50)
points(tmp[,6]/tmp[,5],tmp[,3]-tmp[,4],pch=19,cex=.8,col=c1)
legend("bottomright",bty='n',legend=i)
}
##########################################3
##quantiles
# Repeat the empirical-vs-simulated triad comparison at several quantiles.
# BUG FIX: the loop variable 'qu' was never used inside the loop -- every
# iteration passed probs=.2, so all four results were for the same quantile
# even though they are labeled .1/.25/.75/.9 in the plots below.
# Pass probs=qu so each iteration uses its own quantile.
x1<-x2<-list()
for (qu in c(.1,.25,.75,.9)) {
x1[[as.character(qu)]]<-triad(emp,N=100,eqfun=quantile,probs=qu)
x2[[as.character(qu)]]<-triad(sim,N=100,eqfun=quantile,probs=qu)
}
# One panel per quantile: empirical (red) vs simulated (gray) triad results,
# direct-minus-implied difference against the sample-size ratio.
par(mfrow=c(2,2),mgp=c(2,1,0),mar=c(3,3,1,1),oma=rep(.5,4))
for (i in seq_along(x1)) { # seq_along() is safe if the list is empty
nm<-names(x1)[i]
plot(x1[[i]][,6]/x1[[i]][,5],x1[[i]][,3]-x1[[i]][,4],pch=19,xlab="sample size ratio",ylab="direct minus implied",col='red',cex=.8)
points(x2[[i]][,6]/x2[[i]][,5],x2[[i]][,3]-x2[[i]][,4],pch=19,cex=.4,col='gray')
abline(h=0,col='gray')
legend("topright",bty='n',paste("quantile",nm))
}
| /triads.R | no_license | ben-domingue/network_equating | R | false | false | 3,498 | r |
# Compute "direct minus implied" differences for triads of linked forms.
# df must contain s1/s2 (scores), t1/t2 (form titles) and nm (sorted pair
# key "A-B") -- inferred from usage; N pairs are analyzed, eqfun summarizes
# each form's scores, and Nsort=FALSE samples pairs at random instead of
# taking the most frequent (RNG-dependent). Returns a data.frame with pair
# key, third title, direct difference, implied difference, and sample sizes.
triad<-function(df,
N=20,
eqfun=mean,
Nsort=TRUE,
...) {
# standardize scores by the pooled SD
sc<-c(df$s1,df$s2)
sig<-sd(sc,na.rm=TRUE)
df$s1<-df$s1/sig
df$s2<-df$s2/sig
##
tab<-table(df$nm)
tab<-sort(tab,decreasing=TRUE)
if (!Nsort) {
tab<-tab[tab>10]
tab<-sample(tab)
}
tab<-tab[1:N]
##
nodes<-names(tab)
titles<-strsplit(nodes,'-')
titles<-unique(unlist(titles))
##
# direct difference of summary scores between the two titles of one pair
f<-function(x,eqfun,...) {
tt<-c(x$t1,x$t2)
ss<-c(x$s1,x$s2)
nms<-strsplit(unique(x$nm),'-')[[1]]
m1<-eqfun(ss[tt==nms[1]],...)
m2<-eqfun(ss[tt==nms[2]],...)
m1-m2
}
out<-list()
for (i in 1:length(nodes)) {
print(rep(i,100)) # progress marker (debug output)
x1<-df[df$nm==nodes[i],]
base.titles<-unique(c(x1$t1,x1$t2))
for (j in titles) {
print(j)
if (!(j %in% base.titles)) {
nm2<-paste(sort(c(base.titles[1],j)),collapse="-")
x2<-df[df$nm==nm2,]
nm3<-paste(sort(c(base.titles[2],j)),collapse="-")
x3<-df[df$nm==nm3,]
if (nrow(x2)>0 & nrow(x3)>0) {
del.base<-f(x1,eqfun=eqfun,...)
del2<-f(x2,eqfun=eqfun,...)
del3<-f(x3,eqfun=eqfun,...)
# orient indirect differences; asymmetric > vs < comparisons --
# presumably intentional, TODO confirm
sign2<-ifelse(base.titles[1]>j,-1,+1)
sign3<-ifelse(base.titles[2]<j,-1,+1)
del.triad<-sign2*del2+sign3*del3
out[[paste(i,j)]]<-c(nodes[i],j,del.base,del.triad,nrow(x1),(nrow(x2)+nrow(x3)))
}
}
}
}
##
# columns 2:6 come back as character from c(); column 2 is the title j,
# so this assumes titles are numeric codes -- TODO confirm
x<-data.frame(do.call("rbind",out))
for (i in 2:ncol(x)) x[,i]<-as.numeric(x[,i])
x
}
source("emp_prep.R")
emp<-df
load("net_sim.Rdata")
sim<-df
##########################################3
##mean
##empirical
x1<-triad(emp,N=25)
##simulated
load("net_sim.Rdata")
x2<-triad(sim,N=25)
par(mgp=c(2,1,0))
plot(x1[,6]/x1[,5],x1[,3]-x1[,4],pch=19,xlab="sample size ratio",ylab="direct minus implied",col='red',cex=.8)
abline(h=0,col='gray')
points(x2[,6]/x2[,5],x2[,3]-x2[,4],pch=19,cex=.4,col='gray')
##mean
##empirical
x1<-triad(emp,N=100,Nsort=FALSE)
##simulated
load("net_sim.Rdata")
x2<-triad(sim,N=100,Nsort=FALSE)
par(mfrow=c(4,1),mgp=c(2,1,0),mar=c(3,3,1,1),oma=rep(.5,4))
qu<-quantile(x1[,5],c(.25,.5,.75))
g1<-cut(x1[,5],c(-Inf,qu,Inf))
g2<-cut(x2[,5],c(-Inf,qu,Inf))
for (i in levels(g1)) {
tmp<-x1[g1==i,]
plot(tmp[,6]/tmp[,5],tmp[,3]-tmp[,4],pch=19,xlab="sample size ratio",ylab="direct minus implied",col='red',cex=.8)
abline(h=0,col='gray')
tmp<-x2[g2==i,]
cc<-col2rgb("blue")
c1<-rgb(cc[1],cc[2],cc[3],max=255,alpha=50)
points(tmp[,6]/tmp[,5],tmp[,3]-tmp[,4],pch=19,cex=.8,col=c1)
legend("bottomright",bty='n',legend=i)
}
##########################################3
##quantiles
# Repeat the empirical-vs-simulated triad comparison at several quantiles.
# BUG FIX: the loop variable 'qu' was never used inside the loop -- every
# iteration passed probs=.2, so all four results were for the same quantile
# despite the .1/.25/.75/.9 labels used in the plots. Pass probs=qu.
x1<-x2<-list()
for (qu in c(.1,.25,.75,.9)) {
x1[[as.character(qu)]]<-triad(emp,N=100,eqfun=quantile,probs=qu)
x2[[as.character(qu)]]<-triad(sim,N=100,eqfun=quantile,probs=qu)
}
# One panel per quantile: empirical (red) vs simulated (gray) triad results.
par(mfrow=c(2,2),mgp=c(2,1,0),mar=c(3,3,1,1),oma=rep(.5,4))
for (i in seq_along(x1)) { # seq_along() is safe if the list is empty
nm<-names(x1)[i]
plot(x1[[i]][,6]/x1[[i]][,5],x1[[i]][,3]-x1[[i]][,4],pch=19,xlab="sample size ratio",ylab="direct minus implied",col='red',cex=.8)
points(x2[[i]][,6]/x2[[i]][,5],x2[[i]][,3]-x2[[i]][,4],pch=19,cex=.4,col='gray')
abline(h=0,col='gray')
legend("topright",bty='n',paste("quantile",nm))
}
|
# Example code for running a multiple linear regression analysis.
# Read in the country-savings data set.
savings <- read.table("Lecture07_data_regression_in_matrix_terms.txt", header = TRUE)
# Summary statistics of the data
summary(savings)
# Multiple linear regression: y - personal savings (sr), x1 - pop15, x2 - pop75
g1 <- lm(sr ~ pop15 + pop75, data = savings)
# Summary of regression results: parameter estimates & sd, t-test for each
# predictor, R2 and adjusted R2, F-test for the model.
# BUG FIX: this previously called summary(g2), but no object 'g2' exists
# anywhere in the script; the fitted model is 'g1'.
r1 <- summary(g1)
# Variance-covariance matrix for the estimates:
# the unscaled matrix, the same as (X'X)^(-1)
r1$cov.unscaled
# Estimate of the residual standard deviation
r1$sigma
# Scaled matrix, the same as (X'X)^(-1) * sigma^2
r1$cov.unscaled * (r1$sigma)^2
# ANOVA table for the regression
anova(g1)
# 95% confidence intervals for the parameters
confint(g1)
| /Sample Code/Lecture 7/Richard_Davis_Lecture07_R_regression_in_matrix_terms.R | no_license | rdavis22/BIOS-7060 | R | false | false | 831 | r | # Example codes for running a linear regression analysis
# Read in the country-savings data set
savings <- read.table("Lecture07_data_regression_in_matrix_terms.txt", header=T)
# Summary statistics of the data
summary(savings)
# multiple linear regression: y - personal savings, x1 - pop15, x2 - pop75
g1 <- lm(sr~pop15+pop75, data=savings)
# Summary of regression results: parameter estimation & sd, t-test for
# individual predictor, R2 and adjusted R2, F-test for the model
r1 = summary(g2)
# Variance-covariance matrix for estimations
# The unscaled matrix, the same as (X'X)^(-1)
r1$cov.unscaled
# Estimate of standard deviation
r1$sigma
# Scaled matrix, the same as (X'X)^(-1) * sigma^2
r1$cov.unscaled * (r1$sigma)^2
# ANOVA table for regression
anova(g1)
# 95% confidence interval for parameters
confint(g1)
|
testlist <- list(type = 1L, z = 2.05226840065033e-289)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609893661-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 108 | r | testlist <- list(type = 1L, z = 2.05226840065033e-289)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
testlist <- list(x = structure(c(2.31635987669322e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::doubleCenterBiasCorrected,testlist)
str(result) | /multivariance/inst/testfiles/doubleCenterBiasCorrected/AFL_doubleCenterBiasCorrected/doubleCenterBiasCorrected_valgrind_files/1613140662-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 321 | r | testlist <- list(x = structure(c(2.31635987669322e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::doubleCenterBiasCorrected,testlist)
str(result) |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/subclu.R
\name{SubClu}
\alias{SubClu}
\title{The SubClu Algorithm for Subspace Clustering}
\usage{
SubClu(data, epsilon = 4, minSupport = 4)
}
\arguments{
\item{data}{A Matrix of input data.}
\item{epsilon}{size of environment parameter for DBSCAN}
\item{minSupport}{minimum number of points parameter for DBSCAN}
}
\description{
The SubClu Algorithm follows a bottom-up framework, in which one-dimensional
clusters are generated with DBSCAN and then each cluster is expanded one
dimension at a time into a dimension that is known to have a cluster that only
differs in one dimension from this cluster. This expansion is done using
DBSCAN with the same parameters that were used for the original DBSCAN that
produced the clusters.
}
\examples{
data("subspace_dataset")
SubClu(subspace_dataset,epsilon=1,minSupport=5)
}
\references{
Karin Kailing, Hans-Peter Kriegel and Peer Kröger
\emph{Density-Connected Subspace Clustering for High-Dimensional Data}
}
\seealso{
Other subspace.clustering.algorithms: \code{\link{CLIQUE}};
\code{\link{FIRES}}; \code{\link{P3C}};
\code{\link{ProClus}}
}
| /man/SubClu.Rd | no_license | Sarmentor/subspace | R | false | false | 1,184 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/subclu.R
\name{SubClu}
\alias{SubClu}
\title{The SubClu Algorithm for Subspace Clustering}
\usage{
SubClu(data, epsilon = 4, minSupport = 4)
}
\arguments{
\item{data}{A Matrix of input data.}
\item{epsilon}{size of environment parameter for DBSCAN}
\item{minSupport}{minimum number of points parameter for DBSCAN}
}
\description{
The SubClu Algorithm follows a bottom-up framework, in which one-dimensional
clusters are generated with DBSCAN and then each cluster is expanded one
dimension at a time into a dimension that is known to have a cluster that only
differs in one dimension from this cluster. This expansion is done using
DBSCAN with the same parameters that were used for the original DBSCAN that
produced the clusters.
}
\examples{
data("subspace_dataset")
SubClu(subspace_dataset,epsilon=1,minSupport=5)
}
\references{
Karin Kailing, Hans-Peter Kriegel and Peer Kröger
\emph{Density-Connected Subspace Clustering for High-Dimensional Data}
}
\seealso{
Other subspace.clustering.algorithms: \code{\link{CLIQUE}};
\code{\link{FIRES}}; \code{\link{P3C}};
\code{\link{ProClus}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HA_to_LF.R
\name{HA_to_LF}
\alias{HA_to_LF}
\title{Transform home away data to local foreign data}
\usage{
HA_to_LF(data_agro_HA)
}
\arguments{
\item{data_agro_HA}{data home away}
}
\value{
a data frame of class data_agro_LF
}
\description{
Transform home away data to local foreign data
}
\details{
change home to local and away to foreign
}
| /man/HA_to_LF.Rd | no_license | gaelleVF/PPBstats-PPBmelange | R | false | true | 421 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HA_to_LF.R
\name{HA_to_LF}
\alias{HA_to_LF}
\title{Transform home away data to local foreign data}
\usage{
HA_to_LF(data_agro_HA)
}
\arguments{
\item{data_agro_HA}{data home away}
}
\value{
a data frame of class data_agro_LF
}
\description{
Transform home away data to local foreign data
}
\details{
change home to local and away to foreign
}
|
\name{des}
\alias{des}
\title{Description of a data frame or a variable}
\description{Description of a data frame or a variable or wildcard for variable names}
\usage{
des(x=.data, select, exclude)
}
\arguments{
\item{x}{an object such as a vector (variable), a matrix, a table, a list or a data frame}
\item{select}{expression, indicating columns to select from '.data.'}
\item{exclude}{expression, indicating columns to exclude}
}
\details{The default value of x (ie if no argument is supplied) is '.data'. If 'x' is a data frame, its variable names will be listed with class and the description of each variable.
If 'x' is a variable, the environment and attached data frame containing 'x' will be described.
For a data frame containing too many variables, 'select' and 'exclude' can be specified to display fewer variable descriptions at a time. Unlike 'keepData', these two arguments do not have any permanent effect on the data frame.}
\author{Virasakdi Chongsuvivatwong
\email{ <cvirasak@medicine.psu.ac.th>}
}
\seealso{'use', 'summ', 'label.var', 'subset' and 'keepData'}
\examples{
data(Oswego)
use(Oswego)
# In the tutorial, when "oswego.rec" which is an EpiInfo file is available,
# instead of typing the above two lines, one can directly type:
# use("oswego.rec")
des() # This is one of the most useful Epicalc functions!
#### Detection of variables of the same name in different data frames.
# Note that 'age' is a variable in '.data' due to the function 'use'.
des(Oswego) # Same results. Note that 'age' is also in 'Oswego'.
des(infert) # The third 'age' is in another data frame,
# from the datasets package in R, 'infert'.
attach(infert)
search() # Show all data frames that are in the search path
des(sex) # 'sex' is found only in '.data'
des(induced)
age <- "abc" # Just a silly example for a variable
des(age) # Shows all occurrences of 'age', wherever it is
rm(age)
detachAllData()
#### Wildcard for variables
use(Oswego)
des("c*") # Show all variables starting with 'c'
des("?????") # Show all variables with 5 characters in the name
agegr <- cut(age, breaks=c(0,20,40,60,80))
label.var(agegr, "age group")
# Note that the above line incorporates 'agegr' into '.data'
# making it eligible to be included in the group under the following wildcard
des("age*")
#### Subset of variables in .data
des(select = 1:5) # First five variables
des(select = age:onsetdate) # Same results
des(select = c(1,2,5,20))
des(select = c(age, sex, onsetdate, fruitsalad))
des(select = sex:chocolate)
## The following six lines give the same results
des(select = -(sex:chocolate))
des(select = -sex:-chocolate)
des(select = -(2:19))
des(select = -19:-2)
des(exclude = sex:chocolate)
des(exclude = 2:19)
#### Wildcard: same effects with or without 'select'
des(select = "c*")
des("c*")
## Exclusion using wildcard, however, needs an 'exclude' argument.
des(exclude = "c*")
}
\keyword{database}
| /man/des.rd | no_license | cran/epicalc | R | false | false | 3,007 | rd | \name{des}
\alias{des}
\title{Description of a data frame or a variable}
\description{Description of a data frame or a variable or wildcard for variable names}
\usage{
des(x=.data, select, exclude)
}
\arguments{
\item{x}{an object such as a vector (variable), a matrix, a table, a list or a data frame}
\item{select}{expression, indicating columns to select from '.data.'}
\item{exclude}{expression, indicating columns to exclude}
}
\details{The default value of x (ie if no argument is supplied) is '.data'. If 'x' is a data frame, its variable names will be listed with class and the description of each variable.
If 'x' is a variable, the environment and attached data frame containing 'x' will be described.
For a data frame containing too many variables, 'select' and 'exclude' can be specified to display fewer variable descriptions at a time. Unlike 'keepData', these two arguments do not have any permanent effect on the data frame.}
\author{Virasakdi Chongsuvivatwong
\email{ <cvirasak@medicine.psu.ac.th>}
}
\seealso{'use', 'summ', 'label.var', 'subset' and 'keepData'}
\examples{
data(Oswego)
use(Oswego)
# In the tutorial, when "oswego.rec" which is an EpiInfo file is available,
# instead of typing the above two lines, one can directly type:
# use("oswego.rec")
des() # This is one of the most useful Epicalc functions!
#### Detection of variables of the same name in different data frames.
# Note that 'age' is a variable in '.data' due to the function 'use'.
des(Oswego) # Same results. Note that 'age' is also in 'Oswego'.
des(infert) # The third 'age' is in another data frame,
# from the datasets package in R, 'infert'.
attach(infert)
search() # Show all data frames that are in the search path
des(sex) # 'sex' is found only in '.data'
des(induced)
age <- "abc" # Just a silly example for a variable
des(age) # Shows all occurrences of 'age', wherever it is
rm(age)
detachAllData()
#### Wildcard for variables
use(Oswego)
des("c*") # Show all variables starting with 'c'
des("?????") # Show all variables with 5 characters in the name
agegr <- cut(age, breaks=c(0,20,40,60,80))
label.var(agegr, "age group")
# Note that the above line incorporates 'agegr' into '.data'
# making it eligible to be included in the group under the following wildcard
des("age*")
#### Subset of variables in .data
des(select = 1:5) # First five variables
des(select = age:onsetdate) # Same results
des(select = c(1,2,5,20))
des(select = c(age, sex, onsetdate, fruitsalad))
des(select = sex:chocolate)
## The following six lines give the same results
des(select = -(sex:chocolate))
des(select = -sex:-chocolate)
des(select = -(2:19))
des(select = -19:-2)
des(exclude = sex:chocolate)
des(exclude = 2:19)
#### Wildcard: same effects with or without 'select'
des(select = "c*")
des("c*")
## Exclusion using wildcard, however, needs an 'exclude' argument.
des(exclude = "c*")
}
\keyword{database}
|
# Volume of a cylinder computed from its diameter and length
# (defaults: diameter 5, length 100).
volclynder <- function(dia = 5, len = 100) {
  # cylinder volume: cross-sectional area times length
  pi * dia^2 * len / 4
}
# Demonstrates R's lazy argument evaluation: 'rad' is declared but never
# touched, so the function runs fine even when no 'rad' is supplied.
volcylinder1 <- function(dia, len, rad) {
  volume <- pi * dia^2 * len / 4
  volume
}
# Cylinder volume, but this version *does* evaluate 'rad' (via print), so
# calling it without a 'rad' argument raises a missing-argument error --
# the counterpart to volcylinder1's lazy-evaluation demo.
volcylinder2=function(dia,len,rad){
vol=pi*dia^2*len/4
print(rad) # forces evaluation of 'rad'
return(vol)
}
| /volClynder.R | no_license | suman083/sawaymDataScience | R | false | false | 260 | r | volclynder=function(dia=5,len=100){
vol=pi*dia^2*len/4
return(vol)
}
#lazy execution
# Demonstrates lazy evaluation: 'rad' is declared but never forced,
# so calling this without a 'rad' argument still succeeds.
volcylinder1 <- function(dia, len, rad) {
  pi * dia^2 * len / 4
}
# Counterpart to volcylinder1: print(rad) forces the 'rad' promise,
# so omitting 'rad' here raises a missing-argument error.
volcylinder2 <- function(dia, len, rad) {
  volume <- pi * dia^2 * len / 4
  print(rad)
  volume
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 6276
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 6126
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 6126
c
c Input Parameter (command line, file):
c input filename QBFLIB/Kronegger-Pfandler-Pichler/bomb/p10-10.pddl_planlen=1.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 270
c no.of clauses 6276
c no.of taut cls 100
c
c Output Parameters:
c remaining no.of clauses 6126
c
c QBFLIB/Kronegger-Pfandler-Pichler/bomb/p10-10.pddl_planlen=1.qdimacs 270 6276 E1 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 24 25 26 27 28 30 31 33 35 36 38 39 41 42 43 44 45 46 47 48 49 50 51 52 53 55 56 57 58 59 60 62 63 64 65 66 67 68 70 71 72 73 74 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270] 100 10 120 6126 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Kronegger-Pfandler-Pichler/bomb/p10-10.pddl_planlen=1/p10-10.pddl_planlen=1.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 1,224 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 6276
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 6126
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 6126
c
c Input Parameter (command line, file):
c input filename QBFLIB/Kronegger-Pfandler-Pichler/bomb/p10-10.pddl_planlen=1.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 270
c no.of clauses 6276
c no.of taut cls 100
c
c Output Parameters:
c remaining no.of clauses 6126
c
c QBFLIB/Kronegger-Pfandler-Pichler/bomb/p10-10.pddl_planlen=1.qdimacs 270 6276 E1 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 24 25 26 27 28 30 31 33 35 36 38 39 41 42 43 44 45 46 47 48 49 50 51 52 53 55 56 57 58 59 60 62 63 64 65 66 67 68 70 71 72 73 74 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270] 100 10 120 6126 RED
|
#' Safe SQL String
#'
#' Escape a vector of values for interpolation into a SQL statement:
#' NA/NULL values become the unquoted SQL NULL keyword, character values
#' are single-quoted with embedded quotes backslash-escaped, and numeric
#' values are rendered bare.
#'
#' @param var Vector of values to be made safe
#'
#' @return Character vector of SQL-safe tokens, same length as `var`
#' @export
#'
#' @examples
#' safeSQLVar("unsafe' string")
safeSQLVar <- function(var) {
  ret_vec <- rep("", length(var))
  # seq_along() (not 1:length(var)) so a zero-length input yields
  # character(0) instead of looping over indices 1 and 0.
  for (i in seq_along(var)) {
    v <- var[i]
    if (is.null(v) || identical(v, character(0)) || is.na(v)) {
      ret_vec[i] <- "NULL"
    } else if (is.character(v)) {
      # Escape embedded single quotes (MySQL-style backslash escaping).
      ret_vec[i] <- paste0("'", gsub("'", "\\\\'", v), "'")
    } else if (is.numeric(v)) {
      # Covers both integer and double; the old is.double branch was
      # unreachable because is.numeric() is TRUE for doubles.
      ret_vec[i] <- as.character(v)
    } else {
      ret_vec[i] <- v
    }
  }
  return(ret_vec)
}
| /R/helpers.R | no_license | willforbes/rsqlhelpers | R | false | false | 755 | r | #' Safe SQL String
#'
#' @param var String to be made safe
#'
#' @return Safe sql string
#' @export
#'
#' @examples
#' safeSQLVar("unsafe' string")
safeSQLVar <- function(var) {
  # Escape a vector of values for SQL interpolation: NA/NULL -> NULL
  # keyword, strings quoted/escaped, numbers rendered bare.
  ret_vec <- rep("", length(var))
  # seq_along() (not 1:length(var)) so a zero-length input yields
  # character(0) instead of looping over indices 1 and 0.
  for (i in seq_along(var)) {
    v <- var[i]
    if (is.null(v) || identical(v, character(0)) || is.na(v)) {
      ret_vec[i] <- "NULL"
    } else if (is.character(v)) {
      # Escape embedded single quotes (MySQL-style backslash escaping).
      ret_vec[i] <- paste0("'", gsub("'", "\\\\'", v), "'")
    } else if (is.numeric(v)) {
      # Covers both integer and double; the old is.double branch was
      # unreachable because is.numeric() is TRUE for doubles.
      ret_vec[i] <- as.character(v)
    } else {
      ret_vec[i] <- v
    }
  }
  return(ret_vec)
}
|
# Expectation: is the object non-NULL?
#
# Follows testthat's custom-expectation pattern: quasi_label() captures
# the caller's expression (via rlang::enquo()) so failures can name it,
# then expect() records the result. The value is returned invisibly so
# the expectation can be chained like other testthat expectations.
expect_not_null <- function(object, info = NULL, label = NULL) {
  captured <- quasi_label(rlang::enquo(object), label)
  expect(
    !is.null(captured$val),
    sprintf("%s is null.", captured$lab),
    info = info
  )
  invisible(captured$val)
}
# Expectation: does `node` carry the expected fields?
#
# Named arguments in `...` are the expected field values; id-like fields
# are first normalised with as_id() so comparisons use canonical ids.
# Checks that every expected name exists in `node`, then compares each
# value with expect_equal() (the field name is attached via `info`).
expect_node <- function(node, ...) {
  expected <- list(...)
  if (!is.null(expected$id)) expected$id <- as_id(expected$id)
  if (!is.null(expected$parents)) expected$parents <- as_id(expected$parents)
  if (!is.null(expected$children)) expected$children <- as_id(expected$children)
  expect_true(all(names(expected) %in% names(node)))
  for (key in names(expected)) {
    expect_equal(expected[[key]], node[[key]], info = key)
  }
  invisible(TRUE)
}
# Produce a throwaway recorded plot object for tests.
#
# Opens an off-screen PNG device on a temp file, enables display-list
# recording (required for recordPlot() to capture anything), draws a
# trivial scatter and returns the recorded plot. on.exit() guarantees
# the device is closed again when the function exits.
dummy_plot <- function () {
  on.exit(dev.off())
  png(tempfile(fileext = '.png'))
  dev.control("enable")
  plot(seq(10))
  recordPlot()
}
| /tests/testthat/helper-misc.R | permissive | lbartnik/repository | R | false | false | 794 | r | expect_not_null <- function (object, info = NULL, label = NULL) {
act <- quasi_label(rlang::enquo(object), label)
expect(!is.null(act$val), sprintf("%s is null.", act$lab),
info = info)
invisible(act$val)
}
# Expectation: does `node` carry the expected fields?
#
# Named arguments in `...` are the expected field values; id-like fields
# are first normalised with as_id() so comparisons use canonical ids.
expect_node <- function(node, ...) {
  expected <- list(...)
  if (!is.null(expected$id)) expected$id <- as_id(expected$id)
  if (!is.null(expected$parents)) expected$parents <- as_id(expected$parents)
  if (!is.null(expected$children)) expected$children <- as_id(expected$children)
  expect_true(all(names(expected) %in% names(node)))
  for (key in names(expected)) {
    expect_equal(expected[[key]], node[[key]], info = key)
  }
  invisible(TRUE)
}
# Produce a throwaway recorded plot object for tests.
#
# Opens an off-screen PNG device on a temp file, enables display-list
# recording (required for recordPlot() to capture anything), draws a
# trivial scatter and returns the recorded plot. on.exit() guarantees
# the device is closed again when the function exits.
dummy_plot <- function () {
  on.exit(dev.off())
  png(tempfile(fileext = '.png'))
  dev.control("enable")
  plot(seq(10))
  recordPlot()
}
|
# Functions to calculate the distance
# from each breakpoint to user-provided loci (e.g. TSS)
#' generateData2
#'
#' Prepare breakpoint positions for dist2motif2: filter an in-memory data
#' frame of breakpoints or read them from a bed file, optionally simulating
#' matched random breakpoints per chromosome (via bpSim()).
#'
#' @param ... Filter expressions forwarded to dplyr::filter() when `df` is used.
#' @param df Data frame with at least `chrom` and `bp` columns.
#' @param breakpoints Path to a bed file (2 or 3+ columns) of breakpoints.
#' @param sim FALSE for real data, or an integer number of simulation
#'   iterations; each iteration reproduces the per-chromosome breakpoint
#'   counts observed in the real data.
#' @param chroms Chromosomes to keep.
#' @return Data frame with columns chrom, pos and factor column iteration.
#' @keywords simulate
#' @import dplyr
#' @export
generateData2 <- function(..., df, breakpoints, sim=FALSE, chroms){
  if (missing(df) && missing(breakpoints)) stop("\n[!] Must provide either a df or bed file! Exiting.")
  if (!missing(df)) {
    cat("Reading data from df\n")
    real_data <- df %>%
      dplyr::filter(...) %>%
      dplyr::mutate(pos = bp) %>%
      dplyr::select(chrom, pos)
  } else {
    cat("Reading data from bed file\n")
    real_data <- read.table(breakpoints, header = F)
    if (ncol(real_data) >= 3) {
      # Use both the start and the end of each interval as breakpoints.
      real_data <- real_data[c(1, 2, 3)]
      colnames(real_data) <- c("chrom", "start", "end")
      real_data <- real_data %>%
        tidyr::gather(stend, val, -chrom) %>%
        dplyr::mutate(pos = val) %>%
        dplyr::select(chrom, pos)
    } else if (ncol(real_data) == 2) {
      colnames(real_data) <- c("chrom", "pos")
    } else stop("Badly formatted bed file. Exiting")
  }
  real_data <- real_data %>%
    dplyr::filter(chrom %in% chroms) %>%
    droplevels()
  if (sim) {
    # Preallocate one slot per iteration instead of growing a list.
    byIteration <- vector("list", sim)
    for (i in seq_len(sim)) {
      cat("Running simulation", i, "of", sim, "\n")
      simByChrom <- list()
      for (c in levels(real_data$chrom)) {
        # Match the observed breakpoint count on this chromosome.
        hitCount <- nrow(real_data[real_data$chrom == c, ])
        if (i == 1) {
          cat(paste("Simulating", hitCount, "breakpoints on chromosome", c), "\n")
        }
        bp_data <- bpSim(nSites = hitCount, byChrom = c)
        bp_data$iteration <- i
        simByChrom[[c]] <- bp_data
      }
      result <- as.data.frame(do.call(rbind, simByChrom))
      rownames(result) <- NULL
      byIteration[[i]] <- result
    }
    # Combine all iterations into one data frame.
    final <- as.data.frame(do.call(rbind, byIteration))
    final$iteration <- as.factor(final$iteration)
    return(final)
  } else {
    real_data$iteration <- as.factor(1)
    return(real_data)
  }
}
#' dist2Motif2
#' Calculate the distance from each breakpoint to the closest motif/feature
#' in each bed file found in `featureDir` (or in a single `feature_file`).
#' @param ... Filter expressions forwarded to generateData2() when `df` is used.
#' @param df Data frame of breakpoints (chrom, bp).
#' @param breakpoints Path to a bed file of breakpoints (alternative to `df`).
#' @param feature_file Optional single feature bed file; otherwise every
#'   .bed file in `featureDir` is analysed.
#' @param featureDir Directory containing feature bed files.
#' @param chroms Chromosomes to keep.
#' @param sim FALSE for real data, or number of simulation iterations.
#' @param position 'centre' to measure to region midpoints, 'edge' to
#'   measure to both region boundaries.
#' @return Data frame with columns bp, closest_tss, min_dist, chrom,
#'   iteration, feature (min_dist is signed: breakpoint pos - feature pos).
#' @keywords motif
#' @import ggplot2 dplyr tidyr
#' @importFrom plyr round_any
#' @export
dist2motif2 <- function(..., df, breakpoints, feature_file, featureDir = system.file("extdata", "features", package="svBreaks"), chroms=c('2L', '2R', '3L', '3R', '4', 'X', 'Y'), sim=FALSE, position = 'centre') {
  if(missing(df) && missing(breakpoints)) stop("\n[!] Must provide either a df or bed file! Exiting.")
  if(!dir.exists(featureDir)) stop("Not a dir")
  # NOTE(review): '.bed' is a regex here (unescaped dot), so e.g. "xbed"
  # in a name would also match -- confirm intent.
  if(length(grep(list.files(featureDir), pattern = '.bed', value=T)) == 0) stop("\n[!] Must provide a directory containing bed files! Exiting.")
  print(grep(list.files(featureDir), pattern = '.bed', value=T))
  bp_data <- svBreaks::generateData2(..., df=df, breakpoints=breakpoints, sim=sim, chroms=chroms)
  cat("Calculating distances to", position, 'of regions', sep = " ", "\n")
  # svCount <- table(bp_data$chrom)
  # bp_data <- subset(bp_data, chrom %in% names(svCount[svCount >= 5]))
  # bp_data <- droplevels(bp_data)
  # Closest-feature lookup for one breakpoint position. NOTE(review):
  # this closure reads `tss_df` from the enclosing environment; `tss_df`
  # is only assigned inside the per-chromosome loop below, so the code
  # relies on R's late lookup and on loop ordering -- fragile but works.
  minDist <- function(p) {
    index <- which.min(abs(tss_df$pos - p))
    closestTss <- tss_df$pos[index]
    chrom <- as.character(tss_df$chrom[index])
    dist <- (p - closestTss)
    list(p, closestTss, dist, chrom)
  }
  scores <- list()
  # Two-column bed files get a synthetic end column (start + 2).
  add_col <- function(x){
    if(is.null(x$V3)){
      x$V3 <- x$V2 + 2
    }
    return(x)
  }
  if(!missing(feature_file)){
    fileNames <- list(feature_file)
  } else{
    fileNames <- list.files(featureDir, pattern = ".bed")
  }
  # cat("Analysing all files in directory:", bedFiles, "\n")
  for (i in 1:length(fileNames)){
    # Feature name = file name up to the first dot.
    filename <- basename(tools::file_path_sans_ext(fileNames[i]))
    parts <- unlist(strsplit(filename, split = '\\.'))
    feature <- parts[1]
    cat("Analysing file:", filename, 'with feature:', feature, "\n")
    feature_locations <- read.table(paste(featureDir, fileNames[i], sep='/'), header = F)
    feature_locations <- add_col(feature_locations)
    feature_locations <- feature_locations[,c(1,2,3)]
    colnames(feature_locations) <- c("chrom", "start", "end")
    # fCount <- table(feature_locations$chrom)
    #
    # bp_data <- subset(bp_data, chrom %in% names(svCount[svCount >= 5])) %>% droplevels()
    #
    # Restrict features to chromosomes present in the breakpoint data.
    flocs <- feature_locations %>%
      dplyr::filter(chrom %in% levels(bp_data$chrom)) %>%
      droplevels()
    # NOTE(review): this trims `bp_data` in place for all SUBSEQUENT
    # feature files too, not just the current one -- confirm intent.
    if(length(levels(flocs$chrom)) < length(levels(bp_data$chrom))){
      cat("Feature:", feature, "not found on all chroms. Trimming real data to match chroms in ", filename, paste0("[",levels(flocs$chrom), "]"), "\n")
      bp_data <- bp_data %>%
        dplyr::filter(chrom %in% levels(flocs$chrom)) %>%
        droplevels()
    }
    # Reduce each feature region to reference point(s): its midpoint, or
    # both edges (start and end stacked as separate rows).
    if(position == 'centre'){
      flocs <- flocs %>%
        dplyr::mutate(middle = as.integer(((end+start)/2)+1)) %>%
        dplyr::mutate(pos = as.integer(middle-1)) %>%
        dplyr::select(chrom, pos)
    } else if(position == 'edge'){
      flocs <- flocs %>%
        tidyr::gather(c, pos, start:end, factor_key=TRUE) %>%
        dplyr::select(chrom, pos)
    }
    byIteration <- list()
    for (j in levels(bp_data$iteration)){
      byChrom <- list()
      df1 <- dplyr::filter(bp_data, iteration == j)
      for (c in levels(bp_data$chrom)) {
        df <- dplyr::filter(df1, chrom == c)
        # Assign tss_df BEFORE calling minDist (the closure reads it).
        tss_df <- dplyr::filter(flocs, chrom == c)
        dist2tss <- lapply(df$pos, minDist)
        dist2tss <- do.call(rbind, dist2tss)
        # matrix(unlist(...)) coerces everything to character; columns are
        # re-typed by the caller via as.numeric()/as.character() at the end.
        new <- data.frame(matrix(unlist(dist2tss), nrow=nrow(df)))
        new$iteration <- j
        new$feature <- as.factor(feature)
        colnames(new) <- c("bp", "closest_tss", "min_dist", "chrom", "iteration", "feature")
        byChrom[[c]] <- new
      }
      perIter <- do.call(rbind, byChrom)
      byIteration[[j]] <- perIter
    }
    dist2feat <- do.call(rbind, byIteration)
    scores[[i]] <- dist2feat
  }
  final <- do.call(rbind, scores)
  rownames(final) <- NULL
  final$iteration <- as.factor(final$iteration)
  final$chrom <- as.character(final$chrom)
  final$min_dist <- as.numeric(as.character(final$min_dist))
  return(final)
}
#' inRange
#'
#' Report, per feature and iteration, how many simulated breakpoints fall
#' within +/- `w` bp of a feature, after dropping features where fewer than
#' a fraction `p` of the REAL breakpoints are that close.
#'
#' @param r Real-breakpoint distances (columns feature, iteration, min_dist).
#' @param s Simulated-breakpoint distances (same columns).
#' @param w Window size in bp.
#' @param p Fraction of real breakpoints required within the window.
#' @return Grouped summary of simulated hits within the window
#'   (perc, count, total) per iteration/feature.
inRange <- function(r, s, w, p){
  # Minimum number of real breakpoints a feature must have in the window.
  in_range <- plyr::round_any(p*nrow(r), 1)
  bps_within_range <- r %>%
    dplyr::group_by(feature, iteration) %>%
    dplyr::mutate(min_count = sum(abs(min_dist) <= w)) %>%
    dplyr::filter(min_count >= in_range) %>%
    dplyr::ungroup() %>%
    droplevels()
  removed_features <- r %>% dplyr::filter(!feature %in% levels(bps_within_range$feature)) %>% droplevels()
  removed_features <- levels(removed_features$feature)
  if(length(removed_features) > 0){
    # Messages derive the window from the `w` argument (the old code
    # referenced an undefined global `lim` here and `in_window` below,
    # which crashed whenever this branch executed).
    cat("Dropping features:", paste0("'", removed_features, "'"), "with fewer than", in_range, "hits within +/-", w/1e3, "Kb\n")
    if(length(levels(bps_within_range$feature))==0){
      stop("There are no features in dir that have >= ", in_range, " breakpoints within specified range ", paste0("(lim=", w/1e3, "Kb) "), "Exiting.")
    }
  }
  # Restrict the simulated data to the surviving features.
  s <- s %>%
    dplyr::filter(feature %in% levels(bps_within_range$feature)) %>%
    droplevels()
  # Summarise simulated hits within the window. (A first, dead definition
  # of this helper that was immediately shadowed has been removed.)
  printCloseHits <- function(x){
    x %>%
      dplyr::group_by(iteration, feature) %>%
      dplyr::filter(abs(min_dist)<=w) %>%
      dplyr::summarise(perc = plyr::round_any(n()/nrow(x)*100,1),
                       count = n(),
                       total = nrow(x))
  }
  printCloseHits(s)
}
#' distOverlay2
#'
#' Calculate the distance from each breakpoint to the closest feature and
#' overlay the same number of randomly simulated breakpoints, with
#' significance testing via simSig2(). Note the effective window is
#' lim*1e3/5 bp, i.e. lim/5 Kb (see the `window` line below).
#' @keywords motif
#' @import dplyr ggplot2 ggpubr
#' @importFrom plyr round_any
#' @export
distOverlay2 <- function(..., df, breakpoints, feature_file, featureDir = system.file("extdata", "features", package="svBreaks"),
                        from='bps', chroms=c('2L', '2R', '3L', '3R', '4', 'X', 'Y'),
                        lim=10, n=5, plot = TRUE, histo=FALSE, position = 'edge', threshold=0.05, write=FALSE, out_dir) {
  # NOTE(review): window ends up as lim*200 bp (lim/5 Kb), not lim Kb --
  # confirm the /5 is intentional; messages below report window/1e3.
  window_bps <- lim*1e3
  window <- window_bps/5
  # Total number of real breakpoints (used for the threshold below).
  if(!missing(df)){
    bp_count <- nrow(df)
  } else {
    b <- read.delim(breakpoints)
    bp_count <- nrow(b)
  }
  real_data <- svBreaks::dist2motif2(..., df=df, breakpoints=breakpoints, feature_file=feature_file, featureDir=featureDir, position=position, chroms=chroms)
  sim_data <- svBreaks::dist2motif2(..., df=df, breakpoints=breakpoints, feature_file=feature_file, featureDir=featureDir, sim=n, position=position, chroms=chroms)
  in_range <- plyr::round_any(threshold*nrow(real_data), 1)
  # Keep only features where at least `threshold` of real breakpoints fall
  # inside the window.
  bps_within_range <- real_data %>%
    dplyr::group_by(feature, iteration) %>%
    dplyr::mutate(within_threshold = sum(abs(min_dist)<=window),
                  required_hits = plyr::round_any(threshold*bp_count, 1)) %>%
    dplyr::filter(within_threshold >= required_hits) %>%
    dplyr::ungroup() %>%
    droplevels()
  # NOTE(review): sim_within_range is computed but never used below.
  sim_within_range <- sim_data %>%
    dplyr::group_by(feature, iteration) %>%
    dplyr::mutate(within_threshold = sum(abs(min_dist)<=window),
                  required_hits = plyr::round_any(threshold*bp_count, 1)) %>%
    dplyr::filter(within_threshold >= required_hits) %>%
    dplyr::ungroup() %>%
    droplevels()
  removed_features <- real_data %>% dplyr::filter(!feature %in% levels(bps_within_range$feature)) %>% droplevels()
  removed_features <- levels(removed_features$feature)
  if(length(removed_features) > 0){
    cat("Dropping features:", paste0("'", removed_features, "'"), "with fewer than 10 hits wihin +/-", lim, "Kb\n")
    if(length(levels(bps_within_range$feature))==0){
      stop("There are no feaures in dir that have >= ", in_range, "breakpoints within specified range ",paste0("(lim=", lim,"Kb) "), "Exiting.")
    }
    sim_data <- sim_data %>%
      dplyr::filter(feature %in% levels(bps_within_range$feature)) %>%
      droplevels()
  }
  # for(f in levels(bps_within_range$feature)){
  #   x <- dplyr::filter(bps_within_range, feature == f)
  #   perc_in_window <- plyr::round_any((nrow(bps_in_window)/nrow(x))*100, 1)
  #   cat("There are ", paste0(nrow(bps_in_window), "/", nrow(bps_within_range), " [", perc_in_window, "%", "]"), "breakpoints within specified range", paste0("(lim=", lim,"Kb) "), "of feature:", f, "\n")
  # }
  # Per-iteration/per-feature summary of hits inside the window.
  printCloseHits <- function(x){
    total <- plyr::round_any(nrow(x)/length(levels(x$feature)), 1)
    x <- x %>%
      dplyr::group_by(iteration, feature) %>%
      dplyr::filter(abs(min_dist)<=window) %>%
      dplyr::summarise(perc = plyr::round_any((n()/total)*100,1),
                       count = n(),
                       total = total) %>%
      dplyr::ungroup()
    return(x)
  }
  bp_c <- (printCloseHits(real_data))
  cat(paste0("There are ", bp_c$count, "/", bp_c$total, " [", bp_c$perc, "%", "]", " breakpoints within specified range", " (lim=", window/1e3,"Kb) ", "of feature: ", bp_c$feature, "\n"))
  print(printCloseHits(sim_data))
  real_data <- bps_within_range
  real_data$Source <- as.factor("Real")
  sim_data$Source <- as.factor("Sim")
  # Duplicate the real data once per simulated iteration so real/sim can
  # be compared within each iteration downstream.
  dummy_iterations <- list()
  for (i in levels(sim_data$iteration)){
    real_data$iteration <- as.factor(i)
    dummy_iterations[[i]] <- real_data
  }
  real_data <- do.call(rbind, dummy_iterations)
  rownames(real_data) <- NULL
  real_data$iteration <- factor(real_data$iteration, levels = 1:n)
  sim_data$iteration <- factor(sim_data$iteration, levels = 1:n)
  # Perform significance testing
  # NOTE(review): `w` is not defined in this scope; this only avoids an
  # error because simSig2() never forces its max_dist argument (lazy
  # evaluation). Presumably `window` was intended -- confirm.
  pVals_and_df <- svBreaks::simSig2(r = real_data, s = sim_data, max_dist = w)
  combined <- pVals_and_df[[1]] %>% ungroup()
  pVals <- pVals_and_df[[2]] %>% ungroup()
  if(write){
    if(missing(out_dir) || !dir.exists(out_dir)){
      stop("Must specify an existing directory to write data to. Exiting")
    }
    # NOTE(review): `bps_in_window` is used here before it is ever
    # assigned, so write=TRUE currently errors; it likely should be built
    # from bps_within_range first.
    bps_in_window <- bps_in_window %>%
      dplyr::mutate(start = as.numeric(as.character(bp))-1,
                    end = as.numeric(as.character(bp))+1,
                    min_dist = paste0(feature, ":", min_dist)) %>%
      dplyr::filter(iteration == 1) %>%
      dplyr::select(chrom, bp, end, min_dist, feature) %>%
      droplevels()
    svBreaks::writeBed(df = bps_in_window, name = paste0("Bps_", lim, "kb_", bps_in_window$feature, ".bed")[1], outDir = out_dir)
    for(i in 1:(length(levels(combined$iteration)))){
      df_by_iter <- combined %>% dplyr::filter(iteration == i) %>% droplevels()
      for(s in levels(combined$Source)){
        df_by_source <- df_by_iter %>%
          dplyr::filter(Source == s) %>%
          dplyr::mutate(start = as.numeric(as.character(bp))-1,
                        end = as.numeric(as.character(bp))+1,
                        min_dist = paste0(feature, ":", min_dist)) %>%
          dplyr::select(chrom, start, end, min_dist, Source, iteration, feature) %>%
          droplevels()
        svBreaks::writeBed(df = df_by_source, name = paste0(df_by_source$Source, "_", df_by_source$feature, "_", df_by_source$iteration, ".bed")[1], outDir = out_dir)
      }
    }
    # combined %>%
    #   dplyr::group_by(Source, iteration) %>%
    #   dplyr::mutate(end = as.integer(bp)+2) %>%
    #   dplyr::select(chrom, bp, end, Source, iteration) %>%
    #   svBreaks::writeBed(df= ., name = paste0(.$Source, "_", .$iteration, ".bed")[1], outDir = '~/Desktop')
  }
  # NOTE(review): when plot=TRUE nothing is returned (prints only).
  # `byChrom` is undefined and `facetPlot` is not a plotdistanceOverlay2
  # parameter; both are swallowed unevaluated by its `...` (lazy eval).
  if(plot){
    print(plotdistanceOverlay2(..., distances=combined, from=from, histo=histo, facetPlot=FALSE, byChrom=byChrom, lim=lim, n=n, position=position ))
    print(pVals)
  }else{
    print(pVals)
    return(list(combined, pVals))
  }
}
#' plotdistanceOverlay2
#'
#' Plot real vs simulated breakpoint-to-feature distance distributions,
#' one panel per feature (density curves, or histograms when histo=TRUE).
#'
#' @param ... Unused; absorbs extra arguments from callers.
#' @param distances Combined real + simulated distances (output of distOverlay2()).
#' @param from Label source (currently unused).
#' @param lim Plot window half-width in Kb.
#' @param n Number of simulation iterations; defaults to the number of
#'   iteration levels present in `distances`.
#' @param position Label describing where distances were measured ('centre'/'edge').
#' @param histo Plot histograms instead of density curves.
#' @param binWidth Histogram bin width in bp (defaults to a tenth of the window).
#' @return A cowplot grid of per-feature plots.
#' @import dplyr ggplot2 scales
#' @importFrom cowplot plot_grid
#' @importFrom colorspace rainbow_hcl
#' @keywords distance
#' @export
plotdistanceOverlay2 <- function(..., distances, from='bps', lim=5, n, position='centre', histo=FALSE, binWidth){
  # Null device so intermediate ggplot evaluation does not create
  # Rplots.pdf; closed again before the final grid is returned.
  grDevices::pdf(NULL)
  threshold <- lim*1e3
  if (missing(n)) {
    n <- length(levels(distances$iteration))
  }
  # `&&` short-circuits, so missing(binWidth) is only evaluated when a
  # histogram is actually requested.
  if (histo && missing(binWidth)) binWidth <- threshold/10
  lims <- c(-threshold, threshold)
  brks <- c(-threshold, 0, threshold)
  labs <- as.character(brks/1e3)
  expnd <- c(0, 0)
  # Keep one copy of the real data (relabelled iteration "0") plus all
  # simulated iterations, restricted to the plotting window.
  new <- distances %>%
    dplyr::filter(!(Source == "Real" & as.character(as.numeric(iteration)) > 1)) %>%
    dplyr::mutate(iteration = as.factor(ifelse(Source=='Real', 0, iteration))) %>%
    dplyr::filter(abs(min_dist)<=threshold) %>%
    droplevels()
  # First colour = real data, the rest = one per simulation iteration.
  real_fill <- '#3D9DEB'
  iterFill <- colorspace::rainbow_hcl(n)
  colours <- c(real_fill, iterFill)
  plts <- list()
  for (i in seq_along(levels(new$feature))) {
    distances <- new %>%
      dplyr::filter(feature == levels(new$feature)[i])
    p <- ggplot(distances)
    if (histo) {
      p <- p + geom_histogram(data=distances[distances$Source=="Sim",], aes(min_dist, y=(..count..), fill = Source, group = iteration), alpha = 0.3, binwidth = binWidth, position="identity")
      p <- p + geom_histogram(data=distances[distances$Source=="Real",], aes(min_dist, y=..count.., fill = Source, group = iteration), alpha = 0.7, binwidth = binWidth, position="identity")
      p <- p + scale_fill_manual(values=colours)
      p <- p + scale_y_continuous(paste("Count per", binWidth, "bp bins"))
    } else {
      # Thin lines for each simulated iteration, thick line for real data.
      p <- p + stat_density(data=distances[distances$Source=="Sim",], aes(x=min_dist, y=..count.., group = interaction(iteration, Source), colour = iteration), alpha = 0.7, size=0.5, position="identity", geom="line")
      p <- p + stat_density(data=distances[distances$Source=="Real",], aes(x=min_dist, y=..count.., group = interaction(iteration, Source), colour = iteration), size=2, position="identity", geom="line")
      sim_rep1 <- distances %>%
        dplyr::filter(Source == "Sim",
                      iteration == 1) %>%
        droplevels()
      # Rugs: real data on the bottom axis, first simulation on top.
      p <- p + geom_rug(data=distances[distances$Source=="Real",], aes(min_dist, colour = iteration), sides = "b")
      p <- p + geom_rug(data=sim_rep1, aes(min_dist, colour = iteration), sides = "t")
      p <- p + scale_color_manual(values=colours)
    }
    p <- p + scale_x_continuous(
      limits = lims,
      breaks = brks,
      expand = expnd,
      labels = labs
    )
    p <- p +
      theme(
        legend.position = "none",
        panel.background = element_blank(),
        plot.background = element_rect(fill = "transparent", colour = NA),
        axis.line.x = element_line(color = "black", size = 0.5),
        axis.text.x = element_text(size = 12),
        axis.line.y = element_line(color = "black", size = 0.5),
        plot.title = element_text(size=22, hjust = 0.5)
      )
    p <- p + labs(title = paste(distances$feature[1], "\n", position))
    plts[[i]] <- p
  }
  cat("Plotting", length(levels(new$feature)), "plots", "\n")
  grDevices::dev.off()
  cowplot::plot_grid(plotlist=plts)
}
#' simSig2
#'
#' Descriptive statistics and distribution tests comparing real vs
#' simulated breakpoint-to-feature distances, per feature and iteration.
#' NOTE(review): the `test` and `max_dist` arguments are currently unused
#' (max_dist appears only in commented-out code).
#' @param r Real distances (needs feature, iteration, min_dist, Source).
#' @param s Simulated distances (same columns).
#' @return list(combined data frame, per-feature/iteration stats table).
#' @keywords simsig
#' @import dplyr ggplot2 ggpubr
#' @importFrom plyr round_any
#' @importFrom PerformanceAnalytics kurtosis
#' @export
simSig2 <- function(r, s, test=NA, max_dist=5000){
  cat("Calculating descriptive statistics\n")
  # Attach per-group summary columns (count, centre, spread) to each row.
  # NOTE(review): median() has no `trim` argument (it is absorbed by `...`
  # and ignored), so trimmed_median always equals median here -- probably
  # mean(..., trim=) or a manual quantile trim was intended.
  arrange_data <- function(x){
    x <- x %>%
      dplyr::group_by(feature, iteration) %>%
      dplyr::mutate(count = n(),
                    median = median(min_dist),
                    mean = mean(min_dist),
                    trimmed_mean = mean(min_dist, trim=0.35),
                    trimmed_median = median(min_dist, trim=0.35),
                    sd = sd(min_dist),
                    Source = Source)
    # This was reducing the data and breaking in the stats loop...
    # dplyr::filter(abs(min_dist) <= max_dist ) %>%
    # p95 <- quantile(x$min_dist, 0.85)
    # p05 <- quantile(x$min_dist, 0.25)
    # # d <- x %>% dplyr::arrange(-min_dist)
    # trimmed_data <- x[x$min_dist<= p95 & x$min_dist >= p05,] %>%
    #   dplyr::arrange(-min_dist)
    # x$trimmed_mean <- mean(abs(trimmed_data$min_dist))
    # mean(x$min_dist[x$min_dist<= p95 & x$min_dist >= p05])
    #
    # mean(x[which(x$min_dist <= p95 & x$min_dist >= p05)])
    # meanTrunc <- mean(numVec[which(numVec <= p95 & numVec >= p05)])
    return(x)
  }
  # r <- r %>% dplyr::filter(iteration==1)
  simulated <- arrange_data(s)
  real <- arrange_data(r)
  combined <- suppressWarnings(dplyr::full_join(real, simulated))
  combined$Source <- as.factor(combined$Source)
  simbyFeat = list()
  for (f in levels(combined$feature)){
    pVals = list()
    c <- dplyr::filter(combined, feature==f)
    for(i in levels(c$iteration)){
      df <- dplyr::filter(c, iteration==i)
      rl <- dplyr::filter(df, Source == "Real")
      sm <- dplyr::filter(df, Source == "Sim")
      # Kolmogorov-Smirnov: do real and simulated distances share a
      # distribution? NOTE(review): if ks.test() errors, result1 is NA
      # and `NA$p.value` below raises "$ operator is invalid for atomic
      # vectors" -- the commented-out guard was meant to handle this.
      result1 <- tryCatch(suppressWarnings(ks.test(rl$min_dist, sm$min_dist)), error=function(err) NA)
      # result1 <- suppressWarnings(ks.test(rl$min_dist, sm$min_dist))
      # if(!is.na(result1)){
      ksPval <- round(result1$p.value, 4)
      # }else{
      #   ksPval <- 1
      # }
      # Variance-equality tests (Levene is the one reported with stars;
      # NOTE(review): bPval is computed but not included in the output).
      result2 <- car::leveneTest(df$min_dist, df$Source, center='median')
      result3 <- stats::bartlett.test(df$min_dist, df$Source)
      bPval <- round(result3$p.value, 4)
      lPval <- round(result2$`Pr(>F)`[1], 4)
      rmed <- round(median(rl$min_dist)/1e3, 2)
      smed <- round(median(sm$min_dist)/1e3, 2)
      # rtrim <- round(median(rl$min_dist, trim=0.25)/1000, 2)
      rtrim <- rl$trimmed_median[1]/1e3
      strim <- sm$trimmed_median[1]/1e3
      # strim <- round(median(sm$min_dist, trim=0.25)/1000, 2)
      rsd <- round(sd(rl$min_dist)/1e3, 2)
      ssd <- round(sd(sm$min_dist)/1e3, 2)
      rKurtosis <- round(PerformanceAnalytics::kurtosis(rl$min_dist), 2)
      sKurtosis <- round(PerformanceAnalytics::kurtosis(sm$min_dist), 2)
      rSkew <- round(PerformanceAnalytics::skewness(rl$min_dist), 2)
      sSkew <- round(PerformanceAnalytics::skewness(sm$min_dist), 2)
      # fStat <- var.test(min_dist ~ Source , df, alternative = "two.sided")
      # fRatio <- round(fStat$statistic, 2)
      # fStat <- round(fStat$p.value, 4)
      # Significance stars derived from Levene's p-value.
      sig <- ifelse(lPval <= 0.001, "***",
                    ifelse(lPval <= 0.01, "**",
                           ifelse(lPval <= 0.05, "*", "")))
      vals <- data.frame(iteration = i,
                         feature = f,
                         KS = ksPval,
                         Levenes = lPval,
                         # Bartlett = bPval,
                         # Fstat_ratio = fRatio,
                         # Fstat = fStat,
                         real_median = rmed,
                         sim_median = smed,
                         real_trim_med = rtrim,
                         sim_trim_med = strim,
                         real_sd = rsd,
                         sim_sd = ssd,
                         real_kurtosis = rKurtosis,
                         sim_kurtosis = sKurtosis,
                         real_skew = rSkew,
                         sim_skew = sSkew,
                         sig = sig)
      pVals[[i]] <- vals
    }
    pVals_df <- do.call(rbind, pVals)
    simbyFeat[[f]] <- pVals_df
  }
  combined_sig_vals <- do.call(rbind, simbyFeat)
  rownames(combined_sig_vals) <- NULL
  # Most significant (smallest p) rows first.
  combined_sig_vals <- combined_sig_vals %>%
    arrange(Levenes, KS)
  # print(pVals_df, row.names = FALSE)
  ## Boxplot per chrom
  # colours <- c("#E7B800", "#00AFBB")
  # cat("Plotting qq plot of min distances\n")
  # qqnorm(combined$min_dist)
  # qqline(combined$min_dist, col = 2)
  # p <- ggplot(combined)
  # p <- p + geom_boxplot(aes(chrom, min_dist, fill = Source), alpha = 0.6)
  # p <- p + scale_y_continuous("Distance", limits=c(-5000, 5000))
  # p <- p + facet_wrap(~iteration, ncol = 2)
  # p <- p + scale_fill_manual(values = colours)
  # p
  return(list(combined, combined_sig_vals))
}
# Ensure a bed-style data frame has a third (end) column: when V3 is
# absent, derive it as the second column + 2.
add_col <- function(x) {
  missing_end <- is.null(x$V3)
  if (missing_end) {
    x$V3 <- x[, 2] + 2
  }
  x
}
#' writeBed
#'
#' Write a bed-style data frame (chrom, start, end, ...) to a tab-separated
#' file, dropping malformed rows where start >= end.
#'
#' @param df Data frame whose first three columns are chrom/start/end
#'   (an end column is derived via add_col() when absent).
#' @param outDir Output directory; defaults to the working directory.
#' @param name Output file name.
#' @param svBreaks Unused; retained for backwards compatibility.
writeBed <- function(df, outDir=NULL, name='regions.bed', svBreaks=FALSE){
  # Fall back to the working directory whether outDir is omitted OR passed
  # explicitly as NULL (the old missing() check let an explicit NULL
  # through and silently wrote to the bare file name).
  if (missing(outDir) || is.null(outDir)) {
    outDir <- getwd()
    cat("Writing to", outDir, "\n")
  }
  df <- add_col(df)
  # Keep only well-formed intervals (start < end). Columns are extracted
  # with [[ ]] -- df[2] is a one-column data frame and as.numeric() on it
  # errors with "'list' object cannot be coerced to type 'double'".
  keep <- as.numeric(df[[2]]) < as.numeric(df[[3]])
  keep[is.na(keep)] <- FALSE
  df <- droplevels(df[keep, , drop = FALSE])
  outPath <- file.path(outDir, name)
  cat(outPath, "\n")
  write.table(df, file = outPath, row.names = FALSE, col.names = FALSE, sep = "\t", quote = FALSE)
}
| /R/dist2motif2.R | permissive | nriddiford/svBreaks | R | false | false | 23,613 | r | # Functions to calculate the distance
# from each breakpoint to user-provided loci (e.g. TSS)
#' generateData2
#' Prepare data for dist2motif
#' @keywords simulate
#' @import dplyr
#' @export
generateData2 <- function(..., df, breakpoints, sim=FALSE, chroms){
  # Prepare breakpoint positions: filter an in-memory data frame or read a
  # bed file, optionally simulating matched random breakpoints per
  # chromosome (via bpSim()). Returns chrom, pos, iteration (factor).
  if (missing(df) && missing(breakpoints)) stop("\n[!] Must provide either a df or bed file! Exiting.")
  if (!missing(df)) {
    cat("Reading data from df\n")
    real_data <- df %>%
      dplyr::filter(...) %>%
      dplyr::mutate(pos = bp) %>%
      dplyr::select(chrom, pos)
  } else {
    cat("Reading data from bed file\n")
    real_data <- read.table(breakpoints, header = F)
    if (ncol(real_data) >= 3) {
      # Use both the start and the end of each interval as breakpoints.
      real_data <- real_data[c(1, 2, 3)]
      colnames(real_data) <- c("chrom", "start", "end")
      real_data <- real_data %>%
        tidyr::gather(stend, val, -chrom) %>%
        dplyr::mutate(pos = val) %>%
        dplyr::select(chrom, pos)
    } else if (ncol(real_data) == 2) {
      colnames(real_data) <- c("chrom", "pos")
    } else stop("Badly formatted bed file. Exiting")
  }
  real_data <- real_data %>%
    dplyr::filter(chrom %in% chroms) %>%
    droplevels()
  if (sim) {
    # Preallocate one slot per iteration instead of growing a list.
    byIteration <- vector("list", sim)
    for (i in seq_len(sim)) {
      cat("Running simulation", i, "of", sim, "\n")
      simByChrom <- list()
      for (c in levels(real_data$chrom)) {
        # Match the observed breakpoint count on this chromosome.
        hitCount <- nrow(real_data[real_data$chrom == c, ])
        if (i == 1) {
          cat(paste("Simulating", hitCount, "breakpoints on chromosome", c), "\n")
        }
        bp_data <- bpSim(nSites = hitCount, byChrom = c)
        bp_data$iteration <- i
        simByChrom[[c]] <- bp_data
      }
      result <- as.data.frame(do.call(rbind, simByChrom))
      rownames(result) <- NULL
      byIteration[[i]] <- result
    }
    # Combine all iterations into one data frame.
    final <- as.data.frame(do.call(rbind, byIteration))
    final$iteration <- as.factor(final$iteration)
    return(final)
  } else {
    real_data$iteration <- as.factor(1)
    return(real_data)
  }
}
#' dist2Motif2
#' Calculate the distance from each breakpoint to closest motif in a directory of files
#'
#' For every feature bed file (or the single `feature_file`), finds for each
#' breakpoint the nearest feature position on the same chromosome and records
#' the signed distance.
#' @param ... Filter expressions forwarded to generateData2 (applied to `df`)
#' @param df Data frame of breakpoints (alternative to `breakpoints`)
#' @param breakpoints Path to a bed file of breakpoints (alternative to `df`)
#' @param feature_file Optional single feature bed file; otherwise every .bed file in `featureDir` is analysed
#' @param featureDir Directory containing feature bed files
#' @param chroms Chromosomes to analyse
#' @param sim Passed to generateData2: if numeric, the number of simulated iterations
#' @param position Measure distances to the 'centre' or the 'edge' of each feature
#' @return Data frame with columns bp, closest_tss, min_dist, chrom, iteration, feature
#' @keywords motif
#' @import ggplot2 dplyr tidyr
#' @importFrom plyr round_any
#' @export
dist2motif2 <- function(..., df, breakpoints, feature_file, featureDir = system.file("extdata", "features", package="svBreaks"), chroms=c('2L', '2R', '3L', '3R', '4', 'X', 'Y'), sim=FALSE, position = 'centre') {
if(missing(df) && missing(breakpoints)) stop("\n[!] Must provide either a df or bed file! Exiting.")
if(!dir.exists(featureDir)) stop("Not a dir")
if(length(grep(list.files(featureDir), pattern = '.bed', value=T)) == 0) stop("\n[!] Must provide a directory containing bed files! Exiting.")
print(grep(list.files(featureDir), pattern = '.bed', value=T))
# Real (or simulated) breakpoint positions, one row per breakpoint
bp_data <- svBreaks::generateData2(..., df=df, breakpoints=breakpoints, sim=sim, chroms=chroms)
cat("Calculating distances to", position, 'of regions', sep = " ", "\n")
# svCount <- table(bp_data$chrom)
# bp_data <- subset(bp_data, chrom %in% names(svCount[svCount >= 5]))
# bp_data <- droplevels(bp_data)
# Distance from position p to the nearest feature. NOTE(review): `tss_df`
# is not an argument - it is resolved by lexical scoping and is (re)assigned
# per chromosome in the loop below before minDist is called.
minDist <- function(p) {
index <- which.min(abs(tss_df$pos - p))
closestTss <- tss_df$pos[index]
chrom <- as.character(tss_df$chrom[index])
# Signed distance: negative when the breakpoint lies before the feature
dist <- (p - closestTss)
list(p, closestTss, dist, chrom)
}
scores <- list()
# Pad two-column bed files with an end coordinate (start + 2)
add_col <- function(x){
if(is.null(x$V3)){
x$V3 <- x$V2 + 2
}
return(x)
}
if(!missing(feature_file)){
fileNames <- list(feature_file)
} else{
fileNames <- list.files(featureDir, pattern = ".bed")
}
# cat("Analysing all files in directory:", bedFiles, "\n")
for (i in 1:length(fileNames)){
# Feature name is taken from the file name up to the first '.'
filename <- basename(tools::file_path_sans_ext(fileNames[i]))
parts <- unlist(strsplit(filename, split = '\\.'))
feature <- parts[1]
cat("Analysing file:", filename, 'with feature:', feature, "\n")
feature_locations <- read.table(paste(featureDir, fileNames[i], sep='/'), header = F)
feature_locations <- add_col(feature_locations)
feature_locations <- feature_locations[,c(1,2,3)]
colnames(feature_locations) <- c("chrom", "start", "end")
# fCount <- table(feature_locations$chrom)
#
# bp_data <- subset(bp_data, chrom %in% names(svCount[svCount >= 5])) %>% droplevels()
#
# Restrict features to the chromosomes present in the breakpoint data
flocs <- feature_locations %>%
dplyr::filter(chrom %in% levels(bp_data$chrom)) %>%
droplevels()
# If a feature is absent from some chromosomes, trim the breakpoint data
# so both data sets cover the same chromosome set
if(length(levels(flocs$chrom)) < length(levels(bp_data$chrom))){
cat("Feature:", feature, "not found on all chroms. Trimming real data to match chroms in ", filename, paste0("[",levels(flocs$chrom), "]"), "\n")
bp_data <- bp_data %>%
dplyr::filter(chrom %in% levels(flocs$chrom)) %>%
droplevels()
}
# Reduce each feature interval to the position(s) distances are measured to
if(position == 'centre'){
flocs <- flocs %>%
dplyr::mutate(middle = as.integer(((end+start)/2)+1)) %>%
dplyr::mutate(pos = as.integer(middle-1)) %>%
dplyr::select(chrom, pos)
} else if(position == 'edge'){
# Both the start and the end of each interval become candidate positions
flocs <- flocs %>%
tidyr::gather(c, pos, start:end, factor_key=TRUE) %>%
dplyr::select(chrom, pos)
}
byIteration <- list()
for (j in levels(bp_data$iteration)){
byChrom <- list()
df1 <- dplyr::filter(bp_data, iteration == j)
for (c in levels(bp_data$chrom)) {
df <- dplyr::filter(df1, chrom == c)
# tss_df is read by minDist() via lexical scoping (see note above)
tss_df <- dplyr::filter(flocs, chrom == c)
dist2tss <- lapply(df$pos, minDist)
dist2tss <- do.call(rbind, dist2tss)
new <- data.frame(matrix(unlist(dist2tss), nrow=nrow(df)))
new$iteration <- j
new$feature <- as.factor(feature)
# Name all six columns: the four from minDist plus iteration/feature
colnames(new) <- c("bp", "closest_tss", "min_dist", "chrom", "iteration", "feature")
byChrom[[c]] <- new
}
perIter <- do.call(rbind, byChrom)
byIteration[[j]] <- perIter
}
dist2feat <- do.call(rbind, byIteration)
scores[[i]] <- dist2feat
}
final <- do.call(rbind, scores)
rownames(final) <- NULL
final$iteration <- as.factor(final$iteration)
final$chrom <- as.character(final$chrom)
# matrix()/unlist() above coerced everything to character; restore numerics
final$min_dist <- as.numeric(as.character(final$min_dist))
return(final)
}
#' inRange
#' Keep only features for which at least fraction `p` of the real breakpoints
#' fall within +/- `w` bp of the feature, then summarise, per iteration and
#' feature, how many simulated hits fall within that window.
#' @param r Data frame of real distances (needs feature, iteration, min_dist)
#' @param s Data frame of simulated distances (same columns)
#' @param w Window half-width in bp
#' @param p Fraction of breakpoints required within the window
#' @return Per-iteration/per-feature summary (perc, count, total) of the
#'   simulated hits within the window
inRange <- function(r, s, w, p){
  # Minimum number of real breakpoints that must fall within the window
  in_range <- plyr::round_any(p * nrow(r), 1)
  bps_within_range <- r %>%
    dplyr::group_by(feature, iteration) %>%
    dplyr::mutate(min_count = sum(abs(min_dist) <= w)) %>%
    dplyr::filter(min_count >= in_range) %>%
    dplyr::ungroup() %>%
    droplevels()
  sim_within_range <- s %>%
    dplyr::group_by(feature, iteration) %>%
    dplyr::mutate(min_count = sum(abs(min_dist) <= w)) %>%
    dplyr::filter(min_count >= in_range) %>%
    dplyr::ungroup() %>%
    droplevels()
  removed_features <- r %>% dplyr::filter(!feature %in% levels(bps_within_range$feature)) %>% droplevels()
  removed_features <- levels(removed_features$feature)
  if(length(removed_features) > 0){
    # `w` is in bp; report the window in Kb. (The old message referenced the
    # undefined names `lim` and `in_window`.)
    cat("Dropping features:", paste0("'", removed_features, "'"), "with fewer than", in_range, "hits within +/-", w/1e3, "Kb\n")
    if(length(levels(bps_within_range$feature)) == 0){
      stop("There are no features in dir that have >= ", in_range, " breakpoints within specified range ", paste0("(w=", w/1e3, "Kb) "), "Exiting.")
    }
  }
  s <- s %>%
    dplyr::filter(feature %in% levels(bps_within_range$feature)) %>%
    droplevels()
  # Summarise, per iteration and feature, the hits falling within the window.
  # (A first, broken definition of this helper was dead code and was removed.)
  printCloseHits <- function(x){
    x %>%
      dplyr::group_by(iteration, feature) %>%
      dplyr::filter(abs(min_dist) <= w) %>%
      dplyr::summarise(perc = plyr::round_any(n()/nrow(x)*100, 1),
                       count = n(),
                       total = nrow(x))
  }
  printCloseHits(s)
}
# distOverlay2
#'
#' Calculate the distance from each breakpoint to the closest motif and
#' overlay the same number of randomly simulated breakpoints, with
#' significance testing (simSig2) and optional plotting / bed output.
#' @param ... Filter expressions forwarded to dist2motif2
#' @param df Data frame of breakpoints (alternative to `breakpoints`)
#' @param breakpoints Path to a bed file of breakpoints
#' @param feature_file Optional single feature bed file
#' @param featureDir Directory of feature bed files
#' @param from Label describing the breakpoint source (forwarded to plotting)
#' @param chroms Chromosomes to analyse
#' @param lim Window half-width in Kb
#' @param n Number of simulated iterations
#' @param plot If TRUE print the overlay plot; otherwise return data + stats
#' @param histo Plot histograms rather than density lines
#' @param position Measure distances to the 'centre' or 'edge' of features
#' @param threshold Fraction of breakpoints that must fall within the window for a feature to be kept
#' @param write If TRUE write per-source/per-iteration bed files to `out_dir`
#' @param out_dir Existing directory for bed output (required when write=TRUE)
#' @keywords motif
#' @import dplyr ggplot2 ggpubr
#' @importFrom plyr round_any
#' @export
distOverlay2 <- function(..., df, breakpoints, feature_file, featureDir = system.file("extdata", "features", package="svBreaks"),
                         from='bps', chroms=c('2L', '2R', '3L', '3R', '4', 'X', 'Y'),
                         lim=10, n=5, plot = TRUE, histo=FALSE, position = 'edge', threshold=0.05, write=FALSE, out_dir) {
  window_bps <- lim*1e3
  # NOTE(review): the working window is a fifth of +/- lim Kb - confirm this
  # scaling is intentional
  window <- window_bps/5
  # Total number of input breakpoints, used for the per-feature hit threshold
  if(!missing(df)){
    bp_count <- nrow(df)
  } else {
    b <- read.delim(breakpoints)
    bp_count <- nrow(b)
  }
  # Distances for the real breakpoints and for n simulated iterations
  real_data <- svBreaks::dist2motif2(..., df=df, breakpoints=breakpoints, feature_file=feature_file, featureDir=featureDir, position=position, chroms=chroms)
  sim_data <- svBreaks::dist2motif2(..., df=df, breakpoints=breakpoints, feature_file=feature_file, featureDir=featureDir, sim=n, position=position, chroms=chroms)
  in_range <- plyr::round_any(threshold*nrow(real_data), 1)
  # Keep only features where enough real breakpoints fall within the window
  bps_within_range <- real_data %>%
    dplyr::group_by(feature, iteration) %>%
    dplyr::mutate(within_threshold = sum(abs(min_dist)<=window),
                  required_hits = plyr::round_any(threshold*bp_count, 1)) %>%
    dplyr::filter(within_threshold >= required_hits) %>%
    dplyr::ungroup() %>%
    droplevels()
  sim_within_range <- sim_data %>%
    dplyr::group_by(feature, iteration) %>%
    dplyr::mutate(within_threshold = sum(abs(min_dist)<=window),
                  required_hits = plyr::round_any(threshold*bp_count, 1)) %>%
    dplyr::filter(within_threshold >= required_hits) %>%
    dplyr::ungroup() %>%
    droplevels()
  removed_features <- real_data %>% dplyr::filter(!feature %in% levels(bps_within_range$feature)) %>% droplevels()
  removed_features <- levels(removed_features$feature)
  if(length(removed_features) > 0){
    cat("Dropping features:", paste0("'", removed_features, "'"), "with fewer than", in_range, "hits within +/-", lim, "Kb\n")
    if(length(levels(bps_within_range$feature))==0){
      stop("There are no features in dir that have >= ", in_range, " breakpoints within specified range ", paste0("(lim=", lim,"Kb) "), "Exiting.")
    }
    sim_data <- sim_data %>%
      dplyr::filter(feature %in% levels(bps_within_range$feature)) %>%
      droplevels()
  }
  # Per-feature summary of how many hits fall within the window
  printCloseHits <- function(x){
    total <- plyr::round_any(nrow(x)/length(levels(x$feature)), 1)
    x %>%
      dplyr::group_by(iteration, feature) %>%
      dplyr::filter(abs(min_dist)<=window) %>%
      dplyr::summarise(perc = plyr::round_any((n()/total)*100,1),
                       count = n(),
                       total = total) %>%
      dplyr::ungroup()
  }
  bp_c <- printCloseHits(real_data)
  cat(paste0("There are ", bp_c$count, "/", bp_c$total, " [", bp_c$perc, "%", "]", " breakpoints within specified range", " (lim=", window/1e3,"Kb) ", "of feature: ", bp_c$feature, "\n"))
  print(printCloseHits(sim_data))
  real_data <- bps_within_range
  real_data$Source <- as.factor("Real")
  sim_data$Source <- as.factor("Sim")
  # Replicate the real data once per simulated iteration so each sim
  # iteration can be overlaid with the same real distribution
  dummy_iterations <- list()
  for (i in levels(sim_data$iteration)){
    real_data$iteration <- as.factor(i)
    dummy_iterations[[i]] <- real_data
  }
  real_data <- do.call(rbind, dummy_iterations)
  rownames(real_data) <- NULL
  real_data$iteration <- factor(real_data$iteration, levels = 1:n)
  sim_data$iteration <- factor(sim_data$iteration, levels = 1:n)
  # Significance testing. (Was `max_dist = w`: `w` is undefined here and
  # only survived because simSig2 never forces that argument.)
  pVals_and_df <- svBreaks::simSig2(r = real_data, s = sim_data, max_dist = window)
  combined <- pVals_and_df[[1]] %>% ungroup()
  pVals <- pVals_and_df[[2]] %>% ungroup()
  if(write){
    if(missing(out_dir) || !dir.exists(out_dir)){
      stop("Must specify an existing directory to write data to. Exiting")
    }
    # Real breakpoints within the window, as 0-based bed intervals.
    # (Was an undefined `bps_in_window`, which crashed when write=TRUE;
    # the select() also picked `bp` instead of the computed `start`.)
    bps_in_window <- bps_within_range %>%
      dplyr::filter(abs(min_dist) <= window) %>%
      dplyr::mutate(start = as.numeric(as.character(bp))-1,
                    end = as.numeric(as.character(bp))+1,
                    min_dist = paste0(feature, ":", min_dist)) %>%
      dplyr::filter(iteration == 1) %>%
      dplyr::select(chrom, start, end, min_dist, feature) %>%
      droplevels()
    svBreaks::writeBed(df = bps_in_window, name = paste0("Bps_", lim, "kb_", bps_in_window$feature, ".bed")[1], outDir = out_dir)
    for(i in seq_along(levels(combined$iteration))){
      df_by_iter <- combined %>% dplyr::filter(iteration == i) %>% droplevels()
      for(s in levels(combined$Source)){
        df_by_source <- df_by_iter %>%
          dplyr::filter(Source == s) %>%
          dplyr::mutate(start = as.numeric(as.character(bp))-1,
                        end = as.numeric(as.character(bp))+1,
                        min_dist = paste0(feature, ":", min_dist)) %>%
          dplyr::select(chrom, start, end, min_dist, Source, iteration, feature) %>%
          droplevels()
        svBreaks::writeBed(df = df_by_source, name = paste0(df_by_source$Source, "_", df_by_source$feature, "_", df_by_source$iteration, ".bed")[1], outDir = out_dir)
      }
    }
  }
  if(plot){
    # (The old call also forwarded `byChrom=byChrom`, an undefined name)
    print(plotdistanceOverlay2(..., distances=combined, from=from, histo=histo, lim=lim, n=n, position=position))
    print(pVals)
  } else {
    print(pVals)
    return(list(combined, pVals))
  }
}
#' plotdistanceOverlay2
#'
#' Plot the distribution of breakpoint-to-feature distances, overlaying the
#' real data on the simulated iterations (one panel per feature).
#' @param ... Unused; absorbed so callers can forward extra arguments
#' @param distances Data frame containing combined real + sim data (from distOverlay2)
#' @param from Label describing the breakpoint source (currently unused)
#' @param lim Half-width of the plotted window in Kb
#' @param n Number of simulated iterations (defaults to those in `distances`)
#' @param position Label used in the plot titles ('centre' or 'edge')
#' @param histo Plot histograms instead of density lines
#' @param binWidth Histogram bin width in bp (defaults to a tenth of the window)
#' @import dplyr ggplot2 scales
#' @importFrom cowplot plot_grid
#' @importFrom colorspace rainbow_hcl
#' @keywords distance
#' @export
plotdistanceOverlay2 <- function(..., distances, from='bps', lim=5, n, position='centre', histo=FALSE, binWidth){
  # Build plots against a null device so nothing is drawn prematurely
  grDevices::pdf(NULL)
  threshold <- lim*1e3
  scale <- "(Kb)"
  if(missing(n)){
    n <- length(levels(distances$iteration))
  }
  # Scalar condition: use && (and <- rather than = for assignment)
  if(histo && missing(binWidth)) binWidth <- threshold/10
  lims <- c(-threshold, threshold)
  brks <- c(-threshold, 0, threshold)
  labs <- as.character(brks/1e3)
  expnd <- c(0, 0)
  # Keep one copy of the real data (relabelled iteration 0) plus all sim
  # iterations, restricted to the plotting window
  new <- distances %>%
    dplyr::filter(!(Source == "Real" & as.character(as.numeric(iteration)) > 1)) %>%
    dplyr::mutate(iteration = as.factor(ifelse(Source=='Real', 0, iteration))) %>%
    dplyr::filter(abs(min_dist)<=threshold) %>%
    droplevels()
  real_fill <- '#3D9DEB'
  iterFill <- colorspace::rainbow_hcl(n)
  colours <- c(real_fill, iterFill)
  plts <- list()
  for (i in seq_along(levels(new$feature))){
    # One panel per feature (renamed from `distances` to avoid shadowing)
    featDist <- new %>%
      dplyr::filter(feature == levels(new$feature)[i])
    p <- ggplot(featDist)
    if(histo) {
      # Sim iterations drawn faintly underneath, real data on top
      p <- p + geom_histogram(data=featDist[featDist$Source=="Sim",], aes(min_dist, y=(..count..), fill = Source, group = iteration), alpha = 0.3, binwidth = binWidth, position="identity")
      p <- p + geom_histogram(data=featDist[featDist$Source=="Real",], aes(min_dist, y=..count.., fill = Source, group = iteration), alpha = 0.7, binwidth = binWidth, position="identity")
      p <- p + scale_fill_manual(values=colours)
      p <- p + scale_y_continuous(paste("Count per", binWidth, "bp bins"))
    } else {
      p <- p + stat_density(data=featDist[featDist$Source=="Sim",], aes(x=min_dist, y=..count.., group = interaction(iteration, Source), colour = iteration), alpha = 0.7, size=0.5, position="identity", geom="line")
      p <- p + stat_density(data=featDist[featDist$Source=="Real",], aes(x=min_dist, y=..count.., group = interaction(iteration, Source), colour = iteration), size=2, position="identity", geom="line")
      # Rug: real breakpoints along the bottom, first sim iteration on top
      sim_rep1 <- featDist %>%
        dplyr::filter(Source == "Sim",
                      iteration == 1) %>%
        droplevels()
      p <- p + geom_rug(data=featDist[featDist$Source=="Real",], aes(min_dist, colour = iteration), sides = "b")
      p <- p + geom_rug(data=sim_rep1, aes(min_dist, colour = iteration), sides = "t")
      p <- p + scale_color_manual(values=colours)
    }
    p <- p + scale_x_continuous(
      limits = lims,
      breaks = brks,
      expand = expnd,
      labels = labs
    )
    p <- p +
      theme(
        legend.position = "none",
        panel.background = element_blank(),
        plot.background = element_rect(fill = "transparent", colour = NA),
        axis.line.x = element_line(color = "black", size = 0.5),
        axis.text.x = element_text(size = 12),
        axis.line.y = element_line(color = "black", size = 0.5),
        plot.title = element_text(size=22, hjust = 0.5)
      )
    p <- p + labs(title = paste(featDist$feature[1], "\n", position))
    plts[[i]] <- p
  }
  cat("Plotting", length(levels(new$feature)), "plots", "\n")
  grDevices::dev.off()
  cowplot::plot_grid(plotlist=plts)
}
# simSig2
#'
#' Calculate significance statistics comparing real breakpoint-feature
#' distances against simulated distances, per feature and iteration.
#' @param r Data frame of real distances (Source == "Real")
#' @param s Data frame of simulated distances (Source == "Sim")
#' @param test Unused; retained for backward compatibility
#' @param max_dist Currently unused - the distance filter that used it was
#'   removed because it truncated the data and broke the stats loop
#' @return A list: (1) combined real+sim data with per-group summary columns,
#'   (2) a per-feature/iteration significance table sorted by Levene's p-value
#' @keywords simsig
#' @import dplyr ggplot2 ggpubr
#' @importFrom plyr round_any
#' @importFrom PerformanceAnalytics kurtosis
#' @export
simSig2 <- function(r, s, test=NA, max_dist=5000){
  cat("Calculating descriptive statistics\n")
  # Attach per-feature/iteration summary statistics to every row. The
  # grouping is deliberately left on the returned data.
  arrange_data <- function(x){
    x %>%
      dplyr::group_by(feature, iteration) %>%
      dplyr::mutate(count = n(),
                    median = median(min_dist),
                    mean = mean(min_dist),
                    trimmed_mean = mean(min_dist, trim=0.35),
                    trimmed_median = median(min_dist, trim=0.35),
                    sd = sd(min_dist))
  }
  simulated <- arrange_data(s)
  real <- arrange_data(r)
  combined <- suppressWarnings(dplyr::full_join(real, simulated))
  combined$Source <- as.factor(combined$Source)
  simbyFeat <- list()
  for (f in levels(combined$feature)){
    pVals <- list()
    c <- dplyr::filter(combined, feature==f)
    for(i in levels(c$iteration)){
      df <- dplyr::filter(c, iteration==i)
      rl <- dplyr::filter(df, Source == "Real")
      sm <- dplyr::filter(df, Source == "Sim")
      # ks.test can fail on degenerate input; record NA rather than crashing.
      # (Previously the error handler returned atomic NA, and the subsequent
      # `result1$p.value` itself errored: `$` is invalid on atomic vectors.)
      result1 <- tryCatch(suppressWarnings(ks.test(rl$min_dist, sm$min_dist)), error=function(err) NULL)
      ksPval <- if(is.null(result1)) NA else round(result1$p.value, 4)
      # Levene's test for equality of spread (robust, median-centred)
      result2 <- car::leveneTest(df$min_dist, df$Source, center='median')
      lPval <- round(result2$`Pr(>F)`[1], 4)
      # Descriptive statistics reported in Kb
      rmed <- round(median(rl$min_dist)/1e3, 2)
      smed <- round(median(sm$min_dist)/1e3, 2)
      rtrim <- rl$trimmed_median[1]/1e3
      strim <- sm$trimmed_median[1]/1e3
      rsd <- round(sd(rl$min_dist)/1e3, 2)
      ssd <- round(sd(sm$min_dist)/1e3, 2)
      rKurtosis <- round(PerformanceAnalytics::kurtosis(rl$min_dist), 2)
      sKurtosis <- round(PerformanceAnalytics::kurtosis(sm$min_dist), 2)
      rSkew <- round(PerformanceAnalytics::skewness(rl$min_dist), 2)
      sSkew <- round(PerformanceAnalytics::skewness(sm$min_dist), 2)
      # Significance stars from Levene's p-value
      sig <- ifelse(lPval <= 0.001, "***",
             ifelse(lPval <= 0.01, "**",
             ifelse(lPval <= 0.05, "*", "")))
      vals <- data.frame(iteration = i,
                         feature = f,
                         KS = ksPval,
                         Levenes = lPval,
                         real_median = rmed,
                         sim_median = smed,
                         real_trim_med = rtrim,
                         sim_trim_med = strim,
                         real_sd = rsd,
                         sim_sd = ssd,
                         real_kurtosis = rKurtosis,
                         sim_kurtosis = sKurtosis,
                         real_skew = rSkew,
                         sim_skew = sSkew,
                         sig = sig)
      pVals[[i]] <- vals
    }
    pVals_df <- do.call(rbind, pVals)
    simbyFeat[[f]] <- pVals_df
  }
  combined_sig_vals <- do.call(rbind, simbyFeat)
  rownames(combined_sig_vals) <- NULL
  combined_sig_vals <- combined_sig_vals %>%
    arrange(Levenes, KS)
  return(list(combined, combined_sig_vals))
}
# Ensure a third (end) column exists: bed-style frames with only two columns
# get an end coordinate two bases downstream of the start column.
add_col <- function(x){
  has_end <- !is.null(x$V3)
  if (!has_end) {
    x$V3 <- x[, 2] + 2
  }
  x
}
#' writeBed
#' Write regions to a tab-separated, header-less bed file, dropping rows
#' whose second column (start) is not strictly less than the third (end).
#' @param df Data frame whose first three columns are chrom, start, end
#'   (a missing third column is filled in by add_col())
#' @param outDir Output directory; defaults to the working directory
#' @param name Output file name
#' @param svBreaks Unused; retained for backward compatibility
writeBed <- function(df, outDir=NULL, name='regions.bed', svBreaks=FALSE){
  # Also handle an explicitly supplied NULL, not just a missing argument
  if(missing(outDir) || is.null(outDir)){
    outDir <- getwd()
    cat("Writing to", outDir, "\n")
  }
  df <- add_col(df)
  # Keep only well-formed intervals. Extract the columns with [[ ]]:
  # as.numeric() on a one-column data.frame (the old df[2]) is an error.
  valid <- as.numeric(df[[2]]) < as.numeric(df[[3]])
  df <- droplevels(df[which(valid), , drop = FALSE])
  cat(paste(outDir, name, sep='/'), "\n")
  write.table(df, file = paste(outDir, name, sep='/'), row.names=F, col.names=F, sep="\t", quote = FALSE)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.